/* SPDX-License-Identifier: GPL-2.0 */
/*
 * blockops.S: Common block zero optimized routines.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page.h>

	/* Zero out 64 bytes of memory at (buf + offset).
	 * Assumes %g1 contains zero.
	 *
	 * NOTE(review): on sparc32, "std %g0" stores the 64-bit
	 * %g0/%g1 even/odd register pair, which is why %g1 must be
	 * zeroed by the caller — %g0 alone only covers the low word.
	 * Eight 8-byte stores, issued high-offset first.
	 */
#define BLAST_BLOCK(buf, offset) \
	std	%g0, [buf + offset + 0x38]; \
	std	%g0, [buf + offset + 0x30]; \
	std	%g0, [buf + offset + 0x28]; \
	std	%g0, [buf + offset + 0x20]; \
	std	%g0, [buf + offset + 0x18]; \
	std	%g0, [buf + offset + 0x10]; \
	std	%g0, [buf + offset + 0x08]; \
	std	%g0, [buf + offset + 0x00];

	/* Copy 32 bytes of memory at (src + offset) to
	 * (dst + offset).
	 *
	 * t0..t7 are caller-supplied scratch registers; each ldd/std
	 * pair moves 8 bytes through an even/odd register pair
	 * (t0:t1, t2:t3, t4:t5, t6:t7). All four loads are issued
	 * before any store, letting loads and stores group on
	 * hardware that can overlap them.
	 */
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[src + offset + 0x18], t0; \
	ldd	[src + offset + 0x10], t2; \
	ldd	[src + offset + 0x08], t4; \
	ldd	[src + offset + 0x00], t6; \
	std	t0, [dst + offset + 0x18]; \
	std	t2, [dst + offset + 0x10]; \
	std	t4, [dst + offset + 0x08]; \
	std	t6, [dst + offset + 0x00];

	/* Profiling evidence indicates that memset() is
	 * commonly called for blocks of size PAGE_SIZE,
	 * and (2 * PAGE_SIZE) (for kernel stacks)
	 * and with a second arg of zero.  We assume in
	 * all of these cases that the buffer is aligned
	 * on at least an 8 byte boundary.
	 */
	/* Therefore we special case them to make them
	 * as fast as possible.
	 */

	.text
ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
 * arch/sparc/mm/hypersparc.S */
	/* %o0 = buf */
	or	%g0, %g0, %g1			! %g1 = 0: BLAST_BLOCK's "std %g0" stores the %g0/%g1 pair
	or	%o0, %g0, %o1			! keep a copy of buf in %o1 — presumably relied on by callers/hypersparc patching; confirm before changing
	or	%g0, (PAGE_SIZE >> 8), %g2	! iteration count: each pass below clears 0x100 bytes
1:
	BLAST_BLOCK(%o0, 0x00)			! 4 x 64-byte zeroing blocks = 256 bytes per loop
	BLAST_BLOCK(%o0, 0x40)
	BLAST_BLOCK(%o0, 0x80)
	BLAST_BLOCK(%o0, 0xc0)
	subcc	%g2, 1, %g2			! decrement and set condition codes
	bne	1b
	 add	%o0, 0x100, %o0			! branch delay slot: advance buf (executes either way)
	retl
	 nop					! delay slot of retl
ENDPROC(bzero_1page)
EXPORT_SYMBOL(bzero_1page)

ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
 * arch/sparc/mm/hypersparc.S */
	/* %o0 = dst, %o1 = src */
	or	%g0, (PAGE_SIZE >> 8), %g1	! iteration count: each pass below copies 0x100 bytes
1:
	/* 8 x 32-byte copy blocks = 256 bytes per loop, using
	 * %o2-%o5/%g2-%g5 as the eight scratch registers. */
	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	subcc	%g1, 1, %g1			! decrement and set condition codes
	add	%o0, 0x100, %o0			! advance dst
	bne	1b
	 add	%o1, 0x100, %o1			! branch delay slot: advance src (executes either way)
	retl
	 nop					! delay slot of retl
ENDPROC(__copy_1page)
EXPORT_SYMBOL(__copy_1page)
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.