/* SPDX-License-Identifier: GPL-2.0 */
/*
 * blockops.S: Common block zero optimized routines.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page.h>

	/* Zero out 64 bytes of memory at (buf + offset).
	 * Assumes %g1 contains zero.
	 */
#define BLAST_BLOCK(buf, offset) \
	std	%g0, [buf + offset + 0x38]; \
	std	%g0, [buf + offset + 0x30]; \
	std	%g0, [buf + offset + 0x28]; \
	std	%g0, [buf + offset + 0x20]; \
	std	%g0, [buf + offset + 0x18]; \
	std	%g0, [buf + offset + 0x10]; \
	std	%g0, [buf + offset + 0x08]; \
	std	%g0, [buf + offset + 0x00];

	/* Copy 32 bytes of memory at (src + offset) to
	 * (dst + offset).
	 */
#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[src + offset + 0x18], t0; \
	ldd	[src + offset + 0x10], t2; \
	ldd	[src + offset + 0x08], t4; \
	ldd	[src + offset + 0x00], t6; \
	std	t0, [dst + offset + 0x18]; \
	std	t2, [dst + offset + 0x10]; \
	std	t4, [dst + offset + 0x08]; \
	std	t6, [dst + offset + 0x00];

	/* Profiling evidence indicates that memset() is
	 * commonly called for blocks of size PAGE_SIZE,
	 * and (2 * PAGE_SIZE) (for kernel stacks)
	 * and with a second arg of zero.  We assume in
	 * all of these cases that the buffer is aligned
	 * on at least an 8 byte boundary.
	 *
	 * Therefore we special case them to make them
	 * as fast as possible.
	 */

	.text
ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
 * arch/sparc/mm/hypersparc.S */
	/* %o0 = buf */
	or	%g0, %g0, %g1
	or	%o0, %g0, %o1
	or	%g0, (PAGE_SIZE >> 8), %g2
1:
	BLAST_BLOCK(%o0, 0x00)
	BLAST_BLOCK(%o0, 0x40)
	BLAST_BLOCK(%o0, 0x80)
	BLAST_BLOCK(%o0, 0xc0)
	subcc	%g2, 1, %g2
	bne	1b
	 add	%o0, 0x100, %o0

	retl
	 nop
ENDPROC(bzero_1page)
EXPORT_SYMBOL(bzero_1page)

ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
 * arch/sparc/mm/hypersparc.S */
	/* %o0 = dst, %o1 = src */
	or	%g0, (PAGE_SIZE >> 8), %g1
1:
	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
	subcc	%g1, 1, %g1
	add	%o0, 0x100, %o0
	bne	1b
	 add	%o1, 0x100, %o1

	retl
	 nop
ENDPROC(__copy_1page)
EXPORT_SYMBOL(__copy_1page)
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.