arch/riscv/lib/memset.S:

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */

#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memset(void *, int, size_t) */
SYM_FUNC_START(__memset)
	move t0, a0	/* Preserve return value */

	/* Defer to byte-oriented fill for small sizes */
	sltiu a3, a2, 16
	bnez a3, 4f

	/*
	 * Round to nearest XLEN-aligned address
	 * greater than or equal to start address
	 */
	addi a3, t0, SZREG-1
	andi a3, a3, ~(SZREG-1)
	beq a3, t0, 2f		/* Skip if already aligned */
	/* Handle initial misalignment */
	sub a4, a3, t0
1:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 1b
	sub a2, a2, a4		/* Update count */

2: /* Duff's device with 32 XLEN stores per iteration */
	/* Broadcast value into all bytes */
	andi a1, a1, 0xff
	slli a3, a1, 8
	or a1, a3, a1
	slli a3, a1, 16
	or a1, a3, a1
#ifdef CONFIG_64BIT
	slli a3, a1, 32
	or a1, a3, a1
#endif

	/* Calculate end address */
	andi a4, a2, ~(SZREG-1)
	add a3, t0, a4

	andi a4, a4, 31*SZREG	/* Calculate remainder */
	beqz a4, 3f		/* Shortcut if no remainder */
	neg a4, a4
	addi a4, a4, 32*SZREG	/* Calculate initial offset */

	/* Adjust start address with offset */
	sub t0, t0, a4

	/* Jump into loop body */
	/* Assumes 32-bit instruction lengths */
	la a5, 3f
#ifdef CONFIG_64BIT
	srli a4, a4, 1
#endif
	add a5, a5, a4
	jr a5
3:
	REG_S a1, 0(t0)
	REG_S a1, SZREG(t0)
	REG_S a1, 2*SZREG(t0)
	REG_S a1, 3*SZREG(t0)
	REG_S a1, 4*SZREG(t0)
	REG_S a1, 5*SZREG(t0)
	REG_S a1, 6*SZREG(t0)
	REG_S a1, 7*SZREG(t0)
	REG_S a1, 8*SZREG(t0)
	REG_S a1, 9*SZREG(t0)
	REG_S a1, 10*SZREG(t0)
	REG_S a1, 11*SZREG(t0)
	REG_S a1, 12*SZREG(t0)
	REG_S a1, 13*SZREG(t0)
	REG_S a1, 14*SZREG(t0)
	REG_S a1, 15*SZREG(t0)
	REG_S a1, 16*SZREG(t0)
	REG_S a1, 17*SZREG(t0)
	REG_S a1, 18*SZREG(t0)
	REG_S a1, 19*SZREG(t0)
	REG_S a1, 20*SZREG(t0)
	REG_S a1, 21*SZREG(t0)
	REG_S a1, 22*SZREG(t0)
	REG_S a1, 23*SZREG(t0)
	REG_S a1, 24*SZREG(t0)
	REG_S a1, 25*SZREG(t0)
	REG_S a1, 26*SZREG(t0)
	REG_S a1, 27*SZREG(t0)
	REG_S a1, 28*SZREG(t0)
	REG_S a1, 29*SZREG(t0)
	REG_S a1, 30*SZREG(t0)
	REG_S a1, 31*SZREG(t0)
	addi t0, t0, 32*SZREG
	bltu t0, a3, 3b
	andi a2, a2, SZREG-1	/* Update count */

4:
	/* Handle trailing misalignment */
	beqz a2, 6f
	add a3, t0, a2
5:
	sb a1, 0(t0)
	addi t0, t0, 1
	bltu t0, a3, 5b
6:
	ret
SYM_FUNC_END(__memset)
SYM_FUNC_ALIAS_WEAK(memset, __memset)
SYM_FUNC_ALIAS(__pi_memset, __memset)
SYM_FUNC_ALIAS(__pi___memset, __memset)

arch/alpha/lib/memset.S:

/*
 * linux/arch/alpha/lib/memset.S
 *
 * This is an efficient (and small) implementation of the C library "memset()"
 * function for the alpha.
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * This routine is "moral-ware": you are free to use it any way you wish, and
 * the only obligation I put on you is a moral one: if you make any improvements
 * to the routine, please send me your improvements for me to use similarly.
 *
 * The scheduling comments are according to the EV5 documentation (and done by
 * hand, so they might well be incorrect, please do tell me about it..)
 */

	.set noat
	.set noreorder
	.text
	.globl memset
	.globl __memset
	.globl __memsetw
	.globl __constant_c_memset
	.ent __memset
	.align 5
__memset:
	.frame $30,0,$26,0
	.prologue 0

	and $17,255,$1		/* E1 */
	insbl $17,1,$17		/* .. E0 */
	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	sll $17,16,$1		/* E1 (p-c latency, next cycle) */

	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	sll $17,32,$1		/* E1 (p-c latency, next cycle) */
	bis $17,$1,$17		/* E0 (p-c latency, next cycle) */
	ldq_u $31,0($30)	/* .. E1 */

	.align 5
__constant_c_memset:
	addq $18,$16,$6		/* E0 */
	bis $16,$16,$0		/* .. E1 */
	xor $16,$6,$1		/* E0 */
	ble $18,end		/* .. E1 */

	bic $1,7,$1		/* E0 */
	beq $1,within_one_quad	/* .. E1 (note EV5 zero-latency forwarding) */
	and $16,7,$3		/* E0 */
	beq $3,aligned		/* .. E1 (note EV5 zero-latency forwarding) */

	ldq_u $4,0($16)		/* E0 */
	bis $16,$16,$5		/* .. E1 */
	insql $17,$16,$2	/* E0 */
	subq $3,8,$3		/* .. E1 */

	addq $18,$3,$18		/* E0	$18 is new count ($3 is negative) */
	mskql $4,$16,$4		/* .. E1 (and possible load stall) */
	subq $16,$3,$16		/* E0	$16 is new aligned destination */
	bis $2,$4,$1		/* .. E1 */

	bis $31,$31,$31		/* E0 */
	ldq_u $31,0($30)	/* .. E1 */
	stq_u $1,0($5)		/* E0 */
	bis $31,$31,$31		/* .. E1 */

	.align 4
aligned:
	sra $18,3,$3		/* E0 */
	and $18,7,$18		/* .. E1 */
	bis $16,$16,$5		/* E0 */
	beq $3,no_quad		/* .. E1 */

	.align 3
loop:
	stq $17,0($5)		/* E0 */
	subq $3,1,$3		/* .. E1 */
	addq $5,8,$5		/* E0 */
	bne $3,loop		/* .. E1 */

no_quad:
	bis $31,$31,$31		/* E0 */
	beq $18,end		/* .. E1 */
	ldq $7,0($5)		/* E0 */
	mskqh $7,$6,$2		/* .. E1 (and load stall) */

	insqh $17,$6,$4		/* E0 */
	bis $2,$4,$1		/* .. E1 */
	stq $1,0($5)		/* E0 */
	ret $31,($26),1		/* .. E1 */

	.align 3
within_one_quad:
	ldq_u $1,0($16)		/* E0 */
	insql $17,$16,$2	/* E1 */
	mskql $1,$16,$4		/* E0 (after load stall) */
	bis $2,$4,$2		/* E0 */

	mskql $2,$6,$4		/* E0 */
	mskqh $1,$6,$2		/* .. E1 */
	bis $2,$4,$1		/* E0 */
	stq_u $1,0($16)		/* E0 */

end:
	ret $31,($26),1		/* E1 */
	.end __memset

	.align 5
	.ent __memsetw
__memsetw:
	.prologue 0

	inswl $17,0,$1		/* E0 */
	inswl $17,2,$2		/* E0 */
	inswl $17,4,$3		/* E0 */
	or $1,$2,$1		/* .. E1 */
	inswl $17,6,$4		/* E0 */
	or $1,$3,$1		/* .. E1 */
	or $1,$4,$17		/* E0 */
	br __constant_c_memset	/* .. E1 */

	.end __memsetw

memset = __memset
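Both routines share the same overall strategy: broadcast the fill byte across a full machine word, store whole words for the bulk of the range, and treat the unaligned head and tail separately. What follows is a minimal C sketch of that structure, for illustration only. It assumes a 64-bit word; the names broadcast_byte and memset_sketch are hypothetical, plain byte stores stand in for the Alpha's masked ldq_u/insql/mskql read-modify-write sequences, and strict-aliasing and instruction-scheduling concerns are ignored.

#include <stddef.h>
#include <stdint.h>

/* Replicate one byte into all eight lanes of a 64-bit word
 * (RISC-V: the slli/or chain; Alpha: the insbl/sll/bis chain). */
static uint64_t broadcast_byte(unsigned char c)
{
	uint64_t v = c;

	v |= v << 8;	/* 0x00000000000000cc -> 0x000000000000cccc */
	v |= v << 16;	/*                    -> 0x00000000cccccccc */
	v |= v << 32;	/*                    -> 0xcccccccccccccccc */
	return v;
}

/* Head/body/tail split: byte stores until aligned, aligned word
 * stores for the bulk, byte stores for the remainder. */
static void *memset_sketch(void *dst, int c, size_t n)
{
	unsigned char *p = dst;
	uint64_t v = broadcast_byte((unsigned char)c);

	while (n && ((uintptr_t)p & 7)) {	/* unaligned head */
		*p++ = (unsigned char)c;
		n--;
	}
	for (; n >= 8; n -= 8, p += 8)		/* aligned body */
		*(uint64_t *)p = v;
	while (n--)				/* tail */
		*p++ = (unsigned char)c;
	return dst;
}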
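The RISC-V routine additionally enters its 32-way unrolled store loop at a computed offset (la a5, 3f / add a5, a5, a4 / jr a5) so that the leftover words are covered by a first, partial pass. Expressed in C, this is the classic Duff's device. A sketch with eight-way unrolling and a hypothetical helper name; a caller would pair it with the byte broadcast and head/tail handling from the previous sketch:

#include <stddef.h>
#include <stdint.h>

/* Fill nwords 64-bit words at p with v, using a computed entry
 * into an 8-way unrolled store loop (Duff's device). */
static void fill_words(uint64_t *p, uint64_t v, size_t nwords)
{
	size_t n;

	if (nwords == 0)
		return;
	n = (nwords + 7) / 8;	/* total passes; the first may be partial */
	switch (nwords % 8) {	/* jump into the unrolled body */
	case 0: do { *p++ = v;	/* each case falls through to the next */
	case 7:      *p++ = v;
	case 6:      *p++ = v;
	case 5:      *p++ = v;
	case 4:      *p++ = v;
	case 3:      *p++ = v;
	case 2:      *p++ = v;
	case 1:      *p++ = v;
		} while (--n > 0);
	}
}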