// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <arm_neon.h>
#include "neon.h"

#ifdef CONFIG_ARM
/*
 * AArch32 does not provide this intrinsic natively because it does not
 * implement the underlying instruction. AArch32 only provides a 64-bit
 * wide vtbl.8 instruction, so use that instead.
 */
static uint8x16_t vqtbl1q_u8(uint8x16_t a, uint8x16_t b)
{
	union {
		uint8x16_t	val;
		uint8x8x2_t	pair;
	} __a = { a };

	return vcombine_u8(vtbl2_u8(__a.pair, vget_low_u8(b)),
			   vtbl2_u8(__a.pair, vget_high_u8(b)));
}
#endif

void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
			      uint8_t *dq, const uint8_t *pbmul,
			      const uint8_t *qmul)
{
	uint8x16_t pm0 = vld1q_u8(pbmul);
	uint8x16_t pm1 = vld1q_u8(pbmul + 16);
	uint8x16_t qm0 = vld1q_u8(qmul);
	uint8x16_t qm1 = vld1q_u8(qmul + 16);
	uint8x16_t x0f = vdupq_n_u8(0x0f);

	/*
	 * while ( bytes-- ) {
	 *	uint8_t px, qx, db;
	 *
	 *	px    = *p ^ *dp;
	 *	qx    = qmul[*q ^ *dq];
	 *	*dq++ = db = pbmul[px] ^ qx;
	 *	*dp++ = db ^ px;
	 *	p++; q++;
	 * }
	 */

	while (bytes) {
		uint8x16_t vx, vy, px, qx, db;

		px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
		vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));

		vy = vshrq_n_u8(vx, 4);
		vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
		vy = vqtbl1q_u8(qm1, vy);
		qx = veorq_u8(vx, vy);

		vy = vshrq_n_u8(px, 4);
		vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
		vy = vqtbl1q_u8(pm1, vy);
		vx = veorq_u8(vx, vy);
		db = veorq_u8(vx, qx);

		vst1q_u8(dq, db);
		vst1q_u8(dp, veorq_u8(db, px));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
	}
}
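/*
 * Illustrative sketch, not part of the original file: pbmul and qmul
 * point at 32-byte tables whose first 16 entries hold the GF(2^8)
 * products of the low nibble and whose next 16 entries hold the
 * products of the high nibble. Because multiplication in GF(2^8)
 * distributes over XOR, a full 8-bit multiply reduces to two 4-bit
 * table lookups, which is what the vqtbl1q_u8() pairs above compute
 * sixteen bytes at a time. A scalar equivalent (gf_mul_split() is a
 * hypothetical name) might look like this:
 */
#ifdef RAID6_NEON_EXAMPLE	/* hypothetical guard; never built */
static uint8_t gf_mul_split(const uint8_t *tbl, uint8_t x)
{
	/* tbl[0..15]: low-nibble products; tbl[16..31]: high-nibble products */
	return tbl[x & 0x0f] ^ tbl[16 + (x >> 4)];
}
#endif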
void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
			      const uint8_t *qmul)
{
	uint8x16_t qm0 = vld1q_u8(qmul);
	uint8x16_t qm1 = vld1q_u8(qmul + 16);
	uint8x16_t x0f = vdupq_n_u8(0x0f);

	/*
	 * while (bytes--) {
	 *	*p++ ^= *dq = qmul[*q ^ *dq];
	 *	q++; dq++;
	 * }
	 */

	while (bytes) {
		uint8x16_t vx, vy;

		vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));

		vy = vshrq_n_u8(vx, 4);
		vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
		vy = vqtbl1q_u8(qm1, vy);
		vx = veorq_u8(vx, vy);
		vy = veorq_u8(vx, vld1q_u8(p));

		vst1q_u8(dq, vx);
		vst1q_u8(p, vy);

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
	}
}
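/*
 * Usage sketch (an assumption based on how the out-of-line callers in
 * lib/raid6/recov_neon.c wrap these helpers; they are not shown here):
 * bytes must be a multiple of 16, since each loop iteration consumes
 * one 128-bit NEON vector per stripe, and the calls must sit inside a
 * kernel NEON-enabled section, e.g.:
 *
 *	kernel_neon_begin();
 *	__raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul);
 *	kernel_neon_end();
 */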