// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
				    int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

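	/* Same math as the generic C recovery code in lib/raid6/recov.c:
	 * with px = P ^ P' and qx = qmul[Q ^ Q'] (P'/Q' being the syndromes
	 * regenerated with the failed blocks zeroed), the missing blocks are
	 * db = pbmul[px] ^ qx and da = db ^ px.  Each GF(256) multiply below
	 * is done one nibble at a time via two pshufb table lookups against
	 * the 16-byte low/high halves of the pbmul/qmul tables.
	 */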
	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("psraw  $4,%xmm9");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pand   %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor   %xmm4,%xmm5");
		asm volatile("pxor   %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("psraw  $4,%xmm10");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pand   %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor   %xmm4,%xmm1");
		asm volatile("pxor   %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		asm volatile("pxor   %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("pxor   %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
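		/* 32-bit x86 only has xmm0-xmm7, so handle a single
		 * 16-byte chunk per iteration instead of two.
		 */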
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

		/* 1 = dq ^ q
		 * 0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor   %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor   %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
				    void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

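	/* One data disk and P have failed: the lost data block is
	 * da = qmul[Q ^ Q'] (Q' is the syndrome regenerated with the dead
	 * block zeroed, qmul the table for 1/g^faila), and P is then
	 * rebuilt as P' ^ da, P' having overwritten the p buffer during
	 * syndrome generation.
	 */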
volatile("movdqa %0, %%xmm 250 asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16])); 249 251 250 /* xmm1 = qmul[q[0] ^ dq[0]] * 252 /* xmm1 = qmul[q[0] ^ dq[0]] */ 251 253 252 asm volatile("psraw $4, %xmm4" 254 asm volatile("psraw $4, %xmm4"); 253 asm volatile("pand %xmm7, %xmm 255 asm volatile("pand %xmm7, %xmm8"); 254 asm volatile("pand %xmm7, %xmm 256 asm volatile("pand %xmm7, %xmm4"); 255 asm volatile("pshufb %xmm8, %x 257 asm volatile("pshufb %xmm8, %xmm10"); 256 asm volatile("pshufb %xmm4, %x 258 asm volatile("pshufb %xmm4, %xmm11"); 257 asm volatile("movdqa %0, %%xmm 259 asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); 258 asm volatile("pxor %xmm10, %xm 260 asm volatile("pxor %xmm10, %xmm11"); 259 asm volatile("movdqa %0, %%xmm 261 asm volatile("movdqa %0, %%xmm12" : : "m" (p[16])); 260 262 261 /* xmm11 = qmul[q[16] ^ dq[16] 263 /* xmm11 = qmul[q[16] ^ dq[16]] */ 262 264 263 asm volatile("pxor %xmm1, %xmm 265 asm volatile("pxor %xmm1, %xmm2"); 264 266 265 /* xmm2 = p[0] ^ qmul[q[0] ^ d 267 /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */ 266 268 267 asm volatile("pxor %xmm11, %xm 269 asm volatile("pxor %xmm11, %xmm12"); 268 270 269 /* xmm12 = p[16] ^ qmul[q[16] 271 /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */ 270 272 271 asm volatile("movdqa %%xmm1, % 273 asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); 272 asm volatile("movdqa %%xmm11, 274 asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16])); 273 275 274 asm volatile("movdqa %%xmm2, % 276 asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); 275 asm volatile("movdqa %%xmm12, 277 asm volatile("movdqa %%xmm12, %0" : "=m" (p[16])); 276 278 277 bytes -= 32; 279 bytes -= 32; 278 p += 32; 280 p += 32; 279 q += 32; 281 q += 32; 280 dq += 32; 282 dq += 32; 281 283 282 #else 284 #else 283 asm volatile("movdqa %0, %%xmm 285 asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0])); 284 asm volatile("movdqa %0, %%xmm 286 asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0])); 285 asm volatile("pxor %0, %%xmm3" 287 asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); 286 asm volatile("movdqa %0, %%xmm 288 asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16])); 287 289 288 /* xmm3 = *q ^ *dq */ 290 /* xmm3 = *q ^ *dq */ 289 291 290 asm volatile("movdqa %xmm3, %x 292 asm volatile("movdqa %xmm3, %xmm6"); 291 asm volatile("movdqa %0, %%xmm 293 asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); 292 asm volatile("psraw $4, %xmm3" 294 asm volatile("psraw $4, %xmm3"); 293 asm volatile("pand %xmm7, %xmm 295 asm volatile("pand %xmm7, %xmm6"); 294 asm volatile("pand %xmm7, %xmm 296 asm volatile("pand %xmm7, %xmm3"); 295 asm volatile("pshufb %xmm6, %x 297 asm volatile("pshufb %xmm6, %xmm0"); 296 asm volatile("pshufb %xmm3, %x 298 asm volatile("pshufb %xmm3, %xmm1"); 297 asm volatile("pxor %xmm0, %xmm 299 asm volatile("pxor %xmm0, %xmm1"); 298 300 299 /* xmm1 = qmul[*q ^ *dq */ 301 /* xmm1 = qmul[*q ^ *dq */ 300 302 301 asm volatile("pxor %xmm1, %xmm 303 asm volatile("pxor %xmm1, %xmm2"); 302 304 303 /* xmm2 = *p ^ qmul[*q ^ *dq] 305 /* xmm2 = *p ^ qmul[*q ^ *dq] */ 304 306 305 asm volatile("movdqa %%xmm1, % 307 asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); 306 asm volatile("movdqa %%xmm2, % 308 asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); 307 309 308 bytes -= 16; 310 bytes -= 16; 309 p += 16; 311 p += 16; 310 q += 16; 312 q += 16; 311 dq += 16; 313 dq += 16; 312 #endif 314 #endif 313 } 315 } 314 316 315 kernel_fpu_end(); 317 kernel_fpu_end(); 316 } 318 } 317 319 318 const struct raid6_recov_calls raid6_recov_sss 320 const struct raid6_recov_calls raid6_recov_ssse3 = { 319 .data2 = 
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};