1 // SPDX-License-Identifier: GPL-2.0-only << 2 /* 1 /* 3 * Copyright (C) 2012 Intel Corporation 2 * Copyright (C) 2012 Intel Corporation >> 3 * >> 4 * This program is free software; you can redistribute it and/or >> 5 * modify it under the terms of the GNU General Public License >> 6 * as published by the Free Software Foundation; version 2 >> 7 * of the License. 4 */ 8 */ 5 9 6 #include <linux/raid/pq.h> 10 #include <linux/raid/pq.h> 7 #include "x86.h" 11 #include "x86.h" 8 12 9 static int raid6_has_ssse3(void) 13 static int raid6_has_ssse3(void) 10 { 14 { 11 return boot_cpu_has(X86_FEATURE_XMM) & 15 return boot_cpu_has(X86_FEATURE_XMM) && 12 boot_cpu_has(X86_FEATURE_XMM2) 16 boot_cpu_has(X86_FEATURE_XMM2) && 13 boot_cpu_has(X86_FEATURE_SSSE3 17 boot_cpu_has(X86_FEATURE_SSSE3); 14 } 18 } 15 19 16 static void raid6_2data_recov_ssse3(int disks, 20 static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, 17 int failb, void **ptrs) 21 int failb, void **ptrs) 18 { 22 { 19 u8 *p, *q, *dp, *dq; 23 u8 *p, *q, *dp, *dq; 20 const u8 *pbmul; /* P multiplie 24 const u8 *pbmul; /* P multiplier table for B data */ 21 const u8 *qmul; /* Q multiplie 25 const u8 *qmul; /* Q multiplier table (for both) */ 22 static const u8 __aligned(16) x0f[16] 26 static const u8 __aligned(16) x0f[16] = { 23 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 27 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 24 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 28 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f}; 25 29 26 p = (u8 *)ptrs[disks-2]; 30 p = (u8 *)ptrs[disks-2]; 27 q = (u8 *)ptrs[disks-1]; 31 q = (u8 *)ptrs[disks-1]; 28 32 29 /* Compute syndrome with zero for the 33 /* Compute syndrome with zero for the missing data pages 30 Use the dead data pages as temporar 34 Use the dead data pages as temporary storage for 31 delta p and delta q */ 35 delta p and delta q */ 32 dp = (u8 *)ptrs[faila]; 36 dp = (u8 *)ptrs[faila]; 33 ptrs[faila] = (void *)raid6_empty_zero 37 ptrs[faila] = (void *)raid6_empty_zero_page; 34 ptrs[disks-2] = dp; 38 ptrs[disks-2] = dp; 35 dq = (u8 *)ptrs[failb]; 39 dq = (u8 *)ptrs[failb]; 36 ptrs[failb] = (void *)raid6_empty_zero 40 ptrs[failb] = (void *)raid6_empty_zero_page; 37 ptrs[disks-1] = dq; 41 ptrs[disks-1] = dq; 38 42 39 raid6_call.gen_syndrome(disks, bytes, 43 raid6_call.gen_syndrome(disks, bytes, ptrs); 40 44 41 /* Restore pointer table */ 45 /* Restore pointer table */ 42 ptrs[faila] = dp; 46 ptrs[faila] = dp; 43 ptrs[failb] = dq; 47 ptrs[failb] = dq; 44 ptrs[disks-2] = p; 48 ptrs[disks-2] = p; 45 ptrs[disks-1] = q; 49 ptrs[disks-1] = q; 46 50 47 /* Now, pick the proper data tables */ 51 /* Now, pick the proper data tables */ 48 pbmul = raid6_vgfmul[raid6_gfexi[failb 52 pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]]; 49 qmul = raid6_vgfmul[raid6_gfinv[raid6 53 qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ 50 raid6_gfexp[failb]]]; 54 raid6_gfexp[failb]]]; 51 55 52 kernel_fpu_begin(); 56 kernel_fpu_begin(); 53 57 54 asm volatile("movdqa %0,%%xmm7" : : "m 58 asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0])); 55 59 56 #ifdef CONFIG_X86_64 60 #ifdef CONFIG_X86_64 57 asm volatile("movdqa %0,%%xmm6" : : "m 61 asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0])); 58 asm volatile("movdqa %0,%%xmm14" : : " 62 asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0])); 59 asm volatile("movdqa %0,%%xmm15" : : " 63 asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16])); 60 #endif 64 #endif 61 65 62 /* Now do it... */ 66 /* Now do it... */ 63 while (bytes) { 67 while (bytes) { 64 #ifdef CONFIG_X86_64 68 #ifdef CONFIG_X86_64 65 /* xmm6, xmm14, xmm15 */ 69 /* xmm6, xmm14, xmm15 */ 66 70 67 asm volatile("movdqa %0,%%xmm1 71 asm volatile("movdqa %0,%%xmm1" : : "m" (q[0])); 68 asm volatile("movdqa %0,%%xmm9 72 asm volatile("movdqa %0,%%xmm9" : : "m" (q[16])); 69 asm volatile("movdqa %0,%%xmm0 73 asm volatile("movdqa %0,%%xmm0" : : "m" (p[0])); 70 asm volatile("movdqa %0,%%xmm8 74 asm volatile("movdqa %0,%%xmm8" : : "m" (p[16])); 71 asm volatile("pxor %0,%%xmm1 75 asm volatile("pxor %0,%%xmm1" : : "m" (dq[0])); 72 asm volatile("pxor %0,%%xmm9 76 asm volatile("pxor %0,%%xmm9" : : "m" (dq[16])); 73 asm volatile("pxor %0,%%xmm0 77 asm volatile("pxor %0,%%xmm0" : : "m" (dp[0])); 74 asm volatile("pxor %0,%%xmm8 78 asm volatile("pxor %0,%%xmm8" : : "m" (dp[16])); 75 79 76 /* xmm0/8 = px */ 80 /* xmm0/8 = px */ 77 81 78 asm volatile("movdqa %xmm6,%xm 82 asm volatile("movdqa %xmm6,%xmm4"); 79 asm volatile("movdqa %0,%%xmm5 83 asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16])); 80 asm volatile("movdqa %xmm6,%xm 84 asm volatile("movdqa %xmm6,%xmm12"); 81 asm volatile("movdqa %xmm5,%xm 85 asm volatile("movdqa %xmm5,%xmm13"); 82 asm volatile("movdqa %xmm1,%xm 86 asm volatile("movdqa %xmm1,%xmm3"); 83 asm volatile("movdqa %xmm9,%xm 87 asm volatile("movdqa %xmm9,%xmm11"); 84 asm volatile("movdqa %xmm0,%xm 88 asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */ 85 asm volatile("movdqa %xmm8,%xm 89 asm volatile("movdqa %xmm8,%xmm10"); 86 asm volatile("psraw $4,%xmm1" 90 asm volatile("psraw $4,%xmm1"); 87 asm volatile("psraw $4,%xmm9" 91 asm volatile("psraw $4,%xmm9"); 88 asm volatile("pand %xmm7,%xm 92 asm volatile("pand %xmm7,%xmm3"); 89 asm volatile("pand %xmm7,%xm 93 asm volatile("pand %xmm7,%xmm11"); 90 asm volatile("pand %xmm7,%xm 94 asm volatile("pand %xmm7,%xmm1"); 91 asm volatile("pand %xmm7,%xm 95 asm volatile("pand %xmm7,%xmm9"); 92 asm volatile("pshufb %xmm3,%xm 96 asm volatile("pshufb %xmm3,%xmm4"); 93 asm volatile("pshufb %xmm11,%x 97 asm volatile("pshufb %xmm11,%xmm12"); 94 asm volatile("pshufb %xmm1,%xm 98 asm volatile("pshufb %xmm1,%xmm5"); 95 asm volatile("pshufb %xmm9,%xm 99 asm volatile("pshufb %xmm9,%xmm13"); 96 asm volatile("pxor %xmm4,%xm 100 asm volatile("pxor %xmm4,%xmm5"); 97 asm volatile("pxor %xmm12,%x 101 asm volatile("pxor %xmm12,%xmm13"); 98 102 99 /* xmm5/13 = qx */ 103 /* xmm5/13 = qx */ 100 104 101 asm volatile("movdqa %xmm14,%x 105 asm volatile("movdqa %xmm14,%xmm4"); 102 asm volatile("movdqa %xmm15,%x 106 asm volatile("movdqa %xmm15,%xmm1"); 103 asm volatile("movdqa %xmm14,%x 107 asm volatile("movdqa %xmm14,%xmm12"); 104 asm volatile("movdqa %xmm15,%x 108 asm volatile("movdqa %xmm15,%xmm9"); 105 asm volatile("movdqa %xmm2,%xm 109 asm volatile("movdqa %xmm2,%xmm3"); 106 asm volatile("movdqa %xmm10,%x 110 asm volatile("movdqa %xmm10,%xmm11"); 107 asm volatile("psraw $4,%xmm2" 111 asm volatile("psraw $4,%xmm2"); 108 asm volatile("psraw $4,%xmm10 112 asm volatile("psraw $4,%xmm10"); 109 asm volatile("pand %xmm7,%xm 113 asm volatile("pand %xmm7,%xmm3"); 110 asm volatile("pand %xmm7,%xm 114 asm volatile("pand %xmm7,%xmm11"); 111 asm volatile("pand %xmm7,%xm 115 asm volatile("pand %xmm7,%xmm2"); 112 asm volatile("pand %xmm7,%xm 116 asm volatile("pand %xmm7,%xmm10"); 113 asm volatile("pshufb %xmm3,%xm 117 asm volatile("pshufb %xmm3,%xmm4"); 114 asm volatile("pshufb %xmm11,%x 118 asm volatile("pshufb %xmm11,%xmm12"); 115 asm volatile("pshufb %xmm2,%xm 119 asm volatile("pshufb %xmm2,%xmm1"); 116 asm volatile("pshufb %xmm10,%x 120 asm volatile("pshufb %xmm10,%xmm9"); 117 asm volatile("pxor %xmm4,%xm 121 asm volatile("pxor %xmm4,%xmm1"); 118 asm volatile("pxor %xmm12,%x 122 asm volatile("pxor %xmm12,%xmm9"); 119 123 120 /* xmm1/9 = pbmul[px] */ 124 /* xmm1/9 = pbmul[px] */ 121 asm volatile("pxor %xmm5,%xm 125 asm volatile("pxor %xmm5,%xmm1"); 122 asm volatile("pxor %xmm13,%x 126 asm volatile("pxor %xmm13,%xmm9"); 123 /* xmm1/9 = db = DQ */ 127 /* xmm1/9 = db = DQ */ 124 asm volatile("movdqa %%xmm1,%0 128 asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0])); 125 asm volatile("movdqa %%xmm9,%0 129 asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16])); 126 130 127 asm volatile("pxor %xmm1,%xm 131 asm volatile("pxor %xmm1,%xmm0"); 128 asm volatile("pxor %xmm9,%xm 132 asm volatile("pxor %xmm9,%xmm8"); 129 asm volatile("movdqa %%xmm0,%0 133 asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0])); 130 asm volatile("movdqa %%xmm8,%0 134 asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16])); 131 135 132 bytes -= 32; 136 bytes -= 32; 133 p += 32; 137 p += 32; 134 q += 32; 138 q += 32; 135 dp += 32; 139 dp += 32; 136 dq += 32; 140 dq += 32; 137 #else 141 #else 138 asm volatile("movdqa %0,%%xmm1 142 asm volatile("movdqa %0,%%xmm1" : : "m" (*q)); 139 asm volatile("movdqa %0,%%xmm0 143 asm volatile("movdqa %0,%%xmm0" : : "m" (*p)); 140 asm volatile("pxor %0,%%xmm1 144 asm volatile("pxor %0,%%xmm1" : : "m" (*dq)); 141 asm volatile("pxor %0,%%xmm0 145 asm volatile("pxor %0,%%xmm0" : : "m" (*dp)); 142 146 143 /* 1 = dq ^ q 147 /* 1 = dq ^ q 144 * 0 = dp ^ p 148 * 0 = dp ^ p 145 */ 149 */ 146 asm volatile("movdqa %0,%%xmm4 150 asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0])); 147 asm volatile("movdqa %0,%%xmm5 151 asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16])); 148 152 149 asm volatile("movdqa %xmm1,%xm 153 asm volatile("movdqa %xmm1,%xmm3"); 150 asm volatile("psraw $4,%xmm1" 154 asm volatile("psraw $4,%xmm1"); 151 asm volatile("pand %xmm7,%xm 155 asm volatile("pand %xmm7,%xmm3"); 152 asm volatile("pand %xmm7,%xm 156 asm volatile("pand %xmm7,%xmm1"); 153 asm volatile("pshufb %xmm3,%xm 157 asm volatile("pshufb %xmm3,%xmm4"); 154 asm volatile("pshufb %xmm1,%xm 158 asm volatile("pshufb %xmm1,%xmm5"); 155 asm volatile("pxor %xmm4,%xm 159 asm volatile("pxor %xmm4,%xmm5"); 156 160 157 asm volatile("movdqa %xmm0,%xm 161 asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */ 158 162 159 /* xmm5 = qx */ 163 /* xmm5 = qx */ 160 164 161 asm volatile("movdqa %0,%%xmm4 165 asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0])); 162 asm volatile("movdqa %0,%%xmm1 166 asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16])); 163 asm volatile("movdqa %xmm2,%xm 167 asm volatile("movdqa %xmm2,%xmm3"); 164 asm volatile("psraw $4,%xmm2" 168 asm volatile("psraw $4,%xmm2"); 165 asm volatile("pand %xmm7,%xm 169 asm volatile("pand %xmm7,%xmm3"); 166 asm volatile("pand %xmm7,%xm 170 asm volatile("pand %xmm7,%xmm2"); 167 asm volatile("pshufb %xmm3,%xm 171 asm volatile("pshufb %xmm3,%xmm4"); 168 asm volatile("pshufb %xmm2,%xm 172 asm volatile("pshufb %xmm2,%xmm1"); 169 asm volatile("pxor %xmm4,%xm 173 asm volatile("pxor %xmm4,%xmm1"); 170 174 171 /* xmm1 = pbmul[px] */ 175 /* xmm1 = pbmul[px] */ 172 asm volatile("pxor %xmm5,%xm 176 asm volatile("pxor %xmm5,%xmm1"); 173 /* xmm1 = db = DQ */ 177 /* xmm1 = db = DQ */ 174 asm volatile("movdqa %%xmm1,%0 178 asm volatile("movdqa %%xmm1,%0" : "=m" (*dq)); 175 179 176 asm volatile("pxor %xmm1,%xm 180 asm volatile("pxor %xmm1,%xmm0"); 177 asm volatile("movdqa %%xmm0,%0 181 asm volatile("movdqa %%xmm0,%0" : "=m" (*dp)); 178 182 179 bytes -= 16; 183 bytes -= 16; 180 p += 16; 184 p += 16; 181 q += 16; 185 q += 16; 182 dp += 16; 186 dp += 16; 183 dq += 16; 187 dq += 16; 184 #endif 188 #endif 185 } 189 } 186 190 187 kernel_fpu_end(); 191 kernel_fpu_end(); 188 } 192 } 189 193 190 194 191 static void raid6_datap_recov_ssse3(int disks, 195 static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, 192 void **ptrs) 196 void **ptrs) 193 { 197 { 194 u8 *p, *q, *dq; 198 u8 *p, *q, *dq; 195 const u8 *qmul; /* Q multiplie 199 const u8 *qmul; /* Q multiplier table */ 196 static const u8 __aligned(16) x0f[16] 200 static const u8 __aligned(16) x0f[16] = { 197 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 201 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 198 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 202 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f}; 199 203 200 p = (u8 *)ptrs[disks-2]; 204 p = (u8 *)ptrs[disks-2]; 201 q = (u8 *)ptrs[disks-1]; 205 q = (u8 *)ptrs[disks-1]; 202 206 203 /* Compute syndrome with zero for the 207 /* Compute syndrome with zero for the missing data page 204 Use the dead data page as temporary 208 Use the dead data page as temporary storage for delta q */ 205 dq = (u8 *)ptrs[faila]; 209 dq = (u8 *)ptrs[faila]; 206 ptrs[faila] = (void *)raid6_empty_zero 210 ptrs[faila] = (void *)raid6_empty_zero_page; 207 ptrs[disks-1] = dq; 211 ptrs[disks-1] = dq; 208 212 209 raid6_call.gen_syndrome(disks, bytes, 213 raid6_call.gen_syndrome(disks, bytes, ptrs); 210 214 211 /* Restore pointer table */ 215 /* Restore pointer table */ 212 ptrs[faila] = dq; 216 ptrs[faila] = dq; 213 ptrs[disks-1] = q; 217 ptrs[disks-1] = q; 214 218 215 /* Now, pick the proper data tables */ 219 /* Now, pick the proper data tables */ 216 qmul = raid6_vgfmul[raid6_gfinv[raid6 220 qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]]; 217 221 218 kernel_fpu_begin(); 222 kernel_fpu_begin(); 219 223 220 asm volatile("movdqa %0, %%xmm7" : : " 224 asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0])); 221 225 222 while (bytes) { 226 while (bytes) { 223 #ifdef CONFIG_X86_64 227 #ifdef CONFIG_X86_64 224 asm volatile("movdqa %0, %%xmm 228 asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0])); 225 asm volatile("movdqa %0, %%xmm 229 asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16])); 226 asm volatile("pxor %0, %%xmm3" 230 asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); 227 asm volatile("movdqa %0, %%xmm 231 asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0])); 228 232 229 /* xmm3 = q[0] ^ dq[0] */ 233 /* xmm3 = q[0] ^ dq[0] */ 230 234 231 asm volatile("pxor %0, %%xmm4" 235 asm volatile("pxor %0, %%xmm4" : : "m" (q[16])); 232 asm volatile("movdqa %0, %%xmm 236 asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16])); 233 237 234 /* xmm4 = q[16] ^ dq[16] */ 238 /* xmm4 = q[16] ^ dq[16] */ 235 239 236 asm volatile("movdqa %xmm3, %x 240 asm volatile("movdqa %xmm3, %xmm6"); 237 asm volatile("movdqa %xmm4, %x 241 asm volatile("movdqa %xmm4, %xmm8"); 238 242 239 /* xmm4 = xmm8 = q[16] ^ dq[16 243 /* xmm4 = xmm8 = q[16] ^ dq[16] */ 240 244 241 asm volatile("psraw $4, %xmm3" 245 asm volatile("psraw $4, %xmm3"); 242 asm volatile("pand %xmm7, %xmm 246 asm volatile("pand %xmm7, %xmm6"); 243 asm volatile("pand %xmm7, %xmm 247 asm volatile("pand %xmm7, %xmm3"); 244 asm volatile("pshufb %xmm6, %x 248 asm volatile("pshufb %xmm6, %xmm0"); 245 asm volatile("pshufb %xmm3, %x 249 asm volatile("pshufb %xmm3, %xmm1"); 246 asm volatile("movdqa %0, %%xmm 250 asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0])); 247 asm volatile("pxor %xmm0, %xmm 251 asm volatile("pxor %xmm0, %xmm1"); 248 asm volatile("movdqa %0, %%xmm 252 asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16])); 249 253 250 /* xmm1 = qmul[q[0] ^ dq[0]] * 254 /* xmm1 = qmul[q[0] ^ dq[0]] */ 251 255 252 asm volatile("psraw $4, %xmm4" 256 asm volatile("psraw $4, %xmm4"); 253 asm volatile("pand %xmm7, %xmm 257 asm volatile("pand %xmm7, %xmm8"); 254 asm volatile("pand %xmm7, %xmm 258 asm volatile("pand %xmm7, %xmm4"); 255 asm volatile("pshufb %xmm8, %x 259 asm volatile("pshufb %xmm8, %xmm10"); 256 asm volatile("pshufb %xmm4, %x 260 asm volatile("pshufb %xmm4, %xmm11"); 257 asm volatile("movdqa %0, %%xmm 261 asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); 258 asm volatile("pxor %xmm10, %xm 262 asm volatile("pxor %xmm10, %xmm11"); 259 asm volatile("movdqa %0, %%xmm 263 asm volatile("movdqa %0, %%xmm12" : : "m" (p[16])); 260 264 261 /* xmm11 = qmul[q[16] ^ dq[16] 265 /* xmm11 = qmul[q[16] ^ dq[16]] */ 262 266 263 asm volatile("pxor %xmm1, %xmm 267 asm volatile("pxor %xmm1, %xmm2"); 264 268 265 /* xmm2 = p[0] ^ qmul[q[0] ^ d 269 /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */ 266 270 267 asm volatile("pxor %xmm11, %xm 271 asm volatile("pxor %xmm11, %xmm12"); 268 272 269 /* xmm12 = p[16] ^ qmul[q[16] 273 /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */ 270 274 271 asm volatile("movdqa %%xmm1, % 275 asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); 272 asm volatile("movdqa %%xmm11, 276 asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16])); 273 277 274 asm volatile("movdqa %%xmm2, % 278 asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); 275 asm volatile("movdqa %%xmm12, 279 asm volatile("movdqa %%xmm12, %0" : "=m" (p[16])); 276 280 277 bytes -= 32; 281 bytes -= 32; 278 p += 32; 282 p += 32; 279 q += 32; 283 q += 32; 280 dq += 32; 284 dq += 32; 281 285 282 #else 286 #else 283 asm volatile("movdqa %0, %%xmm 287 asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0])); 284 asm volatile("movdqa %0, %%xmm 288 asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0])); 285 asm volatile("pxor %0, %%xmm3" 289 asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); 286 asm volatile("movdqa %0, %%xmm 290 asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16])); 287 291 288 /* xmm3 = *q ^ *dq */ 292 /* xmm3 = *q ^ *dq */ 289 293 290 asm volatile("movdqa %xmm3, %x 294 asm volatile("movdqa %xmm3, %xmm6"); 291 asm volatile("movdqa %0, %%xmm 295 asm volatile("movdqa %0, %%xmm2" : : "m" (p[0])); 292 asm volatile("psraw $4, %xmm3" 296 asm volatile("psraw $4, %xmm3"); 293 asm volatile("pand %xmm7, %xmm 297 asm volatile("pand %xmm7, %xmm6"); 294 asm volatile("pand %xmm7, %xmm 298 asm volatile("pand %xmm7, %xmm3"); 295 asm volatile("pshufb %xmm6, %x 299 asm volatile("pshufb %xmm6, %xmm0"); 296 asm volatile("pshufb %xmm3, %x 300 asm volatile("pshufb %xmm3, %xmm1"); 297 asm volatile("pxor %xmm0, %xmm 301 asm volatile("pxor %xmm0, %xmm1"); 298 302 299 /* xmm1 = qmul[*q ^ *dq */ 303 /* xmm1 = qmul[*q ^ *dq */ 300 304 301 asm volatile("pxor %xmm1, %xmm 305 asm volatile("pxor %xmm1, %xmm2"); 302 306 303 /* xmm2 = *p ^ qmul[*q ^ *dq] 307 /* xmm2 = *p ^ qmul[*q ^ *dq] */ 304 308 305 asm volatile("movdqa %%xmm1, % 309 asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0])); 306 asm volatile("movdqa %%xmm2, % 310 asm volatile("movdqa %%xmm2, %0" : "=m" (p[0])); 307 311 308 bytes -= 16; 312 bytes -= 16; 309 p += 16; 313 p += 16; 310 q += 16; 314 q += 16; 311 dq += 16; 315 dq += 16; 312 #endif 316 #endif 313 } 317 } 314 318 315 kernel_fpu_end(); 319 kernel_fpu_end(); 316 } 320 } 317 321 318 const struct raid6_recov_calls raid6_recov_sss 322 const struct raid6_recov_calls raid6_recov_ssse3 = { 319 .data2 = raid6_2data_recov_ssse3, 323 .data2 = raid6_2data_recov_ssse3, 320 .datap = raid6_datap_recov_ssse3, 324 .datap = raid6_datap_recov_ssse3, 321 .valid = raid6_has_ssse3, 325 .valid = raid6_has_ssse3, 322 #ifdef CONFIG_X86_64 326 #ifdef CONFIG_X86_64 323 .name = "ssse3x2", 327 .name = "ssse3x2", 324 #else 328 #else 325 .name = "ssse3x1", 329 .name = "ssse3x1", 326 #endif 330 #endif 327 .priority = 1, 331 .priority = 1, 328 }; 332 }; 329 333
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.