
TOMOYO Linux Cross Reference
Linux/lib/raid6/recov_ssse3.c


Diff markup

Differences between /lib/raid6/recov_ssse3.c (version linux-6.12-rc7) and /lib/raid6/recov_ssse3.c (version linux-2.6.0). The file does not exist in linux-2.6.0, so the comparison side is empty and the listing below is simply the complete linux-6.12-rc7 file.


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
	return boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2) &&
		boot_cpu_has(X86_FEATURE_SSSE3);
}

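Both recovery routines below multiply a whole vector of bytes by a fixed GF(2^8) constant using pshufb: each raid6_vgfmul[c] entry is a pair of 16-byte tables, holding the products of c with every possible low nibble (bytes 0..15) and with every possible high nibble (bytes 16..31). Because GF(2^8) multiplication distributes over XOR, a full product is two nibble lookups XORed together, and pshufb performs sixteen such lookups at once. A minimal scalar model of the trick, one byte at a time (the function name is hypothetical; only the table layout matches raid6_vgfmul):

	#include <stdint.h>

	/* tbl[0..15]  = c * x      for each low nibble x
	 * tbl[16..31] = c * (x<<4) for each high nibble x
	 * c*v = c*(v_lo ^ v_hi) = c*v_lo ^ c*v_hi        */
	static uint8_t gf256_mul_by_nibbles(const uint8_t tbl[32], uint8_t v)
	{
		return tbl[v & 0x0f] ^ tbl[16 + (v >> 4)];
	}
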
static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
	asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
	asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
	asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif

	/* Now do it... */
	while (bytes) {
#ifdef CONFIG_X86_64
		/* xmm6, xmm14, xmm15 */

		asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
		asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
		asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
		asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
		asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
		asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
		asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
		asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

		/* xmm0/8 = px */

		asm volatile("movdqa %xmm6,%xmm4");
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
		asm volatile("movdqa %xmm6,%xmm12");
		asm volatile("movdqa %xmm5,%xmm13");
		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("movdqa %xmm9,%xmm11");
		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
		asm volatile("movdqa %xmm8,%xmm10");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("psraw  $4,%xmm9");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pand   %xmm7,%xmm9");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pshufb %xmm9,%xmm13");
		asm volatile("pxor   %xmm4,%xmm5");
		asm volatile("pxor   %xmm12,%xmm13");

		/* xmm5/13 = qx */

		asm volatile("movdqa %xmm14,%xmm4");
		asm volatile("movdqa %xmm15,%xmm1");
		asm volatile("movdqa %xmm14,%xmm12");
		asm volatile("movdqa %xmm15,%xmm9");
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("movdqa %xmm10,%xmm11");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("psraw  $4,%xmm10");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm11");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pand   %xmm7,%xmm10");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm11,%xmm12");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pshufb %xmm10,%xmm9");
		asm volatile("pxor   %xmm4,%xmm1");
		asm volatile("pxor   %xmm12,%xmm9");

		/* xmm1/9 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		asm volatile("pxor   %xmm13,%xmm9");
		/* xmm1/9 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("pxor   %xmm9,%xmm8");
		asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
		asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#else
		asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
		asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
		asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
		asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

		/* 1 = dq ^ q
		 * 0 = dp ^ p
		 */
		asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
		asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

		asm volatile("movdqa %xmm1,%xmm3");
		asm volatile("psraw  $4,%xmm1");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm1");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm1,%xmm5");
		asm volatile("pxor   %xmm4,%xmm5");

		asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

		/* xmm5 = qx */

		asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
		asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
		asm volatile("movdqa %xmm2,%xmm3");
		asm volatile("psraw  $4,%xmm2");
		asm volatile("pand   %xmm7,%xmm3");
		asm volatile("pand   %xmm7,%xmm2");
		asm volatile("pshufb %xmm3,%xmm4");
		asm volatile("pshufb %xmm2,%xmm1");
		asm volatile("pxor   %xmm4,%xmm1");

		/* xmm1 = pbmul[px] */
		asm volatile("pxor   %xmm5,%xmm1");
		/* xmm1 = db = DQ */
		asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

		asm volatile("pxor   %xmm1,%xmm0");
		asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

		bytes -= 16;
		p += 16;
		q += 16;
		dp += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}

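With data blocks Da and Db missing, the zeroed-disk syndrome pass above leaves Pxy = Da ^ Db in dp and Qxy = g^a*Da ^ g^b*Db in dq (g is the GF(2^8) generator, a = faila, b = failb). Solving that pair gives Db = (g^(b-a) + 1)^-1 * Pxy ^ (g^a ^ g^b)^-1 * Qxy, which is exactly where the pbmul and qmul constants come from, and then Da = Db ^ Pxy. A scalar sketch of the vector loop, modeled on the generic code in lib/raid6/recov.c (the tables are simplified here to flat 256-entry lookups rather than raid6_vgfmul's nibble-pair layout):

	#include <stddef.h>
	#include <stdint.h>

	static void recov_2data_scalar(size_t bytes, uint8_t *p, uint8_t *q,
				       uint8_t *dp, uint8_t *dq,
				       const uint8_t pbmul[256],
				       const uint8_t qmul[256])
	{
		while (bytes--) {
			uint8_t px = *p++ ^ *dp;	/* Pxy = Da ^ Db */
			uint8_t qx = qmul[*q++ ^ *dq];	/* Qxy / (g^a ^ g^b) */
			uint8_t db = pbmul[px] ^ qx;	/* reconstructed Db */

			*dq++ = db;
			*dp++ = db ^ px;		/* Da = Db ^ Pxy */
		}
	}
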
static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	static const u8 __aligned(16) x0f[16] = {
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
		 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];

	kernel_fpu_begin();

	asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

		/* xmm3 = q[0] ^ dq[0] */

		asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm4 = q[16] ^ dq[16] */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %xmm4, %xmm8");

		/* xmm4 = xmm8 = q[16] ^ dq[16] */

		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
		asm volatile("pxor %xmm0, %xmm1");
		asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

		/* xmm1 = qmul[q[0] ^ dq[0]] */

		asm volatile("psraw $4, %xmm4");
		asm volatile("pand %xmm7, %xmm8");
		asm volatile("pand %xmm7, %xmm4");
		asm volatile("pshufb %xmm8, %xmm10");
		asm volatile("pshufb %xmm4, %xmm11");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("pxor %xmm10, %xmm11");
		asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

		/* xmm11 = qmul[q[16] ^ dq[16]] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

		asm volatile("pxor %xmm11, %xmm12");

		/* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
		asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;

#else
		asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
		asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
		asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
		asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

		/* xmm3 = *q ^ *dq */

		asm volatile("movdqa %xmm3, %xmm6");
		asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
		asm volatile("psraw $4, %xmm3");
		asm volatile("pand %xmm7, %xmm6");
		asm volatile("pand %xmm7, %xmm3");
		asm volatile("pshufb %xmm6, %xmm0");
		asm volatile("pshufb %xmm3, %xmm1");
		asm volatile("pxor %xmm0, %xmm1");

		/* xmm1 = qmul[*q ^ *dq] */

		asm volatile("pxor %xmm1, %xmm2");

		/* xmm2 = *p ^ qmul[*q ^ *dq] */

		asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
		asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

		bytes -= 16;
		p += 16;
		q += 16;
		dq += 16;
#endif
	}

	kernel_fpu_end();
}
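
Here one data block plus the P block are lost. gen_syndrome() over the zeroed set rebuilds P without the failed block into p and a zeroed-disk Q' into dq, so the missing data is Dx = (Q ^ Q') * g^-x (the qmul table above), and one further XOR into p completes the parity. The scalar equivalent, again modeled on lib/raid6/recov.c with a flat 256-entry table:

	static void recov_datap_scalar(size_t bytes, uint8_t *p, uint8_t *q,
				       uint8_t *dq, const uint8_t qmul[256])
	{
		while (bytes--) {
			*dq = qmul[*q++ ^ *dq];	/* Dx = (Q ^ Q') * g^-x */
			*p++ ^= *dq++;		/* fold Dx back into P */
		}
	}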
317                                                   
const struct raid6_recov_calls raid6_recov_ssse3 = {
	.data2 = raid6_2data_recov_ssse3,
	.datap = raid6_datap_recov_ssse3,
	.valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
	.name = "ssse3x2",
#else
	.name = "ssse3x1",
#endif
	.priority = 1,
};
329                                                   
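At initialization the raid6 library chooses among the registered raid6_recov_calls instances; unlike the gen_syndrome candidates they are not benchmarked, and the highest-priority entry whose ->valid() hook accepts the CPU wins, which is how this SSSE3 version (priority 1) yields to higher-priority variants such as the AVX2 one on newer hardware. A sketch of that selection, loosely modeled on the logic in lib/raid6/algos.c (the helper name and candidate array here are illustrative):

	/* Pick the best recovery implementation from a NULL-terminated
	 * candidate list: highest priority whose ->valid() passes. */
	static const struct raid6_recov_calls *
	pick_recov(const struct raid6_recov_calls *const *candidates)
	{
		const struct raid6_recov_calls *best = NULL;

		for (; *candidates; candidates++) {
			if ((*candidates)->valid && !(*candidates)->valid())
				continue;	/* CPU lacks the features */
			if (!best || (*candidates)->priority > best->priority)
				best = *candidates;
		}
		return best;
	}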
