
TOMOYO Linux Cross Reference
Linux/lib/raid6/recov_ssse3.c


Diff markup

Differences between /lib/raid6/recov_ssse3.c (Architecture sparc64) and /lib/raid6/recov_ssse3.c (Architecture alpha): the file is identical for both architectures, so the source is shown once below.


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Intel Corporation
 */

#include <linux/raid/pq.h>
#include "x86.h"

static int raid6_has_ssse3(void)
{
        return boot_cpu_has(X86_FEATURE_XMM) &&
                boot_cpu_has(X86_FEATURE_XMM2) &&
                boot_cpu_has(X86_FEATURE_SSSE3);
}

static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
                int failb, void **ptrs)
{
        u8 *p, *q, *dp, *dq;
        const u8 *pbmul;        /* P multiplier table for B data */
        const u8 *qmul;         /* Q multiplier table (for both) */
        static const u8 __aligned(16) x0f[16] = {
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /* Compute syndrome with zero for the missing data pages
           Use the dead data pages as temporary storage for
           delta p and delta q */
        dp = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-2] = dp;
        dq = (u8 *)ptrs[failb];
        ptrs[failb] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);
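
        /*
         * Annotation (not in the original source): with the two failed
         * blocks replaced by the zero page, the regenerated dp/dq hold
         * P' and Q'.  All surviving-disk terms cancel under XOR, so
         *
         *      Pd = dp ^ p = Da ^ Db
         *      Qd = dq ^ q = g^a*Da ^ g^b*Db    (a = faila, b = failb)
         *
         * which is two GF(2^8) equations in the two missing blocks.
         */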

        /* Restore pointer table */
        ptrs[faila]   = dp;
        ptrs[failb]   = dq;
        ptrs[disks-2] = p;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables */
        pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
        qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
                raid6_gfexp[failb]]];
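        /*
         * Annotation (not in the original source): solving the pair of
         * equations above for the block on disk failb gives
         *
         *      Db = Pd * 1/(g^(b-a) ^ 1)  ^  Qd * 1/(g^a ^ g^b)
         *      Da = Db ^ Pd
         *
         * pbmul and qmul are the raid6_vgfmul nibble-lookup rows for
         * those two constant multipliers (cf. the same table selection
         * in lib/raid6/recov.c, which uses full 256-byte raid6_gfmul
         * rows instead).
         */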

        kernel_fpu_begin();

        asm volatile("movdqa %0,%%xmm7" : : "m" (x0f[0]));

#ifdef CONFIG_X86_64
        asm volatile("movdqa %0,%%xmm6" : : "m" (qmul[0]));
        asm volatile("movdqa %0,%%xmm14" : : "m" (pbmul[0]));
        asm volatile("movdqa %0,%%xmm15" : : "m" (pbmul[16]));
#endif
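        /*
         * Annotation (not in the original source): x86-64 adds
         * xmm8-xmm15, so the multiplier tables preloaded above stay
         * resident for the whole loop and two 16-byte lanes are
         * processed per iteration; the 32-bit path reloads the tables
         * on every pass and handles a single lane.
         */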

        /* Now do it... */
        while (bytes) {
#ifdef CONFIG_X86_64
                /* xmm6, xmm14, xmm15 */

                asm volatile("movdqa %0,%%xmm1" : : "m" (q[0]));
                asm volatile("movdqa %0,%%xmm9" : : "m" (q[16]));
                asm volatile("movdqa %0,%%xmm0" : : "m" (p[0]));
                asm volatile("movdqa %0,%%xmm8" : : "m" (p[16]));
                asm volatile("pxor   %0,%%xmm1" : : "m" (dq[0]));
                asm volatile("pxor   %0,%%xmm9" : : "m" (dq[16]));
                asm volatile("pxor   %0,%%xmm0" : : "m" (dp[0]));
                asm volatile("pxor   %0,%%xmm8" : : "m" (dp[16]));

                /* xmm0/8 = px */

                asm volatile("movdqa %xmm6,%xmm4");
                asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));
                asm volatile("movdqa %xmm6,%xmm12");
                asm volatile("movdqa %xmm5,%xmm13");
                asm volatile("movdqa %xmm1,%xmm3");
                asm volatile("movdqa %xmm9,%xmm11");
                asm volatile("movdqa %xmm0,%xmm2"); /* xmm2/10 = px */
                asm volatile("movdqa %xmm8,%xmm10");
                asm volatile("psraw  $4,%xmm1");
                asm volatile("psraw  $4,%xmm9");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm11");
                asm volatile("pand   %xmm7,%xmm1");
                asm volatile("pand   %xmm7,%xmm9");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm11,%xmm12");
                asm volatile("pshufb %xmm1,%xmm5");
                asm volatile("pshufb %xmm9,%xmm13");
                asm volatile("pxor   %xmm4,%xmm5");
                asm volatile("pxor   %xmm12,%xmm13");

                /* xmm5/13 = qx */
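                /*
                 * Annotation (not in the original source): the stanza
                 * above is a GF(2^8) constant multiply done a nibble
                 * at a time.  psraw $4 plus the 0x0f mask in xmm7
                 * isolates each byte's high nibble, pand alone its low
                 * nibble; each nibble then indexes a 16-entry pshufb
                 * table (qmul[0..15] for low, qmul[16..31] for high)
                 * and the two lookups are combined with pxor.
                 */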

                asm volatile("movdqa %xmm14,%xmm4");
                asm volatile("movdqa %xmm15,%xmm1");
                asm volatile("movdqa %xmm14,%xmm12");
                asm volatile("movdqa %xmm15,%xmm9");
                asm volatile("movdqa %xmm2,%xmm3");
                asm volatile("movdqa %xmm10,%xmm11");
                asm volatile("psraw  $4,%xmm2");
                asm volatile("psraw  $4,%xmm10");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm11");
                asm volatile("pand   %xmm7,%xmm2");
                asm volatile("pand   %xmm7,%xmm10");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm11,%xmm12");
                asm volatile("pshufb %xmm2,%xmm1");
                asm volatile("pshufb %xmm10,%xmm9");
                asm volatile("pxor   %xmm4,%xmm1");
                asm volatile("pxor   %xmm12,%xmm9");

                /* xmm1/9 = pbmul[px] */
                asm volatile("pxor   %xmm5,%xmm1");
                asm volatile("pxor   %xmm13,%xmm9");
                /* xmm1/9 = db = DQ */
                asm volatile("movdqa %%xmm1,%0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm9,%0" : "=m" (dq[16]));

                asm volatile("pxor   %xmm1,%xmm0");
                asm volatile("pxor   %xmm9,%xmm8");
                asm volatile("movdqa %%xmm0,%0" : "=m" (dp[0]));
                asm volatile("movdqa %%xmm8,%0" : "=m" (dp[16]));

                bytes -= 32;
                p += 32;
                q += 32;
                dp += 32;
                dq += 32;
#else
                asm volatile("movdqa %0,%%xmm1" : : "m" (*q));
                asm volatile("movdqa %0,%%xmm0" : : "m" (*p));
                asm volatile("pxor   %0,%%xmm1" : : "m" (*dq));
                asm volatile("pxor   %0,%%xmm0" : : "m" (*dp));

                /* 1 = dq ^ q
                 * 0 = dp ^ p
                 */
                asm volatile("movdqa %0,%%xmm4" : : "m" (qmul[0]));
                asm volatile("movdqa %0,%%xmm5" : : "m" (qmul[16]));

                asm volatile("movdqa %xmm1,%xmm3");
                asm volatile("psraw  $4,%xmm1");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm1");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm1,%xmm5");
                asm volatile("pxor   %xmm4,%xmm5");

                asm volatile("movdqa %xmm0,%xmm2"); /* xmm2 = px */

                /* xmm5 = qx */

                asm volatile("movdqa %0,%%xmm4" : : "m" (pbmul[0]));
                asm volatile("movdqa %0,%%xmm1" : : "m" (pbmul[16]));
                asm volatile("movdqa %xmm2,%xmm3");
                asm volatile("psraw  $4,%xmm2");
                asm volatile("pand   %xmm7,%xmm3");
                asm volatile("pand   %xmm7,%xmm2");
                asm volatile("pshufb %xmm3,%xmm4");
                asm volatile("pshufb %xmm2,%xmm1");
                asm volatile("pxor   %xmm4,%xmm1");

                /* xmm1 = pbmul[px] */
                asm volatile("pxor   %xmm5,%xmm1");
                /* xmm1 = db = DQ */
                asm volatile("movdqa %%xmm1,%0" : "=m" (*dq));

                asm volatile("pxor   %xmm1,%xmm0");
                asm volatile("movdqa %%xmm0,%0" : "=m" (*dp));

                bytes -= 16;
                p += 16;
                q += 16;
                dp += 16;
                dq += 16;
#endif
        }

        kernel_fpu_end();
}
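
/*
 * Annotation (not in the original source): a minimal scalar sketch of
 * the per-byte math the loop above vectorizes, in the spirit of the
 * generic raid6_2data_recov_intx1() in lib/raid6/recov.c (there pbmul
 * and qmul are full 256-byte raid6_gfmul rows, not nibble tables):
 *
 *      while (bytes--) {
 *              px    = *p++ ^ *dp;             // Pd for this byte
 *              qx    = qmul[*q++ ^ *dq];       // Qd / (g^a ^ g^b)
 *              *dq++ = db = pbmul[px] ^ qx;    // reconstructed Db
 *              *dp++ = db ^ px;                // reconstructed Da
 *      }
 */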


static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
                void **ptrs)
{
        u8 *p, *q, *dq;
        const u8 *qmul;         /* Q multiplier table */
        static const u8 __aligned(16) x0f[16] = {
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
                 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f};

        p = (u8 *)ptrs[disks-2];
        q = (u8 *)ptrs[disks-1];

        /* Compute syndrome with zero for the missing data page
           Use the dead data page as temporary storage for delta q */
        dq = (u8 *)ptrs[faila];
        ptrs[faila] = (void *)raid6_empty_zero_page;
        ptrs[disks-1] = dq;

        raid6_call.gen_syndrome(disks, bytes, ptrs);

        /* Restore pointer table */
        ptrs[faila]   = dq;
        ptrs[disks-1] = q;

        /* Now, pick the proper data tables */
        qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
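        /*
         * Annotation (not in the original source): here only one data
         * block is missing, so dq ^ q = g^a * Da with a = faila, and
         * the row picked above multiplies by the constant 1/g^a to
         * recover Da; P is then repaired by XORing Da back in.
         */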

        kernel_fpu_begin();

        asm volatile("movdqa %0, %%xmm7" : : "m" (x0f[0]));

        while (bytes) {
#ifdef CONFIG_X86_64
                asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
                asm volatile("movdqa %0, %%xmm4" : : "m" (dq[16]));
                asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
                asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));

                /* xmm3 = q[0] ^ dq[0] */

                asm volatile("pxor %0, %%xmm4" : : "m" (q[16]));
                asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

                /* xmm4 = q[16] ^ dq[16] */

                asm volatile("movdqa %xmm3, %xmm6");
                asm volatile("movdqa %xmm4, %xmm8");

                /* xmm4 = xmm8 = q[16] ^ dq[16] */

                asm volatile("psraw $4, %xmm3");
                asm volatile("pand %xmm7, %xmm6");
                asm volatile("pand %xmm7, %xmm3");
                asm volatile("pshufb %xmm6, %xmm0");
                asm volatile("pshufb %xmm3, %xmm1");
                asm volatile("movdqa %0, %%xmm10" : : "m" (qmul[0]));
                asm volatile("pxor %xmm0, %xmm1");
                asm volatile("movdqa %0, %%xmm11" : : "m" (qmul[16]));

                /* xmm1 = qmul[q[0] ^ dq[0]] */

                asm volatile("psraw $4, %xmm4");
                asm volatile("pand %xmm7, %xmm8");
                asm volatile("pand %xmm7, %xmm4");
                asm volatile("pshufb %xmm8, %xmm10");
                asm volatile("pshufb %xmm4, %xmm11");
                asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
                asm volatile("pxor %xmm10, %xmm11");
                asm volatile("movdqa %0, %%xmm12" : : "m" (p[16]));

                /* xmm11 = qmul[q[16] ^ dq[16]] */

                asm volatile("pxor %xmm1, %xmm2");

                /* xmm2 = p[0] ^ qmul[q[0] ^ dq[0]] */

                asm volatile("pxor %xmm11, %xmm12");

                /* xmm12 = p[16] ^ qmul[q[16] ^ dq[16]] */

                asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm11, %0" : "=m" (dq[16]));

                asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));
                asm volatile("movdqa %%xmm12, %0" : "=m" (p[16]));

                bytes -= 32;
                p += 32;
                q += 32;
                dq += 32;

#else
                asm volatile("movdqa %0, %%xmm3" : : "m" (dq[0]));
                asm volatile("movdqa %0, %%xmm0" : : "m" (qmul[0]));
                asm volatile("pxor %0, %%xmm3" : : "m" (q[0]));
                asm volatile("movdqa %0, %%xmm1" : : "m" (qmul[16]));

                /* xmm3 = *q ^ *dq */

                asm volatile("movdqa %xmm3, %xmm6");
                asm volatile("movdqa %0, %%xmm2" : : "m" (p[0]));
                asm volatile("psraw $4, %xmm3");
                asm volatile("pand %xmm7, %xmm6");
                asm volatile("pand %xmm7, %xmm3");
                asm volatile("pshufb %xmm6, %xmm0");
                asm volatile("pshufb %xmm3, %xmm1");
                asm volatile("pxor %xmm0, %xmm1");

                /* xmm1 = qmul[*q ^ *dq] */

                asm volatile("pxor %xmm1, %xmm2");

                /* xmm2 = *p ^ qmul[*q ^ *dq] */

                asm volatile("movdqa %%xmm1, %0" : "=m" (dq[0]));
                asm volatile("movdqa %%xmm2, %0" : "=m" (p[0]));

                bytes -= 16;
                p += 16;
                q += 16;
                dq += 16;
#endif
        }

        kernel_fpu_end();
}
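
/*
 * Annotation (not in the original source): the scalar equivalent,
 * following raid6_datap_recov_intx1() in lib/raid6/recov.c:
 *
 *      while (bytes--) {
 *              *p++ ^= *dq = qmul[*q ^ *dq];   // Da = (q ^ dq) / g^a
 *              q++; dq++;
 *      }
 */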

const struct raid6_recov_calls raid6_recov_ssse3 = {
        .data2 = raid6_2data_recov_ssse3,
        .datap = raid6_datap_recov_ssse3,
        .valid = raid6_has_ssse3,
#ifdef CONFIG_X86_64
        .name = "ssse3x2",
#else
        .name = "ssse3x1",
#endif
        .priority = 1,
};
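
/*
 * Annotation (not in the original source): this descriptor is consumed
 * by lib/raid6/algos.c, which walks the NULL-terminated list of
 * raid6_recov_calls entries, skips entries whose ->valid() check
 * fails, and keeps the highest ->priority survivor.  Roughly (a
 * sketch, not the exact kernel code):
 *
 *      const struct raid6_recov_calls *best = NULL, *const *algo;
 *
 *      for (algo = raid6_recov_algos; *algo; algo++)
 *              if (!(*algo)->valid || (*algo)->valid())
 *                      if (!best || (*algo)->priority > best->priority)
 *                              best = *algo;
 */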
