~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/s390/include/asm/fpu-insn-asm.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /*
  3  * Support for Vector Instructions
  4  *
  5  * Assembler macros to generate .byte/.word code for particular
  6  * vector instructions that are supported by recent binutils (>= 2.26) only.
  7  *
  8  * Copyright IBM Corp. 2015
  9  * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 10  */
 11 
 12 #ifndef __ASM_S390_FPU_INSN_ASM_H
 13 #define __ASM_S390_FPU_INSN_ASM_H
 14 
 15 #ifndef __ASM_S390_FPU_INSN_H
 16 #error only <asm/fpu-insn.h> can be included directly
 17 #endif
 18 
 19 #ifdef __ASSEMBLY__
 20 
 21 /* Macros to generate vector instruction byte code */
 22 
 23 /* GR_NUM - Retrieve general-purpose register number
 24  *
 25  * @opd:        Operand to store register number
 26  * @gr:         String designation register in the format "%rN"
    *
    * If @gr matches none of the "%r0".."%r15" strings below, the final
    * .if assigns @opd = @gr, so a raw register number may also be passed.
 27  */
 28 .macro  GR_NUM  opd gr
            /* 255 is a sentinel meaning "no %rN string matched yet" */
 29         \opd = 255
 30         .ifc \gr,%r0
 31                 \opd = 0
 32         .endif
 33         .ifc \gr,%r1
 34                 \opd = 1
 35         .endif
 36         .ifc \gr,%r2
 37                 \opd = 2
 38         .endif
 39         .ifc \gr,%r3
 40                 \opd = 3
 41         .endif
 42         .ifc \gr,%r4
 43                 \opd = 4
 44         .endif
 45         .ifc \gr,%r5
 46                 \opd = 5
 47         .endif
 48         .ifc \gr,%r6
 49                 \opd = 6
 50         .endif
 51         .ifc \gr,%r7
 52                 \opd = 7
 53         .endif
 54         .ifc \gr,%r8
 55                 \opd = 8
 56         .endif
 57         .ifc \gr,%r9
 58                 \opd = 9
 59         .endif
 60         .ifc \gr,%r10
 61                 \opd = 10
 62         .endif
 63         .ifc \gr,%r11
 64                 \opd = 11
 65         .endif
 66         .ifc \gr,%r12
 67                 \opd = 12
 68         .endif
 69         .ifc \gr,%r13
 70                 \opd = 13
 71         .endif
 72         .ifc \gr,%r14
 73                 \opd = 14
 74         .endif
 75         .ifc \gr,%r15
 76                 \opd = 15
 77         .endif
            /* fall back: pass \gr through unchanged */
 78         .if \opd == 255
 79                 \opd = \gr
 80         .endif
 81 .endm
 82 
 83 /* VX_NUM - Retrieve vector register number
 84  *
 85  * @opd:        Operand to store register number
 86  * @vxr:        String designation register in the format "%vN"
 87  *
 88  * The vector register number is used as the input number to the
 89  * instruction, as well as to compute the RXB field of the
 90  * instruction (registers 16-31 need their MSB stored in RXB).
 91  */
 92 .macro  VX_NUM  opd vxr
            /* 255 is a sentinel meaning "no %vN string matched yet" */
 93         \opd = 255
 94         .ifc \vxr,%v0
 95                 \opd = 0
 96         .endif
 97         .ifc \vxr,%v1
 98                 \opd = 1
 99         .endif
100         .ifc \vxr,%v2
101                 \opd = 2
102         .endif
103         .ifc \vxr,%v3
104                 \opd = 3
105         .endif
106         .ifc \vxr,%v4
107                 \opd = 4
108         .endif
109         .ifc \vxr,%v5
110                 \opd = 5
111         .endif
112         .ifc \vxr,%v6
113                 \opd = 6
114         .endif
115         .ifc \vxr,%v7
116                 \opd = 7
117         .endif
118         .ifc \vxr,%v8
119                 \opd = 8
120         .endif
121         .ifc \vxr,%v9
122                 \opd = 9
123         .endif
124         .ifc \vxr,%v10
125                 \opd = 10
126         .endif
127         .ifc \vxr,%v11
128                 \opd = 11
129         .endif
130         .ifc \vxr,%v12
131                 \opd = 12
132         .endif
133         .ifc \vxr,%v13
134                 \opd = 13
135         .endif
136         .ifc \vxr,%v14
137                 \opd = 14
138         .endif
139         .ifc \vxr,%v15
140                 \opd = 15
141         .endif
142         .ifc \vxr,%v16
143                 \opd = 16
144         .endif
145         .ifc \vxr,%v17
146                 \opd = 17
147         .endif
148         .ifc \vxr,%v18
149                 \opd = 18
150         .endif
151         .ifc \vxr,%v19
152                 \opd = 19
153         .endif
154         .ifc \vxr,%v20
155                 \opd = 20
156         .endif
157         .ifc \vxr,%v21
158                 \opd = 21
159         .endif
160         .ifc \vxr,%v22
161                 \opd = 22
162         .endif
163         .ifc \vxr,%v23
164                 \opd = 23
165         .endif
166         .ifc \vxr,%v24
167                 \opd = 24
168         .endif
169         .ifc \vxr,%v25
170                 \opd = 25
171         .endif
172         .ifc \vxr,%v26
173                 \opd = 26
174         .endif
175         .ifc \vxr,%v27
176                 \opd = 27
177         .endif
178         .ifc \vxr,%v28
179                 \opd = 28
180         .endif
181         .ifc \vxr,%v29
182                 \opd = 29
183         .endif
184         .ifc \vxr,%v30
185                 \opd = 30
186         .endif
187         .ifc \vxr,%v31
188                 \opd = 31
189         .endif
            /* fall back: pass \vxr through unchanged */
190         .if \opd == 255
191                 \opd = \vxr
192         .endif
193 .endm
194 
195 /* RXB - Compute most significant bit of the used vector registers
196  *
197  * @rxb:        Operand to store computed RXB value
198  * @v1:         Vector register designated operand whose MSB is stored in
199  *              RXB bit 0 (instruction bit 36) and whose remaining bits
200  *              are stored in instruction bits 8-11.
201  * @v2:         Vector register designated operand whose MSB is stored in
202  *              RXB bit 1 (instruction bit 37) and whose remaining bits
203  *              are stored in instruction bits 12-15.
204  * @v3:         Vector register designated operand whose MSB is stored in
205  *              RXB bit 2 (instruction bit 38) and whose remaining bits
206  *              are stored in instruction bits 16-19.
207  * @v4:         Vector register designated operand whose MSB is stored in
208  *              RXB bit 3 (instruction bit 39) and whose remaining bits
209  *              are stored in instruction bits 32-35.
210  *
211  * Note: In most vector instruction formats [1] V1, V2, V3, and V4 directly
212  * correspond to @v1, @v2, @v3, and @v4. But there are exceptions, such as but
213  * not limited to the vector instruction formats VRR-g, VRR-h, VRS-a, VRS-d,
214  * and VSI.
215  *
216  * [1] IBM z/Architecture Principles of Operation, chapter "Program
217  * Execution, section "Instructions", subsection "Instruction Formats".
218  */
219 .macro  RXB     rxb v1 v2=0 v3=0 v4=0
220         \rxb = 0
            /* set one RXB bit per operand with register number >= 16;
               0x10 tests the MSB of a 5-bit register number (0-31) */
221         .if \v1 & 0x10
222                 \rxb = \rxb | 0x08
223         .endif
224         .if \v2 & 0x10
225                 \rxb = \rxb | 0x04
226         .endif
227         .if \v3 & 0x10
228                 \rxb = \rxb | 0x02
229         .endif
230         .if \v4 & 0x10
231                 \rxb = \rxb | 0x01
232         .endif
233 .endm
234 
235 /* MRXB - Generate Element Size Control and RXB value
236  *
237  * @m:          Element size control
238  * @v1:         First vector register designated operand (for RXB)
239  * @v2:         Second vector register designated operand (for RXB)
240  * @v3:         Third vector register designated operand (for RXB)
241  * @v4:         Fourth vector register designated operand (for RXB)
242  *
243  * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
244  * description for further details.
245  */
246 .macro  MRXB    m v1 v2=0 v3=0 v4=0
            /* "rxb" is an ordinary assembler symbol (not a macro argument);
               it is recomputed by RXB on every expansion */
247         rxb = 0
248         RXB     rxb, \v1, \v2, \v3, \v4
            /* emit one byte: element size control in the high nibble,
               the RXB value in the low nibble */
249         .byte   (\m << 4) | rxb
250 .endm
251 
252 /* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
253  *
254  * @m:          Element size control
255  * @opc:        Opcode
256  * @v1:         First vector register designated operand (for RXB)
257  * @v2:         Second vector register designated operand (for RXB)
258  * @v3:         Third vector register designated operand (for RXB)
259  * @v4:         Fourth vector register designated operand (for RXB)
260  *
261  * Note: For @v1, @v2, @v3, and @v4 also refer to the RXB macro
262  * description for further details.
263  */
264 .macro  MRXBOPC m opc v1 v2=0 v3=0 v4=0
            /* combined element-size/RXB byte, then the final opcode byte */
265         MRXB    \m, \v1, \v2, \v3, \v4
266         .byte   \opc
267 .endm
268 
269 /* Vector support instructions */
270 
271 /* VECTOR GENERATE BYTE MASK */
272 .macro  VGBM    vr imm2
273         VX_NUM  v1, \vr
            /* first halfword: opcode 0xE7, V1 low nibble in bits 8-11;
               MSB of v1 goes into RXB via MRXBOPC */
274         .word   (0xE700 | ((v1&15) << 4))
275         .word   \imm2
276         MRXBOPC 0, 0x44, v1
277 .endm
    /* VZERO - VGBM with mask 0: clear the whole vector register */
278 .macro  VZERO   vxr
279         VGBM    \vxr, 0
280 .endm
    /* VONE - VGBM with mask 0xFFFF: set every byte of the register */
281 .macro  VONE    vxr
282         VGBM    \vxr, 0xFFFF
283 .endm
284 
285 /* VECTOR LOAD VR ELEMENT FROM GR
    *
    * @v:          Vector register designated operand (destination)
    * @gr:         General-purpose register with the value (r3 field)
    * @disp:       Displacement selecting the element index
    * @m:          Element size control (0..3, see the B/H/F/G wrappers)
    */
286 .macro  VLVG    v, gr, disp, m
287         VX_NUM  v1, \v
288         GR_NUM  b2, "%r0"
289         GR_NUM  r3, \gr
290         .word   0xE700 | ((v1&15) << 4) | r3
291         .word   (b2 << 12) | (\disp)
292         MRXBOPC \m, 0x22, v1
293 .endm
    /* Fix: VLVGB previously declared a stray "base" parameter and
     * forwarded five operands to the four-parameter VLVG macro, which
     * cannot assemble. It now matches VLVGH/VLVGF/VLVGG below. */
294 .macro  VLVGB   v, gr, index
295         VLVG    \v, \gr, \index, 0
296 .endm
297 .macro  VLVGH   v, gr, index
298         VLVG    \v, \gr, \index, 1
299 .endm
300 .macro  VLVGF   v, gr, index
301         VLVG    \v, \gr, \index, 2
302 .endm
303 .macro  VLVGG   v, gr, index
304         VLVG    \v, \gr, \index, 3
305 .endm
306 
307 /* VECTOR LOAD REGISTER */
308 .macro  VLR     v1, v2
309         VX_NUM  v1, \v1
310         VX_NUM  v2, \v2
311         .word   0xE700 | ((v1&15) << 4) | (v2&15)
            /* middle halfword is unused for this instruction */
312         .word   0
313         MRXBOPC 0, 0x56, v1, v2
314 .endm
315 
316 /* VECTOR LOAD */
317 .macro  VL      v, disp, index="%r0", base
318         VX_NUM  v1, \v
            /* x2 = index register, b2 = base register of the storage operand */
319         GR_NUM  x2, \index
320         GR_NUM  b2, \base
321         .word   0xE700 | ((v1&15) << 4) | x2
322         .word   (b2 << 12) | (\disp)
323         MRXBOPC 0, 0x06, v1
324 .endm
325 
326 /* VECTOR LOAD ELEMENT
    *
    * Common worker for VLEB/VLEH/VLEF/VLEG: \opc selects the element
    * size variant, \m3 is emitted in the instruction's m-field. */
327 .macro  VLEx    vr1, disp, index="%r0", base, m3, opc
328         VX_NUM  v1, \vr1
329         GR_NUM  x2, \index
330         GR_NUM  b2, \base
331         .word   0xE700 | ((v1&15) << 4) | x2
332         .word   (b2 << 12) | (\disp)
333         MRXBOPC \m3, \opc, v1
334 .endm
335 .macro  VLEB    vr1, disp, index="%r0", base, m3
336         VLEx    \vr1, \disp, \index, \base, \m3, 0x00
337 .endm
338 .macro  VLEH    vr1, disp, index="%r0", base, m3
339         VLEx    \vr1, \disp, \index, \base, \m3, 0x01
340 .endm
341 .macro  VLEF    vr1, disp, index="%r0", base, m3
342         VLEx    \vr1, \disp, \index, \base, \m3, 0x03
343 .endm
344 .macro  VLEG    vr1, disp, index="%r0", base, m3
345         VLEx    \vr1, \disp, \index, \base, \m3, 0x02
346 .endm
347 
348 /* VECTOR LOAD ELEMENT IMMEDIATE
    *
    * The wrappers below forward their "index" argument as the \m3 field
    * and select the element size variant via the opcode byte. */
349 .macro  VLEIx   vr1, imm2, m3, opc
350         VX_NUM  v1, \vr1
351         .word   0xE700 | ((v1&15) << 4)
352         .word   \imm2
353         MRXBOPC \m3, \opc, v1
354 .endm
355 .macro  VLEIB   vr1, imm2, index
356         VLEIx   \vr1, \imm2, \index, 0x40
357 .endm
358 .macro  VLEIH   vr1, imm2, index
359         VLEIx   \vr1, \imm2, \index, 0x41
360 .endm
361 .macro  VLEIF   vr1, imm2, index
362         VLEIx   \vr1, \imm2, \index, 0x43
363 .endm
364 .macro  VLEIG   vr1, imm2, index
365         VLEIx   \vr1, \imm2, \index, 0x42
366 .endm
367 
368 /* VECTOR LOAD GR FROM VR ELEMENT */
369 .macro  VLGV    gr, vr, disp, base="%r0", m
370         GR_NUM  r1, \gr
371         GR_NUM  b2, \base
372         VX_NUM  v3, \vr
373         .word   0xE700 | (r1 << 4) | (v3&15)
374         .word   (b2 << 12) | (\disp)
            /* first RXB operand is 0: the r1 field holds a GR, so only
               v3 can contribute an RXB bit */
375         MRXBOPC \m, 0x21, 0, v3
376 .endm
377 .macro  VLGVB   gr, vr, disp, base="%r0"
378         VLGV    \gr, \vr, \disp, \base, 0
379 .endm
380 .macro  VLGVH   gr, vr, disp, base="%r0"
381         VLGV    \gr, \vr, \disp, \base, 1
382 .endm
383 .macro  VLGVF   gr, vr, disp, base="%r0"
384         VLGV    \gr, \vr, \disp, \base, 2
385 .endm
386 .macro  VLGVG   gr, vr, disp, base="%r0"
387         VLGV    \gr, \vr, \disp, \base, 3
388 .endm
389 
390 /* VECTOR LOAD MULTIPLE */
391 .macro  VLM     vfrom, vto, disp, base, hint=3
392         VX_NUM  v1, \vfrom
393         VX_NUM  v3, \vto
394         GR_NUM  b2, \base
395         .word   0xE700 | ((v1&15) << 4) | (v3&15)
396         .word   (b2 << 12) | (\disp)
            /* \hint is emitted in the m-field (defaults to 3) */
397         MRXBOPC \hint, 0x36, v1, v3
398 .endm
399 
400 /* VECTOR STORE */
401 .macro  VST     vr1, disp, index="%r0", base
402         VX_NUM  v1, \vr1
403         GR_NUM  x2, \index
404         GR_NUM  b2, \base
405         .word   0xE700 | ((v1&15) << 4) | (x2&15)
406         .word   (b2 << 12) | (\disp)
407         MRXBOPC 0, 0x0E, v1
408 .endm
409 
410 /* VECTOR STORE MULTIPLE */
411 .macro  VSTM    vfrom, vto, disp, base, hint=3
412         VX_NUM  v1, \vfrom
413         VX_NUM  v3, \vto
414         GR_NUM  b2, \base
415         .word   0xE700 | ((v1&15) << 4) | (v3&15)
416         .word   (b2 << 12) | (\disp)
417         MRXBOPC \hint, 0x3E, v1, v3
418 .endm
419 
420 /* VECTOR PERMUTE */
421 .macro  VPERM   vr1, vr2, vr3, vr4
422         VX_NUM  v1, \vr1
423         VX_NUM  v2, \vr2
424         VX_NUM  v3, \vr3
425         VX_NUM  v4, \vr4
426         .word   0xE700 | ((v1&15) << 4) | (v2&15)
427         .word   ((v3&15) << 12)
            /* v4's low nibble travels in the m-field slot of MRXBOPC */
428         MRXBOPC (v4&15), 0x8C, v1, v2, v3, v4
429 .endm
430 
431 /* VECTOR UNPACK LOGICAL LOW */
432 .macro  VUPLL   vr1, vr2, m3
433         VX_NUM  v1, \vr1
434         VX_NUM  v2, \vr2
435         .word   0xE700 | ((v1&15) << 4) | (v2&15)
436         .word   0x0000
437         MRXBOPC \m3, 0xD4, v1, v2
438 .endm
439 .macro  VUPLLB  vr1, vr2
440         VUPLL   \vr1, \vr2, 0
441 .endm
442 .macro  VUPLLH  vr1, vr2
443         VUPLL   \vr1, \vr2, 1
444 .endm
445 .macro  VUPLLF  vr1, vr2
446         VUPLL   \vr1, \vr2, 2
447 .endm
448 
449 /* VECTOR PERMUTE DOUBLEWORD IMMEDIATE */
450 .macro  VPDI    vr1, vr2, vr3, m4
451         VX_NUM  v1, \vr1
452         VX_NUM  v2, \vr2
453         VX_NUM  v3, \vr3
454         .word   0xE700 | ((v1&15) << 4) | (v2&15)
455         .word   ((v3&15) << 12)
456         MRXBOPC \m4, 0x84, v1, v2, v3
457 .endm
458 
459 /* VECTOR REPLICATE
    *
    * \imm2 selects the source element of \vr3; \m4 is the element size
    * (see the B/H/F/G wrappers below). */
460 .macro  VREP    vr1, vr3, imm2, m4
461         VX_NUM  v1, \vr1
462         VX_NUM  v3, \vr3
463         .word   0xE700 | ((v1&15) << 4) | (v3&15)
464         .word   \imm2
465         MRXBOPC \m4, 0x4D, v1, v3
466 .endm
467 .macro  VREPB   vr1, vr3, imm2
468         VREP    \vr1, \vr3, \imm2, 0
469 .endm
470 .macro  VREPH   vr1, vr3, imm2
471         VREP    \vr1, \vr3, \imm2, 1
472 .endm
473 .macro  VREPF   vr1, vr3, imm2
474         VREP    \vr1, \vr3, \imm2, 2
475 .endm
476 .macro  VREPG   vr1, vr3, imm2
477         VREP    \vr1, \vr3, \imm2, 3
478 .endm
479 
480 /* VECTOR MERGE HIGH */
481 .macro  VMRH    vr1, vr2, vr3, m4
482         VX_NUM  v1, \vr1
483         VX_NUM  v2, \vr2
484         VX_NUM  v3, \vr3
485         .word   0xE700 | ((v1&15) << 4) | (v2&15)
486         .word   ((v3&15) << 12)
            /* \m4 = element size, see B/H/F/G wrappers */
487         MRXBOPC \m4, 0x61, v1, v2, v3
488 .endm
489 .macro  VMRHB   vr1, vr2, vr3
490         VMRH    \vr1, \vr2, \vr3, 0
491 .endm
492 .macro  VMRHH   vr1, vr2, vr3
493         VMRH    \vr1, \vr2, \vr3, 1
494 .endm
495 .macro  VMRHF   vr1, vr2, vr3
496         VMRH    \vr1, \vr2, \vr3, 2
497 .endm
498 .macro  VMRHG   vr1, vr2, vr3
499         VMRH    \vr1, \vr2, \vr3, 3
500 .endm
501 
502 /* VECTOR MERGE LOW */
503 .macro  VMRL    vr1, vr2, vr3, m4
504         VX_NUM  v1, \vr1
505         VX_NUM  v2, \vr2
506         VX_NUM  v3, \vr3
507         .word   0xE700 | ((v1&15) << 4) | (v2&15)
508         .word   ((v3&15) << 12)
509         MRXBOPC \m4, 0x60, v1, v2, v3
510 .endm
511 .macro  VMRLB   vr1, vr2, vr3
512         VMRL    \vr1, \vr2, \vr3, 0
513 .endm
514 .macro  VMRLH   vr1, vr2, vr3
515         VMRL    \vr1, \vr2, \vr3, 1
516 .endm
517 .macro  VMRLF   vr1, vr2, vr3
518         VMRL    \vr1, \vr2, \vr3, 2
519 .endm
520 .macro  VMRLG   vr1, vr2, vr3
521         VMRL    \vr1, \vr2, \vr3, 3
522 .endm
523 
524 /* VECTOR LOAD WITH LENGTH */
525 .macro VLL      v, gr, disp, base
526         VX_NUM  v1, \v
527         GR_NUM  b2, \base
            /* \gr is encoded in the r3 field of the instruction */
528         GR_NUM  r3, \gr
529         .word   0xE700 | ((v1&15) << 4) | r3
530         .word   (b2 << 12) | (\disp)
531         MRXBOPC 0, 0x37, v1
532 .endm
533 
534 /* VECTOR STORE WITH LENGTH */
535 .macro VSTL     v, gr, disp, base
536         VX_NUM  v1, \v
537         GR_NUM  b2, \base
538         GR_NUM  r3, \gr
539         .word   0xE700 | ((v1&15) << 4) | r3
540         .word   (b2 << 12) | (\disp)
541         MRXBOPC 0, 0x3f, v1
542 .endm
543 
544 /* Vector integer instructions */
545 
546 /* VECTOR AND */
547 .macro  VN      vr1, vr2, vr3
548         VX_NUM  v1, \vr1
549         VX_NUM  v2, \vr2
550         VX_NUM  v3, \vr3
551         .word   0xE700 | ((v1&15) << 4) | (v2&15)
552         .word   ((v3&15) << 12)
553         MRXBOPC 0, 0x68, v1, v2, v3
554 .endm
555 
556 /* VECTOR CHECKSUM */
557 .macro VCKSM    vr1, vr2, vr3
558         VX_NUM  v1, \vr1
559         VX_NUM  v2, \vr2
560         VX_NUM  v3, \vr3
561         .word   0xE700 | ((v1&15) << 4) | (v2&15)
562         .word   ((v3&15) << 12)
563         MRXBOPC 0, 0x66, v1, v2, v3
564 .endm
565 
566 /* VECTOR EXCLUSIVE OR */
567 .macro  VX      vr1, vr2, vr3
568         VX_NUM  v1, \vr1
569         VX_NUM  v2, \vr2
570         VX_NUM  v3, \vr3
571         .word   0xE700 | ((v1&15) << 4) | (v2&15)
572         .word   ((v3&15) << 12)
573         MRXBOPC 0, 0x6D, v1, v2, v3
574 .endm
575 
576 /* VECTOR GALOIS FIELD MULTIPLY SUM */
577 .macro  VGFM    vr1, vr2, vr3, m4
578         VX_NUM  v1, \vr1
579         VX_NUM  v2, \vr2
580         VX_NUM  v3, \vr3
581         .word   0xE700 | ((v1&15) << 4) | (v2&15)
582         .word   ((v3&15) << 12)
583         MRXBOPC \m4, 0xB4, v1, v2, v3
584 .endm
585 .macro  VGFMB   vr1, vr2, vr3
586         VGFM    \vr1, \vr2, \vr3, 0
587 .endm
588 .macro  VGFMH   vr1, vr2, vr3
589         VGFM    \vr1, \vr2, \vr3, 1
590 .endm
591 .macro  VGFMF   vr1, vr2, vr3
592         VGFM    \vr1, \vr2, \vr3, 2
593 .endm
594 .macro  VGFMG   vr1, vr2, vr3
595         VGFM    \vr1, \vr2, \vr3, 3
596 .endm
597 
598 /* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
599 .macro  VGFMA   vr1, vr2, vr3, vr4, m5
600         VX_NUM  v1, \vr1
601         VX_NUM  v2, \vr2
602         VX_NUM  v3, \vr3
603         VX_NUM  v4, \vr4
604         .word   0xE700 | ((v1&15) << 4) | (v2&15)
            /* second halfword carries v3 (bits 12-15) and m5 (<< 8) */
605         .word   ((v3&15) << 12) | (\m5 << 8)
606         MRXBOPC (v4&15), 0xBC, v1, v2, v3, v4
607 .endm
608 .macro  VGFMAB  vr1, vr2, vr3, vr4
609         VGFMA   \vr1, \vr2, \vr3, \vr4, 0
610 .endm
611 .macro  VGFMAH  vr1, vr2, vr3, vr4
612         VGFMA   \vr1, \vr2, \vr3, \vr4, 1
613 .endm
614 .macro  VGFMAF  vr1, vr2, vr3, vr4
615         VGFMA   \vr1, \vr2, \vr3, \vr4, 2
616 .endm
617 .macro  VGFMAG  vr1, vr2, vr3, vr4
618         VGFMA   \vr1, \vr2, \vr3, \vr4, 3
619 .endm
620 
621 /* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
622 .macro  VSRLB   vr1, vr2, vr3
623         VX_NUM  v1, \vr1
624         VX_NUM  v2, \vr2
625         VX_NUM  v3, \vr3
626         .word   0xE700 | ((v1&15) << 4) | (v2&15)
627         .word   ((v3&15) << 12)
628         MRXBOPC 0, 0x7D, v1, v2, v3
629 .endm
630 
631 /* VECTOR REPLICATE IMMEDIATE
    *
    * @vr1:        Destination vector register
    * @imm2:       Immediate replicated into each element
    * @m3:         Element size control (0..3, see the wrappers below)
    */
632 .macro  VREPI   vr1, imm2, m3
633         VX_NUM  v1, \vr1
634         .word   0xE700 | ((v1&15) << 4)
635         .word   \imm2
636         MRXBOPC \m3, 0x45, v1
637 .endm
638 .macro  VREPIB  vr1, imm2
639         VREPI   \vr1, \imm2, 0
640 .endm
641 .macro  VREPIH  vr1, imm2
642         VREPI   \vr1, \imm2, 1
643 .endm
644 .macro  VREPIF  vr1, imm2
645         VREPI   \vr1, \imm2, 2
646 .endm
    /* Fix: VREPIG previously expanded VREP (VECTOR REPLICATE from a
     * register, opcode 0x4D, four parameters) instead of VREPI; it now
     * emits VECTOR REPLICATE IMMEDIATE with m3 = 3, matching its
     * VREPIB/VREPIH/VREPIF siblings. */
647 .macro  VREPIG  vr1, imm2
648         VREPI   \vr1, \imm2, 3
649 .endm
650 
651 /* VECTOR ADD */
652 .macro  VA      vr1, vr2, vr3, m4
653         VX_NUM  v1, \vr1
654         VX_NUM  v2, \vr2
655         VX_NUM  v3, \vr3
656         .word   0xE700 | ((v1&15) << 4) | (v2&15)
657         .word   ((v3&15) << 12)
            /* \m4 = element size; 4 (VAQ wrapper) selects the quadword form */
658         MRXBOPC \m4, 0xF3, v1, v2, v3
659 .endm
660 .macro  VAB     vr1, vr2, vr3
661         VA      \vr1, \vr2, \vr3, 0
662 .endm
663 .macro  VAH     vr1, vr2, vr3
664         VA      \vr1, \vr2, \vr3, 1
665 .endm
666 .macro  VAF     vr1, vr2, vr3
667         VA      \vr1, \vr2, \vr3, 2
668 .endm
669 .macro  VAG     vr1, vr2, vr3
670         VA      \vr1, \vr2, \vr3, 3
671 .endm
672 .macro  VAQ     vr1, vr2, vr3
673         VA      \vr1, \vr2, \vr3, 4
674 .endm
675 
676 /* VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */
677 .macro  VESRAV  vr1, vr2, vr3, m4
678         VX_NUM  v1, \vr1
679         VX_NUM  v2, \vr2
680         VX_NUM  v3, \vr3
681         .word   0xE700 | ((v1&15) << 4) | (v2&15)
682         .word   ((v3&15) << 12)
683         MRXBOPC \m4, 0x7A, v1, v2, v3
684 .endm
685 
686 .macro  VESRAVB vr1, vr2, vr3
687         VESRAV  \vr1, \vr2, \vr3, 0
688 .endm
689 .macro  VESRAVH vr1, vr2, vr3
690         VESRAV  \vr1, \vr2, \vr3, 1
691 .endm
692 .macro  VESRAVF vr1, vr2, vr3
693         VESRAV  \vr1, \vr2, \vr3, 2
694 .endm
695 .macro  VESRAVG vr1, vr2, vr3
696         VESRAV  \vr1, \vr2, \vr3, 3
697 .endm
698 
699 /* VECTOR ELEMENT ROTATE LEFT LOGICAL */
700 .macro  VERLL   vr1, vr3, disp, base="%r0", m4
701         VX_NUM  v1, \vr1
702         VX_NUM  v3, \vr3
            /* rotate count comes from storage-operand address b2 + disp */
703         GR_NUM  b2, \base
704         .word   0xE700 | ((v1&15) << 4) | (v3&15)
705         .word   (b2 << 12) | (\disp)
706         MRXBOPC \m4, 0x33, v1, v3
707 .endm
708 .macro  VERLLB  vr1, vr3, disp, base="%r0"
709         VERLL   \vr1, \vr3, \disp, \base, 0
710 .endm
711 .macro  VERLLH  vr1, vr3, disp, base="%r0"
712         VERLL   \vr1, \vr3, \disp, \base, 1
713 .endm
714 .macro  VERLLF  vr1, vr3, disp, base="%r0"
715         VERLL   \vr1, \vr3, \disp, \base, 2
716 .endm
717 .macro  VERLLG  vr1, vr3, disp, base="%r0"
718         VERLL   \vr1, \vr3, \disp, \base, 3
719 .endm
720 
721 /* VECTOR SHIFT LEFT DOUBLE BY BYTE */
722 .macro  VSLDB   vr1, vr2, vr3, imm4
723         VX_NUM  v1, \vr1
724         VX_NUM  v2, \vr2
725         VX_NUM  v3, \vr3
726         .word   0xE700 | ((v1&15) << 4) | (v2&15)
            /* \imm4 occupies the low bits of the second halfword */
727         .word   ((v3&15) << 12) | (\imm4)
728         MRXBOPC 0, 0x77, v1, v2, v3
729 .endm
730 
731 #endif  /* __ASSEMBLY__ */
732 #endif  /* __ASM_S390_FPU_INSN_ASM_H */
733 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php