TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/arm64/abi/syscall-abi-asm.S

// SPDX-License-Identifier: GPL-2.0-only
// Copyright (C) 2021 ARM Limited.
//
// Assembly portion of the syscall ABI test

//
// Load values from memory into registers, invoke a syscall and save the
// register values back to memory for later checking.  The syscall to be
// invoked is configured in x8 of the input GPR data.
//
// x0:  SVE VL, 0 for FP only
// x1:  SME VL
//
//      GPRs:   gpr_in, gpr_out
//      FPRs:   fpr_in, fpr_out
//      Zn:     z_in, z_out
//      Pn:     p_in, p_out
//      FFR:    ffr_in, ffr_out
//      ZA:     za_in, za_out
//      SVCR:   svcr_in, svcr_out

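To make the calling convention above concrete, here is a minimal sketch of how a C harness might drive do_syscall. The buffer size, the use of __NR_getpid and the checking step are illustrative assumptions, not anything defined in this file, and a real harness would also have to provide the other in/out buffers (fpr, z, p, ffr, za, zt, svcr) named above since the assembly references all of them.

#include <stdint.h>
#include <sys/syscall.h>

#define NUM_GPR 31      /* illustrative: one 64-bit slot per GPR x0-x30 */

/* GPR buffers shared with the assembly; the other buffers are omitted here. */
uint64_t gpr_in[NUM_GPR], gpr_out[NUM_GPR];

void do_syscall(int sve_vl, int sme_vl);        /* provided by this file */

int main(void)
{
        gpr_in[8] = __NR_getpid;        /* x8 of the input GPR data selects the syscall */
        do_syscall(0, 0);               /* SVE VL 0, SME VL 0: plain FPSIMD only */
        /* ...compare gpr_out[] and the other *_out buffers against expectations... */
        return 0;
}
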
#include "syscall-abi.h"

.arch_extension sve

#define ID_AA64SMFR0_EL1_SMEver_SHIFT           56
#define ID_AA64SMFR0_EL1_SMEver_WIDTH           4

/*
 * LDR (vector to ZA array):
 *      LDR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _ldr_za nw, nxbase, offset=0
        .inst   0xe1000000                      \
                | (((\nw) & 3) << 13)           \
                | ((\nxbase) << 5)              \
                | ((\offset) & 7)
.endm

/*
 * STR (vector from ZA array):
 *      STR ZA[\nw, #\offset], [X\nxbase, #\offset, MUL VL]
 */
.macro _str_za nw, nxbase, offset=0
        .inst   0xe1200000                      \
                | (((\nw) & 3) << 13)           \
                | ((\nxbase) << 5)              \
                | ((\offset) & 7)
.endm

/*
 * LDR (ZT0)
 *
 *      LDR ZT0, nx
 */
.macro _ldr_zt nx
        .inst   0xe11f8000                      \
                | (((\nx) & 0x1f) << 5)
.endm

/*
 * STR (ZT0)
 *
 *      STR ZT0, nx
 */
.macro _str_zt nx
        .inst   0xe13f8000                      \
                | (((\nx) & 0x1f) << 5)
.endm

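Because these SME instructions are emitted by hand with .inst, it can help to see the encoding spelled out as plain arithmetic. A small C sketch of the word produced by _ldr_za follows; the field descriptions are informal labels matching the macro, not authoritative names.

#include <stdio.h>

/* Compute the word that "_ldr_za nw, nxbase, offset" emits via .inst above. */
static unsigned int ldr_za_inst(unsigned int nw, unsigned int nxbase, unsigned int offset)
{
        return 0xe1000000
                | ((nw & 3) << 13)      /* 2-bit vector select register (w12-w15) */
                | (nxbase << 5)         /* base address register Xn */
                | (offset & 7);         /* vector offset, scaled by MUL VL */
}

int main(void)
{
        /* "_ldr_za 12, 2" as used in the ZA load loop below */
        printf("0x%08x\n", ldr_za_inst(12, 2, 0));      /* prints 0xe1000040 */
        return 0;
}
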
.globl do_syscall
do_syscall:
        // Store callee saved registers x19-x29 (80 bytes) plus x0 and x1
        stp     x29, x30, [sp, #-112]!
        mov     x29, sp
        stp     x0, x1, [sp, #16]
        stp     x19, x20, [sp, #32]
        stp     x21, x22, [sp, #48]
        stp     x23, x24, [sp, #64]
        stp     x25, x26, [sp, #80]
        stp     x27, x28, [sp, #96]

        // Set SVCR if we're doing SME
        cbz     x1, 1f
        adrp    x2, svcr_in
        ldr     x2, [x2, :lo12:svcr_in]
        msr     S3_3_C4_C2_2, x2        // SVCR
1:

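For orientation while reading the SVCR checks that follow, the two relevant SVCR bits are sketched below in C. Architecturally SVCR.SM is bit 0 and SVCR.ZA is bit 1; SVCR_SM_SHIFT and SVCR_ZA_SHIFT themselves come from syscall-abi.h and are assumed here to name exactly these positions.

/* Architecturally, SVCR.SM is bit 0 and SVCR.ZA is bit 1. */
#define SVCR_SM (1UL << 0)      /* streaming SVE mode enabled */
#define SVCR_ZA (1UL << 1)      /* ZA (and, with SME2, ZT0) storage enabled */
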
        // Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR
        tbz     x2, #SVCR_ZA_SHIFT, 1f
        mov     w12, #0
        ldr     x2, =za_in
2:      _ldr_za 12, 2
        add     x2, x2, x1
        add     x12, x12, #1
        cmp     x1, x12
        bne     2b

        // ZT0
        mrs     x2, S3_0_C0_C4_5        // ID_AA64SMFR0_EL1
        ubfx    x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
                         #ID_AA64SMFR0_EL1_SMEver_WIDTH
        cbz     x2, 1f
        adrp    x2, zt_in
        add     x2, x2, :lo12:zt_in
        _ldr_zt 2
1:

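The ZA load loop above walks za_in one vector length per iteration, filling one ZA row each time. In rough C terms it does the following; load_za_row is a hypothetical stand-in for the _ldr_za encoding, and the row-per-VL-bytes layout is inferred from the loop rather than stated anywhere in this file.

#include <stdint.h>

/* Hypothetical helper standing in for "_ldr_za row, base": load one ZA row. */
void load_za_row(uint32_t row, const uint8_t *src);

/* ZA is a VL x VL byte matrix; sme_vl is the streaming VL in bytes (x1). */
void load_za(const uint8_t *za_in, uint64_t sme_vl)
{
        for (uint64_t row = 0; row < sme_vl; row++)
                load_za_row(row, za_in + row * sme_vl); /* one VL-byte row per iteration */
}
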
        // Load GPRs x8-x28, and save our SP/FP for later comparison
        ldr     x2, =gpr_in
        add     x2, x2, #64
        ldp     x8, x9, [x2], #16
        ldp     x10, x11, [x2], #16
        ldp     x12, x13, [x2], #16
        ldp     x14, x15, [x2], #16
        ldp     x16, x17, [x2], #16
        ldp     x18, x19, [x2], #16
        ldp     x20, x21, [x2], #16
        ldp     x22, x23, [x2], #16
        ldp     x24, x25, [x2], #16
        ldp     x26, x27, [x2], #16
        ldr     x28, [x2], #8
        str     x29, [x2], #8           // FP
        str     x30, [x2], #8           // LR

        // Load FPRs if we're doing neither SVE nor streaming SVE
        cbnz    x0, 1f
        ldr     x2, =svcr_in
        tbnz    x2, #SVCR_SM_SHIFT, 1f

        ldr     x2, =fpr_in
        ldp     q0, q1, [x2]
        ldp     q2, q3, [x2, #16 * 2]
        ldp     q4, q5, [x2, #16 * 4]
        ldp     q6, q7, [x2, #16 * 6]
        ldp     q8, q9, [x2, #16 * 8]
        ldp     q10, q11, [x2, #16 * 10]
        ldp     q12, q13, [x2, #16 * 12]
        ldp     q14, q15, [x2, #16 * 14]
        ldp     q16, q17, [x2, #16 * 16]
        ldp     q18, q19, [x2, #16 * 18]
        ldp     q20, q21, [x2, #16 * 20]
        ldp     q22, q23, [x2, #16 * 22]
        ldp     q24, q25, [x2, #16 * 24]
        ldp     q26, q27, [x2, #16 * 26]
        ldp     q28, q29, [x2, #16 * 28]
        ldp     q30, q31, [x2, #16 * 30]

        b       2f
1:

        // Load the SVE registers if we're doing SVE/SME

        ldr     x2, =z_in
        ldr     z0, [x2, #0, MUL VL]
        ldr     z1, [x2, #1, MUL VL]
        ldr     z2, [x2, #2, MUL VL]
        ldr     z3, [x2, #3, MUL VL]
        ldr     z4, [x2, #4, MUL VL]
        ldr     z5, [x2, #5, MUL VL]
        ldr     z6, [x2, #6, MUL VL]
        ldr     z7, [x2, #7, MUL VL]
        ldr     z8, [x2, #8, MUL VL]
        ldr     z9, [x2, #9, MUL VL]
        ldr     z10, [x2, #10, MUL VL]
        ldr     z11, [x2, #11, MUL VL]
        ldr     z12, [x2, #12, MUL VL]
        ldr     z13, [x2, #13, MUL VL]
        ldr     z14, [x2, #14, MUL VL]
        ldr     z15, [x2, #15, MUL VL]
        ldr     z16, [x2, #16, MUL VL]
        ldr     z17, [x2, #17, MUL VL]
        ldr     z18, [x2, #18, MUL VL]
        ldr     z19, [x2, #19, MUL VL]
        ldr     z20, [x2, #20, MUL VL]
        ldr     z21, [x2, #21, MUL VL]
        ldr     z22, [x2, #22, MUL VL]
        ldr     z23, [x2, #23, MUL VL]
        ldr     z24, [x2, #24, MUL VL]
        ldr     z25, [x2, #25, MUL VL]
        ldr     z26, [x2, #26, MUL VL]
        ldr     z27, [x2, #27, MUL VL]
        ldr     z28, [x2, #28, MUL VL]
        ldr     z29, [x2, #29, MUL VL]
        ldr     z30, [x2, #30, MUL VL]
        ldr     z31, [x2, #31, MUL VL]

        // Only set a non-zero FFR, test patterns must be zero since the
        // syscall should clear it - this lets us handle FA64.
        ldr     x2, =ffr_in
        ldr     p0, [x2]
        ldr     x2, [x2, #0]
        cbz     x2, 1f
        wrffr   p0.b
1:

        ldr     x2, =p_in
        ldr     p0, [x2, #0, MUL VL]
        ldr     p1, [x2, #1, MUL VL]
        ldr     p2, [x2, #2, MUL VL]
        ldr     p3, [x2, #3, MUL VL]
        ldr     p4, [x2, #4, MUL VL]
        ldr     p5, [x2, #5, MUL VL]
        ldr     p6, [x2, #6, MUL VL]
        ldr     p7, [x2, #7, MUL VL]
        ldr     p8, [x2, #8, MUL VL]
        ldr     p9, [x2, #9, MUL VL]
        ldr     p10, [x2, #10, MUL VL]
        ldr     p11, [x2, #11, MUL VL]
        ldr     p12, [x2, #12, MUL VL]
        ldr     p13, [x2, #13, MUL VL]
        ldr     p14, [x2, #14, MUL VL]
        ldr     p15, [x2, #15, MUL VL]
2:

        // Do the syscall
        svc     #0

        // Save GPRs x8-x30
        ldr     x2, =gpr_out
        add     x2, x2, #64
        stp     x8, x9, [x2], #16
        stp     x10, x11, [x2], #16
        stp     x12, x13, [x2], #16
        stp     x14, x15, [x2], #16
        stp     x16, x17, [x2], #16
        stp     x18, x19, [x2], #16
        stp     x20, x21, [x2], #16
        stp     x22, x23, [x2], #16
        stp     x24, x25, [x2], #16
        stp     x26, x27, [x2], #16
        stp     x28, x29, [x2], #16
        str     x30, [x2]

        // Restore x0 and x1 for feature checks
        ldp     x0, x1, [sp, #16]

        // Save FPSIMD state
        ldr     x2, =fpr_out
        stp     q0, q1, [x2]
        stp     q2, q3, [x2, #16 * 2]
        stp     q4, q5, [x2, #16 * 4]
        stp     q6, q7, [x2, #16 * 6]
        stp     q8, q9, [x2, #16 * 8]
        stp     q10, q11, [x2, #16 * 10]
        stp     q12, q13, [x2, #16 * 12]
        stp     q14, q15, [x2, #16 * 14]
        stp     q16, q17, [x2, #16 * 16]
        stp     q18, q19, [x2, #16 * 18]
        stp     q20, q21, [x2, #16 * 20]
        stp     q22, q23, [x2, #16 * 22]
        stp     q24, q25, [x2, #16 * 24]
        stp     q26, q27, [x2, #16 * 26]
        stp     q28, q29, [x2, #16 * 28]
        stp     q30, q31, [x2, #16 * 30]

        // Save SVCR if we're doing SME
        cbz     x1, 1f
        mrs     x2, S3_3_C4_C2_2        // SVCR
        adrp    x3, svcr_out
        str     x2, [x3, :lo12:svcr_out]
1:

        // Save ZA if it's enabled - uses x12 as scratch due to SME STR
        tbz     x2, #SVCR_ZA_SHIFT, 1f
        mov     w12, #0
        ldr     x2, =za_out
2:      _str_za 12, 2
        add     x2, x2, x1
        add     x12, x12, #1
        cmp     x1, x12
        bne     2b

        // ZT0
        mrs     x2, S3_0_C0_C4_5        // ID_AA64SMFR0_EL1
        ubfx    x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
                        #ID_AA64SMFR0_EL1_SMEver_WIDTH
        cbz     x2, 1f
        adrp    x2, zt_out
        add     x2, x2, :lo12:zt_out
        _str_zt 2
1:

        // Save the SVE state if we have some
        cbz     x0, 1f

        ldr     x2, =z_out
        str     z0, [x2, #0, MUL VL]
        str     z1, [x2, #1, MUL VL]
        str     z2, [x2, #2, MUL VL]
        str     z3, [x2, #3, MUL VL]
        str     z4, [x2, #4, MUL VL]
        str     z5, [x2, #5, MUL VL]
        str     z6, [x2, #6, MUL VL]
        str     z7, [x2, #7, MUL VL]
        str     z8, [x2, #8, MUL VL]
        str     z9, [x2, #9, MUL VL]
        str     z10, [x2, #10, MUL VL]
        str     z11, [x2, #11, MUL VL]
        str     z12, [x2, #12, MUL VL]
        str     z13, [x2, #13, MUL VL]
        str     z14, [x2, #14, MUL VL]
        str     z15, [x2, #15, MUL VL]
        str     z16, [x2, #16, MUL VL]
        str     z17, [x2, #17, MUL VL]
        str     z18, [x2, #18, MUL VL]
        str     z19, [x2, #19, MUL VL]
        str     z20, [x2, #20, MUL VL]
        str     z21, [x2, #21, MUL VL]
        str     z22, [x2, #22, MUL VL]
        str     z23, [x2, #23, MUL VL]
        str     z24, [x2, #24, MUL VL]
        str     z25, [x2, #25, MUL VL]
        str     z26, [x2, #26, MUL VL]
        str     z27, [x2, #27, MUL VL]
        str     z28, [x2, #28, MUL VL]
        str     z29, [x2, #29, MUL VL]
        str     z30, [x2, #30, MUL VL]
        str     z31, [x2, #31, MUL VL]

        ldr     x2, =p_out
        str     p0, [x2, #0, MUL VL]
        str     p1, [x2, #1, MUL VL]
        str     p2, [x2, #2, MUL VL]
        str     p3, [x2, #3, MUL VL]
        str     p4, [x2, #4, MUL VL]
        str     p5, [x2, #5, MUL VL]
        str     p6, [x2, #6, MUL VL]
        str     p7, [x2, #7, MUL VL]
        str     p8, [x2, #8, MUL VL]
        str     p9, [x2, #9, MUL VL]
        str     p10, [x2, #10, MUL VL]
        str     p11, [x2, #11, MUL VL]
        str     p12, [x2, #12, MUL VL]
        str     p13, [x2, #13, MUL VL]
        str     p14, [x2, #14, MUL VL]
        str     p15, [x2, #15, MUL VL]

        // Only save FFR if we wrote a value for SME
        ldr     x2, =ffr_in
        ldr     x2, [x2, #0]
        cbz     x2, 1f
        ldr     x2, =ffr_out
        rdffr   p0.b
        str     p0, [x2]
1:

        // Restore callee saved registers x19-x30
        ldp     x19, x20, [sp, #32]
        ldp     x21, x22, [sp, #48]
        ldp     x23, x24, [sp, #64]
        ldp     x25, x26, [sp, #80]
        ldp     x27, x28, [sp, #96]
        ldp     x29, x30, [sp], #112

        // Clear SVCR if we were doing SME so future tests don't have ZA
        cbz     x1, 1f
        msr     S3_3_C4_C2_2, xzr       // SVCR
1:

        ret