Linux/arch/arm64/include/asm/el2_setup.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro __init_el2_sctlr
        mov_q   x0, INIT_SCTLR_EL2_MMU_OFF
        msr     sctlr_el2, x0
        isb
.endm

.macro __init_el2_hcrx
        mrs     x0, id_aa64mmfr1_el1
        ubfx    x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
        cbz     x0, .Lskip_hcrx_\@
        mov_q   x0, HCRX_HOST_FLAGS
        msr_s   SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm
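
/*
 * The mrs/ubfx/cbz sequence above is the "probe an ID-register field,
 * skip if zero" pattern used throughout this file. A minimal C sketch
 * of the same check follows, assuming ID_AA64MMFR1_EL1.HCX sits at
 * bits [43:40]; the constant and helper names here are illustrative,
 * not kernel API.
 */
#if 0 /* illustrative C sketch, never built */
#include <stdint.h>
#include <stdio.h>

#define HCX_SHIFT 40                    /* assumed ID_AA64MMFR1_EL1.HCX */

static uint64_t field4(uint64_t reg, unsigned int shift)
{
        return (reg >> shift) & 0xf;    /* ubfx reg, reg, #shift, #4 */
}

int main(void)
{
        uint64_t id_aa64mmfr1 = 1ULL << HCX_SHIFT;      /* pretend FEAT_HCX */

        if (field4(id_aa64mmfr1, HCX_SHIFT))
                printf("FEAT_HCX present: program HCRX_EL2\n");
        else
                printf("no FEAT_HCX: leave HCRX_EL2 alone\n");
        return 0;
}
#endif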

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
        mrs     \tmp, hcr_el2
        and     \tmp, \tmp, #HCR_E2H
        cbz     \tmp, \fail
.endm
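
/*
 * A one-line C equivalent of the check above, assuming HCR_EL2.E2H is
 * bit 34 (its architectural position); purely a sketch.
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

static bool is_hvhe(uint64_t hcr_el2)
{
        return hcr_el2 & (1ULL << 34);  /* HCR_EL2.E2H */
}
#endif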

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured in the later stage of boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
        mov     x0, #3                          // Enable EL1 physical timers
        __check_hvhe .LnVHE_\@, x1
        lsl     x0, x0, #10
.LnVHE_\@:
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
.endm
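
/*
 * A compact C model of the value written to CNTHCTL_EL2 above. The bit
 * positions follow the comment: the two EL1 enables sit at bits 1:0 in
 * the nVHE layout and at bits 11:10 in the E2H==1 (CNTKCTL_EL1-style)
 * layout. A sketch, with "vhe" standing in for the HCR_EL2.E2H test
 * done by __check_hvhe:
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t cnthctl_init(bool vhe)
{
        uint64_t v = 3;         /* EL1 physical timer/counter enables */

        if (vhe)
                v <<= 10;       /* same enables at bits 11:10 with E2H==1 */
        return v;
}

int main(void)
{
        printf("nVHE CNTHCTL_EL2 %#llx, VHE %#llx\n",
               (unsigned long long)cnthctl_init(false),
               (unsigned long long)cnthctl_init(true));
        return 0;
}
#endif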

.macro __init_el2_debug
        mrs     x1, id_aa64dfr0_el1
        ubfx    x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
        cmp     x0, #ID_AA64DFR0_EL1_PMUVer_NI
        ccmp    x0, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
        b.eq    .Lskip_pmu_\@                   // Skip if no PMU present or IMP_DEF
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
.Lskip_pmu_\@:
        csel    x2, xzr, x0, eq                 // all PMU counters from EL1

        /* Statistical profiling */
        ubfx    x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
        cbz     x0, .Lskip_spe_\@               // Skip if SPE not present

        mrs_s   x0, SYS_PMBIDR_EL1              // If SPE available at EL2,
        and     x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
        cbnz    x0, .Lskip_spe_el2_\@           // then permit sampling of physical
        mov     x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
                      1 << PMSCR_EL2_PA_SHIFT)
        msr_s   SYS_PMSCR_EL2, x0               // addresses and physical counter
.Lskip_spe_el2_\@:
        mov     x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
        orr     x2, x2, x0                      // If we don't have VHE, then
                                                // use EL1&0 translation.

.Lskip_spe_\@:
        /* Trace buffer */
        ubfx    x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
        cbz     x0, .Lskip_trace_\@             // Skip if TraceBuffer is not present

        mrs_s   x0, SYS_TRBIDR_EL1
        and     x0, x0, TRBIDR_EL1_P
        cbnz    x0, .Lskip_trace_\@             // If TRBE is available at EL2

        mov     x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
        orr     x2, x2, x0                      // allow the EL1&0 translation
                                                // to own it.

.Lskip_trace_\@:
        msr     mdcr_el2, x2                    // Configure debug traps
.endm
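
/*
 * The macro above accumulates the MDCR_EL2 value in x2: HPMN is set from
 * PMCR_EL0.N so EL1 sees every PMU counter, and the E2PB/E2TB fields hand
 * the SPE and trace buffers to the EL1&0 translation regime when no
 * higher EL owns them. A rough C model, assuming HPMN occupies bits [4:0]
 * and E2PB/E2TB bits [13:12] and [25:24]; constants and helper names are
 * illustrative only:
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

#define MDCR_E2PB       (3ULL << 12)    /* EL1&0 owns the SPE buffer */
#define MDCR_E2TB       (3ULL << 24)    /* EL1&0 owns the trace buffer */

static uint64_t mdcr_init(bool pmu, unsigned int pmcr_n, bool spe, bool trbe)
{
        uint64_t v = 0;

        if (pmu)
                v |= pmcr_n & 0x1f;     /* HPMN: expose all counters to EL1 */
        if (spe)
                v |= MDCR_E2PB;
        if (trbe)
                v |= MDCR_E2TB;
        return v;
}
#endif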

/* LORegions */
.macro __init_el2_lor
        mrs     x1, id_aa64mmfr1_el1
        ubfx    x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
        cbz     x0, .Lskip_lor_\@
        msr_s   SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
        msr     vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
        mrs     x0, id_aa64pfr0_el1
        ubfx    x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
        cbz     x0, .Lskip_gicv3_\@

        mrs_s   x0, SYS_ICC_SRE_EL2
        orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
        orr     x0, x0, #ICC_SRE_EL2_ENABLE     // Set ICC_SRE_EL2.Enable==1
        msr_s   SYS_ICC_SRE_EL2, x0
        isb                                     // Make sure SRE is now set
        mrs_s   x0, SYS_ICC_SRE_EL2             // Read SRE back,
        tbz     x0, #0, .Lskip_gicv3_\@         // and check that it sticks
        msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
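
/*
 * The write/isb/read-back dance above exists because EL3 may keep
 * ICC_SRE_EL2.SRE RAZ/WI; if the bit doesn't stick, the GIC must be
 * driven through the memory-mapped interface instead. A sketch of the
 * pattern in C (fake_icc_sre stands in for the real register; SRE at
 * bit 0 and Enable at bit 3 follow the GICv3 spec):
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

#define ICC_SRE_SRE     (1ULL << 0)
#define ICC_SRE_ENABLE  (1ULL << 3)

static uint64_t fake_icc_sre;           /* pretend EL3 allows the write */

static bool enable_gicv3_sysregs(void)
{
        fake_icc_sre |= ICC_SRE_SRE | ICC_SRE_ENABLE;
        /* Read back: if SRE was RAZ/WI, give up on sysreg access. */
        return fake_icc_sre & ICC_SRE_SRE;
}
#endif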

.macro __init_el2_hstr
        msr     hstr_el2, xzr                   // Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
        mrs     x0, midr_el1
        mrs     x1, mpidr_el1
        msr     vpidr_el2, x0
        msr     vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
        __check_hvhe .LnVHE_\@, x1
        mov     x0, #CPACR_ELx_FPEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@
.LnVHE_\@:
        mov     x0, #0x33ff
        msr     cptr_el2, x0                    // Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm
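
/*
 * __init_el2_cptr illustrates a split that recurs below: with E2H set,
 * the register accessed through the CPTR_EL2 encoding follows the
 * CPACR_EL1 layout (enable bits such as FPEN), while the nVHE layout
 * uses trap bits (0x33ff is the nVHE default: RES1 bits plus values
 * that leave FP/SIMD untrapped). A hedged C sketch, assuming FPEN at
 * bits 21:20:
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

#define CPACR_FPEN      (3ULL << 20)    /* FP/SIMD not trapped (CPACR layout) */
#define CPTR_NVHE_INIT  0x33ffULL       /* nVHE default written above */

static uint64_t cptr_init(bool hvhe)
{
        return hvhe ? CPACR_FPEN : CPTR_NVHE_INIT;
}
#endif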

/* Disable any fine grained traps */
.macro __init_el2_fgt
        mrs     x1, id_aa64mmfr0_el1
        ubfx    x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
        cbz     x1, .Lskip_fgt_\@

        mov     x0, xzr
        mrs     x1, id_aa64dfr0_el1
        ubfx    x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
        cmp     x1, #3
        b.lt    .Lset_debug_fgt_\@
        /* Disable PMSNEVFR_EL1 read and write traps */
        orr     x0, x0, #(1 << 62)

.Lset_debug_fgt_\@:
        msr_s   SYS_HDFGRTR_EL2, x0
        msr_s   SYS_HDFGWTR_EL2, x0

        mov     x0, xzr
        mrs     x1, id_aa64pfr1_el1
        ubfx    x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
        cbz     x1, .Lset_pie_fgt_\@

        /* Disable nVHE traps of TPIDR2 and SMPRI */
        orr     x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
        orr     x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK

.Lset_pie_fgt_\@:
        mrs_s   x1, SYS_ID_AA64MMFR3_EL1
        ubfx    x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
        cbz     x1, .Lset_fgt_\@

        /* Disable trapping of PIR_EL1 / PIRE0_EL1 */
        orr     x0, x0, #HFGxTR_EL2_nPIR_EL1
        orr     x0, x0, #HFGxTR_EL2_nPIRE0_EL1

.Lset_fgt_\@:
        msr_s   SYS_HFGRTR_EL2, x0
        msr_s   SYS_HFGWTR_EL2, x0
        msr_s   SYS_HFGITR_EL2, xzr

        mrs     x1, id_aa64pfr0_el1             // AMU traps UNDEF without AMU
        ubfx    x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
        cbz     x1, .Lskip_fgt_\@

        msr_s   SYS_HAFGRTR_EL2, xzr
.Lskip_fgt_\@:
.endm
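
/*
 * The FGT registers mix positive trap bits with negative ("n"-prefixed)
 * bits, where a *set* negative bit disables a trap; the macro above
 * therefore sets the n* bits only when the corresponding feature is
 * actually present. A hedged C model of the HFGxTR value (the bit
 * positions below are placeholders, not the architectural ones):
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

#define nSMPRI_EL1      (1ULL << 54)    /* placeholder position */
#define nTPIDR2_EL0     (1ULL << 55)    /* placeholder position */
#define nPIRE0_EL1      (1ULL << 57)    /* placeholder position */
#define nPIR_EL1        (1ULL << 58)    /* placeholder position */

static uint64_t hfgxtr_init(bool sme, bool s1pie)
{
        uint64_t v = 0;

        if (sme)
                v |= nSMPRI_EL1 | nTPIDR2_EL0;  /* don't trap SME regs */
        if (s1pie)
                v |= nPIR_EL1 | nPIRE0_EL1;     /* don't trap PIE regs */
        return v;
}
#endif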

.macro __init_el2_nvhe_prepare_eret
        mov     x0, #INIT_PSTATE_EL1
        msr     spsr_el2, x0
.endm

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
        __init_el2_sctlr
        __init_el2_hcrx
        __init_el2_timers
        __init_el2_debug
        __init_el2_lor
        __init_el2_stage2
        __init_el2_gicv3
        __init_el2_hstr
        __init_el2_nvhe_idregs
        __init_el2_cptr
        __init_el2_fgt
.endm

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expects tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
        ubfx    \tmp1, \tmp1, #\fld, #\width
        cbz     \tmp1, \fail

        adr_l   \tmp1, \idreg\()_override
        ldr     \tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
        ldr     \tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
        ubfx    \tmp2, \tmp2, #\fld, #\width
        ubfx    \tmp1, \tmp1, #\fld, #\width
        cmp     \tmp1, xzr
        and     \tmp2, \tmp2, \tmp1
        csinv   \tmp2, \tmp2, xzr, ne
        cbnz    \tmp2, \pass
        b       \fail
.endm
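
/*
 * In C terms, __check_override passes when the hardware advertises the
 * feature and any early command-line override (the per-register val/mask
 * pair loaded above) does not force it off: a zero override mask means
 * "trust the hardware", which is what the csinv producing all-ones
 * expresses. A sketch, with illustrative names:
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

struct ftr_override {                   /* the val/mask pair loaded above */
        uint64_t val;
        uint64_t mask;
};

static bool check_override(uint64_t hw_reg, unsigned int fld,
                           unsigned int width, const struct ftr_override *ovr)
{
        uint64_t fmask = (1ULL << width) - 1;
        uint64_t val, mask;

        if (!((hw_reg >> fld) & fmask))
                return false;           /* hardware lacks the feature */

        val  = (ovr->val  >> fld) & fmask;
        mask = (ovr->mask >> fld) & fmask;
        if (!mask)
                return true;            /* no override: trust the hardware */
        return (val & mask) != 0;       /* honour the override */
}
#endif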

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
        mrs     \tmp1, \idreg\()_el1
        __check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
        ldr_l   \tmp, \idreg\()_el1_sys_val
        ubfx    \tmp, \tmp, #\fld, #\width
        cbnz    \tmp, \pass
        b       \fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
        __check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif

.macro finalise_el2_state
        check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:  /* SVE register access */
        __check_hvhe .Lcptr_nvhe_\@, x1

        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SVE traps
        orr     x0, x0, #CPACR_ELx_ZEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
        mrs     x0, cptr_el2                    // Disable SVE traps
        bic     x0, x0, #CPTR_EL2_TZ
        msr     cptr_el2, x0
.Lskip_set_cptr_\@:
        isb
        mov     x1, #ZCR_ELx_LEN_MASK           // SVE: Enable full vector
        msr_s   SYS_ZCR_EL2, x1                 // length for EL1.

.Lskip_sve_\@:
        check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:  /* SME register access and priority mapping */
        __check_hvhe .Lcptr_nvhe_sme_\@, x1

        // (h)VHE case
        mrs     x0, cpacr_el1                   // Disable SME traps
        orr     x0, x0, #CPACR_ELx_SMEN
        msr     cpacr_el1, x0
        b       .Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@: // nVHE case
        mrs     x0, cptr_el2                    // Disable SME traps
        bic     x0, x0, #CPTR_EL2_TSM
        msr     cptr_el2, x0
.Lskip_set_cptr_sme_\@:
        isb

        mrs     x1, sctlr_el2
        orr     x1, x1, #SCTLR_ELx_ENTP2        // Disable TPIDR2 traps
        msr     sctlr_el2, x1
        isb

        mov     x0, #0                          // SMCR controls

        // Full FP in SM?
        mrs_s   x1, SYS_ID_AA64SMFR0_EL1
        __check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
        orr     x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

        // ZT0 available?
        mrs_s   x1, SYS_ID_AA64SMFR0_EL1
        __check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
        orr     x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

        orr     x0, x0, #SMCR_ELx_LEN_MASK      // Enable full SME vector
        msr_s   SYS_SMCR_EL2, x0                // length for EL1.

        mrs_s   x1, SYS_SMIDR_EL1               // Priority mapping supported?
        ubfx    x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
        cbz     x1, .Lskip_sme_\@

        msr_s   SYS_SMPRIMAP_EL2, xzr           // Make all priorities equal
.Lskip_sme_\@:
.endm
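
/*
 * finalise_el2_state repeats the VHE/nVHE split for SVE and SME: the
 * (h)VHE path sets enable bits in the CPACR_EL1 layout, the nVHE path
 * clears trap bits in the CPTR_EL2 layout, and then ZCR_EL2/SMCR_EL2
 * are opened up to the maximum vector length. A minimal C sketch of
 * the SVE half, assuming the architectural positions ZEN at bits 17:16
 * and TZ at bit 8:
 */
#if 0 /* illustrative C sketch, never built */
#include <stdbool.h>
#include <stdint.h>

#define CPACR_ZEN       (3ULL << 16)    /* SVE enabled (CPACR layout) */
#define CPTR_TZ         (1ULL << 8)     /* SVE trapped (nVHE CPTR layout) */

static uint64_t disable_sve_traps(bool hvhe, uint64_t reg)
{
        return hvhe ? (reg | CPACR_ZEN)         /* orr, as in the hVHE path */
                    : (reg & ~CPTR_TZ);         /* bic, as in the nVHE path */
}
#endif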

#endif /* __ARM_KVM_INIT_H__ */
