~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm/mm/proc-v7-bugs.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 #include <linux/arm-smccc.h>
  3 #include <linux/kernel.h>
  4 #include <linux/smp.h>
  5 
  6 #include <asm/cp15.h>
  7 #include <asm/cputype.h>
  8 #include <asm/proc-fns.h>
  9 #include <asm/spectre.h>
 10 #include <asm/system_misc.h>
 11 
 12 #ifdef CONFIG_ARM_PSCI
 13 static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
 14 {
 15         struct arm_smccc_res res;
 16 
 17         arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 18                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 19 
 20         switch ((int)res.a0) {
 21         case SMCCC_RET_SUCCESS:
 22                 return SPECTRE_MITIGATED;
 23 
 24         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
 25                 return SPECTRE_UNAFFECTED;
 26 
 27         default:
 28                 return SPECTRE_VULNERABLE;
 29         }
 30 }
 31 #else
 32 static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
 33 {
 34         return SPECTRE_VULNERABLE;
 35 }
 36 #endif
 37 
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/* Per-CPU hook that performs this CPU's branch-predictor hardening. */
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

/*
 * switch_mm() variants provided elsewhere; the smc/hvc ones are installed
 * into cpu_do_switch_mm below when a firmware-based workaround is chosen.
 */
extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
 45 
/* Invalidate all branch predictor entries (BPIALL). */
static void harden_branch_predictor_bpiall(void)
{
	write_sysreg(0, BPIALL);
}
 50 
/*
 * Invalidate the entire instruction cache (ICIALLU); presumably this also
 * clobbers predictor state on the cores that select this method — see
 * cpu_v7_spectre_v2_init().
 */
static void harden_branch_predictor_iciallu(void)
{
	write_sysreg(0, ICIALLU);
}
 55 
/* Invoke the firmware's ARCH_WORKAROUND_1 via an SMC call. */
static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
 60 
/* Invoke the hypervisor's ARCH_WORKAROUND_1 via an HVC call. */
static void __maybe_unused call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
 65 
 66 static unsigned int spectre_v2_install_workaround(unsigned int method)
 67 {
 68         const char *spectre_v2_method = NULL;
 69         int cpu = smp_processor_id();
 70 
 71         if (per_cpu(harden_branch_predictor_fn, cpu))
 72                 return SPECTRE_MITIGATED;
 73 
 74         switch (method) {
 75         case SPECTRE_V2_METHOD_BPIALL:
 76                 per_cpu(harden_branch_predictor_fn, cpu) =
 77                         harden_branch_predictor_bpiall;
 78                 spectre_v2_method = "BPIALL";
 79                 break;
 80 
 81         case SPECTRE_V2_METHOD_ICIALLU:
 82                 per_cpu(harden_branch_predictor_fn, cpu) =
 83                         harden_branch_predictor_iciallu;
 84                 spectre_v2_method = "ICIALLU";
 85                 break;
 86 
 87         case SPECTRE_V2_METHOD_HVC:
 88                 per_cpu(harden_branch_predictor_fn, cpu) =
 89                         call_hvc_arch_workaround_1;
 90                 cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 91                 spectre_v2_method = "hypervisor";
 92                 break;
 93 
 94         case SPECTRE_V2_METHOD_SMC:
 95                 per_cpu(harden_branch_predictor_fn, cpu) =
 96                         call_smc_arch_workaround_1;
 97                 cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 98                 spectre_v2_method = "firmware";
 99                 break;
100         }
101 
102         if (spectre_v2_method)
103                 pr_info("CPU%u: Spectre v2: using %s workaround\n",
104                         smp_processor_id(), spectre_v2_method);
105 
106         return SPECTRE_MITIGATED;
107 }
108 #else
/* Hardening compiled out: report once and leave the CPU vulnerable. */
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
	pr_info_once("Spectre V2: workarounds disabled by configuration\n");

	return SPECTRE_VULNERABLE;
}
115 #endif
116 
/*
 * Classify the running CPU's Spectre v2 exposure by part number, pick a
 * mitigation method, install it, and record the combined result via
 * spectre_v2_update_state().
 */
static void cpu_v7_spectre_v2_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	/* Cores mitigated by invalidating the branch predictor. */
	case ARM_CPU_PART_CORTEX_A8:
	case ARM_CPU_PART_CORTEX_A9:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	/* Cores that use I-cache invalidation instead of BPIALL. */
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_ICIALLU;
		break;

	case ARM_CPU_PART_BRAHMA_B53:
		/* Requires no workaround */
		state = SPECTRE_UNAFFECTED;
		break;

	default:
		/* Other ARM CPUs require no workaround */
		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
			state = SPECTRE_UNAFFECTED;
			break;
		}

		/*
		 * Unknown non-ARM parts fall through into the A57/A72 path
		 * below and defer to the firmware's answer.
		 */
		fallthrough;

	/* Cortex A57/A72 require firmware workaround */
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72:
		state = spectre_v2_get_cpu_fw_mitigation_state();
		if (state != SPECTRE_MITIGATED)
			break;

		/* Choose the call type matching the SMCCC conduit. */
		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			method = SPECTRE_V2_METHOD_HVC;
			break;

		case SMCCC_CONDUIT_SMC:
			method = SPECTRE_V2_METHOD_SMC;
			break;

		default:
			/* No usable conduit: the firmware call can't be made. */
			state = SPECTRE_VULNERABLE;
			break;
		}
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_v2_install_workaround(method);

	spectre_v2_update_state(state, method);
}
179 
180 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
181 static int spectre_bhb_method;
182 
183 static const char *spectre_bhb_method_name(int method)
184 {
185         switch (method) {
186         case SPECTRE_V2_METHOD_LOOP8:
187                 return "loop";
188 
189         case SPECTRE_V2_METHOD_BPIALL:
190                 return "BPIALL";
191 
192         default:
193                 return "unknown";
194         }
195 }
196 
197 static int spectre_bhb_install_workaround(int method)
198 {
199         if (spectre_bhb_method != method) {
200                 if (spectre_bhb_method) {
201                         pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
202                                smp_processor_id());
203 
204                         return SPECTRE_VULNERABLE;
205                 }
206 
207                 if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
208                         return SPECTRE_VULNERABLE;
209 
210                 spectre_bhb_method = method;
211 
212                 pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
213                         smp_processor_id(), spectre_bhb_method_name(method));
214         }
215 
216         return SPECTRE_MITIGATED;
217 }
218 #else
/* BHB hardening compiled out: always vulnerable. */
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
223 #endif
224 
225 static void cpu_v7_spectre_bhb_init(void)
226 {
227         unsigned int state, method = 0;
228 
229         switch (read_cpuid_part()) {
230         case ARM_CPU_PART_CORTEX_A15:
231         case ARM_CPU_PART_BRAHMA_B15:
232         case ARM_CPU_PART_CORTEX_A57:
233         case ARM_CPU_PART_CORTEX_A72:
234                 state = SPECTRE_MITIGATED;
235                 method = SPECTRE_V2_METHOD_LOOP8;
236                 break;
237 
238         case ARM_CPU_PART_CORTEX_A73:
239         case ARM_CPU_PART_CORTEX_A75:
240                 state = SPECTRE_MITIGATED;
241                 method = SPECTRE_V2_METHOD_BPIALL;
242                 break;
243 
244         default:
245                 state = SPECTRE_UNAFFECTED;
246                 break;
247         }
248 
249         if (state == SPECTRE_MITIGATED)
250                 state = spectre_bhb_install_workaround(method);
251 
252         spectre_v2_update_state(state, method);
253 }
254 
255 static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
256                                                   u32 mask, const char *msg)
257 {
258         u32 aux_cr;
259 
260         asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));
261 
262         if ((aux_cr & mask) != mask) {
263                 if (!*warned)
264                         pr_err("CPU%u: %s", smp_processor_id(), msg);
265                 *warned = true;
266                 return false;
267         }
268         return true;
269 }
270 
271 static DEFINE_PER_CPU(bool, spectre_warned);
272 
273 static bool check_spectre_auxcr(bool *warned, u32 bit)
274 {
275         return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
276                 cpu_v7_check_auxcr_set(warned, bit,
277                                        "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
278 }
279 
/*
 * Cortex-A8 bug init: enable Spectre v2 hardening only if firmware set
 * the IBE bit (ACTLR bit 6) on this CPU.
 */
void cpu_v7_ca8_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
		cpu_v7_spectre_v2_init();
}
285 
/*
 * Cortex-A15 bug init: Spectre v2 hardening is gated on the IBE bit
 * (ACTLR bit 0); the BHB workaround is applied unconditionally.
 */
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
292 
/* Default v7 bug init: no IBE check required on these parts. */
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}
298 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php