
TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/mce/core.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Machine check handler.
  4  *
  5  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  6  * Rest from unknown author(s).
  7  * 2004 Andi Kleen. Rewrote most of it.
  8  * Copyright 2008 Intel Corporation
  9  * Author: Andi Kleen
 10  */
 11 
 12 #include <linux/thread_info.h>
 13 #include <linux/capability.h>
 14 #include <linux/miscdevice.h>
 15 #include <linux/ratelimit.h>
 16 #include <linux/rcupdate.h>
 17 #include <linux/kobject.h>
 18 #include <linux/uaccess.h>
 19 #include <linux/kdebug.h>
 20 #include <linux/kernel.h>
 21 #include <linux/percpu.h>
 22 #include <linux/string.h>
 23 #include <linux/device.h>
 24 #include <linux/syscore_ops.h>
 25 #include <linux/delay.h>
 26 #include <linux/ctype.h>
 27 #include <linux/sched.h>
 28 #include <linux/sysfs.h>
 29 #include <linux/types.h>
 30 #include <linux/slab.h>
 31 #include <linux/init.h>
 32 #include <linux/kmod.h>
 33 #include <linux/poll.h>
 34 #include <linux/nmi.h>
 35 #include <linux/cpu.h>
 36 #include <linux/ras.h>
 37 #include <linux/smp.h>
 38 #include <linux/fs.h>
 39 #include <linux/mm.h>
 40 #include <linux/debugfs.h>
 41 #include <linux/irq_work.h>
 42 #include <linux/export.h>
 43 #include <linux/set_memory.h>
 44 #include <linux/sync_core.h>
 45 #include <linux/task_work.h>
 46 #include <linux/hardirq.h>
 47 #include <linux/kexec.h>
 48 
 49 #include <asm/fred.h>
 50 #include <asm/cpu_device_id.h>
 51 #include <asm/processor.h>
 52 #include <asm/traps.h>
 53 #include <asm/tlbflush.h>
 54 #include <asm/mce.h>
 55 #include <asm/msr.h>
 56 #include <asm/reboot.h>
 57 #include <asm/tdx.h>
 58 
 59 #include "internal.h"
 60 
 61 /* sysfs synchronization */
 62 static DEFINE_MUTEX(mce_sysfs_mutex);
 63 
 64 #define CREATE_TRACE_POINTS
 65 #include <trace/events/mce.h>
 66 
 67 #define SPINUNIT                100     /* 100ns */
 68 
 69 DEFINE_PER_CPU(unsigned, mce_exception_count);
 70 
 71 DEFINE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
 72 
 73 DEFINE_PER_CPU_READ_MOSTLY(struct mce_bank[MAX_NR_BANKS], mce_banks_array);
 74 
 75 #define ATTR_LEN               16
 76 /* One object for each MCE bank, shared by all CPUs */
 77 struct mce_bank_dev {
 78         struct device_attribute attr;                   /* device attribute */
 79         char                    attrname[ATTR_LEN];     /* attribute name */
 80         u8                      bank;                   /* bank number */
 81 };
 82 static struct mce_bank_dev mce_bank_devs[MAX_NR_BANKS];
 83 
 84 struct mce_vendor_flags mce_flags __read_mostly;
 85 
 86 struct mca_config mca_cfg __read_mostly = {
 87         .bootlog  = -1,
 88         .monarch_timeout = -1
 89 };
 90 
 91 static DEFINE_PER_CPU(struct mce, mces_seen);
 92 static unsigned long mce_need_notify;
 93 
 94 /*
 95  * MCA banks polled by the periodic polling timer for corrected events.
 96  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
 97  */
 98 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 99         [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
100 };
101 
102 /*
103  * MCA banks controlled through firmware first for corrected errors.
104  * This is a global list of banks for which we won't enable CMCI and we
105  * won't poll. Firmware controls these banks and is responsible for
106  * reporting corrected errors through GHES. Uncorrected/recoverable
107  * errors are still notified through a machine check.
108  */
109 mce_banks_t mce_banks_ce_disabled;
110 
111 static struct work_struct mce_work;
112 static struct irq_work mce_irq_work;
113 
114 /*
115  * CPU/chipset specific EDAC code can register a notifier call here to print
116  * MCE errors in a human-readable form.
117  */
118 BLOCKING_NOTIFIER_HEAD(x86_mce_decoder_chain);
119 
120 /* Do initial initialization of a struct mce */
121 void mce_setup(struct mce *m)
122 {
123         memset(m, 0, sizeof(struct mce));
124         m->cpu = m->extcpu = smp_processor_id();
125         /* need the internal __ version to avoid deadlocks */
126         m->time = __ktime_get_real_seconds();
127         m->cpuvendor = boot_cpu_data.x86_vendor;
128         m->cpuid = cpuid_eax(1);
129         m->socketid = cpu_data(m->extcpu).topo.pkg_id;
130         m->apicid = cpu_data(m->extcpu).topo.initial_apicid;
131         m->mcgcap = __rdmsr(MSR_IA32_MCG_CAP);
132         m->ppin = cpu_data(m->extcpu).ppin;
133         m->microcode = boot_cpu_data.microcode;
134 }
135 
136 DEFINE_PER_CPU(struct mce, injectm);
137 EXPORT_PER_CPU_SYMBOL_GPL(injectm);
138 
139 void mce_log(struct mce *m)
140 {
141         if (!mce_gen_pool_add(m))
142                 irq_work_queue(&mce_irq_work);
143 }
144 EXPORT_SYMBOL_GPL(mce_log);
145 
146 void mce_register_decode_chain(struct notifier_block *nb)
147 {
148         if (WARN_ON(nb->priority < MCE_PRIO_LOWEST ||
149                     nb->priority > MCE_PRIO_HIGHEST))
150                 return;
151 
152         blocking_notifier_chain_register(&x86_mce_decoder_chain, nb);
153 }
154 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
155 
156 void mce_unregister_decode_chain(struct notifier_block *nb)
157 {
158         blocking_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
159 }
160 EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
161 
162 static void __print_mce(struct mce *m)
163 {
164         pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
165                  m->extcpu,
166                  (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
167                  m->mcgstatus, m->bank, m->status);
168 
169         if (m->ip) {
170                 pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
171                         !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
172                         m->cs, m->ip);
173 
174                 if (m->cs == __KERNEL_CS)
175                         pr_cont("{%pS}", (void *)(unsigned long)m->ip);
176                 pr_cont("\n");
177         }
178 
179         pr_emerg(HW_ERR "TSC %llx ", m->tsc);
180         if (m->addr)
181                 pr_cont("ADDR %llx ", m->addr);
182         if (m->misc)
183                 pr_cont("MISC %llx ", m->misc);
184         if (m->ppin)
185                 pr_cont("PPIN %llx ", m->ppin);
186 
187         if (mce_flags.smca) {
188                 if (m->synd)
189                         pr_cont("SYND %llx ", m->synd);
190                 if (m->ipid)
191                         pr_cont("IPID %llx ", m->ipid);
192         }
193 
194         pr_cont("\n");
195 
196         /*
197          * Note this output is parsed by external tools and old fields
198          * should not be changed.
199          */
200         pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
201                 m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
202                 m->microcode);
203 }
204 
205 static void print_mce(struct mce *m)
206 {
207         __print_mce(m);
208 
209         if (m->cpuvendor != X86_VENDOR_AMD && m->cpuvendor != X86_VENDOR_HYGON)
210                 pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
211 }
212 
213 #define PANIC_TIMEOUT 5 /* 5 seconds */
214 
215 static atomic_t mce_panicked;
216 
217 static int fake_panic;
218 static atomic_t mce_fake_panicked;
219 
220 /* Panic in progress. Enable interrupts and wait for final IPI */
221 static void wait_for_panic(void)
222 {
223         long timeout = PANIC_TIMEOUT*USEC_PER_SEC;
224 
225         preempt_disable();
226         local_irq_enable();
227         while (timeout-- > 0)
228                 udelay(1);
229         if (panic_timeout == 0)
230                 panic_timeout = mca_cfg.panic_timeout;
231         panic("Panicking machine check CPU died");
232 }
233 
234 static const char *mce_dump_aux_info(struct mce *m)
235 {
236         if (boot_cpu_has_bug(X86_BUG_TDX_PW_MCE))
237                 return tdx_dump_mce_info(m);
238 
239         return NULL;
240 }
241 
242 static noinstr void mce_panic(const char *msg, struct mce *final, char *exp)
243 {
244         struct llist_node *pending;
245         struct mce_evt_llist *l;
246         int apei_err = 0;
247         const char *memmsg;
248 
249         /*
250          * Allow instrumentation around external facilities usage. Not that it
251          * matters a whole lot since the machine is going to panic anyway.
252          */
253         instrumentation_begin();
254 
255         if (!fake_panic) {
256                 /*
257                  * Make sure only one CPU runs in machine check panic
258                  */
259                 if (atomic_inc_return(&mce_panicked) > 1)
260                         wait_for_panic();
261                 barrier();
262 
263                 bust_spinlocks(1);
264                 console_verbose();
265         } else {
266                 /* Don't log too much for fake panic */
267                 if (atomic_inc_return(&mce_fake_panicked) > 1)
268                         goto out;
269         }
270         pending = mce_gen_pool_prepare_records();
271         /* First print corrected ones that are still unlogged */
272         llist_for_each_entry(l, pending, llnode) {
273                 struct mce *m = &l->mce;
274                 if (!(m->status & MCI_STATUS_UC)) {
275                         print_mce(m);
276                         if (!apei_err)
277                                 apei_err = apei_write_mce(m);
278                 }
279         }
280         /* Now print uncorrected but with the final one last */
281         llist_for_each_entry(l, pending, llnode) {
282                 struct mce *m = &l->mce;
283                 if (!(m->status & MCI_STATUS_UC))
284                         continue;
285                 if (!final || mce_cmp(m, final)) {
286                         print_mce(m);
287                         if (!apei_err)
288                                 apei_err = apei_write_mce(m);
289                 }
290         }
291         if (final) {
292                 print_mce(final);
293                 if (!apei_err)
294                         apei_err = apei_write_mce(final);
295         }
296         if (exp)
297                 pr_emerg(HW_ERR "Machine check: %s\n", exp);
298 
299         memmsg = mce_dump_aux_info(final);
300         if (memmsg)
301                 pr_emerg(HW_ERR "Machine check: %s\n", memmsg);
302 
303         if (!fake_panic) {
304                 if (panic_timeout == 0)
305                         panic_timeout = mca_cfg.panic_timeout;
306 
307                 /*
308                  * Kdump skips the poisoned page in order to avoid
309                  * touching the error bits again. Poison the page even
310                  * if the error is fatal and the machine is about to
311                  * panic.
312                  */
313                 if (kexec_crash_loaded()) {
314                         if (final && (final->status & MCI_STATUS_ADDRV)) {
315                                 struct page *p;
316                                 p = pfn_to_online_page(final->addr >> PAGE_SHIFT);
317                                 if (p)
318                                         SetPageHWPoison(p);
319                         }
320                 }
321                 panic(msg);
322         } else
323                 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
324 
325 out:
326         instrumentation_end();
327 }
328 
329 /* Support code for software error injection */
330 
331 static int msr_to_offset(u32 msr)
332 {
333         unsigned bank = __this_cpu_read(injectm.bank);
334 
335         if (msr == mca_cfg.rip_msr)
336                 return offsetof(struct mce, ip);
337         if (msr == mca_msr_reg(bank, MCA_STATUS))
338                 return offsetof(struct mce, status);
339         if (msr == mca_msr_reg(bank, MCA_ADDR))
340                 return offsetof(struct mce, addr);
341         if (msr == mca_msr_reg(bank, MCA_MISC))
342                 return offsetof(struct mce, misc);
343         if (msr == MSR_IA32_MCG_STATUS)
344                 return offsetof(struct mce, mcgstatus);
345         return -1;
346 }
347 
348 void ex_handler_msr_mce(struct pt_regs *regs, bool wrmsr)
349 {
350         if (wrmsr) {
351                 pr_emerg("MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
352                          (unsigned int)regs->cx, (unsigned int)regs->dx, (unsigned int)regs->ax,
353                          regs->ip, (void *)regs->ip);
354         } else {
355                 pr_emerg("MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
356                          (unsigned int)regs->cx, regs->ip, (void *)regs->ip);
357         }
358 
359         show_stack_regs(regs);
360 
361         panic("MCA architectural violation!\n");
362 
363         while (true)
364                 cpu_relax();
365 }
366 
367 /* MSR access wrappers used for error injection */
368 noinstr u64 mce_rdmsrl(u32 msr)
369 {
370         DECLARE_ARGS(val, low, high);
371 
372         if (__this_cpu_read(injectm.finished)) {
373                 int offset;
374                 u64 ret;
375 
376                 instrumentation_begin();
377 
378                 offset = msr_to_offset(msr);
379                 if (offset < 0)
380                         ret = 0;
381                 else
382                         ret = *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
383 
384                 instrumentation_end();
385 
386                 return ret;
387         }
388 
389         /*
390          * RDMSR on MCA MSRs should not fault. If they do, this is very much an
391          * architectural violation and needs to be reported to hw vendor. Panic
392          * the box to not allow any further progress.
393          */
394         asm volatile("1: rdmsr\n"
395                      "2:\n"
396                      _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR_IN_MCE)
397                      : EAX_EDX_RET(val, low, high) : "c" (msr));
398 
399 
400         return EAX_EDX_VAL(val, low, high);
401 }
402 
403 static noinstr void mce_wrmsrl(u32 msr, u64 v)
404 {
405         u32 low, high;
406 
407         if (__this_cpu_read(injectm.finished)) {
408                 int offset;
409 
410                 instrumentation_begin();
411 
412                 offset = msr_to_offset(msr);
413                 if (offset >= 0)
414                         *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
415 
416                 instrumentation_end();
417 
418                 return;
419         }
420 
421         low  = (u32)v;
422         high = (u32)(v >> 32);
423 
424         /* See comment in mce_rdmsrl() */
425         asm volatile("1: wrmsr\n"
426                      "2:\n"
427                      _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR_IN_MCE)
428                      : : "c" (msr), "a"(low), "d" (high) : "memory");
429 }
430 
431 /*
432  * Collect all global (w.r.t. this processor) status about this machine
433  * check into our "mce" struct so that we can use it later to assess
434  * the severity of the problem as we read per-bank specific details.
435  */
436 static noinstr void mce_gather_info(struct mce *m, struct pt_regs *regs)
437 {
438         /*
439          * Enable instrumentation around mce_setup() which calls external
440          * facilities.
441          */
442         instrumentation_begin();
443         mce_setup(m);
444         instrumentation_end();
445 
446         m->mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
447         if (regs) {
448                 /*
449                  * Get the address of the instruction at the time of
450                  * the machine check error.
451                  */
452                 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
453                         m->ip = regs->ip;
454                         m->cs = regs->cs;
455 
456                         /*
457                          * When in VM86 mode make the cs look like ring 3
458                          * always. This is a lie, but it's better than passing
459                          * the additional vm86 bit around everywhere.
460                          */
461                         if (v8086_mode(regs))
462                                 m->cs |= 3;
463                 }
464                 /* Use accurate RIP reporting if available. */
465                 if (mca_cfg.rip_msr)
466                         m->ip = mce_rdmsrl(mca_cfg.rip_msr);
467         }
468 }
469 
470 int mce_available(struct cpuinfo_x86 *c)
471 {
472         if (mca_cfg.disabled)
473                 return 0;
474         return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
475 }
476 
477 static void mce_schedule_work(void)
478 {
479         if (!mce_gen_pool_empty())
480                 schedule_work(&mce_work);
481 }
482 
483 static void mce_irq_work_cb(struct irq_work *entry)
484 {
485         mce_schedule_work();
486 }
487 
488 bool mce_usable_address(struct mce *m)
489 {
490         if (!(m->status & MCI_STATUS_ADDRV))
491                 return false;
492 
493         switch (m->cpuvendor) {
494         case X86_VENDOR_AMD:
495                 return amd_mce_usable_address(m);
496 
497         case X86_VENDOR_INTEL:
498         case X86_VENDOR_ZHAOXIN:
499                 return intel_mce_usable_address(m);
500 
501         default:
502                 return true;
503         }
504 }
505 EXPORT_SYMBOL_GPL(mce_usable_address);
506 
507 bool mce_is_memory_error(struct mce *m)
508 {
509         switch (m->cpuvendor) {
510         case X86_VENDOR_AMD:
511         case X86_VENDOR_HYGON:
512                 return amd_mce_is_memory_error(m);
513 
514         case X86_VENDOR_INTEL:
515         case X86_VENDOR_ZHAOXIN:
516                 /*
517                  * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
518                  *
519                  * Bit 7 of the MCACOD field of IA32_MCi_STATUS is used for
520                  * indicating a memory error. Bit 8 is used for indicating a
521                  * cache hierarchy error. The combination of bit 2 and bit 3
 522                  * is used for indicating a `generic' cache hierarchy error.
523                  * But we can't just blindly check the above bits, because if
524                  * bit 11 is set, then it is a bus/interconnect error - and
 525                  * either way the above bits just give more detail on what
526                  * bus/interconnect error happened. Note that bit 12 can be
527                  * ignored, as it's the "filter" bit.
528                  */
529                 return (m->status & 0xef80) == BIT(7) ||
530                        (m->status & 0xef00) == BIT(8) ||
531                        (m->status & 0xeffc) == 0xc;
532 
533         default:
534                 return false;
535         }
536 }
537 EXPORT_SYMBOL_GPL(mce_is_memory_error);
538 
539 static bool whole_page(struct mce *m)
540 {
541         if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV))
542                 return true;
543 
544         return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT;
545 }
546 
547 bool mce_is_correctable(struct mce *m)
548 {
549         if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
550                 return false;
551 
552         if (m->cpuvendor == X86_VENDOR_HYGON && m->status & MCI_STATUS_DEFERRED)
553                 return false;
554 
555         if (m->status & MCI_STATUS_UC)
556                 return false;
557 
558         return true;
559 }
560 EXPORT_SYMBOL_GPL(mce_is_correctable);
561 
562 static int mce_early_notifier(struct notifier_block *nb, unsigned long val,
563                               void *data)
564 {
565         struct mce *m = (struct mce *)data;
566 
567         if (!m)
568                 return NOTIFY_DONE;
569 
570         /* Emit the trace record: */
571         trace_mce_record(m);
572 
573         set_bit(0, &mce_need_notify);
574 
575         mce_notify_irq();
576 
577         return NOTIFY_DONE;
578 }
579 
580 static struct notifier_block early_nb = {
581         .notifier_call  = mce_early_notifier,
582         .priority       = MCE_PRIO_EARLY,
583 };
584 
585 static int uc_decode_notifier(struct notifier_block *nb, unsigned long val,
586                               void *data)
587 {
588         struct mce *mce = (struct mce *)data;
589         unsigned long pfn;
590 
591         if (!mce || !mce_usable_address(mce))
592                 return NOTIFY_DONE;
593 
594         if (mce->severity != MCE_AO_SEVERITY &&
595             mce->severity != MCE_DEFERRED_SEVERITY)
596                 return NOTIFY_DONE;
597 
598         pfn = (mce->addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
599         if (!memory_failure(pfn, 0)) {
600                 set_mce_nospec(pfn);
601                 mce->kflags |= MCE_HANDLED_UC;
602         }
603 
604         return NOTIFY_OK;
605 }
606 
607 static struct notifier_block mce_uc_nb = {
608         .notifier_call  = uc_decode_notifier,
609         .priority       = MCE_PRIO_UC,
610 };
611 
612 static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
613                                 void *data)
614 {
615         struct mce *m = (struct mce *)data;
616 
617         if (!m)
618                 return NOTIFY_DONE;
619 
620         if (mca_cfg.print_all || !m->kflags)
621                 __print_mce(m);
622 
623         return NOTIFY_DONE;
624 }
625 
626 static struct notifier_block mce_default_nb = {
627         .notifier_call  = mce_default_notifier,
628         /* lowest prio, we want it to run last. */
629         .priority       = MCE_PRIO_LOWEST,
630 };
631 
632 /*
633  * Read ADDR and MISC registers.
634  */
635 static noinstr void mce_read_aux(struct mce *m, int i)
636 {
637         if (m->status & MCI_STATUS_MISCV)
638                 m->misc = mce_rdmsrl(mca_msr_reg(i, MCA_MISC));
639 
640         if (m->status & MCI_STATUS_ADDRV) {
641                 m->addr = mce_rdmsrl(mca_msr_reg(i, MCA_ADDR));
642 
643                 /*
644                  * Mask the reported address by the reported granularity.
645                  */
646                 if (mca_cfg.ser && (m->status & MCI_STATUS_MISCV)) {
647                         u8 shift = MCI_MISC_ADDR_LSB(m->misc);
648                         m->addr >>= shift;
649                         m->addr <<= shift;
650                 }
651 
652                 smca_extract_err_addr(m);
653         }
654 
655         if (mce_flags.smca) {
656                 m->ipid = mce_rdmsrl(MSR_AMD64_SMCA_MCx_IPID(i));
657 
658                 if (m->status & MCI_STATUS_SYNDV)
659                         m->synd = mce_rdmsrl(MSR_AMD64_SMCA_MCx_SYND(i));
660         }
661 }
662 
663 DEFINE_PER_CPU(unsigned, mce_poll_count);
664 
665 /*
666  * Poll for corrected events or events that happened before reset.
667  * Those are just logged through /dev/mcelog.
668  *
669  * This is executed in standard interrupt context.
670  *
 671  * Note: the spec recommends panicking for fatal unsignalled
 672  * errors here. However, this would be quite problematic --
 673  * we would need to reimplement the Monarch handling and
 674  * it would mess up the exclusion between the exception handler
 675  * and the poll handler -- so we skip this for now.
 676  * These cases should not happen anyway, or only when the CPU
 677  * is already totally confused. In this case it's likely it will
 678  * not fully execute the machine check handler either.
679  */
680 void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
681 {
682         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
683         struct mce m;
684         int i;
685 
686         this_cpu_inc(mce_poll_count);
687 
688         mce_gather_info(&m, NULL);
689 
690         if (flags & MCP_TIMESTAMP)
691                 m.tsc = rdtsc();
692 
693         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
694                 if (!mce_banks[i].ctl || !test_bit(i, *b))
695                         continue;
696 
697                 m.misc = 0;
698                 m.addr = 0;
699                 m.bank = i;
700 
701                 barrier();
702                 m.status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
703 
704                 /*
705                  * Update storm tracking here, before checking for the
706                  * MCI_STATUS_VAL bit. Valid corrected errors count
707                  * towards declaring, or maintaining, storm status. No
708                  * error in a bank counts towards avoiding, or ending,
709                  * storm status.
710                  */
711                 if (!mca_cfg.cmci_disabled)
712                         mce_track_storm(&m);
713 
714                 /* If this entry is not valid, ignore it */
715                 if (!(m.status & MCI_STATUS_VAL))
716                         continue;
717 
718                 /*
719                  * If we are logging everything (at CPU online) or this
720                  * is a corrected error, then we must log it.
721                  */
722                 if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
723                         goto log_it;
724 
725                 /*
726                  * Newer Intel systems that support software error
727                  * recovery need to make additional checks. Other
728                  * CPUs should skip over uncorrected errors, but log
729                  * everything else.
730                  */
731                 if (!mca_cfg.ser) {
732                         if (m.status & MCI_STATUS_UC)
733                                 continue;
734                         goto log_it;
735                 }
736 
737                 /* Log "not enabled" (speculative) errors */
738                 if (!(m.status & MCI_STATUS_EN))
739                         goto log_it;
740 
741                 /*
742                  * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
743                  * UC == 1 && PCC == 0 && S == 0
744                  */
745                 if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
746                         goto log_it;
747 
748                 /*
749                  * Skip anything else. Presumption is that our read of this
750                  * bank is racing with a machine check. Leave the log alone
751                  * for do_machine_check() to deal with it.
752                  */
753                 continue;
754 
755 log_it:
756                 if (flags & MCP_DONTLOG)
757                         goto clear_it;
758 
759                 mce_read_aux(&m, i);
760                 m.severity = mce_severity(&m, NULL, NULL, false);
761                 /*
762                  * Don't get the IP here because it's unlikely to
763                  * have anything to do with the actual error location.
764                  */
765 
766                 if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
767                         goto clear_it;
768 
769                 if (flags & MCP_QUEUE_LOG)
770                         mce_gen_pool_add(&m);
771                 else
772                         mce_log(&m);
773 
774 clear_it:
775                 /*
776                  * Clear state for this bank.
777                  */
778                 mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
779         }
780 
781         /*
782          * Don't clear MCG_STATUS here because it's only defined for
783          * exceptions.
784          */
785 
786         sync_core();
787 }
788 EXPORT_SYMBOL_GPL(machine_check_poll);
789 
790 /*
791  * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and
792  * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM
793  * Vol 3B Table 15-20). But this confuses both the code that determines
794  * whether the machine check occurred in kernel or user mode, and also
795  * the severity assessment code. Pretend that EIPV was set, and take the
796  * ip/cs values from the pt_regs that mce_gather_info() ignored earlier.
797  */
798 static __always_inline void
799 quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
800 {
801         if (bank != 0)
802                 return;
803         if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0)
804                 return;
805         if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC|
806                           MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV|
807                           MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR|
808                           MCACOD)) !=
809                          (MCI_STATUS_UC|MCI_STATUS_EN|
810                           MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S|
811                           MCI_STATUS_AR|MCACOD_INSTR))
812                 return;
813 
814         m->mcgstatus |= MCG_STATUS_EIPV;
815         m->ip = regs->ip;
816         m->cs = regs->cs;
817 }
818 
819 /*
820  * Disable fast string copy and return from the MCE handler upon the first SRAR
821  * MCE on bank 1 due to a CPU erratum on Intel Skylake/Cascade Lake/Cooper Lake
822  * CPUs.
823  * The fast string copy instructions ("REP; MOVS*") could consume an
824  * uncorrectable memory error in the cache line _right after_ the desired region
825  * to copy and raise an MCE with RIP pointing to the instruction _after_ the
826  * "REP; MOVS*".
827  * This mitigation addresses the issue completely with the caveat of performance
828  * degradation on the CPU affected. This is still better than the OS crashing on
829  * MCEs raised on an irrelevant process due to "REP; MOVS*" accesses from a
830  * kernel context (e.g., copy_page).
831  *
832  * Returns true when fast string copy on CPU has been disabled.
833  */
834 static noinstr bool quirk_skylake_repmov(void)
835 {
836         u64 mcgstatus   = mce_rdmsrl(MSR_IA32_MCG_STATUS);
837         u64 misc_enable = mce_rdmsrl(MSR_IA32_MISC_ENABLE);
838         u64 mc1_status;
839 
840         /*
841          * Apply the quirk only to local machine checks, i.e., no broadcast
842          * sync is needed.
843          */
844         if (!(mcgstatus & MCG_STATUS_LMCES) ||
845             !(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING))
846                 return false;
847 
848         mc1_status = mce_rdmsrl(MSR_IA32_MCx_STATUS(1));
849 
850         /* Check for a software-recoverable data fetch error. */
851         if ((mc1_status &
852              (MCI_STATUS_VAL | MCI_STATUS_OVER | MCI_STATUS_UC | MCI_STATUS_EN |
853               MCI_STATUS_ADDRV | MCI_STATUS_MISCV | MCI_STATUS_PCC |
854               MCI_STATUS_AR | MCI_STATUS_S)) ==
855              (MCI_STATUS_VAL |                   MCI_STATUS_UC | MCI_STATUS_EN |
856               MCI_STATUS_ADDRV | MCI_STATUS_MISCV |
857               MCI_STATUS_AR | MCI_STATUS_S)) {
858                 misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
859                 mce_wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
860                 mce_wrmsrl(MSR_IA32_MCx_STATUS(1), 0);
861 
862                 instrumentation_begin();
863                 pr_err_once("Erratum detected, disable fast string copy instructions.\n");
864                 instrumentation_end();
865 
866                 return true;
867         }
868 
869         return false;
870 }
871 
872 /*
873  * Some Zen-based Instruction Fetch Units set EIPV=RIPV=0 on poison consumption
874  * errors. This means mce_gather_info() will not save the "ip" and "cs" registers.
875  *
876  * However, the context is still valid, so save the "cs" register for later use.
877  *
878  * The "ip" register is truly unknown, so don't save it or fixup EIPV/RIPV.
879  *
880  * The Instruction Fetch Unit is at MCA bank 1 for all affected systems.
881  */
882 static __always_inline void quirk_zen_ifu(int bank, struct mce *m, struct pt_regs *regs)
883 {
884         if (bank != 1)
885                 return;
886         if (!(m->status & MCI_STATUS_POISON))
887                 return;
888 
889         m->cs = regs->cs;
890 }
891 
892 /*
893  * Do a quick check if any of the events requires a panic.
894  * This decides if we keep the events around or clear them.
895  */
896 static __always_inline int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
897                                           struct pt_regs *regs)
898 {
899         char *tmp = *msg;
900         int i;
901 
902         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
903                 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
904                 if (!(m->status & MCI_STATUS_VAL))
905                         continue;
906 
907                 arch___set_bit(i, validp);
908                 if (mce_flags.snb_ifu_quirk)
909                         quirk_sandybridge_ifu(i, m, regs);
910 
911                 if (mce_flags.zen_ifu_quirk)
912                         quirk_zen_ifu(i, m, regs);
913 
914                 m->bank = i;
915                 if (mce_severity(m, regs, &tmp, true) >= MCE_PANIC_SEVERITY) {
916                         mce_read_aux(m, i);
917                         *msg = tmp;
918                         return 1;
919                 }
920         }
921         return 0;
922 }
923 
924 /*
925  * Variable to establish order between CPUs while scanning.
926  * Each CPU spins initially until executing is equal to its number.
927  */
928 static atomic_t mce_executing;
929 
930 /*
931  * Defines order of CPUs on entry. First CPU becomes Monarch.
932  */
933 static atomic_t mce_callin;
934 
935 /*
936  * Track which CPUs entered the MCA broadcast synchronization and which did not, in
937  * order to print holdouts.
938  */
939 static cpumask_t mce_missing_cpus = CPU_MASK_ALL;
940 
941 /*
942  * Check if a timeout waiting for other CPUs happened.
943  */
944 static noinstr int mce_timed_out(u64 *t, const char *msg)
945 {
946         int ret = 0;
947 
948         /* Enable instrumentation around calls to external facilities */
949         instrumentation_begin();
950 
951         /*
952          * The others already did panic for some reason.
953          * Bail out like in a timeout.
954          * rmb() to tell the compiler that system_state
955          * might have been modified by someone else.
956          */
957         rmb();
958         if (atomic_read(&mce_panicked))
959                 wait_for_panic();
960         if (!mca_cfg.monarch_timeout)
961                 goto out;
962         if ((s64)*t < SPINUNIT) {
963                 if (cpumask_and(&mce_missing_cpus, cpu_online_mask, &mce_missing_cpus))
964                         pr_emerg("CPUs not responding to MCE broadcast (may include false positives): %*pbl\n",
965                                  cpumask_pr_args(&mce_missing_cpus));
966                 mce_panic(msg, NULL, NULL);
967 
968                 ret = 1;
969                 goto out;
970         }
971         *t -= SPINUNIT;
972 
973 out:
974         touch_nmi_watchdog();
975 
976         instrumentation_end();
977 
978         return ret;
979 }
980 
981 /*
982  * The Monarch's reign.  The Monarch is the CPU who entered
983  * the machine check handler first. It waits for the others to
984  * raise the exception too and then grades them. If any
985  * error is fatal, panic. Only then let the others continue.
986  *
987  * The other CPUs entering the MCE handler will be controlled by the
988  * Monarch. They are called Subjects.
989  *
990  * This way we prevent any potential data corruption in an unrecoverable case
991  * and also make sure that all CPUs' errors are always examined.
992  *
993  * This also detects the case of a machine check event coming from outer
994  * space (not detected by any CPU). In this case some external agent wants
995  * us to shut down, so panic too.
996  *
997  * The other CPUs might still decide to panic if the handler happens
998  * in an unrecoverable place, but in this case the system is in a semi-stable
999  * state and won't corrupt anything by itself. It's ok to let the others
1000  * continue for a bit first.
1001  *
1002  * All the spin loops have timeouts; when a timeout happens a CPU
1003  * typically elects itself to be Monarch.
1004  */
1005 static void mce_reign(void)
1006 {
1007         int cpu;
1008         struct mce *m = NULL;
1009         int global_worst = 0;
1010         char *msg = NULL;
1011 
1012         /*
1013          * This CPU is the Monarch and the other CPUs have run
1014          * through their handlers.
1015          * Grade the severity of the errors of all the CPUs.
1016          */
1017         for_each_possible_cpu(cpu) {
1018                 struct mce *mtmp = &per_cpu(mces_seen, cpu);
1019 
1020                 if (mtmp->severity > global_worst) {
1021                         global_worst = mtmp->severity;
1022                         m = &per_cpu(mces_seen, cpu);
1023                 }
1024         }
1025 
1026         /*
1027          * Cannot recover? Panic here then.
1028          * This dumps all the mces in the log buffer and stops the
1029          * other CPUs.
1030          */
1031         if (m && global_worst >= MCE_PANIC_SEVERITY) {
1032                 /* call mce_severity() to get "msg" for panic */
1033                 mce_severity(m, NULL, &msg, true);
1034                 mce_panic("Fatal machine check", m, msg);
1035         }
1036 
1037         /*
1038          * For UC somewhere we let the CPU that detects it handle it.
1039          * We must also let the others continue, otherwise the handling
1040          * CPU could deadlock on a lock.
1041          */
1042 
1043         /*
1044          * No machine check event found. Must be some external
1045          * source or one CPU is hung. Panic.
1046          */
1047         if (global_worst <= MCE_KEEP_SEVERITY)
1048                 mce_panic("Fatal machine check from unknown source", NULL, NULL);
1049 
1050         /*
1051          * Now clear all the mces_seen so that they don't reappear on
1052          * the next mce.
1053          */
1054         for_each_possible_cpu(cpu)
1055                 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce));
1056 }
1057 
1058 static atomic_t global_nwo;
1059 
1060 /*
1061  * Start of Monarch synchronization. This waits until all CPUs have
1062  * entered the exception handler and then determines if any of them
1063  * saw a fatal event that requires panic. Then it executes them
1064  * in the entry order.
1065  * TBD double check parallel CPU hotunplug
1066  */
1067 static noinstr int mce_start(int *no_way_out)
1068 {
1069         u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1070         int order, ret = -1;
1071 
1072         if (!timeout)
1073                 return ret;
1074 
1075         raw_atomic_add(*no_way_out, &global_nwo);
1076         /*
1077          * Rely on the implied barrier below, such that global_nwo
1078          * is updated before mce_callin.
1079          */
1080         order = raw_atomic_inc_return(&mce_callin);
1081         arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
1082 
1083         /* Enable instrumentation around calls to external facilities */
1084         instrumentation_begin();
1085 
1086         /*
1087          * Wait for everyone.
1088          */
1089         while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
1090                 if (mce_timed_out(&timeout,
1091                                   "Timeout: Not all CPUs entered broadcast exception handler")) {
1092                         raw_atomic_set(&global_nwo, 0);
1093                         goto out;
1094                 }
1095                 ndelay(SPINUNIT);
1096         }
1097 
1098         /*
1099          * mce_callin should be read before global_nwo
1100          */
1101         smp_rmb();
1102 
1103         if (order == 1) {
1104                 /*
1105                  * Monarch: Starts executing now, the others wait.
1106                  */
1107                 raw_atomic_set(&mce_executing, 1);
1108         } else {
1109                 /*
1110                  * Subject: Now start the scanning loop one by one in
1111                  * the original callin order.
1112                  * This way when there are any shared banks it will be
1113                  * only seen by one CPU before cleared, avoiding duplicates.
1114                  */
1115                 while (raw_atomic_read(&mce_executing) < order) {
1116                         if (mce_timed_out(&timeout,
1117                                           "Timeout: Subject CPUs unable to finish machine check processing")) {
1118                                 raw_atomic_set(&global_nwo, 0);
1119                                 goto out;
1120                         }
1121                         ndelay(SPINUNIT);
1122                 }
1123         }
1124 
1125         /*
1126          * Cache the global no_way_out state.
1127          */
1128         *no_way_out = raw_atomic_read(&global_nwo);
1129 
1130         ret = order;
1131 
1132 out:
1133         instrumentation_end();
1134 
1135         return ret;
1136 }
1137 
1138 /*
1139  * Synchronize between CPUs after main scanning loop.
1140  * This invokes the bulk of the Monarch processing.
1141  */
1142 static noinstr int mce_end(int order)
1143 {
1144         u64 timeout = (u64)mca_cfg.monarch_timeout * NSEC_PER_USEC;
1145         int ret = -1;
1146 
1147         /* Allow instrumentation around external facilities. */
1148         instrumentation_begin();
1149 
1150         if (!timeout)
1151                 goto reset;
1152         if (order < 0)
1153                 goto reset;
1154 
1155         /*
1156          * Allow others to run.
1157          */
1158         atomic_inc(&mce_executing);
1159 
1160         if (order == 1) {
1161                 /*
1162                  * Monarch: Wait for everyone to go through their scanning
1163                  * loops.
1164                  */
1165                 while (atomic_read(&mce_executing) <= num_online_cpus()) {
1166                         if (mce_timed_out(&timeout,
1167                                           "Timeout: Monarch CPU unable to finish machine check processing"))
1168                                 goto reset;
1169                         ndelay(SPINUNIT);
1170                 }
1171 
1172                 mce_reign();
1173                 barrier();
1174                 ret = 0;
1175         } else {
1176                 /*
1177                  * Subject: Wait for Monarch to finish.
1178                  */
1179                 while (atomic_read(&mce_executing) != 0) {
1180                         if (mce_timed_out(&timeout,
1181                                           "Timeout: Monarch CPU did not finish machine check processing"))
1182                                 goto reset;
1183                         ndelay(SPINUNIT);
1184                 }
1185 
1186                 /*
1187                  * Don't reset anything. That's done by the Monarch.
1188                  */
1189                 ret = 0;
1190                 goto out;
1191         }
1192 
1193         /*
1194          * Reset all global state.
1195          */
1196 reset:
1197         atomic_set(&global_nwo, 0);
1198         atomic_set(&mce_callin, 0);
1199         cpumask_setall(&mce_missing_cpus);
1200         barrier();
1201 
1202         /*
1203          * Let others run again.
1204          */
1205         atomic_set(&mce_executing, 0);
1206 
1207 out:
1208         instrumentation_end();
1209 
1210         return ret;
1211 }
1212 
1213 static __always_inline void mce_clear_state(unsigned long *toclear)
1214 {
1215         int i;
1216 
1217         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1218                 if (arch_test_bit(i, toclear))
1219                         mce_wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
1220         }
1221 }
1222 
1223 /*
1224  * Cases where we avoid rendezvous handler timeout:
1225  * 1) If this CPU is offline.
1226  *
1227  * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
1228  *  skip those CPUs which remain looping in the 1st kernel - see
1229  *  crash_nmi_callback().
1230  *
1231  * Note: there still is a small window between kexec-ing and the new,
1232  * kdump kernel establishing a new #MC handler where a broadcasted MCE
1233  * might not get handled properly.
1234  */
1235 static noinstr bool mce_check_crashing_cpu(void)
1236 {
1237         unsigned int cpu = smp_processor_id();
1238 
1239         if (arch_cpu_is_offline(cpu) ||
1240             (crashing_cpu != -1 && crashing_cpu != cpu)) {
1241                 u64 mcgstatus;
1242 
1243                 mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS);
1244 
1245                 if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) {
1246                         if (mcgstatus & MCG_STATUS_LMCES)
1247                                 return false;
1248                 }
1249 
1250                 if (mcgstatus & MCG_STATUS_RIPV) {
1251                         __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
1252                         return true;
1253                 }
1254         }
1255         return false;
1256 }
1257 
1258 static __always_inline int
1259 __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *final,
1260                 unsigned long *toclear, unsigned long *valid_banks, int no_way_out,
1261                 int *worst)
1262 {
1263         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1264         struct mca_config *cfg = &mca_cfg;
1265         int severity, i, taint = 0;
1266 
1267         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1268                 arch___clear_bit(i, toclear);
1269                 if (!arch_test_bit(i, valid_banks))
1270                         continue;
1271 
1272                 if (!mce_banks[i].ctl)
1273                         continue;
1274 
1275                 m->misc = 0;
1276                 m->addr = 0;
1277                 m->bank = i;
1278 
1279                 m->status = mce_rdmsrl(mca_msr_reg(i, MCA_STATUS));
1280                 if (!(m->status & MCI_STATUS_VAL))
1281                         continue;
1282 
1283                 /*
1284                  * Corrected or non-signaled errors are handled by
1285                  * machine_check_poll(). Leave them alone, unless this panics.
1286                  */
1287                 if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
1288                         !no_way_out)
1289                         continue;
1290 
1291                 /* Set taint even when machine check was not enabled. */
1292                 taint++;
1293 
1294                 severity = mce_severity(m, regs, NULL, true);
1295 
1296                 /*
1297                  * If the machine check is for the corrected/deferred handler, don't
1298                  * touch it unless we're panicking.
1299                  */
1300                 if ((severity == MCE_KEEP_SEVERITY ||
1301                      severity == MCE_UCNA_SEVERITY) && !no_way_out)
1302                         continue;
1303 
1304                 arch___set_bit(i, toclear);
1305 
1306                 /* Machine check event was not enabled. Clear, but ignore. */
1307                 if (severity == MCE_NO_SEVERITY)
1308                         continue;
1309 
1310                 mce_read_aux(m, i);
1311 
1312                 /* assuming valid severity level != 0 */
1313                 m->severity = severity;
1314 
1315                 /*
1316                  * Enable instrumentation around the mce_log() call which is
1317                  * done in #MC context, where instrumentation is disabled.
1318                  */
1319                 instrumentation_begin();
1320                 mce_log(m);
1321                 instrumentation_end();
1322 
1323                 if (severity > *worst) {
1324                         *final = *m;
1325                         *worst = severity;
1326                 }
1327         }
1328 
1329         /* mce_clear_state will clear *final, save locally for use later */
1330         *m = *final;
1331 
1332         return taint;
1333 }
1334 
1335 static void kill_me_now(struct callback_head *ch)
1336 {
1337         struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
1338 
1339         p->mce_count = 0;
1340         force_sig(SIGBUS);
1341 }
1342 
1343 static void kill_me_maybe(struct callback_head *cb)
1344 {
1345         struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1346         int flags = MF_ACTION_REQUIRED;
1347         unsigned long pfn;
1348         int ret;
1349 
1350         p->mce_count = 0;
1351         pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
1352 
1353         if (!p->mce_ripv)
1354                 flags |= MF_MUST_KILL;
1355 
1356         pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
1357         ret = memory_failure(pfn, flags);
1358         if (!ret) {
1359                 set_mce_nospec(pfn);
1360                 sync_core();
1361                 return;
1362         }
1363 
1364         /*
1365          * -EHWPOISON from memory_failure() means that it already sent SIGBUS
1366          * to the current process with the proper error info.
1367          * -EOPNOTSUPP means hwpoison_filter() filtered the error event.
1368          *
1369          * In both cases, no further processing is required.
1370          */
1371         if (ret == -EHWPOISON || ret == -EOPNOTSUPP)
1372                 return;
1373 
1374         pr_err("Memory error not recovered");
1375         kill_me_now(cb);
1376 }
1377 
1378 static void kill_me_never(struct callback_head *cb)
1379 {
1380         struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
1381         unsigned long pfn;
1382 
1383         p->mce_count = 0;
1384         pr_err("Kernel accessed poison in user space at %llx\n", p->mce_addr);
1385         pfn = (p->mce_addr & MCI_ADDR_PHYSADDR) >> PAGE_SHIFT;
1386         if (!memory_failure(pfn, 0))
1387                 set_mce_nospec(pfn);
1388 }
1389 
1390 static void queue_task_work(struct mce *m, char *msg, void (*func)(struct callback_head *))
1391 {
1392         int count = ++current->mce_count;
1393 
1394         /* First call, save all the details */
1395         if (count == 1) {
1396                 current->mce_addr = m->addr;
1397                 current->mce_kflags = m->kflags;
1398                 current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
1399                 current->mce_whole_page = whole_page(m);
1400                 current->mce_kill_me.func = func;
1401         }
1402 
1403         /* Ten is likely overkill. Don't expect more than two faults before task_work() */
1404         if (count > 10)
1405                 mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
1406 
1407         /* Second or later call, make sure page address matches the one from first call */
1408         if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
1409                 mce_panic("Consecutive machine checks to different user pages", m, msg);
1410 
1411         /* Do not call task_work_add() more than once */
1412         if (count > 1)
1413                 return;
1414 
1415         task_work_add(current, &current->mce_kill_me, TWA_RESUME);
1416 }
1417 
1418 /* Handle unconfigured int18 (should never happen) */
1419 static noinstr void unexpected_machine_check(struct pt_regs *regs)
1420 {
1421         instrumentation_begin();
1422         pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
1423                smp_processor_id());
1424         instrumentation_end();
1425 }
1426 
1427 /*
1428  * The actual machine check handler. This only handles real exceptions when
1429  * something got corrupted coming in through int 18.
1430  *
1431  * This is executed in #MC context not subject to normal locking rules.
1432  * This implies that most kernel services cannot be safely used. Don't even
1433  * think about putting a printk in there!
1434  *
1435  * On Intel systems this is entered on all CPUs in parallel through
1436  * MCE broadcast. However some CPUs might be broken beyond repair,
1437  * so always be careful when synchronizing with others.
1438  *
1439  * Tracing and kprobes are disabled: if we interrupted a kernel context
1440  * with IF=1, we need to minimize stack usage.  There are also recursion
1441  * issues: if the machine check was due to a failure of the memory
1442  * backing the user stack, tracing that reads the user stack will cause
1443  * potentially infinite recursion.
1444  *
1445  * Currently, the #MC handler calls out to a number of external facilities
1446  * and, therefore, allows instrumentation around them. The optimal thing to
1447  * have would be to do the absolutely minimal work required in #MC context
1448  * and have instrumentation disabled only around that. Further processing can
1449  * then happen in process context where instrumentation is allowed. Achieving
1450  * that requires careful auditing and modifications. Until then, the code
1451  * allows instrumentation temporarily, where required.
1452  */
1453 noinstr void do_machine_check(struct pt_regs *regs)
1454 {
1455         int worst = 0, order, no_way_out, kill_current_task, lmce, taint = 0;
1456         DECLARE_BITMAP(valid_banks, MAX_NR_BANKS) = { 0 };
1457         DECLARE_BITMAP(toclear, MAX_NR_BANKS) = { 0 };
1458         struct mce m, *final;
1459         char *msg = NULL;
1460 
1461         if (unlikely(mce_flags.p5))
1462                 return pentium_machine_check(regs);
1463         else if (unlikely(mce_flags.winchip))
1464                 return winchip_machine_check(regs);
1465         else if (unlikely(!mca_cfg.initialized))
1466                 return unexpected_machine_check(regs);
1467 
1468         if (mce_flags.skx_repmov_quirk && quirk_skylake_repmov())
1469                 goto clear;
1470 
1471         /*
1472          * Establish sequential order between the CPUs entering the machine
1473          * check handler.
1474          */
1475         order = -1;
1476 
1477         /*
1478          * If no_way_out gets set, there is no safe way to recover from this
1479          * MCE.
1480          */
1481         no_way_out = 0;
1482 
1483         /*
1484          * If kill_current_task is not set, there might be a way to recover from this
1485          * error.
1486          */
1487         kill_current_task = 0;
1488 
1489         /*
1490          * MCEs are always local on AMD. The same is determined by MCG_STATUS_LMCES
1491          * on Intel.
1492          */
1493         lmce = 1;
1494 
1495         this_cpu_inc(mce_exception_count);
1496 
1497         mce_gather_info(&m, regs);
1498         m.tsc = rdtsc();
1499 
1500         final = this_cpu_ptr(&mces_seen);
1501         *final = m;
1502 
1503         no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs);
1504 
1505         barrier();
1506 
1507         /*
1508          * When there is no restart IP we might need to kill or panic.
1509          * Assume the worst for now, but if we find the
1510          * severity is MCE_AR_SEVERITY we have other options.
1511          */
1512         if (!(m.mcgstatus & MCG_STATUS_RIPV))
1513                 kill_current_task = 1;
1514         /*
1515          * Check if this MCE is signaled to only this logical processor,
1516          * on Intel, Zhaoxin only.
1517          */
1518         if (m.cpuvendor == X86_VENDOR_INTEL ||
1519             m.cpuvendor == X86_VENDOR_ZHAOXIN)
1520                 lmce = m.mcgstatus & MCG_STATUS_LMCES;
1521 
1522         /*
1523          * Local machine check may already know that we have to panic.
1524          * Broadcast machine check begins rendezvous in mce_start().
1525          * Go through all banks in exclusion of the other CPUs. This way we
1526          * don't report duplicated events on shared banks because the first one
1527          * to see it will clear it.
1528          */
1529         if (lmce) {
1530                 if (no_way_out)
1531                         mce_panic("Fatal local machine check", &m, msg);
1532         } else {
1533                 order = mce_start(&no_way_out);
1534         }
1535 
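             /* Scan all banks: log any valid errors and track the worst severity seen. */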
1536         taint = __mc_scan_banks(&m, regs, final, toclear, valid_banks, no_way_out, &worst);
1537 
1538         if (!no_way_out)
1539                 mce_clear_state(toclear);
1540 
1541         /*
1542          * Do most of the synchronization with other CPUs.
1543          * When there's any problem, use only the local no_way_out state.
1544          */
1545         if (!lmce) {
1546                 if (mce_end(order) < 0) {
1547                         if (!no_way_out)
1548                                 no_way_out = worst >= MCE_PANIC_SEVERITY;
1549 
1550                         if (no_way_out)
1551                                 mce_panic("Fatal machine check on current CPU", &m, msg);
1552                 }
1553         } else {
1554                 /*
1555                  * If there was a fatal machine check we should have
1556                  * already called mce_panic earlier in this function.
1557                  * Since we re-read the banks, we might have found
1558                  * something new. Check again to see if we found a
1559                  * fatal error. We call "mce_severity()" again to
1560                  * make sure we have the right "msg".
1561                  */
1562                 if (worst >= MCE_PANIC_SEVERITY) {
1563                         mce_severity(&m, regs, &msg, true);
1564                         mce_panic("Local fatal machine check!", &m, msg);
1565                 }
1566         }
1567 
1568         /*
1569          * Enable instrumentation around the external facilities like task_work_add()
1570          * (via queue_task_work()), fixup_exception() etc. For now, that is. Fixing this
1571          * properly would need a lot more involved reorganization.
1572          */
1573         instrumentation_begin();
1574 
1575         if (taint)
1576                 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
1577 
1578         if (worst != MCE_AR_SEVERITY && !kill_current_task)
1579                 goto out;
1580 
1581         /* Fault was in user mode and we need to take some action */
1582         if ((m.cs & 3) == 3) {
1583                 /* If this triggers there is no way to recover. Die hard. */
1584                 BUG_ON(!on_thread_stack() || !user_mode(regs));
1585 
1586                 if (!mce_usable_address(&m))
1587                         queue_task_work(&m, msg, kill_me_now);
1588                 else
1589                         queue_task_work(&m, msg, kill_me_maybe);
1590 
1591         } else if (m.mcgstatus & MCG_STATUS_SEAM_NR) {
1592                 /*
1593                  * Saved RIP on stack makes it look like the machine check
1594                  * was taken in the kernel on the instruction following
1595                  * the entry to SEAM mode. But MCG_STATUS_SEAM_NR indicates
1596                  * that the machine check was taken inside SEAM non-root
1597          * mode. The CPU core has already marked that guest as dead.
1598                  * It is OK for the kernel to resume execution at the
1599                  * apparent point of the machine check as the fault did
1600                  * not occur there. Mark the page as poisoned so it won't
1601          * be added to the free list when the guest is terminated.
1602                  */
1603                 if (mce_usable_address(&m)) {
1604                         struct page *p = pfn_to_online_page(m.addr >> PAGE_SHIFT);
1605 
1606                         if (p)
1607                                 SetPageHWPoison(p);
1608                 }
1609         } else {
1610                 /*
1611                  * Handle an MCE which has happened in kernel space but from
1612                  * which the kernel can recover: ex_has_fault_handler() has
1613                  * already verified that the rIP at which the error happened is
1614                  * a rIP from which the kernel can recover (by jumping to
1615                  * recovery code specified in _ASM_EXTABLE_FAULT()) and the
1616                  * corresponding exception handler which would do that is the
1617                  * proper one.
1618                  */
1619                 if (m.kflags & MCE_IN_KERNEL_RECOV) {
1620                         if (!fixup_exception(regs, X86_TRAP_MC, 0, 0))
1621                                 mce_panic("Failed kernel mode recovery", &m, msg);
1622                 }
1623 
1624                 if (m.kflags & MCE_IN_KERNEL_COPYIN)
1625                         queue_task_work(&m, msg, kill_me_never);
1626         }
1627 
1628 out:
1629         instrumentation_end();
1630 
1631 clear:
1632         mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
1633 }
1634 EXPORT_SYMBOL_GPL(do_machine_check);
1635 
1636 #ifndef CONFIG_MEMORY_FAILURE
1637 int memory_failure(unsigned long pfn, int flags)
1638 {
1639         /* mce_severity() should not hand us an ACTION_REQUIRED error */
1640         BUG_ON(flags & MF_ACTION_REQUIRED);
1641         pr_err("Uncorrected memory error in page 0x%lx ignored\n"
1642                "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
1643                pfn);
1644 
1645         return 0;
1646 }
1647 #endif
1648 
1649 /*
1650  * Periodic polling timer for "silent" machine check errors.  If the
1651  * poller finds an MCE, poll 2x faster.  When the poller finds no more
1652  * errors, poll 2x slower (up to check_interval seconds).
1653  */
1654 static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
1655 
1656 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1657 static DEFINE_PER_CPU(struct timer_list, mce_timer);
1658 
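     /* (Re)arm the timer, keeping whichever expiry fires first if it is already pending. */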
1659 static void __start_timer(struct timer_list *t, unsigned long interval)
1660 {
1661         unsigned long when = jiffies + interval;
1662         unsigned long flags;
1663 
1664         local_irq_save(flags);
1665 
1666         if (!timer_pending(t) || time_before(when, t->expires))
1667                 mod_timer(t, round_jiffies(when));
1668 
1669         local_irq_restore(flags);
1670 }
1671 
1672 static void mc_poll_banks_default(void)
1673 {
1674         machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
1675 }
1676 
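     /* Bank polling routine; the default polls the banks in mce_poll_banks and may be overridden. */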
1677 void (*mc_poll_banks)(void) = mc_poll_banks_default;
1678 
1679 static void mce_timer_fn(struct timer_list *t)
1680 {
1681         struct timer_list *cpu_t = this_cpu_ptr(&mce_timer);
1682         unsigned long iv;
1683 
1684         WARN_ON(cpu_t != t);
1685 
1686         iv = __this_cpu_read(mce_next_interval);
1687 
1688         if (mce_available(this_cpu_ptr(&cpu_info)))
1689                 mc_poll_banks();
1690 
1691         /*
1692          * Alert userspace if needed. If we logged an MCE, reduce the polling
1693          * interval, otherwise increase the polling interval.
1694          */
1695         if (mce_notify_irq())
1696                 iv = max(iv / 2, (unsigned long) HZ/100);
1697         else
1698                 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1699 
1700         if (mce_get_storm_mode()) {
1701                 __start_timer(t, HZ);
1702         } else {
1703                 __this_cpu_write(mce_next_interval, iv);
1704                 __start_timer(t, iv);
1705         }
1706 }
1707 
1708 /*
1709  * When a storm starts on any bank on this CPU, switch to polling
1710  * once per second. When the storm ends, revert to the default
1711  * polling interval.
1712  */
1713 void mce_timer_kick(bool storm)
1714 {
1715         struct timer_list *t = this_cpu_ptr(&mce_timer);
1716 
1717         mce_set_storm_mode(storm);
1718 
1719         if (storm)
1720                 __start_timer(t, HZ);
1721         else
1722                 __this_cpu_write(mce_next_interval, check_interval * HZ);
1723 }
1724 
1725 /* Must not be called in IRQ context where del_timer_sync() can deadlock */
1726 static void mce_timer_delete_all(void)
1727 {
1728         int cpu;
1729 
1730         for_each_online_cpu(cpu)
1731                 del_timer_sync(&per_cpu(mce_timer, cpu));
1732 }
1733 
1734 /*
1735  * Notify the user(s) about new machine check events.
1736  * Can be called from interrupt context, but not from machine check/NMI
1737  * context.
1738  */
1739 int mce_notify_irq(void)
1740 {
1741         /* Not more than two messages every minute */
1742         static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
1743 
1744         if (test_and_clear_bit(0, &mce_need_notify)) {
1745                 mce_work_trigger();
1746 
1747                 if (__ratelimit(&ratelimit))
1748                         pr_info(HW_ERR "Machine check events logged\n");
1749 
1750                 return 1;
1751         }
1752         return 0;
1753 }
1754 EXPORT_SYMBOL_GPL(mce_notify_irq);
1755 
1756 static void __mcheck_cpu_mce_banks_init(void)
1757 {
1758         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1759         u8 n_banks = this_cpu_read(mce_num_banks);
1760         int i;
1761 
1762         for (i = 0; i < n_banks; i++) {
1763                 struct mce_bank *b = &mce_banks[i];
1764 
1765                 /*
1766                  * Init them all; __mcheck_cpu_apply_quirks() is going to apply
1767                  * the required vendor quirks before
1768                  * __mcheck_cpu_init_clear_banks() does the final bank setup.
1769                  */
1770                 b->ctl = -1ULL;
1771                 b->init = true;
1772         }
1773 }
1774 
1775 /*
1776  * Initialize Machine Checks for a CPU.
1777  */
1778 static void __mcheck_cpu_cap_init(void)
1779 {
1780         u64 cap;
1781         u8 b;
1782 
1783         rdmsrl(MSR_IA32_MCG_CAP, cap);
1784 
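             /* The low 8 bits of MCG_CAP report the number of available MCA banks. */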
1785         b = cap & MCG_BANKCNT_MASK;
1786 
1787         if (b > MAX_NR_BANKS) {
1788                 pr_warn("CPU%d: Using only %u machine check banks out of %u\n",
1789                         smp_processor_id(), MAX_NR_BANKS, b);
1790                 b = MAX_NR_BANKS;
1791         }
1792 
1793         this_cpu_write(mce_num_banks, b);
1794 
1795         __mcheck_cpu_mce_banks_init();
1796 
1797         /* Use accurate RIP reporting if available. */
1798         if ((cap & MCG_EXT_P) && MCG_EXT_CNT(cap) >= 9)
1799                 mca_cfg.rip_msr = MSR_IA32_MCG_EIP;
1800 
1801         if (cap & MCG_SER_P)
1802                 mca_cfg.ser = 1;
1803 }
1804 
1805 static void __mcheck_cpu_init_generic(void)
1806 {
1807         enum mcp_flags m_fl = 0;
1808         mce_banks_t all_banks;
1809         u64 cap;
1810 
1811         if (!mca_cfg.bootlog)
1812                 m_fl = MCP_DONTLOG;
1813 
1814         /*
1815          * Log the machine checks left over from the previous reset. Log them
1816          * only, do not start processing them. That will happen in mcheck_late_init()
1817          * when all consumers have been registered on the notifier chain.
1818          */
1819         bitmap_fill(all_banks, MAX_NR_BANKS);
1820         machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1821 
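             /* CR4.MCE enables delivery of machine check exceptions on this CPU. */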
1822         cr4_set_bits(X86_CR4_MCE);
1823 
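             /* If the global MCG_CTL register is present, enable all of its features. */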
1824         rdmsrl(MSR_IA32_MCG_CAP, cap);
1825         if (cap & MCG_CTL_P)
1826                 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1827 }
1828 
1829 static void __mcheck_cpu_init_clear_banks(void)
1830 {
1831         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1832         int i;
1833 
1834         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1835                 struct mce_bank *b = &mce_banks[i];
1836 
1837                 if (!b->init)
1838                         continue;
1839                 wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
1840                 wrmsrl(mca_msr_reg(i, MCA_STATUS), 0);
1841         }
1842 }
1843 
1844 /*
1845  * Do a final check to see if there are any unused/RAZ banks.
1846  *
1847  * This must be done after the banks have been initialized and any quirks have
1848  * been applied.
1849  *
1850  * Do not call this from any user-initiated flows, e.g. CPU hotplug or sysfs.
1851  * Otherwise, a user who disables a bank will not be able to re-enable it
1852  * without a system reboot.
1853  */
1854 static void __mcheck_cpu_check_banks(void)
1855 {
1856         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1857         u64 msrval;
1858         int i;
1859 
1860         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
1861                 struct mce_bank *b = &mce_banks[i];
1862 
1863                 if (!b->init)
1864                         continue;
1865 
1866                 rdmsrl(mca_msr_reg(i, MCA_CTL), msrval);
1867                 b->init = !!msrval;
1868         }
1869 }
1870 
1871 /* Add per CPU specific workarounds here */
1872 static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1873 {
1874         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
1875         struct mca_config *cfg = &mca_cfg;
1876 
1877         if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1878                 pr_info("unknown CPU type - not enabling MCE support\n");
1879                 return -EOPNOTSUPP;
1880         }
1881 
1882         /* This should be disabled by the BIOS, but isn't always */
1883         if (c->x86_vendor == X86_VENDOR_AMD) {
1884                 if (c->x86 == 15 && this_cpu_read(mce_num_banks) > 4) {
1885                         /*
1886                          * disable GART TBL walk error reporting, which
1887                          * trips off incorrectly with the IOMMU & 3ware
1888                          * & Cerberus:
1889                          */
1890                         clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1891                 }
1892                 if (c->x86 < 0x11 && cfg->bootlog < 0) {
1893                         /*
1894                          * Lots of broken BIOSes around that don't clear them
1895                          * by default and leave crap in there. Don't log:
1896                          */
1897                         cfg->bootlog = 0;
1898                 }
1899                 /*
1900                  * Various K7s with broken bank 0 around. Always disable
1901                  * by default.
1902                  */
1903                 if (c->x86 == 6 && this_cpu_read(mce_num_banks) > 0)
1904                         mce_banks[0].ctl = 0;
1905 
1906                 /*
1907                  * overflow_recov is supported for F15h Models 00h-0fh
1908                  * even though we don't have a CPUID bit for it.
1909                  */
1910                 if (c->x86 == 0x15 && c->x86_model <= 0xf)
1911                         mce_flags.overflow_recov = 1;
1912 
1913                 if (c->x86 >= 0x17 && c->x86 <= 0x1A)
1914                         mce_flags.zen_ifu_quirk = 1;
1915 
1916         }
1917 
1918         if (c->x86_vendor == X86_VENDOR_INTEL) {
1919                 /*
1920                  * The SDM documents that on family 6, bank 0 should not be written
1921                  * because it aliases to another special BIOS-controlled
1922                  * register.
1923                  * But it's not aliased anymore on model 0x1a+.
1924                  * Don't ignore bank 0 completely because there could be a
1925                  * valid event later; merely don't write CTL0.
1926                  */
1927 
1928                 if (c->x86 == 6 && c->x86_model < 0x1A && this_cpu_read(mce_num_banks) > 0)
1929                         mce_banks[0].init = false;
1930 
1931                 /*
1932                  * All newer Intel systems support MCE broadcasting. Enable
1933                  * synchronization with a one second timeout.
1934                  */
1935                 if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
1936                         cfg->monarch_timeout < 0)
1937                         cfg->monarch_timeout = USEC_PER_SEC;
1938 
1939                 /*
1940                  * There are also broken BIOSes on some Pentium M and
1941                  * earlier systems:
1942                  */
1943                 if (c->x86 == 6 && c->x86_model <= 13 && cfg->bootlog < 0)
1944                         cfg->bootlog = 0;
1945 
1946                 if (c->x86_vfm == INTEL_SANDYBRIDGE_X)
1947                         mce_flags.snb_ifu_quirk = 1;
1948 
1949                 /*
1950                  * Skylake, Cascade Lake and Cooper Lake require a quirk on
1951                  * rep movs.
1952                  */
1953                 if (c->x86_vfm == INTEL_SKYLAKE_X)
1954                         mce_flags.skx_repmov_quirk = 1;
1955         }
1956 
1957         if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
1958                 /*
1959                  * All newer Zhaoxin CPUs support MCE broadcasting. Enable
1960                  * synchronization with a one second timeout.
1961                  */
1962                 if (c->x86 > 6 || (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
1963                         if (cfg->monarch_timeout < 0)
1964                                 cfg->monarch_timeout = USEC_PER_SEC;
1965                 }
1966         }
1967 
1968         if (cfg->monarch_timeout < 0)
1969                 cfg->monarch_timeout = 0;
1970         if (cfg->bootlog != 0)
1971                 cfg->panic_timeout = 30;
1972 
1973         return 0;
1974 }
1975 
1976 static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
1977 {
1978         if (c->x86 != 5)
1979                 return 0;
1980 
1981         switch (c->x86_vendor) {
1982         case X86_VENDOR_INTEL:
1983                 intel_p5_mcheck_init(c);
1984                 mce_flags.p5 = 1;
1985                 return 1;
1986         case X86_VENDOR_CENTAUR:
1987                 winchip_mcheck_init(c);
1988                 mce_flags.winchip = 1;
1989                 return 1;
1990         default:
1991                 return 0;
1992         }
1993 
1994         return 0;
1995 }
1996 
1997 /*
1998  * Init basic CPU features needed for early decoding of MCEs.
1999  */
2000 static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
2001 {
2002         if (c->x86_vendor == X86_VENDOR_AMD || c->x86_vendor == X86_VENDOR_HYGON) {
2003                 mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
2004                 mce_flags.succor         = !!cpu_has(c, X86_FEATURE_SUCCOR);
2005                 mce_flags.smca           = !!cpu_has(c, X86_FEATURE_SMCA);
2006                 mce_flags.amd_threshold  = 1;
2007         }
2008 }
2009 
2010 static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
2011 {
2012         struct mca_config *cfg = &mca_cfg;
2013 
2014         /*
2015          * All newer Centaur CPUs support MCE broadcasting. Enable
2016          * synchronization with a one second timeout.
2017          */
2018         if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
2019              c->x86 > 6) {
2020                 if (cfg->monarch_timeout < 0)
2021                         cfg->monarch_timeout = USEC_PER_SEC;
2022         }
2023 }
2024 
2025 static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
2026 {
2027         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2028 
2029         /*
2030          * These CPUs have MCA bank 8 which reports only one error type called
2031          * SVAD (System View Address Decoder). The reporting of that error is
2032          * controlled by IA32_MC8.CTL.0.
2033          *
2034          * If enabled, prefetching on these CPUs will cause SVAD MCE when
2035          * virtual machines start and result in a system panic. Always disable
2036          * bank 8 SVAD error by default.
2037          */
2038         if ((c->x86 == 7 && c->x86_model == 0x1b) ||
2039             (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
2040                 if (this_cpu_read(mce_num_banks) > 8)
2041                         mce_banks[8].ctl = 0;
2042         }
2043 
2044         intel_init_cmci();
2045         intel_init_lmce();
2046 }
2047 
2048 static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
2049 {
2050         intel_clear_lmce();
2051 }
2052 
2053 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
2054 {
2055         switch (c->x86_vendor) {
2056         case X86_VENDOR_INTEL:
2057                 mce_intel_feature_init(c);
2058                 break;
2059 
2060         case X86_VENDOR_AMD: {
2061                 mce_amd_feature_init(c);
2062                 break;
2063         }
2064 
2065         case X86_VENDOR_HYGON:
2066                 mce_hygon_feature_init(c);
2067                 break;
2068 
2069         case X86_VENDOR_CENTAUR:
2070                 mce_centaur_feature_init(c);
2071                 break;
2072 
2073         case X86_VENDOR_ZHAOXIN:
2074                 mce_zhaoxin_feature_init(c);
2075                 break;
2076 
2077         default:
2078                 break;
2079         }
2080 }
2081 
2082 static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
2083 {
2084         switch (c->x86_vendor) {
2085         case X86_VENDOR_INTEL:
2086                 mce_intel_feature_clear(c);
2087                 break;
2088 
2089         case X86_VENDOR_ZHAOXIN:
2090                 mce_zhaoxin_feature_clear(c);
2091                 break;
2092 
2093         default:
2094                 break;
2095         }
2096 }
2097 
2098 static void mce_start_timer(struct timer_list *t)
2099 {
2100         unsigned long iv = check_interval * HZ;
2101 
2102         if (mca_cfg.ignore_ce || !iv)
2103                 return;
2104 
2105         this_cpu_write(mce_next_interval, iv);
2106         __start_timer(t, iv);
2107 }
2108 
2109 static void __mcheck_cpu_setup_timer(void)
2110 {
2111         struct timer_list *t = this_cpu_ptr(&mce_timer);
2112 
2113         timer_setup(t, mce_timer_fn, TIMER_PINNED);
2114 }
2115 
2116 static void __mcheck_cpu_init_timer(void)
2117 {
2118         struct timer_list *t = this_cpu_ptr(&mce_timer);
2119 
2120         timer_setup(t, mce_timer_fn, TIMER_PINNED);
2121         mce_start_timer(t);
2122 }
2123 
2124 bool filter_mce(struct mce *m)
2125 {
2126         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
2127                 return amd_filter_mce(m);
2128         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2129                 return intel_filter_mce(m);
2130 
2131         return false;
2132 }
2133 
2134 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
2135 {
2136         irqentry_state_t irq_state;
2137 
2138         WARN_ON_ONCE(user_mode(regs));
2139 
2140         /*
2141          * Only required when from kernel mode. See
2142          * mce_check_crashing_cpu() for details.
2143          */
2144         if (mca_cfg.initialized && mce_check_crashing_cpu())
2145                 return;
2146 
2147         irq_state = irqentry_nmi_enter(regs);
2148 
2149         do_machine_check(regs);
2150 
2151         irqentry_nmi_exit(regs, irq_state);
2152 }
2153 
2154 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
2155 {
2156         irqentry_enter_from_user_mode(regs);
2157 
2158         do_machine_check(regs);
2159 
2160         irqentry_exit_to_user_mode(regs);
2161 }
2162 
2163 #ifdef CONFIG_X86_64
2164 /* MCE hit kernel mode */
2165 DEFINE_IDTENTRY_MCE(exc_machine_check)
2166 {
2167         unsigned long dr7;
2168 
2169         dr7 = local_db_save();
2170         exc_machine_check_kernel(regs);
2171         local_db_restore(dr7);
2172 }
2173 
2174 /* The user mode variant. */
2175 DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
2176 {
2177         unsigned long dr7;
2178 
2179         dr7 = local_db_save();
2180         exc_machine_check_user(regs);
2181         local_db_restore(dr7);
2182 }
2183 
2184 #ifdef CONFIG_X86_FRED
2185 /*
2186  * Depending on the ring level at which it occurred, i.e., user or kernel
2187  * context, #MCE needs to be handled on a different stack: user #MCE
2188  * on the current task stack, kernel #MCE on a dedicated stack.
2189  *
2190  * This is exactly how FRED event delivery invokes an exception
2191  * handler: ring 3 event on level 0 stack, i.e., current task stack;
2192  * ring 0 event on the #MCE dedicated stack specified in the
2193  * IA32_FRED_STKLVLS MSR. So unlike IDT, the FRED machine check entry
2194  * stub doesn't do stack switch.
2195  */
2196 DEFINE_FREDENTRY_MCE(exc_machine_check)
2197 {
2198         unsigned long dr7;
2199 
2200         dr7 = local_db_save();
2201         if (user_mode(regs))
2202                 exc_machine_check_user(regs);
2203         else
2204                 exc_machine_check_kernel(regs);
2205         local_db_restore(dr7);
2206 }
2207 #endif
2208 #else
2209 /* 32bit unified entry point */
2210 DEFINE_IDTENTRY_RAW(exc_machine_check)
2211 {
2212         unsigned long dr7;
2213 
2214         dr7 = local_db_save();
2215         if (user_mode(regs))
2216                 exc_machine_check_user(regs);
2217         else
2218                 exc_machine_check_kernel(regs);
2219         local_db_restore(dr7);
2220 }
2221 #endif
2222 
2223 /*
2224  * Called for each booted CPU to set up machine checks.
2225  * Must be called with preempt off:
2226  */
2227 void mcheck_cpu_init(struct cpuinfo_x86 *c)
2228 {
2229         if (mca_cfg.disabled)
2230                 return;
2231 
2232         if (__mcheck_cpu_ancient_init(c))
2233                 return;
2234 
2235         if (!mce_available(c))
2236                 return;
2237 
2238         __mcheck_cpu_cap_init();
2239 
2240         if (__mcheck_cpu_apply_quirks(c) < 0) {
2241                 mca_cfg.disabled = 1;
2242                 return;
2243         }
2244 
2245         if (mce_gen_pool_init()) {
2246                 mca_cfg.disabled = 1;
2247                 pr_emerg("Couldn't allocate MCE records pool!\n");
2248                 return;
2249         }
2250 
2251         mca_cfg.initialized = 1;
2252 
2253         __mcheck_cpu_init_early(c);
2254         __mcheck_cpu_init_generic();
2255         __mcheck_cpu_init_vendor(c);
2256         __mcheck_cpu_init_clear_banks();
2257         __mcheck_cpu_check_banks();
2258         __mcheck_cpu_setup_timer();
2259 }
2260 
2261 /*
2262  * Called for each booted CPU to clear some machine checks opt-ins
2263  */
2264 void mcheck_cpu_clear(struct cpuinfo_x86 *c)
2265 {
2266         if (mca_cfg.disabled)
2267                 return;
2268 
2269         if (!mce_available(c))
2270                 return;
2271 
2272         /*
2273          * Possibly to clear general settings generic to x86
2274          * __mcheck_cpu_clear_generic(c);
2275          */
2276         __mcheck_cpu_clear_vendor(c);
2277 
2278 }
2279 
2280 static void __mce_disable_bank(void *arg)
2281 {
2282         int bank = *((int *)arg);
2283         __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
2284         cmci_disable_bank(bank);
2285 }
2286 
2287 void mce_disable_bank(int bank)
2288 {
2289         if (bank >= this_cpu_read(mce_num_banks)) {
2290                 pr_warn(FW_BUG
2291                         "Ignoring request to disable invalid MCA bank %d.\n",
2292                         bank);
2293                 return;
2294         }
2295         set_bit(bank, mce_banks_ce_disabled);
2296         on_each_cpu(__mce_disable_bank, &bank, 1);
2297 }
2298 
2299 /*
2300  * mce=off Disables machine check
2301  * mce=no_cmci Disables CMCI
2302  * mce=no_lmce Disables LMCE
2303  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
2304  * mce=print_all Print all machine check logs to console
2305  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
2306  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
2307  *      monarchtimeout is how long to wait for other CPUs on machine
2308  *      check, or 0 to not wait
2309  * mce=bootlog Log MCEs from before booting. Disabled by default on AMD Fam10h
2310  *      and older.
2311  * mce=nobootlog Don't log MCEs from before booting.
2312  * mce=bios_cmci_threshold Don't program the CMCI threshold
2313  * mce=recovery force enable copy_mc_fragile()
2314  */
2315 static int __init mcheck_enable(char *str)
2316 {
2317         struct mca_config *cfg = &mca_cfg;
2318 
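             /* A bare "mce" with no options just enables machine checks on old P5-class CPUs. */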
2319         if (*str == 0) {
2320                 enable_p5_mce();
2321                 return 1;
2322         }
2323         if (*str == '=')
2324                 str++;
2325         if (!strcmp(str, "off"))
2326                 cfg->disabled = 1;
2327         else if (!strcmp(str, "no_cmci"))
2328                 cfg->cmci_disabled = true;
2329         else if (!strcmp(str, "no_lmce"))
2330                 cfg->lmce_disabled = 1;
2331         else if (!strcmp(str, "dont_log_ce"))
2332                 cfg->dont_log_ce = true;
2333         else if (!strcmp(str, "print_all"))
2334                 cfg->print_all = true;
2335         else if (!strcmp(str, "ignore_ce"))
2336                 cfg->ignore_ce = true;
2337         else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog"))
2338                 cfg->bootlog = (str[0] == 'b');
2339         else if (!strcmp(str, "bios_cmci_threshold"))
2340                 cfg->bios_cmci_threshold = 1;
2341         else if (!strcmp(str, "recovery"))
2342                 cfg->recovery = 1;
2343         else if (isdigit(str[0]))
2344                 get_option(&str, &(cfg->monarch_timeout));
2345         else {
2346                 pr_info("mce argument %s ignored. Please use /sys\n", str);
2347                 return 0;
2348         }
2349         return 1;
2350 }
2351 __setup("mce", mcheck_enable);
2352 
2353 int __init mcheck_init(void)
2354 {
2355         mce_register_decode_chain(&early_nb);
2356         mce_register_decode_chain(&mce_uc_nb);
2357         mce_register_decode_chain(&mce_default_nb);
2358 
2359         INIT_WORK(&mce_work, mce_gen_pool_process);
2360         init_irq_work(&mce_irq_work, mce_irq_work_cb);
2361 
2362         return 0;
2363 }
2364 
2365 /*
2366  * mce_syscore: PM support
2367  */
2368 
2369 /*
2370  * Disable machine checks on suspend and shutdown. We can't really handle
2371  * them later.
2372  */
2373 static void mce_disable_error_reporting(void)
2374 {
2375         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2376         int i;
2377 
2378         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2379                 struct mce_bank *b = &mce_banks[i];
2380 
2381                 if (b->init)
2382                         wrmsrl(mca_msr_reg(i, MCA_CTL), 0);
2383         }
2384         return;
2385 }
2386 
2387 static void vendor_disable_error_reporting(void)
2388 {
2389         /*
2390          * Don't clear on Intel, AMD, Hygon or Zhaoxin CPUs. Some of these
2391          * MSRs are socket-wide. Disabling them for just a single offlined CPU
2392          * is bad, since it will inhibit reporting for all shared resources on
2393          * the socket like the last level cache (LLC), the integrated memory
2394          * controller (iMC), etc.
2395          */
2396         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
2397             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ||
2398             boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
2399             boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)
2400                 return;
2401 
2402         mce_disable_error_reporting();
2403 }
2404 
2405 static int mce_syscore_suspend(void)
2406 {
2407         vendor_disable_error_reporting();
2408         return 0;
2409 }
2410 
2411 static void mce_syscore_shutdown(void)
2412 {
2413         vendor_disable_error_reporting();
2414 }
2415 
2416 /*
2417  * On resume clear all MCE state. Don't want to see leftovers from the BIOS.
2418  * Only one CPU is active at this time, the others get re-added later using
2419  * CPU hotplug:
2420  */
2421 static void mce_syscore_resume(void)
2422 {
2423         __mcheck_cpu_init_generic();
2424         __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
2425         __mcheck_cpu_init_clear_banks();
2426 }
2427 
2428 static struct syscore_ops mce_syscore_ops = {
2429         .suspend        = mce_syscore_suspend,
2430         .shutdown       = mce_syscore_shutdown,
2431         .resume         = mce_syscore_resume,
2432 };
2433 
2434 /*
2435  * mce_device: Sysfs support
2436  */
2437 
2438 static void mce_cpu_restart(void *data)
2439 {
2440         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2441                 return;
2442         __mcheck_cpu_init_generic();
2443         __mcheck_cpu_init_clear_banks();
2444         __mcheck_cpu_init_timer();
2445 }
2446 
2447 /* Reinit MCEs after user configuration changes */
2448 static void mce_restart(void)
2449 {
2450         mce_timer_delete_all();
2451         on_each_cpu(mce_cpu_restart, NULL, 1);
2452         mce_schedule_work();
2453 }
2454 
2455 /* Toggle features for corrected errors */
2456 static void mce_disable_cmci(void *data)
2457 {
2458         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2459                 return;
2460         cmci_clear();
2461 }
2462 
2463 static void mce_enable_ce(void *all)
2464 {
2465         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2466                 return;
2467         cmci_reenable();
2468         cmci_recheck();
2469         if (all)
2470                 __mcheck_cpu_init_timer();
2471 }
2472 
2473 static const struct bus_type mce_subsys = {
2474         .name           = "machinecheck",
2475         .dev_name       = "machinecheck",
2476 };
2477 
2478 DEFINE_PER_CPU(struct device *, mce_device);
2479 
2480 static inline struct mce_bank_dev *attr_to_bank(struct device_attribute *attr)
2481 {
2482         return container_of(attr, struct mce_bank_dev, attr);
2483 }
2484 
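     /* sysfs show/store handlers for the per-bank "bankN" control attributes. */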
2485 static ssize_t show_bank(struct device *s, struct device_attribute *attr,
2486                          char *buf)
2487 {
2488         u8 bank = attr_to_bank(attr)->bank;
2489         struct mce_bank *b;
2490 
2491         if (bank >= per_cpu(mce_num_banks, s->id))
2492                 return -EINVAL;
2493 
2494         b = &per_cpu(mce_banks_array, s->id)[bank];
2495 
2496         if (!b->init)
2497                 return -ENODEV;
2498 
2499         return sprintf(buf, "%llx\n", b->ctl);
2500 }
2501 
2502 static ssize_t set_bank(struct device *s, struct device_attribute *attr,
2503                         const char *buf, size_t size)
2504 {
2505         u8 bank = attr_to_bank(attr)->bank;
2506         struct mce_bank *b;
2507         u64 new;
2508 
2509         if (kstrtou64(buf, 0, &new) < 0)
2510                 return -EINVAL;
2511 
2512         if (bank >= per_cpu(mce_num_banks, s->id))
2513                 return -EINVAL;
2514 
2515         b = &per_cpu(mce_banks_array, s->id)[bank];
2516         if (!b->init)
2517                 return -ENODEV;
2518 
2519         b->ctl = new;
2520 
2521         mutex_lock(&mce_sysfs_mutex);
2522         mce_restart();
2523         mutex_unlock(&mce_sysfs_mutex);
2524 
2525         return size;
2526 }
2527 
2528 static ssize_t set_ignore_ce(struct device *s,
2529                              struct device_attribute *attr,
2530                              const char *buf, size_t size)
2531 {
2532         u64 new;
2533 
2534         if (kstrtou64(buf, 0, &new) < 0)
2535                 return -EINVAL;
2536 
2537         mutex_lock(&mce_sysfs_mutex);
2538         if (mca_cfg.ignore_ce ^ !!new) {
2539                 if (new) {
2540                         /* disable ce features */
2541                         mce_timer_delete_all();
2542                         on_each_cpu(mce_disable_cmci, NULL, 1);
2543                         mca_cfg.ignore_ce = true;
2544                 } else {
2545                         /* enable ce features */
2546                         mca_cfg.ignore_ce = false;
2547                         on_each_cpu(mce_enable_ce, (void *)1, 1);
2548                 }
2549         }
2550         mutex_unlock(&mce_sysfs_mutex);
2551 
2552         return size;
2553 }
2554 
2555 static ssize_t set_cmci_disabled(struct device *s,
2556                                  struct device_attribute *attr,
2557                                  const char *buf, size_t size)
2558 {
2559         u64 new;
2560 
2561         if (kstrtou64(buf, 0, &new) < 0)
2562                 return -EINVAL;
2563 
2564         mutex_lock(&mce_sysfs_mutex);
2565         if (mca_cfg.cmci_disabled ^ !!new) {
2566                 if (new) {
2567                         /* disable cmci */
2568                         on_each_cpu(mce_disable_cmci, NULL, 1);
2569                         mca_cfg.cmci_disabled = true;
2570                 } else {
2571                         /* enable cmci */
2572                         mca_cfg.cmci_disabled = false;
2573                         on_each_cpu(mce_enable_ce, NULL, 1);
2574                 }
2575         }
2576         mutex_unlock(&mce_sysfs_mutex);
2577 
2578         return size;
2579 }
2580 
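     /* Writes to check_interval land here so the polling timers can be restarted. */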
2581 static ssize_t store_int_with_restart(struct device *s,
2582                                       struct device_attribute *attr,
2583                                       const char *buf, size_t size)
2584 {
2585         unsigned long old_check_interval = check_interval;
2586         ssize_t ret = device_store_ulong(s, attr, buf, size);
2587 
2588         if (check_interval == old_check_interval)
2589                 return ret;
2590 
2591         mutex_lock(&mce_sysfs_mutex);
2592         mce_restart();
2593         mutex_unlock(&mce_sysfs_mutex);
2594 
2595         return ret;
2596 }
2597 
2598 static DEVICE_INT_ATTR(monarch_timeout, 0644, mca_cfg.monarch_timeout);
2599 static DEVICE_BOOL_ATTR(dont_log_ce, 0644, mca_cfg.dont_log_ce);
2600 static DEVICE_BOOL_ATTR(print_all, 0644, mca_cfg.print_all);
2601 
2602 static struct dev_ext_attribute dev_attr_check_interval = {
2603         __ATTR(check_interval, 0644, device_show_int, store_int_with_restart),
2604         &check_interval
2605 };
2606 
2607 static struct dev_ext_attribute dev_attr_ignore_ce = {
2608         __ATTR(ignore_ce, 0644, device_show_bool, set_ignore_ce),
2609         &mca_cfg.ignore_ce
2610 };
2611 
2612 static struct dev_ext_attribute dev_attr_cmci_disabled = {
2613         __ATTR(cmci_disabled, 0644, device_show_bool, set_cmci_disabled),
2614         &mca_cfg.cmci_disabled
2615 };
2616 
2617 static struct device_attribute *mce_device_attrs[] = {
2618         &dev_attr_check_interval.attr,
2619 #ifdef CONFIG_X86_MCELOG_LEGACY
2620         &dev_attr_trigger,
2621 #endif
2622         &dev_attr_monarch_timeout.attr,
2623         &dev_attr_dont_log_ce.attr,
2624         &dev_attr_print_all.attr,
2625         &dev_attr_ignore_ce.attr,
2626         &dev_attr_cmci_disabled.attr,
2627         NULL
2628 };
2629 
2630 static cpumask_var_t mce_device_initialized;
2631 
2632 static void mce_device_release(struct device *dev)
2633 {
2634         kfree(dev);
2635 }
2636 
2637 /* Per CPU device init. All of the CPUs still share the same bank device: */
2638 static int mce_device_create(unsigned int cpu)
2639 {
2640         struct device *dev;
2641         int err;
2642         int i, j;
2643 
2644         dev = per_cpu(mce_device, cpu);
2645         if (dev)
2646                 return 0;
2647 
2648         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2649         if (!dev)
2650                 return -ENOMEM;
2651         dev->id  = cpu;
2652         dev->bus = &mce_subsys;
2653         dev->release = &mce_device_release;
2654 
2655         err = device_register(dev);
2656         if (err) {
2657                 put_device(dev);
2658                 return err;
2659         }
2660 
2661         for (i = 0; mce_device_attrs[i]; i++) {
2662                 err = device_create_file(dev, mce_device_attrs[i]);
2663                 if (err)
2664                         goto error;
2665         }
2666         for (j = 0; j < per_cpu(mce_num_banks, cpu); j++) {
2667                 err = device_create_file(dev, &mce_bank_devs[j].attr);
2668                 if (err)
2669                         goto error2;
2670         }
2671         cpumask_set_cpu(cpu, mce_device_initialized);
2672         per_cpu(mce_device, cpu) = dev;
2673 
2674         return 0;
2675 error2:
2676         while (--j >= 0)
2677                 device_remove_file(dev, &mce_bank_devs[j].attr);
2678 error:
2679         while (--i >= 0)
2680                 device_remove_file(dev, mce_device_attrs[i]);
2681 
2682         device_unregister(dev);
2683 
2684         return err;
2685 }
2686 
2687 static void mce_device_remove(unsigned int cpu)
2688 {
2689         struct device *dev = per_cpu(mce_device, cpu);
2690         int i;
2691 
2692         if (!cpumask_test_cpu(cpu, mce_device_initialized))
2693                 return;
2694 
2695         for (i = 0; mce_device_attrs[i]; i++)
2696                 device_remove_file(dev, mce_device_attrs[i]);
2697 
2698         for (i = 0; i < per_cpu(mce_num_banks, cpu); i++)
2699                 device_remove_file(dev, &mce_bank_devs[i].attr);
2700 
2701         device_unregister(dev);
2702         cpumask_clear_cpu(cpu, mce_device_initialized);
2703         per_cpu(mce_device, cpu) = NULL;
2704 }
2705 
2706 /* Make sure there are no machine checks on offlined CPUs. */
2707 static void mce_disable_cpu(void)
2708 {
2709         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2710                 return;
2711 
2712         if (!cpuhp_tasks_frozen)
2713                 cmci_clear();
2714 
2715         vendor_disable_error_reporting();
2716 }
2717 
2718 static void mce_reenable_cpu(void)
2719 {
2720         struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array);
2721         int i;
2722 
2723         if (!mce_available(raw_cpu_ptr(&cpu_info)))
2724                 return;
2725 
2726         if (!cpuhp_tasks_frozen)
2727                 cmci_reenable();
2728         for (i = 0; i < this_cpu_read(mce_num_banks); i++) {
2729                 struct mce_bank *b = &mce_banks[i];
2730 
2731                 if (b->init)
2732                         wrmsrl(mca_msr_reg(i, MCA_CTL), b->ctl);
2733         }
2734 }
2735 
2736 static int mce_cpu_dead(unsigned int cpu)
2737 {
2738         /* intentionally ignoring frozen here */
2739         if (!cpuhp_tasks_frozen)
2740                 cmci_rediscover();
2741         return 0;
2742 }
2743 
2744 static int mce_cpu_online(unsigned int cpu)
2745 {
2746         struct timer_list *t = this_cpu_ptr(&mce_timer);
2747         int ret;
2748 
2749         mce_device_create(cpu);
2750 
2751         ret = mce_threshold_create_device(cpu);
2752         if (ret) {
2753                 mce_device_remove(cpu);
2754                 return ret;
2755         }
2756         mce_reenable_cpu();
2757         mce_start_timer(t);
2758         return 0;
2759 }
2760 
2761 static int mce_cpu_pre_down(unsigned int cpu)
2762 {
2763         struct timer_list *t = this_cpu_ptr(&mce_timer);
2764 
2765         mce_disable_cpu();
2766         del_timer_sync(t);
2767         mce_threshold_remove_device(cpu);
2768         mce_device_remove(cpu);
2769         return 0;
2770 }
2771 
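     /* Set up the sysfs attributes ("bank0", "bank1", ...) shared by all CPUs for each MCA bank. */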
2772 static __init void mce_init_banks(void)
2773 {
2774         int i;
2775 
2776         for (i = 0; i < MAX_NR_BANKS; i++) {
2777                 struct mce_bank_dev *b = &mce_bank_devs[i];
2778                 struct device_attribute *a = &b->attr;
2779 
2780                 b->bank = i;
2781 
2782                 sysfs_attr_init(&a->attr);
2783                 a->attr.name    = b->attrname;
2784                 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
2785 
2786                 a->attr.mode    = 0644;
2787                 a->show         = show_bank;
2788                 a->store        = set_bank;
2789         }
2790 }
2791 
2792 /*
2793  * When running on XEN, this initcall is ordered against the XEN mcelog
2794  * initcall:
2795  *
2796  *   device_initcall(xen_late_init_mcelog);
2797  *   device_initcall_sync(mcheck_init_device);
2798  */
2799 static __init int mcheck_init_device(void)
2800 {
2801         int err;
2802 
2803         /*
2804          * Check if we have a spare virtual bit. This will only become
2805          * a problem if/when we move beyond 5-level page tables.
2806          */
2807         MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63);
2808 
2809         if (!mce_available(&boot_cpu_data)) {
2810                 err = -EIO;
2811                 goto err_out;
2812         }
2813 
2814         if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
2815                 err = -ENOMEM;
2816                 goto err_out;
2817         }
2818 
2819         mce_init_banks();
2820 
2821         err = subsys_system_register(&mce_subsys, NULL);
2822         if (err)
2823                 goto err_out_mem;
2824 
2825         err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
2826                                 mce_cpu_dead);
2827         if (err)
2828                 goto err_out_mem;
2829 
2830         /*
2831          * Invokes mce_cpu_online() on all CPUs which are online when
2832          * the state is installed.
2833          */
2834         err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
2835                                 mce_cpu_online, mce_cpu_pre_down);
2836         if (err < 0)
2837                 goto err_out_online;
2838 
2839         register_syscore_ops(&mce_syscore_ops);
2840 
2841         return 0;
2842 
2843 err_out_online:
2844         cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
2845 
2846 err_out_mem:
2847         free_cpumask_var(mce_device_initialized);
2848 
2849 err_out:
2850         pr_err("Unable to init MCE device (rc: %d)\n", err);
2851 
2852         return err;
2853 }
2854 device_initcall_sync(mcheck_init_device);
2855 
2856 /*
2857  * Old style boot options parsing. Only for compatibility.
2858  */
2859 static int __init mcheck_disable(char *str)
2860 {
2861         mca_cfg.disabled = 1;
2862         return 1;
2863 }
2864 __setup("nomce", mcheck_disable);
2865 
2866 #ifdef CONFIG_DEBUG_FS
2867 struct dentry *mce_get_debugfs_dir(void)
2868 {
2869         static struct dentry *dmce;
2870 
2871         if (!dmce)
2872                 dmce = debugfs_create_dir("mce", NULL);
2873 
2874         return dmce;
2875 }
2876 
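     /* Reset the rendezvous state so that fake panics can be injected repeatedly. */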
2877 static void mce_reset(void)
2878 {
2879         atomic_set(&mce_fake_panicked, 0);
2880         atomic_set(&mce_executing, 0);
2881         atomic_set(&mce_callin, 0);
2882         atomic_set(&global_nwo, 0);
2883         cpumask_setall(&mce_missing_cpus);
2884 }
2885 
2886 static int fake_panic_get(void *data, u64 *val)
2887 {
2888         *val = fake_panic;
2889         return 0;
2890 }
2891 
2892 static int fake_panic_set(void *data, u64 val)
2893 {
2894         mce_reset();
2895         fake_panic = val;
2896         return 0;
2897 }
2898 
2899 DEFINE_DEBUGFS_ATTRIBUTE(fake_panic_fops, fake_panic_get, fake_panic_set,
2900                          "%llu\n");
2901 
2902 static void __init mcheck_debugfs_init(void)
2903 {
2904         struct dentry *dmce;
2905 
2906         dmce = mce_get_debugfs_dir();
2907         debugfs_create_file_unsafe("fake_panic", 0444, dmce, NULL,
2908                                    &fake_panic_fops);
2909 }
2910 #else
2911 static void __init mcheck_debugfs_init(void) { }
2912 #endif
2913 
2914 static int __init mcheck_late_init(void)
2915 {
2916         if (mca_cfg.recovery)
2917                 enable_copy_mc_fragile();
2918 
2919         mcheck_debugfs_init();
2920 
2921         /*
2922          * Flush out everything that has been logged during early boot, now that
2923          * everything has been initialized (workqueues, decoders, ...).
2924          */
2925         mce_schedule_work();
2926 
2927         return 0;
2928 }
2929 late_initcall(mcheck_late_init);
2930 
