TOMOYO Linux Cross Reference
Linux/include/linux/nmi.h

Diff markup

Differences between /include/linux/nmi.h (Version linux-6.12-rc7) and /include/linux/nmi.h (Version linux-6.2.16)


  1 /* SPDX-License-Identifier: GPL-2.0 */              1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /*                                                  2 /*
  3  *  linux/include/linux/nmi.h                       3  *  linux/include/linux/nmi.h
  4  */                                                 4  */
  5 #ifndef LINUX_NMI_H                                 5 #ifndef LINUX_NMI_H
  6 #define LINUX_NMI_H                                 6 #define LINUX_NMI_H
  7                                                     7 
  8 #include <linux/sched.h>                            8 #include <linux/sched.h>
  9 #include <asm/irq.h>                                9 #include <asm/irq.h>
 10                                                !!  10 #if defined(CONFIG_HAVE_NMI_WATCHDOG)
 11 /* Arch specific watchdogs might need to share << 
 12 #if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64) <<
 13 #include <asm/nmi.h>                               11 #include <asm/nmi.h>
 14 #endif                                             12 #endif
 15                                                    13 
 16 #ifdef CONFIG_LOCKUP_DETECTOR                      14 #ifdef CONFIG_LOCKUP_DETECTOR
 17 void lockup_detector_init(void);                   15 void lockup_detector_init(void);
 18 void lockup_detector_retry_init(void);         << 
 19 void lockup_detector_soft_poweroff(void);          16 void lockup_detector_soft_poweroff(void);
 20 void lockup_detector_cleanup(void);                17 void lockup_detector_cleanup(void);
                                                   >>  18 bool is_hardlockup(void);
 21                                                    19 
 22 extern int watchdog_user_enabled;                  20 extern int watchdog_user_enabled;
                                                   >>  21 extern int nmi_watchdog_user_enabled;
                                                   >>  22 extern int soft_watchdog_user_enabled;
 23 extern int watchdog_thresh;                        23 extern int watchdog_thresh;
 24 extern unsigned long watchdog_enabled;             24 extern unsigned long watchdog_enabled;
 25                                                    25 
 26 extern struct cpumask watchdog_cpumask;            26 extern struct cpumask watchdog_cpumask;
 27 extern unsigned long *watchdog_cpumask_bits;       27 extern unsigned long *watchdog_cpumask_bits;
 28 #ifdef CONFIG_SMP                                  28 #ifdef CONFIG_SMP
 29 extern int sysctl_softlockup_all_cpu_backtrace     29 extern int sysctl_softlockup_all_cpu_backtrace;
 30 extern int sysctl_hardlockup_all_cpu_backtrace     30 extern int sysctl_hardlockup_all_cpu_backtrace;
 31 #else                                              31 #else
 32 #define sysctl_softlockup_all_cpu_backtrace 0      32 #define sysctl_softlockup_all_cpu_backtrace 0
 33 #define sysctl_hardlockup_all_cpu_backtrace 0      33 #define sysctl_hardlockup_all_cpu_backtrace 0
 34 #endif /* !CONFIG_SMP */                           34 #endif /* !CONFIG_SMP */
 35                                                    35 
 36 #else /* CONFIG_LOCKUP_DETECTOR */                 36 #else /* CONFIG_LOCKUP_DETECTOR */
 37 static inline void lockup_detector_init(void)      37 static inline void lockup_detector_init(void) { }
 38 static inline void lockup_detector_retry_init(void) { } <<
 39 static inline void lockup_detector_soft_powero     38 static inline void lockup_detector_soft_poweroff(void) { }
 40 static inline void lockup_detector_cleanup(voi     39 static inline void lockup_detector_cleanup(void) { }
 41 #endif /* !CONFIG_LOCKUP_DETECTOR */               40 #endif /* !CONFIG_LOCKUP_DETECTOR */
 42                                                    41 
 43 #ifdef CONFIG_SOFTLOCKUP_DETECTOR                  42 #ifdef CONFIG_SOFTLOCKUP_DETECTOR
 44 extern void touch_softlockup_watchdog_sched(vo     43 extern void touch_softlockup_watchdog_sched(void);
 45 extern void touch_softlockup_watchdog(void);       44 extern void touch_softlockup_watchdog(void);
 46 extern void touch_softlockup_watchdog_sync(voi     45 extern void touch_softlockup_watchdog_sync(void);
 47 extern void touch_all_softlockup_watchdogs(voi     46 extern void touch_all_softlockup_watchdogs(void);
 48 extern unsigned int  softlockup_panic;             47 extern unsigned int  softlockup_panic;
 49                                                    48 
 50 extern int lockup_detector_online_cpu(unsigned     49 extern int lockup_detector_online_cpu(unsigned int cpu);
 51 extern int lockup_detector_offline_cpu(unsigne     50 extern int lockup_detector_offline_cpu(unsigned int cpu);
 52 #else /* CONFIG_SOFTLOCKUP_DETECTOR */             51 #else /* CONFIG_SOFTLOCKUP_DETECTOR */
 53 static inline void touch_softlockup_watchdog_s     52 static inline void touch_softlockup_watchdog_sched(void) { }
 54 static inline void touch_softlockup_watchdog(v     53 static inline void touch_softlockup_watchdog(void) { }
 55 static inline void touch_softlockup_watchdog_s     54 static inline void touch_softlockup_watchdog_sync(void) { }
 56 static inline void touch_all_softlockup_watchd     55 static inline void touch_all_softlockup_watchdogs(void) { }
 57                                                    56 
 58 #define lockup_detector_online_cpu      NULL       57 #define lockup_detector_online_cpu      NULL
 59 #define lockup_detector_offline_cpu     NULL       58 #define lockup_detector_offline_cpu     NULL
 60 #endif /* CONFIG_SOFTLOCKUP_DETECTOR */            59 #endif /* CONFIG_SOFTLOCKUP_DETECTOR */
 61                                                    60 
 62 #ifdef CONFIG_DETECT_HUNG_TASK                     61 #ifdef CONFIG_DETECT_HUNG_TASK
 63 void reset_hung_task_detector(void);               62 void reset_hung_task_detector(void);
 64 #else                                              63 #else
 65 static inline void reset_hung_task_detector(vo     64 static inline void reset_hung_task_detector(void) { }
 66 #endif                                             65 #endif
 67                                                    66 
 68 /*                                                 67 /*
 69  * The run state of the lockup detectors is co     68  * The run state of the lockup detectors is controlled by the content of the
 70  * 'watchdog_enabled' variable. Each lockup de     69  * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 71  * bit 0 for the hard lockup detector and bit      70  * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 72  *                                                 71  *
 73  * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and  !!  72  * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and
 74  * 'watchdog_softlockup_user_enabled' are variables that are only used as an  !!  73  * 'soft_watchdog_user_enabled' are variables that are only used as an
 75  * 'interface' between the parameters in /proc     74  * 'interface' between the parameters in /proc/sys/kernel and the internal
 76  * state bits in 'watchdog_enabled'. The 'watc     75  * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 77  * handled differently because its value is no     76  * handled differently because its value is not boolean, and the lockup
 78  * detectors are 'suspended' while 'watchdog_t     77  * detectors are 'suspended' while 'watchdog_thresh' is equal zero.
 79  */                                                78  */
 80 #define WATCHDOG_HARDLOCKUP_ENABLED_BIT  0     !!  79 #define NMI_WATCHDOG_ENABLED_BIT   0
 81 #define WATCHDOG_SOFTOCKUP_ENABLED_BIT   1     !!  80 #define SOFT_WATCHDOG_ENABLED_BIT  1
 82 #define WATCHDOG_HARDLOCKUP_ENABLED     (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)  !!  81 #define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
 83 #define WATCHDOG_SOFTOCKUP_ENABLED      (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)  !!  82 #define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
 84                                                    83 
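
The comment block above explains that 'watchdog_enabled' packs the run state of both detectors into a single word (bit 0 hard lockup, bit 1 soft lockup); 6.12 only renames the bit macros from the NMI_/SOFT_ prefixes to WATCHDOG_HARDLOCKUP_/WATCHDOG_SOFTOCKUP_ (the "SOFTOCKUP" spelling is upstream's). The following is a minimal standalone sketch that merely mirrors those macros for illustration; it is not part of the header:

#include <stdio.h>

/* Mirror of the 6.12 bit layout shown above (the 6.2 column used the
 * NMI_WATCHDOG_* and SOFT_WATCHDOG_* names for the same bits). */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT  0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT   1
#define WATCHDOG_HARDLOCKUP_ENABLED     (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED      (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)

int main(void)
{
        /* Both detectors enabled: watchdog_enabled == 0x3. */
        unsigned long watchdog_enabled =
                WATCHDOG_HARDLOCKUP_ENABLED | WATCHDOG_SOFTOCKUP_ENABLED;

        printf("hardlockup detector: %s\n",
               (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) ? "on" : "off");
        printf("softlockup detector: %s\n",
               (watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED) ? "on" : "off");
        return 0;
}
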
 85 #if defined(CONFIG_HARDLOCKUP_DETECTOR)            84 #if defined(CONFIG_HARDLOCKUP_DETECTOR)
 86 extern void hardlockup_detector_disable(void);     85 extern void hardlockup_detector_disable(void);
 87 extern unsigned int hardlockup_panic;              86 extern unsigned int hardlockup_panic;
 88 #else                                              87 #else
 89 static inline void hardlockup_detector_disable     88 static inline void hardlockup_detector_disable(void) {}
 90 #endif                                             89 #endif
 91                                                    90 
 92 /* Sparc64 has special implemetantion that is always enabled. */  !!  91 #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 93 #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)  !!  92 # define NMI_WATCHDOG_SYSCTL_PERM       0644
 94 void arch_touch_nmi_watchdog(void);            << 
 95 #else                                              93 #else
 96 static inline void arch_touch_nmi_watchdog(void) { }  !!  94 # define NMI_WATCHDOG_SYSCTL_PERM       0444
 97 #endif                                         << 
 98                                                << 
 99 #if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER) <<
100 void watchdog_hardlockup_touch_cpu(unsigned int cpu); <<
101 void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs); <<
102 #endif                                             95 #endif
103                                                    96 
104 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)       97 #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
                                                   >>  98 extern void arch_touch_nmi_watchdog(void);
105 extern void hardlockup_detector_perf_stop(void     99 extern void hardlockup_detector_perf_stop(void);
106 extern void hardlockup_detector_perf_restart(v    100 extern void hardlockup_detector_perf_restart(void);
                                                   >> 101 extern void hardlockup_detector_perf_disable(void);
                                                   >> 102 extern void hardlockup_detector_perf_enable(void);
107 extern void hardlockup_detector_perf_cleanup(v    103 extern void hardlockup_detector_perf_cleanup(void);
108 extern void hardlockup_config_perf_event(const char *str);  !! 104 extern int hardlockup_detector_perf_init(void);
109 #else                                             105 #else
110 static inline void hardlockup_detector_perf_st    106 static inline void hardlockup_detector_perf_stop(void) { }
111 static inline void hardlockup_detector_perf_re    107 static inline void hardlockup_detector_perf_restart(void) { }
                                                   >> 108 static inline void hardlockup_detector_perf_disable(void) { }
                                                   >> 109 static inline void hardlockup_detector_perf_enable(void) { }
112 static inline void hardlockup_detector_perf_cl    110 static inline void hardlockup_detector_perf_cleanup(void) { }
113 static inline void hardlockup_config_perf_event(const char *str) { }  !! 111 # if !defined(CONFIG_HAVE_NMI_WATCHDOG)
                                                   >> 112 static inline int hardlockup_detector_perf_init(void) { return -ENODEV; }
                                                   >> 113 static inline void arch_touch_nmi_watchdog(void) {}
                                                   >> 114 # else
                                                   >> 115 static inline int hardlockup_detector_perf_init(void) { return 0; }
                                                   >> 116 # endif
114 #endif                                            117 #endif
115                                                   118 
116 void watchdog_hardlockup_stop(void);           !! 119 void watchdog_nmi_stop(void);
117 void watchdog_hardlockup_start(void);          !! 120 void watchdog_nmi_start(void);
118 int watchdog_hardlockup_probe(void);           !! 121 int watchdog_nmi_probe(void);
119 void watchdog_hardlockup_enable(unsigned int cpu);  !! 122 int watchdog_nmi_enable(unsigned int cpu);
120 void watchdog_hardlockup_disable(unsigned int cpu);  !! 123 void watchdog_nmi_disable(unsigned int cpu);
121                                                   124 
122 void lockup_detector_reconfigure(void);           125 void lockup_detector_reconfigure(void);
123                                                   126 
124 #ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY        << 
125 void watchdog_buddy_check_hardlockup(int hrtimer_interrupts); <<
126 #else                                          << 
127 static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {} <<
128 #endif                                         << 
129                                                << 
130 /**                                               127 /**
131  * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.  !! 128  * touch_nmi_watchdog - restart NMI watchdog timeout.
132  *                                                129  *
133  * If we support detecting hardlockups, touch_nmi_watchdog() may be  !! 130  * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
134  * used to pet the watchdog (reset the timeout) - for code which  !! 131  * may be used to reset the timeout - for code which intentionally
135  * intentionally disables interrupts for a long time. This call is stateless.  !! 132  * disables interrupts for a long time. This call is stateless.
136  *                                             << 
137  * Though this function has "nmi" in the name, the hardlockup watchdog might <<
138  * not be backed by NMIs. This function will likely be renamed to <<
139  * touch_hardlockup_watchdog() in the future.  << 
140  */                                               133  */
141 static inline void touch_nmi_watchdog(void)       134 static inline void touch_nmi_watchdog(void)
142 {                                                 135 {
143         /*                                     << 
144          * Pass on to the hardlockup detector  << 
145          * the hardlockup detector may not be  << 
146          * and the arch_touch_nmi_watchdog() f << 
147          * in the future.                      << 
148          */                                    << 
149         arch_touch_nmi_watchdog();                136         arch_touch_nmi_watchdog();
150                                                << 
151         touch_softlockup_watchdog();              137         touch_softlockup_watchdog();
152 }                                                 138 }
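
The kernel-doc above spells out the intended use of touch_nmi_watchdog(): code that deliberately keeps interrupts disabled (or otherwise stalls a CPU) for longer than the watchdog threshold should pet the watchdog as it goes. Below is a hedged sketch of that pattern; the helper name and loop are invented for illustration, and only touch_nmi_watchdog(), local_irq_save()/local_irq_restore() and mdelay() are real kernel APIs:

#include <linux/nmi.h>
#include <linux/irqflags.h>
#include <linux/delay.h>

/* Hypothetical helper: poll slow hardware with interrupts disabled for
 * several seconds without tripping the hardlockup/softlockup detectors. */
static void example_slow_poll_irqs_off(void)
{
        unsigned long flags;
        int i;

        local_irq_save(flags);
        for (i = 0; i < 1000; i++) {
                mdelay(10);              /* stands in for a slow register read */
                touch_nmi_watchdog();    /* resets both watchdog timeouts */
        }
        local_irq_restore(flags);
}
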
153                                                   139 
154 /*                                                140 /*
155  * Create trigger_all_cpu_backtrace() out of t    141  * Create trigger_all_cpu_backtrace() out of the arch-provided
156  * base function. Return whether such support     142  * base function. Return whether such support was available,
157  * to allow calling code to fall back to some     143  * to allow calling code to fall back to some other mechanism:
158  */                                               144  */
159 #ifdef arch_trigger_cpumask_backtrace             145 #ifdef arch_trigger_cpumask_backtrace
160 static inline bool trigger_all_cpu_backtrace(v    146 static inline bool trigger_all_cpu_backtrace(void)
161 {                                                 147 {
162         arch_trigger_cpumask_backtrace(cpu_online_mask, -1);  !! 148         arch_trigger_cpumask_backtrace(cpu_online_mask, false);
163         return true;                              149         return true;
164 }                                                 150 }
165                                                   151 
166 static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)  !! 152 static inline bool trigger_allbutself_cpu_backtrace(void)
167 {                                                 153 {
168         arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);  !! 154         arch_trigger_cpumask_backtrace(cpu_online_mask, true);
169         return true;                              155         return true;
170 }                                                 156 }
171                                                   157 
172 static inline bool trigger_cpumask_backtrace(s    158 static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
173 {                                                 159 {
174         arch_trigger_cpumask_backtrace(mask, -1);  !! 160         arch_trigger_cpumask_backtrace(mask, false);
175         return true;                              161         return true;
176 }                                                 162 }
177                                                   163 
178 static inline bool trigger_single_cpu_backtrac    164 static inline bool trigger_single_cpu_backtrace(int cpu)
179 {                                                 165 {
180         arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);  !! 166         arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
181         return true;                              167         return true;
182 }                                                 168 }
183                                                   169 
184 /* generic implementation */                      170 /* generic implementation */
185 void nmi_trigger_cpumask_backtrace(const cpuma    171 void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
186                                    int exclude_cpu,  !! 172                                    bool exclude_self,
187                                    void (*rais    173                                    void (*raise)(cpumask_t *mask));
188 bool nmi_cpu_backtrace(struct pt_regs *regs);     174 bool nmi_cpu_backtrace(struct pt_regs *regs);
189                                                   175 
190 #else                                             176 #else
191 static inline bool trigger_all_cpu_backtrace(v    177 static inline bool trigger_all_cpu_backtrace(void)
192 {                                                 178 {
193         return false;                             179         return false;
194 }                                                 180 }
195 static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)  !! 181 static inline bool trigger_allbutself_cpu_backtrace(void)
196 {                                                 182 {
197         return false;                             183         return false;
198 }                                                 184 }
199 static inline bool trigger_cpumask_backtrace(s    185 static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
200 {                                                 186 {
201         return false;                             187         return false;
202 }                                                 188 }
203 static inline bool trigger_single_cpu_backtrac    189 static inline bool trigger_single_cpu_backtrace(int cpu)
204 {                                                 190 {
205         return false;                             191         return false;
206 }                                                 192 }
207 #endif                                            193 #endif
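
The comment introducing this block states the contract: each trigger_*_backtrace() helper returns whether an arch-provided cross-CPU backtrace mechanism (arch_trigger_cpumask_backtrace()) was actually available, so callers can fall back to something weaker. A minimal sketch of that fallback pattern, with an invented caller name:

#include <linux/nmi.h>
#include <linux/printk.h>

/* Hypothetical debug hook: ask every online CPU to dump its stack;
 * if the architecture offers no cross-CPU backtrace, at least dump ours. */
static void example_dump_all_cpus(void)
{
        if (!trigger_all_cpu_backtrace())
                dump_stack();
}
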
208                                                   194 
209 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF            195 #ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
210 u64 hw_nmi_get_sample_period(int watchdog_thre    196 u64 hw_nmi_get_sample_period(int watchdog_thresh);
211 bool arch_perf_nmi_is_available(void);         << 
212 #endif                                            197 #endif
213                                                   198 
214 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP)    199 #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
215     defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)   !! 200     defined(CONFIG_HARDLOCKUP_DETECTOR)
216 void watchdog_update_hrtimer_threshold(u64 per    201 void watchdog_update_hrtimer_threshold(u64 period);
217 #else                                             202 #else
218 static inline void watchdog_update_hrtimer_thr    203 static inline void watchdog_update_hrtimer_threshold(u64 period) { }
219 #endif                                            204 #endif
220                                                   205 
                                                   >> 206 struct ctl_table;
                                                   >> 207 int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
                                                   >> 208 int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
                                                   >> 209 int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
                                                   >> 210 int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
                                                   >> 211 int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
                                                   >> 212 
221 #ifdef CONFIG_HAVE_ACPI_APEI_NMI                  213 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
222 #include <asm/nmi.h>                              214 #include <asm/nmi.h>
223 #endif                                         << 
224                                                << 
225 #ifdef CONFIG_NMI_CHECK_CPU                    << 
226 void nmi_backtrace_stall_snap(const struct cpumask *btc); <<
227 void nmi_backtrace_stall_check(const struct cpumask *btc); <<
228 #else                                          <<
229 static inline void nmi_backtrace_stall_snap(const struct cpumask *btc) {} <<
230 static inline void nmi_backtrace_stall_check(const struct cpumask *btc) {} <<
231 #endif                                            215 #endif
232                                                   216 
233 #endif                                            217 #endif
234                                                   218 
