TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/preempt.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/rmwcc.h>
#include <asm/percpu.h>
#include <asm/current.h>

#include <linux/static_call_types.h>

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED    0x80000000

/*
 * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
 * that a decrement hitting 0 means we can and should reschedule.
 */
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
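
/*
 * Editor's illustration (not part of the original header): a worked example
 * of the inverted-bit encoding, assuming PREEMPT_NEED_RESCHED == 0x80000000
 * as defined above.
 *
 *      0x80000000  PREEMPT_ENABLED: no preempt count, no resched pending
 *      0x80000001  after preempt_disable(): count of 1, nothing pending
 *      0x00000001  resched requested while preemption is disabled
 *      0x00000000  count dropped back to 0 *and* resched pending, so a single
 *                  "decrement and test for zero" can trigger the reschedule
 */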

/*
 * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
 * that think a non-zero value indicates we cannot preempt.
 */
static __always_inline int preempt_count(void)
{
        return raw_cpu_read_4(pcpu_hot.preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
        int old, new;

        old = raw_cpu_read_4(pcpu_hot.preempt_count);
        do {
                new = (old & PREEMPT_NEED_RESCHED) |
                        (pc & ~PREEMPT_NEED_RESCHED);
        } while (!raw_cpu_try_cmpxchg_4(pcpu_hot.preempt_count, &old, new));
}
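
/*
 * Editor's sketch (assumption, not in the original header): the cmpxchg loop
 * above installs a new count while preserving whatever NEED_RESCHED state is
 * currently encoded in the MSB.  For example, starting from 0x00000002
 * (count 2, resched pending), preempt_count_set(0) leaves 0x00000000; starting
 * from 0x80000002 (count 2, nothing pending), it leaves 0x80000000.
 */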

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { } while (0)

#define init_idle_preempt_count(p, cpu) do { \
        per_cpu(pcpu_hot.preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)

/*
 * We fold the NEED_RESCHED bit into the preempt count such that
 * preempt_enable() can decrement and test for needing to reschedule with a
 * single instruction.
 *
 * We invert the actual bit, so that when the decrement hits 0 we know we both
 * need to resched (the bit is cleared) and can resched (no preempt count).
 */
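
/*
 * Editor's sketch of how the fold pays off (assumed generic wrapper from
 * <linux/preempt.h>, quoted here only for illustration):
 *
 *      #define preempt_enable() \
 *      do { \
 *              barrier(); \
 *              if (unlikely(preempt_count_dec_and_test())) \
 *                      __preempt_schedule(); \
 *      } while (0)
 *
 * With the inverted bit, preempt_count_dec_and_test() maps onto the single
 * "decrement and test the zero flag" primitive defined later in this file.
 */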

static __always_inline void set_preempt_need_resched(void)
{
        raw_cpu_and_4(pcpu_hot.preempt_count, ~PREEMPT_NEED_RESCHED);
}

static __always_inline void clear_preempt_need_resched(void)
{
        raw_cpu_or_4(pcpu_hot.preempt_count, PREEMPT_NEED_RESCHED);
}

static __always_inline bool test_preempt_need_resched(void)
{
        return !(raw_cpu_read_4(pcpu_hot.preempt_count) & PREEMPT_NEED_RESCHED);
}

/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
        raw_cpu_add_4(pcpu_hot.preempt_count, val);
}

static __always_inline void __preempt_count_sub(int val)
{
        raw_cpu_add_4(pcpu_hot.preempt_count, -val);
}

/*
 * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
 * a decrement which hits zero means we have no preempt_count and should
 * reschedule.
 */
static __always_inline bool __preempt_count_dec_and_test(void)
{
        return GEN_UNARY_RMWcc("decl", __my_cpu_var(pcpu_hot.preempt_count), e,
                               __percpu_arg([var]));
}
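
/*
 * Editor's note (assumed expansion, for illustration only): GEN_UNARY_RMWcc()
 * emits one read-modify-write instruction on the per-CPU counter and hands
 * back the requested condition flag ("e", i.e. zero), so a caller doing
 * "if (__preempt_count_dec_and_test()) __preempt_schedule();" compiles to
 * roughly:
 *
 *      decl %gs:pcpu_hot+<offset>      # drop the preempt count
 *      jnz  1f                         # non-zero: still disabled or no resched
 *      call preempt_schedule_thunk     # zero: reschedule
 *      1:
 */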

/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
        return unlikely(raw_cpu_read_4(pcpu_hot.preempt_count) == preempt_offset);
}
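
/*
 * Editor's illustration (assumption, not part of the original header): the
 * equality test works because the comparison sees the *raw* value, inverted
 * bit included.  With preempt_offset == 0, only the value 0x00000000 matches,
 * which simultaneously means "no preempt count held" and "NEED_RESCHED bit
 * cleared", i.e. a reschedule is both possible and wanted.  Values such as
 * 0x80000000 (enabled, nothing pending) or 0x00000001 (resched pending but
 * preemption disabled) do not match.
 */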

#ifdef CONFIG_PREEMPTION

extern asmlinkage void preempt_schedule(void);
extern asmlinkage void preempt_schedule_thunk(void);

#define preempt_schedule_dynamic_enabled        preempt_schedule_thunk
#define preempt_schedule_dynamic_disabled       NULL

extern asmlinkage void preempt_schedule_notrace(void);
extern asmlinkage void preempt_schedule_notrace_thunk(void);

#define preempt_schedule_notrace_dynamic_enabled        preempt_schedule_notrace_thunk
#define preempt_schedule_notrace_dynamic_disabled       NULL

#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);

#define __preempt_schedule() \
do { \
        __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule); \
        asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
} while (0)

DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);

#define __preempt_schedule_notrace() \
do { \
        __STATIC_CALL_MOD_ADDRESSABLE(preempt_schedule_notrace); \
        asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
} while (0)
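
/*
 * Editor's sketch (assumed usage, for illustration only): with
 * CONFIG_PREEMPT_DYNAMIC the scheduler can retarget these static calls at
 * boot or runtime, e.g. when switching between preempt=none/voluntary/full:
 *
 *      static_call_update(preempt_schedule, preempt_schedule_dynamic_enabled);   // full
 *      static_call_update(preempt_schedule, preempt_schedule_dynamic_disabled);  // none
 *
 * which patches the call site emitted by __preempt_schedule() above.
 */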

#else /* PREEMPT_DYNAMIC */

#define __preempt_schedule() \
        asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT);

#define __preempt_schedule_notrace() \
        asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT);

#endif /* PREEMPT_DYNAMIC */

#endif /* PREEMPTION */

#endif /* __ASM_PREEMPT_H */

