/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

enum cpu_idle_type {
	__CPU_NOT_IDLE = 0,
	CPU_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H

static __always_inline void __current_set_polling(void)
{
	arch_set_bit(TIF_POLLING_NRFLAG,
		     (unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	arch_clear_bit(TIF_POLLING_NRFLAG,
		       (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline void __current_set_polling(void)
{
	set_bit(TIF_POLLING_NRFLAG,
		(unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	clear_bit(TIF_POLLING_NRFLAG,
		  (unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */

static __always_inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static __always_inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static __always_inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
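
/*
 * Illustrative sketch (not part of the upstream header): a hypothetical
 * polling idle routine showing how the helpers above are intended to be
 * paired.  current_set_polling_and_test() advertises TIF_POLLING_NRFLAG
 * and then re-checks NEED_RESCHED, so a reschedule request that raced
 * with idle entry is not lost; current_clr_polling() on exit drops the
 * polling state (forcing remote CPUs back to IPI-based wakeups) and
 * folds any pending reschedule into the preempt count.  The function
 * name my_poll_idle() is made up for this example.
 */
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/thread_info.h>

static void my_poll_idle(void)
{
	/* A wakeup may already be pending; if so, skip the polling wait. */
	if (current_set_polling_and_test())
		goto out;

	/*
	 * Spin until a remote CPU sets TIF_NEED_RESCHED; because we are
	 * polling, it will not send an IPI, it just sets the flag.
	 */
	while (!tif_need_resched())
		cpu_relax();
out:
	/* Leave polling mode and fold any pending need_resched. */
	current_clr_polling();
}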