/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
#include <asm/march.h>

#ifdef MARCH_HAS_Z196_FEATURES

/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000
#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(get_lowcore()->preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
		      (pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
				  old, new) != old);
}

static __always_inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
}

static __always_inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val > -129) && (val < 128)) {
			__atomic_add_const(val, &get_lowcore()->preempt_count);
			return;
		}
	}
	__atomic_add(val, &get_lowcore()->preempt_count);
}

static __always_inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	return __atomic_add(-1, &get_lowcore()->preempt_count) == 1;
}

static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(READ_ONCE(get_lowcore()->preempt_count) ==
			preempt_offset);
}

#else /* MARCH_HAS_Z196_FEATURES */

#define PREEMPT_ENABLED	(0)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(get_lowcore()->preempt_count);
}

static __always_inline void preempt_count_set(int pc)
{
	get_lowcore()->preempt_count = pc;
}

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

static __always_inline void __preempt_count_add(int val)
{
	get_lowcore()->preempt_count += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	get_lowcore()->preempt_count -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--get_lowcore()->preempt_count && tif_need_resched();
}

static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* MARCH_HAS_Z196_FEATURES */

#define init_task_preempt_count(p)	do { } while (0)
/* Deferred to CPU bringup time */
#define init_idle_preempt_count(p, cpu)	do { } while (0)

#ifdef CONFIG_PREEMPTION
extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
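/*
 * Illustrative sketch only -- not part of the header above.  Assuming the
 * generic <linux/preempt.h> layer behaves roughly as described here, this
 * shows how preempt_disable()/preempt_enable() map onto the primitives
 * defined in this file; the example_* helper names are hypothetical and
 * barrier() is the usual compiler barrier from <linux/compiler.h>.
 *
 * PREEMPT_NEED_RESCHED is stored inverted in lowcore::preempt_count:
 * PREEMPT_ENABLED includes the bit and set_preempt_need_resched() clears
 * it.  A raw count of 1 before the decrement in
 * __preempt_count_dec_and_test() therefore means "outermost
 * preempt_enable() with a reschedule pending", so a single atomic add and
 * compare covers both conditions.
 */
#if 0	/* example only, never compiled */
static inline void example_preempt_disable(void)
{
	__preempt_count_add(1);		/* enter a non-preemptible section */
	barrier();			/* keep the section from leaking upwards */
}

static inline void example_preempt_enable(void)
{
	barrier();			/* keep the section from leaking downwards */
	if (__preempt_count_dec_and_test())
		__preempt_schedule();	/* only defined with CONFIG_PREEMPTION */
}
#endif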