/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2

/*
 * Are the two oldstate values the same?  See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}

unsigned long get_state_synchronize_rcu(void);

static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}

unsigned long start_poll_synchronize_rcu(void);

static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}

bool poll_state_synchronize_rcu(unsigned long oldstate);

static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}

static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}
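/*
 * Illustrative usage sketch, not part of this header's API: one way a
 * caller might combine the polled grace-period interfaces above.  The
 * names ex_foo, ex_foo_update(), and ex_foo_release() are hypothetical
 * and exist only for this example.
 *
 *	struct ex_foo {
 *		unsigned long gp_snap;
 *		void *data;
 *	};
 *
 *	static void ex_foo_update(struct ex_foo *fp)
 *	{
 *		// Record a cookie for a grace period covering all
 *		// pre-existing readers, starting one if needed.
 *		fp->gp_snap = start_poll_synchronize_rcu();
 *	}
 *
 *	static void ex_foo_release(struct ex_foo *fp)
 *	{
 *		// Sleep only until the snapshotted grace period has
 *		// elapsed.  On Tiny RCU this reduces to might_sleep():
 *		// on a single-CPU kernel, any point at which this
 *		// function is permitted to sleep is already past all
 *		// pre-existing read-side critical sections.
 *		cond_synchronize_rcu(fp->gp_snap);
 *		kfree(fp->data);
 *		kfree(fp);
 *	}
 */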
/*
 * Add one more declaration of kvfree() here.  It is
 * not so straightforward to just include <linux/mm.h>
 * where it is defined due to getting many compile
 * errors caused by that include.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		// Encode the offset of the rcu_head within the
		// enclosing object in place of a real callback; the
		// callback-invocation code recognizes such small
		// "callback addresses" and kvfree()s the object.
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

static inline void kvfree_rcu_barrier(void)
{
	rcu_barrier();
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	__kvfree_call_rcu(head, ptr);
}
#endif
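/*
 * Illustrative usage sketch, not part of this header: how the two
 * kvfree_rcu() forms map onto __kvfree_call_rcu() above.  The names
 * ex_bar, ex_bar_free(), and ex_buf_free() are hypothetical.
 *
 *	struct ex_bar {
 *		int payload;
 *		struct rcu_head rh;
 *	};
 *
 *	static void ex_bar_free(struct ex_bar *bp)
 *	{
 *		// Two-argument form: passes &bp->rh and bp, so the
 *		// head/pointer offset can be encoded as the "callback".
 *		// Safe to call from non-sleepable context.
 *		kvfree_rcu(bp, rh);
 *	}
 *
 *	static void ex_buf_free(void *buf)
 *	{
 *		// One-argument form: passes a NULL head, so it must be
 *		// allowed to sleep (note the might_sleep() above).
 *		kvfree_rcu_mightsleep(buf);
 *	}
 */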
void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_eqs(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu	NULL
#define rcutree_online_cpu	NULL
#define rcutree_offline_cpu	NULL
#define rcutree_dead_cpu	NULL
#define rcutree_dying_cpu	NULL
static inline void rcutree_report_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */
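/*
 * Illustrative usage sketch, not part of this header: the classic RCU
 * reader/updater pattern that the Tiny RCU stubs above support.  The
 * names ex_data, ex_gp, ex_update(), and do_something() are
 * hypothetical.
 *
 *	struct ex_data __rcu *ex_gp;	// Published pointer.
 *
 *	// Reader: on Tiny RCU (one CPU, non-preemptible),
 *	// rcu_read_lock() and rcu_read_unlock() compile to (almost)
 *	// nothing, yet the pattern stays portable to Tree RCU kernels.
 *	struct ex_data *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(ex_gp);
 *	if (p)
 *		do_something(p);
 *	rcu_read_unlock();
 *
 *	// Updater: replace the pointer, then reclaim the old object
 *	// once all pre-existing readers have finished.
 *	static void ex_update(struct ex_data *newp)
 *	{
 *		struct ex_data *oldp;
 *
 *		oldp = rcu_replace_pointer(ex_gp, newp, true);
 *		synchronize_rcu();	// Wait for pre-existing readers.
 *		kfree(oldp);
 *	}
 */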