/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
void rcu_request_urgent_qs_task(struct task_struct *t);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes. The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(void)
{
	rcu_note_context_switch(false);
}

void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);

void rcu_barrier(void);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);

/*
 * An opaque cookie pair capturing both the normal and the expedited
 * grace-period state, for use with the *_full() polled grace-period APIs.
 */
struct rcu_gp_oldstate {
	unsigned long rgos_norm;
	unsigned long rgos_exp;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4

/**
 * same_state_synchronize_rcu_full - Are two old-state values identical?
 * @rgosp1: First old-state value.
 * @rgosp2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or get_completed_synchronize_rcu_full().  Returns @true if the two
 * values are identical and @false otherwise.  This allows structures
 * whose lifetimes are tracked by old-state values to push these values
 * to a list header, allowing those structures to be slightly smaller.
 *
 * Note that equality is judged on a bitwise basis, so that an
 * @rcu_gp_oldstate structure with an already-completed state in one field
 * will compare not-equal to a structure with an already-completed state
 * in the other field.  After all, the @rcu_gp_oldstate structure is opaque,
 * so how did such a situation come to pass in the first place?
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm &&
	       rgosp1->rgos_exp == rgosp2->rgos_exp;
}

unsigned long start_poll_synchronize_rcu_expedited(void);
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

struct task_struct;
void rcu_preempt_deferred_qs(struct task_struct *t);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
void rcutree_report_cpu_starting(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
#else
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
#define rcutree_offline_cpu NULL
#endif

void rcutree_migrate_callbacks(int cpu);

/* Called from hotplug and also arm64 early secondaries */
void rcutree_report_cpu_dead(void);

#endif /* __LINUX_RCUTREE_H */
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.