// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback if
		 * we are called at early boot time, but this shouldn't happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above: this simply does the "synchronous"
		 * call_rcu(rcu_sync_func), which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);

	spin_lock_irq(&rsp->rss_lock);
	WARN_ON_ONCE(rsp->gp_count == 0);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	WARN_ON_ONCE(rsp->gp_count);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
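
/*
 * Editorial usage sketch, not part of the upstream file: one plausible
 * way a reader/updater pair might drive the state machine above.  The
 * "sample_*" names are hypothetical; rcu_sync_is_idle() is the real
 * reader-side helper declared in <linux/rcu_sync.h>, and the actual
 * in-tree user of this machinery is percpu-rwsem.
 */

static struct rcu_sync sample_sync;

/* One-time setup, e.g. from an init function. */
static void sample_setup(void)
{
	rcu_sync_init(&sample_sync);
}

/*
 * Reader: rcu_sync_is_idle() must be called under RCU read-side
 * protection.  It returns true only in GP_IDLE, i.e. while no updater
 * is active and no grace period is still pending.
 */
static void sample_reader(void)
{
	rcu_read_lock();
	if (rcu_sync_is_idle(&sample_sync)) {
		/* Fastpath: no updater is active. */
	} else {
		/* Slowpath: coordinate with the updater. */
	}
	rcu_read_unlock();
}

/*
 * Updater: rcu_sync_enter() waits until all readers are guaranteed to
 * observe the slowpath indication; rcu_sync_exit() re-enables reader
 * fastpaths asynchronously, after a later grace period.
 */
static void sample_updater(void)
{
	rcu_sync_enter(&sample_sync);
	/* ... perform the update while readers take the slowpath ... */
	rcu_sync_exit(&sample_sync);
}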