// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };

#define rss_lock gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
        memset(rsp, 0, sizeof(*rsp));
        init_waitqueue_head(&rsp->gp_wait);
}

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
        rsp->gp_count++;
        rsp->gp_state = GP_PASSED;
}

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
        call_rcu(&rsp->cb_head, rcu_sync_func);
}
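
/*
 * Illustrative sketch, not part of this file: one way a lightweight
 * reader-writer scheme (in the spirit of percpu-rwsem) might sit on top
 * of this API.  Everything prefixed "my_", the do_*_read() helpers and
 * the use of a plain mutex (assumes <linux/mutex.h>) are hypothetical.
 */
static struct rcu_sync my_rss;
static DEFINE_MUTEX(my_slow_lock);

static void do_fast_read(void);         /* hypothetical reader work */
static void do_slow_read(void);         /* hypothetical reader work */

static void my_init(void)
{
        rcu_sync_init(&my_rss);
        /*
         * rcu_sync_enter_start(&my_rss) could be called here, before
         * first use, to start with readers forced onto the slow path.
         */
}

static void my_reader(void)
{
        rcu_read_lock();
        if (rcu_sync_is_idle(&my_rss)) {
                /*
                 * Fast path: no updater is active.  The access must stay
                 * inside this RCU read-side critical section so that the
                 * grace period in rcu_sync_enter() waits for it.
                 */
                do_fast_read();
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* Slow path: an updater has forced readers onto the real lock. */
        mutex_lock(&my_slow_lock);
        do_slow_read();
        mutex_unlock(&my_slow_lock);
}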

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
        struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
        unsigned long flags;

        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

        spin_lock_irqsave(&rsp->rss_lock, flags);
        if (rsp->gp_count) {
                /*
                 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
                 */
                WRITE_ONCE(rsp->gp_state, GP_PASSED);
                wake_up_locked(&rsp->gp_wait);
        } else if (rsp->gp_state == GP_REPLAY) {
                /*
                 * A new rcu_sync_exit() has happened; requeue the callback to
                 * catch a later GP.
                 */
                WRITE_ONCE(rsp->gp_state, GP_EXIT);
                rcu_sync_call(rsp);
        } else {
                /*
                 * We're at least a GP after the last rcu_sync_exit(); everybody
                 * will now have observed the write side critical section.
                 * Let 'em rip!
                 */
                WRITE_ONCE(rsp->gp_state, GP_IDLE);
        }
        spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
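
/*
 * Overview of the gp_state transitions implemented by rcu_sync_enter(),
 * rcu_sync_exit() and rcu_sync_func():
 *
 *      GP_IDLE   -> GP_ENTER   first rcu_sync_enter()
 *      GP_ENTER  -> GP_PASSED  rcu_sync_func() after a grace period
 *      GP_PASSED -> GP_EXIT    last rcu_sync_exit(), callback queued
 *      GP_EXIT   -> GP_IDLE    rcu_sync_func(): no new updater, fastpaths on
 *      GP_EXIT   -> GP_REPLAY  a new enter()/exit() pair completed before
 *                              the queued callback ran
 *      GP_REPLAY -> GP_EXIT    rcu_sync_func() requeues the callback
 *
 * Whenever rcu_sync_func() runs while gp_count is non-zero (an updater is
 * still, or again, present), it moves to GP_PASSED instead.
 */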

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
        int gp_state;

        spin_lock_irq(&rsp->rss_lock);
        gp_state = rsp->gp_state;
        if (gp_state == GP_IDLE) {
                WRITE_ONCE(rsp->gp_state, GP_ENTER);
                WARN_ON_ONCE(rsp->gp_count);
                /*
                 * Note that we could simply do rcu_sync_call(rsp) here and
                 * avoid the "if (gp_state == GP_IDLE)" block below.
                 *
                 * However, synchronize_rcu() can be faster if rcu_expedited
                 * or rcu_blocking_is_gp() is true.
                 *
                 * Another reason is that we can't wait for an RCU callback
                 * if we are called at early boot time, but this shouldn't
                 * happen.
                 */
        }
        rsp->gp_count++;
        spin_unlock_irq(&rsp->rss_lock);

        if (gp_state == GP_IDLE) {
                /*
                 * See the comment above; this simply does the "synchronous"
                 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
                 */
                synchronize_rcu();
                rcu_sync_func(&rsp->cb_head);
                /* Not really needed, wait_event() would see GP_PASSED. */
                return;
        }

        wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
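
/*
 * Illustrative sketch, continuing the hypothetical "my_" example above:
 * the update side brackets its critical section with rcu_sync_enter()
 * and rcu_sync_exit().  do_update() is a hypothetical helper.
 */
static void do_update(void);            /* hypothetical update work */

static void my_updater(void)
{
        /* Force new readers onto the slow path; may wait for a grace period. */
        rcu_sync_enter(&my_rss);

        /* The lock now also excludes the slow-path readers. */
        mutex_lock(&my_slow_lock);
        do_update();
        mutex_unlock(&my_slow_lock);

        /* Let readers drift back to their fastpaths after a grace period. */
        rcu_sync_exit(&my_rss);
}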

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
        WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

        spin_lock_irq(&rsp->rss_lock);
        if (!--rsp->gp_count) {
                if (rsp->gp_state == GP_PASSED) {
                        WRITE_ONCE(rsp->gp_state, GP_EXIT);
                        rcu_sync_call(rsp);
                } else if (rsp->gp_state == GP_EXIT) {
                        WRITE_ONCE(rsp->gp_state, GP_REPLAY);
                }
        }
        spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
        int gp_state;

        WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
        WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

        spin_lock_irq(&rsp->rss_lock);
        if (rsp->gp_state == GP_REPLAY)
                WRITE_ONCE(rsp->gp_state, GP_EXIT);
        gp_state = rsp->gp_state;
        spin_unlock_irq(&rsp->rss_lock);

        if (gp_state != GP_IDLE) {
                rcu_barrier();
                WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
        }
}
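
/*
 * Illustrative sketch, completing the hypothetical "my_" example:
 * teardown is only legal once every rcu_sync_enter() has been paired
 * with an rcu_sync_exit(); rcu_sync_dtor() then uses rcu_barrier() to
 * wait out any still-pending rcu_sync_func() callback.
 */
static void my_teardown(void)
{
        /* No updater may still be inside its enter()/exit() pair here. */
        rcu_sync_dtor(&my_rss);
        /* my_rss can now be freed or reused safely. */
}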