TOMOYO Linux Cross Reference
Linux/kernel/locking/qspinlock_paravirt.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _GEN_PV_LOCK_SLOWPATH
#error "do not include this file"
#endif

#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>

/*
 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
 * of spinning them.
 *
 * This relies on the architecture to provide two paravirt hypercalls:
 *
 *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
 *   pv_kick(cpu)             -- wakes a suspended vcpu
 *
 * Using these we implement __pv_queued_spin_lock_slowpath() and
 * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
 * native_queued_spin_unlock().
 */
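To make the contract above concrete, a hypothetical architecture backend could wire the two hypercalls up roughly as follows. This is an illustrative sketch only; hyp_block_self() and hyp_unblock_vcpu() are made-up placeholders for whatever blocking primitive the hypervisor actually exposes, not kernel APIs.

/* Illustrative sketch, not part of this file. */
static void example_pv_wait(u8 *ptr, u8 val)
{
        /*
         * Only block while *ptr still holds the expected value. Spurious
         * wakeups are permitted, so callers must re-check their condition.
         */
        if (READ_ONCE(*ptr) != val)
                return;
        hyp_block_self();               /* hypothetical hypercall */
}

static void example_pv_kick(int cpu)
{
        /* Waking a vcpu that is not blocked is harmless, only slower. */
        hyp_unblock_vcpu(cpu);          /* hypothetical hypercall */
}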

#define _Q_SLOW_VAL     (3U << _Q_LOCKED_OFFSET)

/*
 * Queue Node Adaptive Spinning
 *
 * A queue node vCPU will stop spinning if the vCPU in the previous node is
 * not running. The one lock stealing attempt allowed at slowpath entry
 * mitigates the slight slowdown that this aggressive wait-early mechanism
 * causes for non-overcommitted guests.
 *
 * The status of the previous node will be checked at a fixed interval
 * controlled by PV_PREV_CHECK_MASK. This is to ensure that we won't
 * pound on the cacheline of the previous node too heavily.
 */
#define PV_PREV_CHECK_MASK      0xff
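As a quick worked example of that interval (the spin budget SPIN_THRESHOLD is architecture-defined; 1 << 15 is assumed here purely for illustration):

/*
 * Illustrative arithmetic, assuming SPIN_THRESHOLD == (1 << 15):
 *
 *   (loop & PV_PREV_CHECK_MASK) == 0 only when loop is a multiple of 256,
 *   so one full spin episode touches the previous node's cacheline at most
 *   32768 / 256 = 128 times instead of on every iteration.
 */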

/*
 * Queue node uses: vcpu_running & vcpu_halted.
 * Queue head uses: vcpu_running & vcpu_hashed.
 */
enum vcpu_state {
        vcpu_running = 0,
        vcpu_halted,            /* Used only in pv_wait_node */
        vcpu_hashed,            /* = pv_hash'ed + vcpu_halted */
};
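The transitions between these states are spread over several functions below; the following summary (editorial, derived from pv_wait_node(), pv_kick_node() and pv_wait_head_or_lock()) shows who drives each step:

/*
 * Illustrative state flow (not part of the original file):
 *
 *   vcpu_running --smp_store_mb() in pv_wait_node()--------> vcpu_halted
 *   vcpu_halted  --cmpxchg() in pv_kick_node()-------------> vcpu_hashed
 *   vcpu_halted  --cmpxchg() in pv_wait_node() on wakeup---> vcpu_running
 *   queue head   --pv_wait_head_or_lock()------------------> vcpu_running while
 *                  spinning, vcpu_hashed just before each pv_wait()
 */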

struct pv_node {
        struct mcs_spinlock     mcs;
        int                     cpu;
        u8                      state;
};

/*
 * Hybrid PV queued/unfair lock
 *
 * The function below replaces the regular queued_spin_trylock(), so it is
 * called once when a lock waiter enters the PV slowpath before being queued.
 *
 * The pending bit is set by the queue head vCPU of the MCS wait queue in
 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
 * When that bit becomes visible to the incoming waiters, no lock stealing
 * is allowed. The function will return immediately to make the waiters
 * enter the MCS wait queue. So lock starvation shouldn't happen as long
 * as the queued mode vCPUs are actively running to set the pending bit
 * and hence disable lock stealing.
 *
 * When the pending bit isn't set, the lock waiters will stay in the unfair
 * mode spinning on the lock unless the MCS wait queue is empty. In this
 * case, the lock waiters will enter the queued mode slowpath trying to
 * become the queue head and set the pending bit.
 *
 * This hybrid PV queued/unfair lock combines the best attributes of a
 * queued lock (no lock starvation) and an unfair lock (good performance
 * on not heavily contended locks).
 */
#define queued_spin_trylock(l)  pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
        /*
         * Stay in unfair lock mode as long as queued mode waiters are
         * present in the MCS wait queue but the pending bit isn't set.
         */
        for (;;) {
                int val = atomic_read(&lock->val);
                u8 old = 0;

                if (!(val & _Q_LOCKED_PENDING_MASK) &&
                    try_cmpxchg_acquire(&lock->locked, &old, _Q_LOCKED_VAL)) {
                        lockevent_inc(pv_lock_stealing);
                        return true;
                }
                if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
                        break;

                cpu_relax();
        }

        return false;
}
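For context on when this override fires: kernel/locking/qspinlock.c re-includes itself with _GEN_PV_LOCK_SLOWPATH defined to generate __pv_queued_spin_lock_slowpath(), and the queue path in that slowpath calls queued_spin_trylock() once more before parking in the MCS queue. The sketch below paraphrases that call site; get_this_cpu_qnode() is a made-up helper standing in for the real per-CPU node lookup.

/* Paraphrased sketch of the caller in the generated PV slowpath. */
static void example_pv_slowpath_entry(struct qspinlock *lock)
{
        struct mcs_spinlock *node = get_this_cpu_qnode();      /* hypothetical helper */

        pv_init_node(node);

        /* One unfair attempt (the function above) before joining the queue. */
        if (queued_spin_trylock(lock))
                return;

        /* ... publish the node in the tail, then pv_wait_node() /
         *     pv_wait_head_or_lock() take over ... */
}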

/*
 * The pending bit is used by the queue head vCPU to indicate that it
 * is actively spinning on the lock and no lock stealing is allowed.
 */
#if _Q_PENDING_BITS == 8
static __always_inline void set_pending(struct qspinlock *lock)
{
        WRITE_ONCE(lock->pending, 1);
}

/*
 * The pending bit check in pv_hybrid_queued_unfair_trylock() isn't a memory
 * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
 * lock just to be sure that it will get it.
 */
static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
        u16 old = _Q_PENDING_VAL;

        return !READ_ONCE(lock->locked) &&
               try_cmpxchg_acquire(&lock->locked_pending, &old, _Q_LOCKED_VAL);
}
#else /* _Q_PENDING_BITS == 8 */
static __always_inline void set_pending(struct qspinlock *lock)
{
        atomic_or(_Q_PENDING_VAL, &lock->val);
}

static __always_inline bool trylock_clear_pending(struct qspinlock *lock)
{
        int old, new;

        old = atomic_read(&lock->val);
        do {
                if (old & _Q_LOCKED_MASK)
                        return false;
                /*
                 * Try to clear pending bit & set locked bit
                 */
                new = (old & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
        } while (!atomic_try_cmpxchg_acquire(&lock->val, &old, new));

        return true;
}
#endif /* _Q_PENDING_BITS == 8 */

/*
 * Lock and MCS node addresses hash table for fast lookup
 *
 * Hashing is done on a per-cacheline basis to minimize the need to access
 * more than one cacheline.
 *
 * Dynamically allocate a hash table big enough to hold at least 4X the
 * number of possible cpus in the system. Allocation is done on page
 * granularity. So the minimum number of hash buckets should be at least
 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
 *
 * Since we should not be holding locks from NMI context (very rare indeed) the
 * max load factor is 0.75, which is around the point where open addressing
 * breaks down.
 *
 */
struct pv_hash_entry {
        struct qspinlock *lock;
        struct pv_node   *node;
};

#define PV_HE_PER_LINE  (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN       (PAGE_SIZE / sizeof(struct pv_hash_entry))
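To put numbers on the sizing rules above, here is a worked example for one plausible configuration (64-byte cachelines, 4 KiB pages, 64-bit pointers; these values are illustrative, not taken from the file):

/*
 * Worked example (illustrative configuration):
 *
 *   sizeof(struct pv_hash_entry) = 8 + 8      = 16 bytes
 *   PV_HE_PER_LINE               = 64 / 16    = 4 entries per cacheline
 *   PV_HE_MIN                    = 4096 / 16  = 256 entries per page
 *
 * A 64-vCPU guest therefore gets max(4 * 64, 256) = 256 hash entries (one
 * page), while a 1024-vCPU guest gets 4 * 1024 = 4096 entries (16 pages).
 */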

static struct pv_hash_entry *pv_lock_hash;
static unsigned int pv_lock_hash_bits __read_mostly;

/*
 * Allocate memory for the PV qspinlock hash buckets
 *
 * This function should be called from the paravirt spinlock initialization
 * routine.
 */
void __init __pv_init_lock_hash(void)
{
        int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);

        if (pv_hash_size < PV_HE_MIN)
                pv_hash_size = PV_HE_MIN;

        /*
         * Allocate space from bootmem which should be page-size aligned
         * and hence cacheline aligned.
         */
        pv_lock_hash = alloc_large_system_hash("PV qspinlock",
                                               sizeof(struct pv_hash_entry),
                                               pv_hash_size, 0,
                                               HASH_EARLY | HASH_ZERO,
                                               &pv_lock_hash_bits, NULL,
                                               pv_hash_size, pv_hash_size);
}
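As a usage illustration, a guest's paravirt spinlock setup calls this once during early boot, before switching the lock operations over to the PV slowpath and unlock functions. The function names in this sketch are hypothetical; the real callers live in the architecture guest-support code.

/* Hypothetical init-time caller, for illustration only. */
static void __init example_guest_spinlock_init(void)
{
        if (!example_hypervisor_has_kick_wait())        /* hypothetical capability check */
                return;

        __pv_init_lock_hash();

        /*
         * ... then route the spinlock ops to __pv_queued_spin_lock_slowpath()
         * and __pv_queued_spin_unlock(), and hook up pv_wait()/pv_kick() ...
         */
}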

#define for_each_hash_entry(he, offset, hash)                                           \
        for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;       \
             offset < (1 << pv_lock_hash_bits);                                         \
             offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        int hopcnt = 0;

        for_each_hash_entry(he, offset, hash) {
                struct qspinlock *old = NULL;
                hopcnt++;
                if (try_cmpxchg(&he->lock, &old, lock)) {
                        WRITE_ONCE(he->node, node);
                        lockevent_pv_hop(hopcnt);
                        return &he->lock;
                }
        }
        /*
         * Hard assume there is a free entry for us.
         *
         * This is guaranteed by ensuring every blocked lock only ever consumes
         * a single entry, and since we only have 4 nesting levels per CPU
         * and allocated 4 * num_possible_cpus() entries, this must be so.
         *
         * The single entry is guaranteed by having the lock owner unhash
         * before it releases.
         */
        BUG();
}

static struct pv_node *pv_unhash(struct qspinlock *lock)
{
        unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
        struct pv_hash_entry *he;
        struct pv_node *node;

        for_each_hash_entry(he, offset, hash) {
                if (READ_ONCE(he->lock) == lock) {
                        node = READ_ONCE(he->node);
                        WRITE_ONCE(he->lock, NULL);
                        return node;
                }
        }
        /*
         * Hard assume we'll find an entry.
         *
         * This guarantees a limited lookup time and is itself guaranteed by
         * having the lock owner do the unhash -- IFF the unlock sees the
         * SLOW flag, there MUST be a hash entry.
         */
        BUG();
}

/*
 * Return true when it is time to check the previous node and that node is
 * not in a running state.
 */
static inline bool
pv_wait_early(struct pv_node *prev, int loop)
{
        if ((loop & PV_PREV_CHECK_MASK) != 0)
                return false;

        return READ_ONCE(prev->state) != vcpu_running;
}

/*
 * Initialize the PV part of the mcs_spinlock node.
 */
static void pv_init_node(struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;

        BUILD_BUG_ON(sizeof(struct pv_node) > sizeof(struct qnode));

        pn->cpu = smp_processor_id();
        pn->state = vcpu_running;
}

/*
 * Wait for node->locked to become true, halt the vcpu after a short spin.
 * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
 * behalf.
 */
static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct pv_node *pp = (struct pv_node *)prev;
        bool wait_early;
        int loop;

        for (;;) {
                for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
                        if (READ_ONCE(node->locked))
                                return;
                        if (pv_wait_early(pp, loop)) {
                                wait_early = true;
                                break;
                        }
                        cpu_relax();
                }

                /*
                 * Order pn->state vs pn->locked thusly:
                 *
                 * [S] pn->state = vcpu_halted    [S] next->locked = 1
                 *     MB                             MB
                 * [L] pn->locked               [RmW] pn->state = vcpu_hashed
                 *
                 * Matches the cmpxchg() from pv_kick_node().
                 */
                smp_store_mb(pn->state, vcpu_halted);

                if (!READ_ONCE(node->locked)) {
                        lockevent_inc(pv_wait_node);
                        lockevent_cond_inc(pv_wait_early, wait_early);
                        pv_wait(&pn->state, vcpu_halted);
                }

                /*
                 * If pv_kick_node() changed us to vcpu_hashed, retain that
                 * value so that pv_wait_head_or_lock() knows to not also try
                 * to hash this lock.
                 */
                cmpxchg(&pn->state, vcpu_halted, vcpu_running);

                /*
                 * If the locked flag is still not set after wakeup, it is a
                 * spurious wakeup and the vCPU should wait again. However,
                 * there is a pretty high overhead for CPU halting and kicking.
                 * So it is better to spin for a while in the hope that the
                 * MCS lock will be released soon.
                 */
                lockevent_cond_inc(pv_spurious_wakeup,
                                  !READ_ONCE(node->locked));
        }

        /*
         * By now our node->locked should be 1 and our caller will not actually
         * spin-wait for it. We do however rely on our caller to do a
         * load-acquire for us.
         */
}

/*
 * Called after setting next->locked = 1 when we're the lock owner.
 *
 * Instead of waking the waiters stuck in pv_wait_node(), advance their state
 * so that they are waiting in pv_wait_head_or_lock(); this avoids a
 * wake/sleep cycle.
 */
static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        u8 old = vcpu_halted;
        /*
         * If the vCPU is indeed halted, advance its state to match that of
         * pv_wait_node(). If OTOH this fails, the vCPU was running and will
         * observe its next->locked value and advance itself.
         *
         * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
         *
         * The write to next->locked in arch_mcs_spin_unlock_contended()
         * must be ordered before the read of pn->state in the cmpxchg()
         * below for the code to work correctly. To guarantee full ordering
         * irrespective of the success or failure of the cmpxchg(),
         * a relaxed version with explicit barrier is used. The control
         * dependency will order the reading of pn->state before any
         * subsequent writes.
         */
        smp_mb__before_atomic();
        if (!try_cmpxchg_relaxed(&pn->state, &old, vcpu_hashed))
                return;

        /*
         * Put the lock into the hash table and set the _Q_SLOW_VAL.
         *
         * As this is the same vCPU that will check the _Q_SLOW_VAL value and
         * the hash table later on at unlock time, no atomic instruction is
         * needed.
         */
        WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
}

/*
 * Wait for l->locked to become clear and acquire the lock;
 * halt the vcpu after a short spin.
 * __pv_queued_spin_unlock() will wake us.
 *
 * The current value of the lock will be returned for additional processing.
 */
static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
        struct pv_node *pn = (struct pv_node *)node;
        struct qspinlock **lp = NULL;
        int waitcnt = 0;
        int loop;

        /*
         * If pv_kick_node() already advanced our state, we don't need to
         * insert ourselves into the hash table anymore.
         */
        if (READ_ONCE(pn->state) == vcpu_hashed)
                lp = (struct qspinlock **)1;

        /*
         * Tracking # of slowpath locking operations
         */
        lockevent_inc(lock_slowpath);

        for (;; waitcnt++) {
                /*
                 * Set correct vCPU state to be used by queue node wait-early
                 * mechanism.
                 */
                WRITE_ONCE(pn->state, vcpu_running);

                /*
                 * Set the pending bit in the active lock spinning loop to
                 * disable lock stealing before attempting to acquire the lock.
                 */
                set_pending(lock);
                for (loop = SPIN_THRESHOLD; loop; loop--) {
                        if (trylock_clear_pending(lock))
                                goto gotlock;
                        cpu_relax();
                }
                clear_pending(lock);


                if (!lp) { /* ONCE */
                        lp = pv_hash(lock, pn);

                        /*
                         * We must hash before setting _Q_SLOW_VAL, such that
                         * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
                         * we'll be sure to be able to observe our hash entry.
                         *
                         *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
                         *       MB                           RMB
                         * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
                        if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                                /*
                                 * The lock was free and now we own the lock.
                                 * Change the lock value back to _Q_LOCKED_VAL
                                 * and unhash the table.
                                 */
                                WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
                }
                WRITE_ONCE(pn->state, vcpu_hashed);
                lockevent_inc(pv_wait_head);
                lockevent_cond_inc(pv_wait_again, waitcnt);
                pv_wait(&lock->locked, _Q_SLOW_VAL);

                /*
                 * Because of lock stealing, the queue head vCPU may not be
                 * able to acquire the lock before it has to wait again.
                 */
        }

        /*
         * The cmpxchg() or xchg() call before coming here provides the
         * acquire semantics for locking. The dummy ORing of _Q_LOCKED_VAL
         * here is to indicate to the compiler that the value will always
         * be nonzero to enable better code optimization.
         */
gotlock:
        return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}

/*
 * Include the architecture specific callee-save thunk of the
 * __pv_queued_spin_unlock(). This thunk is put together with
 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
 * function close to each other sharing consecutive instruction cachelines.
 * Alternatively, an architecture-specific version of __pv_queued_spin_unlock()
 * can be defined.
 */
#include <asm/qspinlock_paravirt.h>
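How this include interacts with the C fallback at the bottom of the file: if the architecture header defines the __pv_queued_spin_unlock macro (usually alongside a hand-written callee-save thunk), the generic C implementation below is compiled out; an architecture with no special requirements can provide an essentially empty header. A hypothetical minimal header could look like this:

/*
 * Hypothetical minimal <asm/qspinlock_paravirt.h> for an architecture that
 * is happy with the generic __pv_queued_spin_unlock() defined below.
 */
#ifndef _ASM_EXAMPLE_QSPINLOCK_PARAVIRT_H
#define _ASM_EXAMPLE_QSPINLOCK_PARAVIRT_H

/* Intentionally empty: do not define __pv_queued_spin_unlock. */

#endif /* _ASM_EXAMPLE_QSPINLOCK_PARAVIRT_H */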

/*
 * PV versions of the unlock fastpath and slowpath functions to be used
 * instead of queued_spin_unlock().
 */
__visible __lockfunc void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct pv_node *node;

        if (unlikely(locked != _Q_SLOW_VAL)) {
                WARN(!debug_locks_silent,
                     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
                     (unsigned long)lock, atomic_read(&lock->val));
                return;
        }

        /*
         * A failed cmpxchg doesn't provide any memory-ordering guarantees,
         * so we need a barrier to order the read of the node data in
         * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
         *
         * Matches the xchg() in pv_wait_head_or_lock() setting _Q_SLOW_VAL.
         */
        smp_rmb();

        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
         */
        node = pv_unhash(lock);

        /*
         * Now that we have a reference to the (likely) blocked pv_node,
         * release the lock.
         */
        smp_store_release(&lock->locked, 0);

        /*
         * At this point the memory pointed at by lock can be freed/reused,
         * however we can still use the pv_node to kick the CPU.
         * The other vCPU may not really be halted, but kicking an active
         * vCPU is harmless other than the additional latency in completing
         * the unlock.
         */
        lockevent_inc(pv_kick_unlock);
        pv_kick(node->cpu);
}

#ifndef __pv_queued_spin_unlock
__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        u8 locked = _Q_LOCKED_VAL;

        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
        if (try_cmpxchg_release(&lock->locked, &locked, 0))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}
#endif /* __pv_queued_spin_unlock */
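Putting the two unlock paths together (an editorial walkthrough using the values defined earlier in this file):

/*
 * Unlock walkthrough (illustrative):
 *
 *   lock->locked == _Q_LOCKED_VAL (1): no waiter hashed itself, so the
 *       try_cmpxchg_release() 1 -> 0 above succeeds and we are done.
 *
 *   lock->locked == _Q_SLOW_VAL (3): the queue head (or pv_kick_node() on
 *       its behalf) stored _Q_SLOW_VAL and went to sleep in pv_wait(); the
 *       cmpxchg fails, and the slowpath unhashes the node, stores 0 to
 *       release the lock, and pv_kick()s the sleeping vCPU.
 */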
