TOMOYO Linux Cross Reference
Linux/include/asm-generic/qrwlock.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * These use generic atomic and locking routines, but depend on a fair spinlock
 * implementation in order to be fair themselves.  The implementation in
 * asm-generic/spinlock.h meets these requirements.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked.  */

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING     0x100           /* A writer is waiting     */
#define _QW_LOCKED      0x0ff           /* A writer holds the lock */
#define _QW_WMASK       0x1ff           /* Writer mask             */
#define _QR_SHIFT       9               /* Reader count shift      */
#define _QR_BIAS        (1U << _QR_SHIFT)

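/*
 * Editorial note (not part of the upstream header): these definitions carve
 * the 32-bit ->cnts word into a writer-locked byte (bits 0-7, _QW_LOCKED),
 * a writer-waiting bit (bit 8, _QW_WAITING) and a reader count held in the
 * remaining upper bits, incremented in units of _QR_BIAS.  For example, two
 * readers holding the lock while one writer waits would be encoded as
 *
 *      (2 << _QR_SHIFT) | _QW_WAITING  ==  0x400 | 0x100  ==  0x500
 *
 * and any bit set under _QW_WMASK tells a reader that a writer is either
 * holding or waiting for the lock.
 */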

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (likely(!(cnts & _QW_WMASK))) {
                cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
                if (likely(!(cnts & _QW_WMASK)))
                        return 1;
                atomic_sub(_QR_BIAS, &lock->cnts);
        }
        return 0;
}

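/*
 * Editorial note: the trylock above speculatively adds _QR_BIAS and keeps
 * the reader count only if no writer bits are set in the result; otherwise
 * it backs the increment out with atomic_sub() and reports failure.  The
 * acquire ordering on the successful increment orders the reader's critical
 * section after the previous writer's release.
 */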

/**
 * queued_write_trylock - try to acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_read(&lock->cnts);
        if (unlikely(cnts))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
                                _QW_LOCKED));
}
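
/*
 * Editorial note: a writer can only trylock when ->cnts is exactly zero,
 * i.e. no readers hold the lock, no writer holds it and no writer is
 * waiting; the acquire cmpxchg then installs _QW_LOCKED in a single step.
 */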

/**
 * queued_read_lock - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
        int cnts;

        cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
        if (likely(!(cnts & _QW_WMASK)))
                return;

        /* The slowpath will decrement the reader count, if necessary. */
        queued_read_lock_slowpath(lock);
}

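/*
 * Editorial note: unlike the trylock, the reader fast path adds _QR_BIAS
 * unconditionally and only drops into the slowpath when a writer bit is
 * set; the slowpath (kernel/locking/qrwlock.c) then takes care of any
 * reader-count fixup, as the comment above indicates.
 */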

/**
 * queued_write_lock - acquire write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
        int cnts = 0;
        /* Optimize for the un-contended case. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
                return;

        queued_write_lock_slowpath(lock);
}
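
/*
 * Editorial note: the writer fast path is the same compare-and-exchange as
 * queued_write_trylock(); only when the lock word is already non-zero does
 * the writer fall back to queued_write_lock_slowpath() and queue up behind
 * ->wait_lock.
 */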

/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
        /*
         * Atomically decrement the reader count
         */
        (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock : Pointer to queued rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
        smp_store_release(&lock->wlocked, 0);
}
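
/*
 * Editorial note: releasing the write lock only needs to clear the
 * writer-locked byte; ->wlocked overlays the _QW_LOCKED byte of ->cnts
 * (see asm-generic/qrwlock_types.h), and smp_store_release() provides the
 * release ordering that pairs with the acquire operations above.
 */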

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock : Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
        return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queued rwlock functions.
 */
#define arch_read_lock(l)               queued_read_lock(l)
#define arch_write_lock(l)              queued_write_lock(l)
#define arch_read_trylock(l)            queued_read_trylock(l)
#define arch_write_trylock(l)           queued_write_trylock(l)
#define arch_read_unlock(l)             queued_read_unlock(l)
#define arch_write_unlock(l)            queued_write_unlock(l)
#define arch_rwlock_is_contended(l)     queued_rwlock_is_contended(l)

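/*
 * Editorial usage sketch (not part of the upstream header; the symbol names
 * below are made up for illustration): on architectures that use queued
 * rwlocks (CONFIG_QUEUED_RWLOCKS), the arch_* macros above back the generic
 * rwlock_t API, so kernel code never calls the queued_* helpers directly.
 * A minimal caller would look roughly like this:
 *
 *      #include <linux/spinlock.h>
 *
 *      static DEFINE_RWLOCK(example_lock);
 *      static unsigned long example_value;
 *
 *      static unsigned long example_read(void)
 *      {
 *              unsigned long v;
 *
 *              read_lock(&example_lock);       // shared: many readers at once
 *              v = example_value;
 *              read_unlock(&example_lock);
 *              return v;
 *      }
 *
 *      static void example_write(unsigned long v)
 *      {
 *              write_lock(&example_lock);      // exclusive: a single writer
 *              example_value = v;
 *              write_unlock(&example_lock);
 *      }
 *
 * Readers reach queued_read_lock()/queued_read_unlock() and writers reach
 * queued_write_lock()/queued_write_unlock() through the macros above.
 */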

#endif /* __ASM_GENERIC_QRWLOCK_H */
