/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H

#include <linux/bits.h>
#include <asm/barrier.h>

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

/*
 * Generic definitions for bit operations, which should not be used in
 * regular code directly.
 */

/**
 * generic___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p |= mask;
}

/**
 * generic___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p &= ~mask;
}

/**
 * generic___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p ^= mask;
}

/**
 * generic___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}

/**
 * generic___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}
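/*
 * Example (an illustrative sketch, not part of this header): because the
 * __-prefixed read-modify-write helpers above are non-atomic, concurrent
 * callers must serialize them externally, e.g. with a spinlock. The names
 * map_lock, map and claim_slot below are hypothetical:
 *
 *	static DEFINE_SPINLOCK(map_lock);
 *	static unsigned long map[BITS_TO_LONGS(128)];
 *
 *	static bool claim_slot(unsigned long nr)
 *	{
 *		bool was_set;
 *
 *		spin_lock(&map_lock);
 *		was_set = generic___test_and_set_bit(nr, map);
 *		spin_unlock(&map_lock);
 *
 *		return !was_set;	// true iff this caller claimed the bit
 *	}
 */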
/**
 * generic___test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old ^ mask;
        return (old & mask) != 0;
}

/**
 * generic_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        /*
         * Unlike the bitops with the '__' prefix above, this one *is* atomic,
         * so `volatile` must always stay here with no cast-aways. See
         * `Documentation/atomic_bitops.txt` for the details.
         */
        return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

/**
 * generic_test_bit_acquire - Determine, with acquire semantics, whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
generic_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        return 1UL & (smp_load_acquire(p) >> (nr & (BITS_PER_LONG-1)));
}

/*
 * const_*() definitions provide good compile-time optimizations when
 * the passed arguments can be resolved at compile time.
 */
#define const___set_bit			generic___set_bit
#define const___clear_bit		generic___clear_bit
#define const___change_bit		generic___change_bit
#define const___test_and_set_bit	generic___test_and_set_bit
#define const___test_and_clear_bit	generic___test_and_clear_bit
#define const___test_and_change_bit	generic___test_and_change_bit
#define const_test_bit_acquire		generic_test_bit_acquire

/**
 * const_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 *
 * A version of generic_test_bit() which discards the `volatile` qualifier to
 * allow a compiler to optimize code harder. Non-atomic and to be called only
 * for testing compile-time constants, e.g. by the corresponding macros, not
 * directly from "regular" code.
 */
static __always_inline bool
const_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
        unsigned long mask = BIT_MASK(nr);
        unsigned long val = *p;

        return !!(val & mask);
}

#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */
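/*
 * Example (an illustrative sketch, not part of this header): the acquire
 * semantics of generic_test_bit_acquire() order the bit load before all
 * later memory accesses in the reader; one possible pairing is an smp_wmb()
 * on the writer between publishing data and setting the flag bit. The names
 * payload, flags and DATA_READY below are hypothetical:
 *
 *	writer:
 *		payload = compute();
 *		smp_wmb();
 *		set_bit(DATA_READY, flags);
 *
 *	reader:
 *		if (generic_test_bit_acquire(DATA_READY, flags))
 *			consume(payload);	// sees the value published
 *						// before the flag was set
 */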