TOMOYO Linux Cross Reference
Linux/arch/sh/include/asm/bitops-op32.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H

#include <linux/bits.h>

/*
 * The bit modifying instructions on SH-2A are only capable of working
 * with a 3-bit immediate, which signifies the shift position for the bit
 * being worked on.
 */
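/*
 * BYTE_NUMBER() and BYTE_OFFSET() split a bit number into the byte to
 * address and the bit position within that byte, matching what the
 * byte-wide bset.b/bclr.b/bxor.b instructions below expect.
 */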
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE        ((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr)         ((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)         ((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr)         ((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)         ((nr) % BITS_PER_BYTE)
#endif

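/**
 * arch___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Non-atomic variant of set_bit(). When @nr is a compile-time constant,
 * the byte-wide SH-2A bset.b instruction is used; otherwise this falls
 * back to a plain C read-modify-write. Callers must serialize access to
 * the bitmap themselves.
 */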
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                __asm__ __volatile__ (
                        "bset.b %1, @(%O2,%0)           ! __set_bit\n\t"
                        : "+r" (addr)
                        : "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
                        : "t", "memory"
                );
        } else {
                unsigned long mask = BIT_MASK(nr);
                unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

                *p |= mask;
        }
}

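/**
 * arch___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Non-atomic variant of clear_bit(). As with arch___set_bit(), a
 * constant @nr uses the SH-2A bclr.b instruction and a variable @nr
 * falls back to a plain C read-modify-write. Callers must serialize
 * access themselves.
 */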
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                __asm__ __volatile__ (
                        "bclr.b %1, @(%O2,%0)           ! __clear_bit\n\t"
                        : "+r" (addr)
                        : "i" (BYTE_OFFSET(nr)),
                          "i" (BYTE_NUMBER(nr))
                        : "t", "memory"
                );
        } else {
                unsigned long mask = BIT_MASK(nr);
                unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

                *p &= ~mask;
        }
}

/**
 * arch___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                __asm__ __volatile__ (
                        "bxor.b %1, @(%O2,%0)           ! __change_bit\n\t"
                        : "+r" (addr)
                        : "i" (BYTE_OFFSET(nr)),
                          "i" (BYTE_NUMBER(nr))
                        : "t", "memory"
                );
        } else {
                unsigned long mask = BIT_MASK(nr);
                unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

                *p ^= mask;
        }
}

/**
 * arch___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}

/**
 * arch___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old & ~mask;
        return (old & mask) != 0;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old ^ mask;
        return (old & mask) != 0;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#endif /* __ASM_SH_BITOPS_OP32_H */

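Usage note (editor's sketch, not part of the kernel source): these
arch___*() helpers back the kernel's non-atomic bitops (__set_bit(),
__clear_bit(), __test_and_set_bit(), ...) pulled in through
<asm-generic/bitops/non-instrumented-non-atomic.h>. They are intended for
bitmaps that cannot be touched concurrently, e.g. data protected by a
lock or still private to the caller. The bitmap, lock, and sizes below
are hypothetical.

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define MY_NR_SLOTS 64

/* Hypothetical driver-private bitmap, protected by a spinlock. */
static DEFINE_SPINLOCK(my_lock);
static unsigned long my_slots[BITS_TO_LONGS(MY_NR_SLOTS)];

/* Allocate the first free slot, or return -1 if the bitmap is full. */
static int my_alloc_slot(void)
{
        int slot = -1;
        unsigned int i;

        spin_lock(&my_lock);
        for (i = 0; i < MY_NR_SLOTS; i++) {
                /* Non-atomic variant is fine: my_lock serializes all users. */
                if (!__test_and_set_bit(i, my_slots)) {
                        slot = i;
                        break;
                }
        }
        spin_unlock(&my_lock);

        return slot;
}

static void my_free_slot(unsigned int slot)
{
        spin_lock(&my_lock);
        __clear_bit(slot, my_slots);
        spin_unlock(&my_lock);
}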
