TOMOYO Linux Cross Reference
Linux/arch/s390/include/asm/bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    Copyright IBM Corp. 1999,2013
 *
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * The description below was taken in large parts from the powerpc
 * bitops header file:
 * Within a word, bits are numbered LSB first.  Lots of places make
 * this assumption by directly testing bits with (val & (1<<nr)).
 * This can cause confusion for large (> 1 word) bitmaps on a
 * big-endian system because, unlike little endian, the number of each
 * bit depends on the word size.
 *
 * The bitop functions are defined to work on unsigned longs, so the bits
 * end up numbered:
 *   |63..............0|127............64|191...........128|255...........192|
 *
 * We also have special functions which work with an MSB0 encoding.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 *
 * The main difference is that bits 0-63 in the bit number field need to be
 * reversed compared to the LSB0 encoded bit fields. This can be achieved by
 * an XOR with 0x3f.
 *
 */

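/*
 * For illustration (assuming BITS_PER_LONG == 64): converting an LSB0 bit
 * number to its MSB0 counterpart only flips the position within the
 * containing word; the word index itself is preserved:
 *
 *	LSB0 nr  5 (word 0, bit 5)  ->   5 ^ 0x3f = 58  (word 0, MSB0 bit 58)
 *	LSB0 nr 70 (word 1, bit 6)  ->  70 ^ 0x3f = 121 (word 1, MSB0 bit 57)
 *
 * because the XOR with 0x3f only touches the lower six bits of the bit
 * number.
 */
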
#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/typecheck.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>

#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/*
 * Return the address of the unsigned long that contains bit number @nr:
 * masking out the bit-within-word part of @nr yields the bit offset of
 * the containing word; shifting right by three converts that into a byte
 * offset, which is added to @ptr.
 */
static inline unsigned long *
__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
        unsigned long addr;

        addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
        return (unsigned long *)addr;
}

/* Mask selecting bit @nr within its containing word (LSB0 numbering). */
static inline unsigned long __bitops_mask(unsigned long nr)
{
        return 1UL << (nr & (BITS_PER_LONG - 1));
}

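/*
 * A short sketch of how the two helpers above split up a bit number,
 * assuming BITS_PER_LONG == 64 and a hypothetical bitmap @map:
 *
 *	nr = 70:
 *		__bitops_word(70, map)  == map + 1   (byte offset 8)
 *		__bitops_mask(70)       == 1UL << 6
 *
 * so the operations below end up touching bit 6 (LSB0) of the second word.
 */
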
static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);

        __atomic64_or(mask, (long *)addr);
}

static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);

        __atomic64_and(~mask, (long *)addr);
}

static __always_inline void arch_change_bit(unsigned long nr,
                                            volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);

        __atomic64_xor(mask, (long *)addr);
}

static inline bool arch_test_and_set_bit(unsigned long nr,
                                         volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = __atomic64_or_barrier(mask, (long *)addr);
        return old & mask;
}

static inline bool arch_test_and_clear_bit(unsigned long nr,
                                           volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = __atomic64_and_barrier(~mask, (long *)addr);
        return old & mask;
}

static inline bool arch_test_and_change_bit(unsigned long nr,
                                            volatile unsigned long *ptr)
{
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = __atomic64_xor_barrier(mask, (long *)addr);
        return old & mask;
}

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);

        *p |= mask;
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);

        *p &= ~mask;
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);

        *p ^= mask;
}

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = *p;
        *p |= mask;
        return old & mask;
}

static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = *p;
        *p &= ~mask;
        return old & mask;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
        unsigned long *p = __bitops_word(nr, addr);
        unsigned long mask = __bitops_mask(nr);
        unsigned long old;

        old = *p;
        *p ^= mask;
        return old & mask;
}

#define arch_test_bit generic_test_bit
#define arch_test_bit_acquire generic_test_bit_acquire

static inline bool arch_test_and_set_bit_lock(unsigned long nr,
                                              volatile unsigned long *ptr)
{
        /* If the bit is already set, avoid the serialized atomic update. */
        if (arch_test_bit(nr, ptr))
                return true;
        return arch_test_and_set_bit(nr, ptr);
}

static inline void arch_clear_bit_unlock(unsigned long nr,
                                         volatile unsigned long *ptr)
{
        smp_mb__before_atomic();
        arch_clear_bit(nr, ptr);
}

static inline void arch___clear_bit_unlock(unsigned long nr,
                                           volatile unsigned long *ptr)
{
        smp_mb();
        arch___clear_bit(nr, ptr);
}

static inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
                volatile unsigned long *ptr)
{
        unsigned long old;

        old = __atomic64_xor_barrier(mask, (long *)ptr);
        return old & BIT(7);
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

/*
 * Functions which use MSB0 bit numbering.
 * The bits are numbered:
 *   |0..............63|64............127|128...........191|192...........255|
 */
unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
                                unsigned long offset);

#define for_each_set_bit_inv(bit, addr, size)                           \
        for ((bit) = find_first_bit_inv((addr), (size));                \
             (bit) < (size);                                            \
             (bit) = find_next_bit_inv((addr), (size), (bit) + 1))

static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
        return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
        return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_and_clear_bit_inv(unsigned long nr,
                                          volatile unsigned long *ptr)
{
        return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
        return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
        return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

static inline bool test_bit_inv(unsigned long nr,
                                const volatile unsigned long *ptr)
{
        return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}

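/*
 * A small usage sketch (hypothetical bitmap, assuming BITS_PER_LONG == 64):
 * setting MSB0 bit 0 touches the most significant bit of the first word,
 * i.e. LSB0 bit 63:
 *
 *	unsigned long map[2] = { 0 };
 *	unsigned long bit;
 *
 *	set_bit_inv(0, map);		// map[0] == 1UL << 63
 *	set_bit_inv(70, map);		// map[1] == 1UL << 57
 *	for_each_set_bit_inv(bit, map, 128)
 *		;			// visits bit == 0, then bit == 70
 */
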
/**
 * __flogr - find leftmost one
 * @word: The word to search
 *
 * Returns the bit number of the most significant bit set,
 * where the most significant bit has bit number 0.
 * If no bit is set this function returns 64.
 */
static inline unsigned char __flogr(unsigned long word)
{
        /*
         * For constant arguments do the search in plain C so the compiler
         * can evaluate the result at compile time; otherwise use the
         * flogr instruction.
         */
        if (__builtin_constant_p(word)) {
                unsigned long bit = 0;

                if (!word)
                        return 64;
                if (!(word & 0xffffffff00000000UL)) {
                        word <<= 32;
                        bit += 32;
                }
                if (!(word & 0xffff000000000000UL)) {
                        word <<= 16;
                        bit += 16;
                }
                if (!(word & 0xff00000000000000UL)) {
                        word <<= 8;
                        bit += 8;
                }
                if (!(word & 0xf000000000000000UL)) {
                        word <<= 4;
                        bit += 4;
                }
                if (!(word & 0xc000000000000000UL)) {
                        word <<= 2;
                        bit += 2;
                }
                if (!(word & 0x8000000000000000UL)) {
                        word <<= 1;
                        bit += 1;
                }
                return bit;
        } else {
                union register_pair rp;

                rp.even = word;
                asm volatile(
                        "       flogr   %[rp],%[rp]\n"
                        : [rp] "+d" (rp.pair) : : "cc");
                return rp.even;
        }
}

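/*
 * A few illustrative values (MSB0 numbering, bit 0 being the most
 * significant bit):
 *
 *	__flogr(0)          == 64
 *	__flogr(1UL << 63)  == 0
 *	__flogr(1)          == 63
 *	__flogr(0x00f0)     == 56
 */
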
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
        return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
}

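/*
 * -word & word isolates the lowest set bit, and XORing the MSB0 result of
 * __flogr() with 63 converts it back to LSB0 numbering. For example
 * (assuming BITS_PER_LONG == 64):
 *
 *	__ffs(0x10)  ->  __flogr(0x10) == 59  ->  59 ^ 63 == 4
 */
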
/**
 * ffs - find first bit set
 * @word: the word to search
 *
 * This is defined the same way as the libc and
 * compiler builtin ffs routines (man ffs).
 */
static inline int ffs(int word)
{
        unsigned long mask = 2 * BITS_PER_LONG - 1;
        unsigned int val = (unsigned int)word;

        return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
}

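/*
 * When @word is zero, __flogr() returns 64, so the expression becomes
 * (1 + (64 ^ 63)) & 127 == 128 & 127 == 0, matching the libc convention
 * that ffs(0) == 0. A few sample values:
 *
 *	ffs(0)       == 0
 *	ffs(1)       == 1
 *	ffs(0x8000)  == 16
 */
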
/**
 * __fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
        return __flogr(word) ^ (BITS_PER_LONG - 1);
}

/**
 * fls64 - find last set bit in a 64-bit word
 * @word: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
static inline int fls64(unsigned long word)
{
        unsigned long mask = 2 * BITS_PER_LONG - 1;

        return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
}

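/*
 * Sample values, following the same scheme as ffs() above:
 *
 *	fls64(0)          == 0
 *	fls64(1)          == 1
 *	fls64(1UL << 63)  == 64
 */
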
/**
 * fls - find last (most-significant) bit set
 * @word: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int word)
{
        return fls64(word);
}

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */
