TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/bitops.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 *
 * Note: inlines with more than a single statement should be marked
 * __always_inline to avoid problems with older gcc's inlining heuristics.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

#if BITS_PER_LONG == 32
# define _BITOPS_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _BITOPS_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

#define BIT_64(n)                       (U64_C(1) << (n))

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define RLONG_ADDR(x)                    "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x)                   "+m" (*(volatile char *) (x))

#define ADDR                            RLONG_ADDR(addr)

/*
 * We do the locked ops that don't return the old value as
 * a mask operation on a byte.
 */
#define CONST_MASK_ADDR(nr, addr)       WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr)                  (1 << ((nr) & 7))

static __always_inline void
arch_set_bit(long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                asm volatile(LOCK_PREFIX "orb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
                        : "iq" (CONST_MASK(nr))
                        : "memory");
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
                        : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
}
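
/*
 * Illustrative sketch, not part of the upstream header: how a caller sees
 * the bit numbering described above.  Bit 0 is the LSB of map[0] and bit
 * BITS_PER_LONG is the LSB of map[1]; a compile-time constant @nr takes the
 * byte-wide "lock orb" path above, while a variable @nr takes "lock bts".
 * The function name is hypothetical and exists only for this example.
 */
static __always_inline void example_set_first_bit_of_each_word(volatile unsigned long *map)
{
        arch_set_bit(0, map);                   /* constant nr: byte path, map[0] bit 0 */
        arch_set_bit(BITS_PER_LONG, map);       /* constant nr: lands in map[1] bit 0   */
}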

static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_clear_bit(long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                asm volatile(LOCK_PREFIX "andb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
                        : "iq" (~CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
                        : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
}

static __always_inline void
arch_clear_bit_unlock(long nr, volatile unsigned long *addr)
{
        barrier();
        arch_clear_bit(nr, addr);
}

static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline bool arch_xor_unlock_is_negative_byte(unsigned long mask,
                volatile unsigned long *addr)
{
        bool negative;
        asm volatile(LOCK_PREFIX "xorb %2,%1"
                CC_SET(s)
                : CC_OUT(s) (negative), WBYTE_ADDR(addr)
                : "iq" ((char)mask) : "memory");
        return negative;
}
#define arch_xor_unlock_is_negative_byte arch_xor_unlock_is_negative_byte
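
/*
 * Illustrative sketch, not part of the upstream header: the helper above
 * atomically XORs @mask into the low byte at @addr and reports whether the
 * resulting byte has its sign bit (bit 7) set, so an unlock can clear a
 * lock bit and test a "waiters" bit in one instruction.  The helper name
 * and the bit layout below are hypothetical.
 */
static __always_inline bool example_unlock_and_check_waiters(volatile unsigned long *word)
{
        /* Bit 0 is assumed to be set (locked); XOR clears it.  Bit 7 = waiters. */
        return arch_xor_unlock_is_negative_byte(1UL << 0, word);
}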

static __always_inline void
arch___clear_bit_unlock(long nr, volatile unsigned long *addr)
{
        arch___clear_bit(nr, addr);
}

static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
        asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}

static __always_inline void
arch_change_bit(long nr, volatile unsigned long *addr)
{
        if (__builtin_constant_p(nr)) {
                asm volatile(LOCK_PREFIX "xorb %b1,%0"
                        : CONST_MASK_ADDR(nr, addr)
                        : "iq" (CONST_MASK(nr)));
        } else {
                asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
                        : : RLONG_ADDR(addr), "Ir" (nr) : "memory");
        }
}

static __always_inline bool
arch_test_and_set_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, c, "Ir", nr);
}

static __always_inline bool
arch_test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
        return arch_test_and_set_bit(nr, addr);
}
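
/*
 * Illustrative sketch, not part of the upstream header: a minimal
 * test-and-set acquire loop built on the primitive above, spinning until
 * this caller is the one that flips the bit from clear to set.  Hypothetical
 * helper, for illustration only.
 */
static __always_inline void example_acquire_bit(long nr, volatile unsigned long *addr)
{
        while (arch_test_and_set_bit_lock(nr, addr))
                barrier();      /* bit was already set: keep spinning */
}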

static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
        bool oldbit;

        asm(__ASM_SIZE(bts) " %2,%1"
            CC_SET(c)
            : CC_OUT(c) (oldbit)
            : ADDR, "Ir" (nr) : "memory");
        return oldbit;
}

static __always_inline bool
arch_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, c, "Ir", nr);
}

/*
 * Note: the operation is performed atomically with respect to
 * the local CPU, but not other CPUs. Portable code should not
 * rely on this behaviour.
 * KVM relies on this behaviour on x86 for modifying memory that is also
 * accessed from a hypervisor on the same CPU if running in a VM: don't change
 * this without also updating arch/x86/kernel/kvm.c
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
        bool oldbit;

        asm volatile(__ASM_SIZE(btr) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
                     : ADDR, "Ir" (nr) : "memory");
        return oldbit;
}

static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
        bool oldbit;

        asm volatile(__ASM_SIZE(btc) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
                     : ADDR, "Ir" (nr) : "memory");

        return oldbit;
}

static __always_inline bool
arch_test_and_change_bit(long nr, volatile unsigned long *addr)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
}

static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
{
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}

static __always_inline bool constant_test_bit_acquire(long nr, const volatile unsigned long *addr)
{
        bool oldbit;

        asm volatile("testb %2,%1"
                     CC_SET(nz)
                     : CC_OUT(nz) (oldbit)
                     : "m" (((unsigned char *)addr)[nr >> 3]),
                       "i" (1 << (nr & 7))
                     :"memory");

        return oldbit;
}

static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
{
        bool oldbit;

        asm volatile(__ASM_SIZE(bt) " %2,%1"
                     CC_SET(c)
                     : CC_OUT(c) (oldbit)
                     : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");

        return oldbit;
}

static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
        return __builtin_constant_p(nr) ? constant_test_bit(nr, addr) :
                                          variable_test_bit(nr, addr);
}

static __always_inline bool
arch_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
        return __builtin_constant_p(nr) ? constant_test_bit_acquire(nr, addr) :
                                          variable_test_bit(nr, addr);
}
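
/*
 * Illustrative sketch, not part of the upstream header: a plain-C model of
 * the value arch_test_bit() returns, for comparison with the constant and
 * "bt"-based paths above.  The helper name is hypothetical.
 */
static __always_inline bool example_test_bit_model(unsigned long nr,
                const volatile unsigned long *addr)
{
        /* Select the word holding bit @nr, then shift that bit down to bit 0. */
        return (addr[nr >> _BITOPS_LONG_SHIFT] >> (nr & (BITS_PER_LONG - 1))) & 1;
}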

static __always_inline unsigned long variable__ffs(unsigned long word)
{
        asm("rep; bsf %1,%0"
                : "=r" (word)
                : ASM_INPUT_RM (word));
        return word;
}

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
#define __ffs(word)                             \
        (__builtin_constant_p(word) ?           \
         (unsigned long)__builtin_ctzl(word) :  \
         variable__ffs(word))
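
/*
 * Illustrative sketch, not part of the upstream header: iterating the set
 * bits of a word from least to most significant with the zero-based __ffs()
 * (__ffs(0x58) == 3).  Hypothetical helper; the @word != 0 loop condition
 * matters because __ffs(0) is undefined, as documented above.
 */
static __always_inline unsigned int example_count_set_bits(unsigned long word)
{
        unsigned int n = 0;

        while (word) {
                unsigned long bit = __ffs(word);        /* 0-based bit index */

                word &= ~(1UL << bit);                  /* clear that bit    */
                n++;
        }
        return n;
}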

static __always_inline unsigned long variable_ffz(unsigned long word)
{
        asm("rep; bsf %1,%0"
                : "=r" (word)
                : "r" (~word));
        return word;
}

/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
#define ffz(word)                               \
        (__builtin_constant_p(word) ?           \
         (unsigned long)__builtin_ctzl(~word) : \
         variable_ffz(word))
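
/*
 * Illustrative sketch, not part of the upstream header: using ffz() to pick
 * the first free slot in a word-sized allocation map (ffz(0x07) == 3; in
 * general ffz(x) behaves like __ffs(~x)).  Hypothetical helper; the caller
 * must ensure @used != ~0UL, since ffz() is undefined on an all-ones word.
 */
static __always_inline unsigned long example_first_free_slot(unsigned long used)
{
        return ffz(used);
}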

/*
 * __fls: find last set bit in word
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
        if (__builtin_constant_p(word))
                return BITS_PER_LONG - 1 - __builtin_clzl(word);

        asm("bsr %1,%0"
            : "=r" (word)
            : ASM_INPUT_RM (word));
        return word;
}
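
/*
 * Illustrative sketch, not part of the upstream header: for a nonzero word,
 * __fls() is floor(log2(word)) (__fls(0x90) == 7), which makes it a natural
 * building block for rounding up to a power of two.  Hypothetical helper;
 * it assumes n != 0 and that the next power of two still fits in the word.
 */
static __always_inline unsigned long example_roundup_pow_of_two(unsigned long n)
{
        unsigned long msb = __fls(n);           /* floor(log2(n)) */

        return (n == 1UL << msb) ? n : 1UL << (msb + 1);
}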

#undef ADDR

#ifdef __KERNEL__
static __always_inline int variable_ffs(int x)
{
        int r;

#ifdef CONFIG_X86_64
        /*
         * AMD64 says BSFL won't clobber the dest reg if x==0; Intel64 says the
         * dest reg is undefined if x==0, but their CPU architect says its
         * value is written to set it to the same as before, except that the
         * top 32 bits will be cleared.
         *
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
        asm("bsfl %1,%0"
            : "=r" (r)
            : ASM_INPUT_RM (x), "" (-1));
#elif defined(CONFIG_X86_CMOV)
        asm("bsfl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "r" (-1));
#else
        asm("bsfl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}

/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero. The first (least significant) bit
 * is at position 1.
 */
#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
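
/*
 * Illustrative sketch, not part of the upstream header: ffs() is 1-based and
 * safe on zero, unlike the zero-based __ffs() earlier in this file
 * (ffs(0) == 0, ffs(1) == 1, ffs(0x80000000) == 32).  Hypothetical helper
 * showing the relationship between the two.
 */
static __always_inline int example_lowest_set_position(unsigned int x)
{
        return x ? (int)__ffs(x) + 1 : 0;       /* same value as ffs(x) */
}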

/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 32.
 */
static __always_inline int fls(unsigned int x)
{
        int r;

        if (__builtin_constant_p(x))
                return x ? 32 - __builtin_clz(x) : 0;

#ifdef CONFIG_X86_64
        /*
         * AMD64 says BSRL won't clobber the dest reg if x==0; Intel64 says the
         * dest reg is undefined if x==0, but their CPU architect says its
         * value is written to set it to the same as before, except that the
         * top 32 bits will be cleared.
         *
         * We cannot do this on 32 bits because at the very least some
         * 486 CPUs did not behave this way.
         */
        asm("bsrl %1,%0"
            : "=r" (r)
            : ASM_INPUT_RM (x), "" (-1));
#elif defined(CONFIG_X86_CMOV)
        asm("bsrl %1,%0\n\t"
            "cmovzl %2,%0"
            : "=&r" (r) : "rm" (x), "rm" (-1));
#else
        asm("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n"
            "1:" : "=r" (r) : "rm" (x));
#endif
        return r + 1;
}
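
/*
 * Illustrative sketch, not part of the upstream header: fls() read as "how
 * many bits does this value occupy" (fls(0) == 0, fls(255) == 8,
 * fls(256) == 9), which is how order and width calculations typically use
 * it.  The helper name is hypothetical.
 */
static __always_inline int example_bits_needed(unsigned int x)
{
        return fls(x);
}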

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#ifdef CONFIG_X86_64
static __always_inline int fls64(__u64 x)
{
        int bitpos = -1;

        if (__builtin_constant_p(x))
                return x ? 64 - __builtin_clzll(x) : 0;
        /*
         * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the
         * dest reg is undefined if x==0, but their CPU architect says its
         * value is written to set it to the same as before.
         */
        asm("bsrq %1,%q0"
            : "+r" (bitpos)
            : ASM_INPUT_RM (x));
        return bitpos + 1;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

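/*
 * Illustrative sketch, not part of the upstream header: fls64() extends the
 * fls() contract to 64-bit values (fls64(0) == 0, fls64(1ULL << 40) == 41),
 * e.g. when sizing a field that must hold a 64-bit quantity.  The helper
 * name is hypothetical.
 */
static __always_inline int example_bits_needed64(__u64 x)
{
        return fls64(x);
}
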
#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>

#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-non-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */

