1 /* SPDX-License-Identifier: GPL-2.0 */ !! 1 #ifndef _ALPHA_BITOPS_H 2 #ifndef __ASM_GENERIC_BITOPS_H !! 2 #define _ALPHA_BITOPS_H 3 #define __ASM_GENERIC_BITOPS_H !! 3 >> 4 #include <linux/config.h> >> 5 #include <linux/kernel.h> >> 6 #include <asm/compiler.h> >> 7 >> 8 /* >> 9 * Copyright 1994, Linus Torvalds. >> 10 */ >> 11 >> 12 /* >> 13 * These have to be done with inline assembly: that way the bit-setting >> 14 * is guaranteed to be atomic. All bit operations return 0 if the bit >> 15 * was cleared before the operation and != 0 if it was not. >> 16 * >> 17 * To get proper branch prediction for the main line, we must branch >> 18 * forward to code at the end of this object's .text section, then >> 19 * branch back to restart the operation. >> 20 * >> 21 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). >> 22 */ >> 23 >> 24 static inline void >> 25 set_bit(unsigned long nr, volatile void * addr) >> 26 { >> 27 unsigned long temp; >> 28 int *m = ((int *) addr) + (nr >> 5); >> 29 >> 30 __asm__ __volatile__( >> 31 "1: ldl_l %0,%3\n" >> 32 " bis %0,%2,%0\n" >> 33 " stl_c %0,%1\n" >> 34 " beq %0,2f\n" >> 35 ".subsection 2\n" >> 36 "2: br 1b\n" >> 37 ".previous" >> 38 :"=&r" (temp), "=m" (*m) >> 39 :"Ir" (1UL << (nr & 31)), "m" (*m)); >> 40 } >> 41 >> 42 /* >> 43 * WARNING: non atomic version. 
/*
 * __set_bit - set bit @nr in the bitmap at @addr.
 *
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	/* 1U: shifting a signed 1 left by 31 is undefined behaviour. */
	*m |= 1U << (nr & 31);
}

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * clear_bit - atomically clear bit @nr in the bitmap at @addr.
 *
 * Same LL/SC retry loop as set_bit, with bic (bit clear) instead of bis.
 */
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * __clear_bit - clear bit @nr in the bitmap at @addr.
 *
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	/* 1U avoids undefined signed overflow for nr & 31 == 31. */
	*m &= ~(1U << (nr & 31));
}

/*
 * change_bit - atomically toggle bit @nr in the bitmap at @addr.
 */
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * __change_bit - toggle bit @nr in the bitmap at @addr.
 *
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	/* 1U: shifting a signed 1 left by 31 is undefined behaviour. */
	*m ^= 1U << (nr & 31);
}

/*
 * test_and_set_bit - atomically set bit @nr; return its previous value.
 *
 * Fast path: if the bit is already set (bne %2,2f) skip the store
 * entirely.  Otherwise LL/SC it in, retrying out of line on contention.
 * An mb is issued on SMP so the operation acts as a memory barrier.
 */
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * __test_and_set_bit - set bit @nr; return its previous value.
 *
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	/* 1UL: shifting a signed 1 left by 31 is undefined behaviour. */
	unsigned long mask = 1UL << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

/*
 * test_and_clear_bit - atomically clear bit @nr; return its previous
 * value.  Mirror image of test_and_set_bit (beq skips the store when
 * the bit is already clear).
 */
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * __test_and_clear_bit - clear bit @nr; return its previous value.
 *
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	/* 1UL: shifting a signed 1 left by 31 is undefined behaviour. */
	unsigned long mask = 1UL << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

/*
 * test_and_change_bit - atomically toggle bit @nr; return its previous
 * value.  Unlike test_and_set/clear, the store always happens (a
 * toggle can never be skipped), so there is no early-out branch.
 */
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * __test_and_change_bit - toggle bit @nr; return its previous value.
 *
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	/* 1UL: shifting a signed 1 left by 31 is undefined behaviour. */
	unsigned long mask = 1UL << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

/*
 * test_bit - return nonzero iff bit @nr is set in the bitmap at @addr.
 */
static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
>> 247 */ >> 248 static inline unsigned long ffz_b(unsigned long x) >> 249 { >> 250 unsigned long sum, x1, x2, x4; >> 251 >> 252 x = ~x & -~x; /* set first 0 bit, clear others */ >> 253 x1 = x & 0xAA; >> 254 x2 = x & 0xCC; >> 255 x4 = x & 0xF0; >> 256 sum = x2 ? 2 : 0; >> 257 sum += (x4 != 0) * 4; >> 258 sum += (x1 != 0); >> 259 >> 260 return sum; >> 261 } >> 262 >> 263 static inline unsigned long ffz(unsigned long word) >> 264 { >> 265 #if defined(__alpha_cix__) && defined(__alpha_fix__) >> 266 /* Whee. EV67 can calculate it directly. */ >> 267 return __kernel_cttz(~word); >> 268 #else >> 269 unsigned long bits, qofs, bofs; >> 270 >> 271 bits = __kernel_cmpbge(word, ~0UL); >> 272 qofs = ffz_b(bits); >> 273 bits = __kernel_extbl(word, qofs); >> 274 bofs = ffz_b(bits); >> 275 >> 276 return qofs*8 + bofs; >> 277 #endif >> 278 } >> 279 >> 280 /* >> 281 * __ffs = Find First set bit in word. Undefined if no set bit exists. >> 282 */ >> 283 static inline unsigned long __ffs(unsigned long word) >> 284 { >> 285 #if defined(__alpha_cix__) && defined(__alpha_fix__) >> 286 /* Whee. EV67 can calculate it directly. */ >> 287 return __kernel_cttz(word); >> 288 #else >> 289 unsigned long bits, qofs, bofs; >> 290 >> 291 bits = __kernel_cmpbge(0, word); >> 292 qofs = ffz_b(bits); >> 293 bits = __kernel_extbl(word, qofs); >> 294 bofs = ffz_b(~bits); >> 295 >> 296 return qofs*8 + bofs; >> 297 #endif >> 298 } >> 299 >> 300 #ifdef __KERNEL__ >> 301 >> 302 /* >> 303 * ffs: find first bit set. This is defined the same way as >> 304 * the libc and compiler builtin ffs routines, therefore >> 305 * differs in spirit from the above __ffs. >> 306 */ >> 307 >> 308 static inline int ffs(int word) >> 309 { >> 310 int result = __ffs(word) + 1; >> 311 return word ? result : 0; >> 312 } >> 313 >> 314 /* >> 315 * fls: find last bit set. 
/*
 * fls: find last bit set (1-based; fls(0) == 0).
 */
#if defined(__alpha_cix__) && defined(__alpha_fix__)
static inline int fls(int word)
{
	/* ctlz counts leading zeros of the 64-bit value, hence 64 - n. */
	return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
#define fls	generic_fls
#endif

/*
 * floor_log2 - largest n with 2^n <= word (returns -1 for word == 0).
 */
static inline int floor_log2(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	return 63 - __kernel_ctlz(word);
#else
	long pos = -1;

	while (word) {
		word >>= 1;
		pos++;
	}
	return pos;
#endif
}

/*
 * ceil_log2 - smallest n with 2^n >= word.
 */
static inline int ceil_log2(unsigned int word)
{
	long pos = floor_log2(word);

	/* Round up unless word is an exact power of two. */
	return pos + (word > (1UL << pos));
}

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV67 can calculate it directly. */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

#define hweight32(x)	hweight64((x) & 0xfffffffful)
#define hweight16(x)	hweight64((x) & 0xfffful)
#define hweight8(x)	hweight64((x) & 0xfful)
#else
static inline unsigned long hweight64(unsigned long w)
{
	unsigned long count = 0;

	while (w) {
		count += w & 1;
		w >>= 1;
	}
	return count;
}

#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)
#endif
370 #define hweight16(x) generic_hweight16(x) >> 371 #define hweight8(x) generic_hweight8(x) 26 #endif 372 #endif 27 373 28 #include <asm-generic/bitops/sched.h> !! 374 #endif /* __KERNEL__ */ 29 #include <asm-generic/bitops/ffs.h> !! 375 30 #include <asm-generic/bitops/hweight.h> !! 376 /* 31 #include <asm-generic/bitops/lock.h> !! 377 * Find next zero bit in a bitmap reasonably efficiently.. >> 378 */ >> 379 static inline unsigned long >> 380 find_next_zero_bit(void * addr, unsigned long size, unsigned long offset) >> 381 { >> 382 unsigned long * p = ((unsigned long *) addr) + (offset >> 6); >> 383 unsigned long result = offset & ~63UL; >> 384 unsigned long tmp; >> 385 >> 386 if (offset >= size) >> 387 return size; >> 388 size -= result; >> 389 offset &= 63UL; >> 390 if (offset) { >> 391 tmp = *(p++); >> 392 tmp |= ~0UL >> (64-offset); >> 393 if (size < 64) >> 394 goto found_first; >> 395 if (~tmp) >> 396 goto found_middle; >> 397 size -= 64; >> 398 result += 64; >> 399 } >> 400 while (size & ~63UL) { >> 401 if (~(tmp = *(p++))) >> 402 goto found_middle; >> 403 result += 64; >> 404 size -= 64; >> 405 } >> 406 if (!size) >> 407 return result; >> 408 tmp = *p; >> 409 found_first: >> 410 tmp |= ~0UL << size; >> 411 if (tmp == ~0UL) /* Are any bits zero? */ >> 412 return result + size; /* Nope. */ >> 413 found_middle: >> 414 return result + ffz(tmp); >> 415 } >> 416 >> 417 /* >> 418 * Find next one bit in a bitmap reasonably efficiently. 
>> 419 */ >> 420 static inline unsigned long >> 421 find_next_bit(void * addr, unsigned long size, unsigned long offset) >> 422 { >> 423 unsigned long * p = ((unsigned long *) addr) + (offset >> 6); >> 424 unsigned long result = offset & ~63UL; >> 425 unsigned long tmp; >> 426 >> 427 if (offset >= size) >> 428 return size; >> 429 size -= result; >> 430 offset &= 63UL; >> 431 if (offset) { >> 432 tmp = *(p++); >> 433 tmp &= ~0UL << offset; >> 434 if (size < 64) >> 435 goto found_first; >> 436 if (tmp) >> 437 goto found_middle; >> 438 size -= 64; >> 439 result += 64; >> 440 } >> 441 while (size & ~63UL) { >> 442 if ((tmp = *(p++))) >> 443 goto found_middle; >> 444 result += 64; >> 445 size -= 64; >> 446 } >> 447 if (!size) >> 448 return result; >> 449 tmp = *p; >> 450 found_first: >> 451 tmp &= ~0UL >> (64 - size); >> 452 if (!tmp) >> 453 return result + size; >> 454 found_middle: >> 455 return result + __ffs(tmp); >> 456 } >> 457 >> 458 /* >> 459 * The optimizer actually does good code for this case. >> 460 */ >> 461 #define find_first_zero_bit(addr, size) \ >> 462 find_next_zero_bit((addr), (size), 0) >> 463 #define find_first_bit(addr, size) \ >> 464 find_next_bit((addr), (size), 0) >> 465 >> 466 #ifdef __KERNEL__ >> 467 >> 468 /* >> 469 * Every architecture must define this function. It's the fastest >> 470 * way of searching a 140-bit bitmap where the first 100 bits are >> 471 * unlikely to be set. It's guaranteed that at least one of the 140 >> 472 * bits is set. >> 473 */ >> 474 static inline unsigned long >> 475 sched_find_first_bit(unsigned long b[3]) >> 476 { >> 477 unsigned long b0 = b[0], b1 = b[1], b2 = b[2]; >> 478 unsigned long ofs; >> 479 >> 480 ofs = (b1 ? 64 : 128); >> 481 b1 = (b1 ? b1 : b2); >> 482 ofs = (b0 ? 0 : ofs); >> 483 b0 = (b0 ? 
b0 : b1); >> 484 >> 485 return __ffs(b0) + ofs; >> 486 } >> 487 >> 488 >> 489 #define ext2_set_bit __test_and_set_bit >> 490 #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) >> 491 #define ext2_clear_bit __test_and_clear_bit >> 492 #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) >> 493 #define ext2_test_bit test_bit >> 494 #define ext2_find_first_zero_bit find_first_zero_bit >> 495 #define ext2_find_next_zero_bit find_next_zero_bit >> 496 >> 497 /* Bitmap functions for the minix filesystem. */ >> 498 #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) >> 499 #define minix_set_bit(nr,addr) __set_bit(nr,addr) >> 500 #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) >> 501 #define minix_test_bit(nr,addr) test_bit(nr,addr) >> 502 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) 32 503 33 #include <asm-generic/bitops/atomic.h> !! 504 #endif /* __KERNEL__ */ 34 #include <asm-generic/bitops/non-atomic.h> << 35 #include <asm-generic/bitops/le.h> << 36 #include <asm-generic/bitops/ext2-atomic.h> << 37 505 38 #endif /* __ASM_GENERIC_BITOPS_H */ !! 506 #endif /* _ALPHA_BITOPS_H */ 39 507
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.