/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>

#define __bit_op(mem, insn, inputs...) do {			\
	unsigned long __temp;					\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) "	%0, %1	\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%0, %1	\n"	\
	"	" __stringify(SC_BEQZ) "	%0, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem)		\
	: inputs						\
	: __LLSC_CLOBBER);					\
} while (0)

#define __test_bit_op(mem, ll_dst, insn, inputs...) ({		\
	unsigned long __orig, __temp;				\
								\
	asm volatile(						\
	"	.set		push			\n"	\
	"	.set		" MIPS_ISA_LEVEL "	\n"	\
	"	" __SYNC(full, loongson3_war) "		\n"	\
	"1:	" __stringify(LONG_LL) " "	ll_dst ", %2\n"	\
	"	" insn "				\n"	\
	"	" __stringify(LONG_SC) "	%1, %2	\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b	\n"	\
	"	.set		pop			\n"	\
	: "=&r"(__orig), "=&r"(__temp),				\
	  "+" GCC_OFF_SMALL_ASM()(mem)				\
	: inputs						\
	: __LLSC_CLOBBER);					\
								\
	__orig;							\
})

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
				 volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
			      volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
			       volatile unsigned long *addr);
bool __mips_xor_is_negative_byte(unsigned long mask,
		volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_set_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
		__bit_op(*m, __stringify(LONG_INS) "\t%0, %3, %2, 1", "i"(bit), "r"(~0));
		return;
	}

	__bit_op(*m, "or\t%0, %2", "ir"(BIT(bit)));
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_clear_bit(nr, addr);
		return;
	}

	if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
		__bit_op(*m, __stringify(LONG_INS) "\t%0, $0, %2, 1", "i"(bit));
		return;
	}

	__bit_op(*m, "and\t%0, %2", "ir"(~BIT(bit)));
}
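/*
 * Illustrative sketch, not part of the original header: with
 * kernel_uses_llsc on a 64-bit kernel, set_bit(5, &word) goes through
 * __bit_op(*m, "or\t%0, %2", "ir"(BIT(5))), which emits roughly the
 * following LL/SC retry loop ($t stands for the __temp scratch register):
 *
 *	1:	lld	$t, (m)		# load-linked the whole word
 *		ori	$t, $t, 0x20	# set bit 5 in the scratch copy
 *		scd	$t, (m)		# store-conditional; $t = 0 on failure
 *		beqz	$t, 1b		# another CPU wrote the word: retry
 *
 * The SC only succeeds if no other CPU has written the word since the
 * LL, which is what makes the read-modify-write atomic without a lock.
 */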
/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;

	if (!kernel_uses_llsc) {
		__mips_change_bit(nr, addr);
		return;
	}

	__bit_op(*m, "xor\t%0, %2", "ir"(BIT(bit)));
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	if (!kernel_uses_llsc) {
		res = __mips_test_and_set_bit_lock(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	return test_and_set_bit_lock(nr, addr);
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_clear_bit(nr, addr);
	} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
		res = __test_bit_op(*m, "%1",
				    __stringify(LONG_EXT) "\t%0, %1, %3, 1;"
				    __stringify(LONG_INS) "\t%1, $0, %3, 1",
				    "i"(bit));
	} else {
		orig = __test_bit_op(*m, "%0",
				     "or\t%1, %0, %3;"
				     "xor\t%1, %1, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}
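/*
 * Usage sketch, not part of the original header; my_flags and
 * MY_BUSY_BIT are hypothetical names. test_and_set_bit_lock() and
 * clear_bit_unlock() form an acquire/release pair, so a single bit can
 * serve as a simple busy flag:
 *
 *	#define MY_BUSY_BIT	0	// hypothetical bit number
 *	static unsigned long my_flags;	// hypothetical flag word
 *
 *	while (test_and_set_bit_lock(MY_BUSY_BIT, &my_flags))
 *		cpu_relax();	// old value was 1: someone else holds it
 *	// ... critical section ...
 *	clear_bit_unlock(MY_BUSY_BIT, &my_flags);
 *
 * The smp_llsc_mb() in test_and_set_bit_lock() provides the acquire
 * barrier and the smp_mb__before_atomic() in clear_bit_unlock() the
 * release barrier that keep the critical section ordered.
 */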
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *m = &addr[BIT_WORD(nr)];
	int bit = nr % BITS_PER_LONG;
	unsigned long res, orig;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_test_and_change_bit(nr, addr);
	} else {
		orig = __test_bit_op(*m, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(BIT(bit)));
		res = (orig & BIT(bit)) != 0;
	}

	smp_llsc_mb();

	return res;
}

static inline bool xor_unlock_is_negative_byte(unsigned long mask,
		volatile unsigned long *p)
{
	unsigned long orig;
	bool res;

	smp_mb__before_atomic();

	if (!kernel_uses_llsc) {
		res = __mips_xor_is_negative_byte(mask, p);
	} else {
		orig = __test_bit_op(*p, "%0",
				     "xor\t%1, %0, %3",
				     "ir"(mask));
		res = (orig & BIT(7)) != 0;
	}

	smp_llsc_mb();

	return res;
}

#undef __bit_op
#undef __test_bit_op

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word.
 * Returns -1 if no 1 bit exists.
 */
static __always_inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
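/*
 * Worked example, not part of the original header: __ffs() reuses
 * __fls() via the two's-complement identity that word & -word isolates
 * the lowest set bit; the index of that one-bit value is then both its
 * first and its last set bit:
 *
 *	word         = 0x68	// binary 0110 1000: bits 3, 5, 6 set
 *	-word        = ...0x98	// binary ...1001 1000
 *	word & -word = 0x08	// only bit 3 survives
 *	__fls(0x08)  = 3	// == __ffs(0x68)
 */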
/*
 * fls - find last bit set.
 * @x: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(unsigned int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	"MIPS_ISA_LEVEL"			\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore it differs in spirit from ffz() (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
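/*
 * Reference values, not part of the original header, as a sanity check
 * for the scan helpers defined above:
 *
 *	fls(0)          == 0	ffs(0)          == 0
 *	fls(1)          == 1	ffs(1)          == 1
 *	fls(0x80000000) == 32	ffs(0x80000000) == 32
 *	fls(0x00f00000) == 24	ffs(0x00f00000) == 21
 *	__fls(1UL)      == 0	__ffs(1UL)      == 0
 */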