// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/bitmap.c
 * Helper functions for bitmap.h.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>

/**
 * DOC: bitmap introduction
 *
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs.  The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'.  The implementation makes
 * no particular effort to keep them zero.  It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * The byte ordering of bitmaps is more natural on little
See the big-endian h 32 * endian architectures. See the big-endian headers 33 * include/asm-ppc64/bitops.h and include/asm- 33 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h 34 * for the best explanations of this ordering. 34 * for the best explanations of this ordering. 35 */ 35 */ 36 36 37 bool __bitmap_equal(const unsigned long *bitma 37 bool __bitmap_equal(const unsigned long *bitmap1, 38 const unsigned long *bitma 38 const unsigned long *bitmap2, unsigned int bits) 39 { 39 { 40 unsigned int k, lim = bits/BITS_PER_LO 40 unsigned int k, lim = bits/BITS_PER_LONG; 41 for (k = 0; k < lim; ++k) 41 for (k = 0; k < lim; ++k) 42 if (bitmap1[k] != bitmap2[k]) 42 if (bitmap1[k] != bitmap2[k]) 43 return false; 43 return false; 44 44 45 if (bits % BITS_PER_LONG) 45 if (bits % BITS_PER_LONG) 46 if ((bitmap1[k] ^ bitmap2[k]) 46 if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 47 return false; 47 return false; 48 48 49 return true; 49 return true; 50 } 50 } 51 EXPORT_SYMBOL(__bitmap_equal); 51 EXPORT_SYMBOL(__bitmap_equal); 52 52 53 bool __bitmap_or_equal(const unsigned long *bi 53 bool __bitmap_or_equal(const unsigned long *bitmap1, 54 const unsigned long *bi 54 const unsigned long *bitmap2, 55 const unsigned long *bi 55 const unsigned long *bitmap3, 56 unsigned int bits) 56 unsigned int bits) 57 { 57 { 58 unsigned int k, lim = bits / BITS_PER_ 58 unsigned int k, lim = bits / BITS_PER_LONG; 59 unsigned long tmp; 59 unsigned long tmp; 60 60 61 for (k = 0; k < lim; ++k) { 61 for (k = 0; k < lim; ++k) { 62 if ((bitmap1[k] | bitmap2[k]) 62 if ((bitmap1[k] | bitmap2[k]) != bitmap3[k]) 63 return false; 63 return false; 64 } 64 } 65 65 66 if (!(bits % BITS_PER_LONG)) 66 if (!(bits % BITS_PER_LONG)) 67 return true; 67 return true; 68 68 69 tmp = (bitmap1[k] | bitmap2[k]) ^ bitm 69 tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k]; 70 return (tmp & BITMAP_LAST_WORD_MASK(bi 70 return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0; 71 } 71 } 72 72 73 void 
__bitmap_complement(unsigned long *dst, c 73 void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits) 74 { 74 { 75 unsigned int k, lim = BITS_TO_LONGS(bi 75 unsigned int k, lim = BITS_TO_LONGS(bits); 76 for (k = 0; k < lim; ++k) 76 for (k = 0; k < lim; ++k) 77 dst[k] = ~src[k]; 77 dst[k] = ~src[k]; 78 } 78 } 79 EXPORT_SYMBOL(__bitmap_complement); 79 EXPORT_SYMBOL(__bitmap_complement); 80 80 81 /** 81 /** 82 * __bitmap_shift_right - logical right shift 82 * __bitmap_shift_right - logical right shift of the bits in a bitmap 83 * @dst : destination bitmap 83 * @dst : destination bitmap 84 * @src : source bitmap 84 * @src : source bitmap 85 * @shift : shift by this many bits 85 * @shift : shift by this many bits 86 * @nbits : bitmap size, in bits 86 * @nbits : bitmap size, in bits 87 * 87 * 88 * Shifting right (dividing) means moving bits 88 * Shifting right (dividing) means moving bits in the MS -> LS bit 89 * direction. Zeros are fed into the vacated 89 * direction. Zeros are fed into the vacated MS positions and the 90 * LS bits shifted off the bottom are lost. 90 * LS bits shifted off the bottom are lost. 91 */ 91 */ 92 void __bitmap_shift_right(unsigned long *dst, 92 void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, 93 unsigned shift, unsign 93 unsigned shift, unsigned nbits) 94 { 94 { 95 unsigned k, lim = BITS_TO_LONGS(nbits) 95 unsigned k, lim = BITS_TO_LONGS(nbits); 96 unsigned off = shift/BITS_PER_LONG, re 96 unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; 97 unsigned long mask = BITMAP_LAST_WORD_ 97 unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); 98 for (k = 0; off + k < lim; ++k) { 98 for (k = 0; off + k < lim; ++k) { 99 unsigned long upper, lower; 99 unsigned long upper, lower; 100 100 101 /* 101 /* 102 * If shift is not word aligne 102 * If shift is not word aligned, take lower rem bits of 103 * word above and make them th 103 * word above and make them the top rem bits of result. 
104 */ 104 */ 105 if (!rem || off + k + 1 >= lim 105 if (!rem || off + k + 1 >= lim) 106 upper = 0; 106 upper = 0; 107 else { 107 else { 108 upper = src[off + k + 108 upper = src[off + k + 1]; 109 if (off + k + 1 == lim 109 if (off + k + 1 == lim - 1) 110 upper &= mask; 110 upper &= mask; 111 upper <<= (BITS_PER_LO 111 upper <<= (BITS_PER_LONG - rem); 112 } 112 } 113 lower = src[off + k]; 113 lower = src[off + k]; 114 if (off + k == lim - 1) 114 if (off + k == lim - 1) 115 lower &= mask; 115 lower &= mask; 116 lower >>= rem; 116 lower >>= rem; 117 dst[k] = lower | upper; 117 dst[k] = lower | upper; 118 } 118 } 119 if (off) 119 if (off) 120 memset(&dst[lim - off], 0, off 120 memset(&dst[lim - off], 0, off*sizeof(unsigned long)); 121 } 121 } 122 EXPORT_SYMBOL(__bitmap_shift_right); 122 EXPORT_SYMBOL(__bitmap_shift_right); 123 123 124 124 125 /** 125 /** 126 * __bitmap_shift_left - logical left shift of 126 * __bitmap_shift_left - logical left shift of the bits in a bitmap 127 * @dst : destination bitmap 127 * @dst : destination bitmap 128 * @src : source bitmap 128 * @src : source bitmap 129 * @shift : shift by this many bits 129 * @shift : shift by this many bits 130 * @nbits : bitmap size, in bits 130 * @nbits : bitmap size, in bits 131 * 131 * 132 * Shifting left (multiplying) means moving bi 132 * Shifting left (multiplying) means moving bits in the LS -> MS 133 * direction. Zeros are fed into the vacated 133 * direction. Zeros are fed into the vacated LS bit positions 134 * and those MS bits shifted off the top are l 134 * and those MS bits shifted off the top are lost. 
135 */ 135 */ 136 136 137 void __bitmap_shift_left(unsigned long *dst, c 137 void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, 138 unsigned int shift, un 138 unsigned int shift, unsigned int nbits) 139 { 139 { 140 int k; 140 int k; 141 unsigned int lim = BITS_TO_LONGS(nbits 141 unsigned int lim = BITS_TO_LONGS(nbits); 142 unsigned int off = shift/BITS_PER_LONG 142 unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; 143 for (k = lim - off - 1; k >= 0; --k) { 143 for (k = lim - off - 1; k >= 0; --k) { 144 unsigned long upper, lower; 144 unsigned long upper, lower; 145 145 146 /* 146 /* 147 * If shift is not word aligne 147 * If shift is not word aligned, take upper rem bits of 148 * word below and make them th 148 * word below and make them the bottom rem bits of result. 149 */ 149 */ 150 if (rem && k > 0) 150 if (rem && k > 0) 151 lower = src[k - 1] >> 151 lower = src[k - 1] >> (BITS_PER_LONG - rem); 152 else 152 else 153 lower = 0; 153 lower = 0; 154 upper = src[k] << rem; 154 upper = src[k] << rem; 155 dst[k + off] = lower | upper; 155 dst[k + off] = lower | upper; 156 } 156 } 157 if (off) 157 if (off) 158 memset(dst, 0, off*sizeof(unsi 158 memset(dst, 0, off*sizeof(unsigned long)); 159 } 159 } 160 EXPORT_SYMBOL(__bitmap_shift_left); 160 EXPORT_SYMBOL(__bitmap_shift_left); 161 161 162 /** 162 /** 163 * bitmap_cut() - remove bit region from bitma 163 * bitmap_cut() - remove bit region from bitmap and right shift remaining bits 164 * @dst: destination bitmap, might overlap wit 164 * @dst: destination bitmap, might overlap with src 165 * @src: source bitmap 165 * @src: source bitmap 166 * @first: start bit of region to be removed 166 * @first: start bit of region to be removed 167 * @cut: number of bits to remove 167 * @cut: number of bits to remove 168 * @nbits: bitmap size, in bits 168 * @nbits: bitmap size, in bits 169 * 169 * 170 * Set the n-th bit of @dst iff the n-th bit o 170 * Set the n-th bit of @dst iff the n-th bit of 
@src is set and 171 * n is less than @first, or the m-th bit of @ 171 * n is less than @first, or the m-th bit of @src is set for any 172 * m such that @first <= n < nbits, and m = n 172 * m such that @first <= n < nbits, and m = n + @cut. 173 * 173 * 174 * In pictures, example for a big-endian 32-bi 174 * In pictures, example for a big-endian 32-bit architecture: 175 * 175 * 176 * The @src bitmap is:: 176 * The @src bitmap is:: 177 * 177 * 178 * 31 63 178 * 31 63 179 * | | 179 * | | 180 * 10000000 11000001 11110010 00010101 1000 180 * 10000000 11000001 11110010 00010101 10000000 11000001 01110010 00010101 181 * | | | 181 * | | | | 182 * 16 14 0 182 * 16 14 0 32 183 * 183 * 184 * if @cut is 3, and @first is 14, bits 14-16 184 * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is:: 185 * 185 * 186 * 31 63 186 * 31 63 187 * | | 187 * | | 188 * 10110000 00011000 00110010 00010101 0001 188 * 10110000 00011000 00110010 00010101 00010000 00011000 00101110 01000010 189 * | | 189 * | | | 190 * 14 (bit 17 0 190 * 14 (bit 17 0 32 191 * from @src) 191 * from @src) 192 * 192 * 193 * Note that @dst and @src might overlap parti 193 * Note that @dst and @src might overlap partially or entirely. 194 * 194 * 195 * This is implemented in the obvious way, wit 195 * This is implemented in the obvious way, with a shift and carry 196 * step for each moved bit. Optimisation is le 196 * step for each moved bit. Optimisation is left as an exercise 197 * for the compiler. 197 * for the compiler. 
198 */ 198 */ 199 void bitmap_cut(unsigned long *dst, const unsi 199 void bitmap_cut(unsigned long *dst, const unsigned long *src, 200 unsigned int first, unsigned i 200 unsigned int first, unsigned int cut, unsigned int nbits) 201 { 201 { 202 unsigned int len = BITS_TO_LONGS(nbits 202 unsigned int len = BITS_TO_LONGS(nbits); 203 unsigned long keep = 0, carry; 203 unsigned long keep = 0, carry; 204 int i; 204 int i; 205 205 206 if (first % BITS_PER_LONG) { 206 if (first % BITS_PER_LONG) { 207 keep = src[first / BITS_PER_LO 207 keep = src[first / BITS_PER_LONG] & 208 (~0UL >> (BITS_PER_LONG 208 (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG)); 209 } 209 } 210 210 211 memmove(dst, src, len * sizeof(*dst)); 211 memmove(dst, src, len * sizeof(*dst)); 212 212 213 while (cut--) { 213 while (cut--) { 214 for (i = first / BITS_PER_LONG 214 for (i = first / BITS_PER_LONG; i < len; i++) { 215 if (i < len - 1) 215 if (i < len - 1) 216 carry = dst[i 216 carry = dst[i + 1] & 1UL; 217 else 217 else 218 carry = 0; 218 carry = 0; 219 219 220 dst[i] = (dst[i] >> 1) 220 dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1)); 221 } 221 } 222 } 222 } 223 223 224 dst[first / BITS_PER_LONG] &= ~0UL << 224 dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG); 225 dst[first / BITS_PER_LONG] |= keep; 225 dst[first / BITS_PER_LONG] |= keep; 226 } 226 } 227 EXPORT_SYMBOL(bitmap_cut); 227 EXPORT_SYMBOL(bitmap_cut); 228 228 229 bool __bitmap_and(unsigned long *dst, const un 229 bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, 230 const unsigned 230 const unsigned long *bitmap2, unsigned int bits) 231 { 231 { 232 unsigned int k; 232 unsigned int k; 233 unsigned int lim = bits/BITS_PER_LONG; 233 unsigned int lim = bits/BITS_PER_LONG; 234 unsigned long result = 0; 234 unsigned long result = 0; 235 235 236 for (k = 0; k < lim; k++) 236 for (k = 0; k < lim; k++) 237 result |= (dst[k] = bitmap1[k] 237 result |= (dst[k] = bitmap1[k] & bitmap2[k]); 238 if (bits % 
BITS_PER_LONG) 238 if (bits % BITS_PER_LONG) 239 result |= (dst[k] = bitmap1[k] 239 result |= (dst[k] = bitmap1[k] & bitmap2[k] & 240 BITMAP_LAST_WORD_MA 240 BITMAP_LAST_WORD_MASK(bits)); 241 return result != 0; 241 return result != 0; 242 } 242 } 243 EXPORT_SYMBOL(__bitmap_and); 243 EXPORT_SYMBOL(__bitmap_and); 244 244 245 void __bitmap_or(unsigned long *dst, const uns 245 void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, 246 const unsigned 246 const unsigned long *bitmap2, unsigned int bits) 247 { 247 { 248 unsigned int k; 248 unsigned int k; 249 unsigned int nr = BITS_TO_LONGS(bits); 249 unsigned int nr = BITS_TO_LONGS(bits); 250 250 251 for (k = 0; k < nr; k++) 251 for (k = 0; k < nr; k++) 252 dst[k] = bitmap1[k] | bitmap2[ 252 dst[k] = bitmap1[k] | bitmap2[k]; 253 } 253 } 254 EXPORT_SYMBOL(__bitmap_or); 254 EXPORT_SYMBOL(__bitmap_or); 255 255 256 void __bitmap_xor(unsigned long *dst, const un 256 void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, 257 const unsigned 257 const unsigned long *bitmap2, unsigned int bits) 258 { 258 { 259 unsigned int k; 259 unsigned int k; 260 unsigned int nr = BITS_TO_LONGS(bits); 260 unsigned int nr = BITS_TO_LONGS(bits); 261 261 262 for (k = 0; k < nr; k++) 262 for (k = 0; k < nr; k++) 263 dst[k] = bitmap1[k] ^ bitmap2[ 263 dst[k] = bitmap1[k] ^ bitmap2[k]; 264 } 264 } 265 EXPORT_SYMBOL(__bitmap_xor); 265 EXPORT_SYMBOL(__bitmap_xor); 266 266 267 bool __bitmap_andnot(unsigned long *dst, const 267 bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, 268 const unsigned 268 const unsigned long *bitmap2, unsigned int bits) 269 { 269 { 270 unsigned int k; 270 unsigned int k; 271 unsigned int lim = bits/BITS_PER_LONG; 271 unsigned int lim = bits/BITS_PER_LONG; 272 unsigned long result = 0; 272 unsigned long result = 0; 273 273 274 for (k = 0; k < lim; k++) 274 for (k = 0; k < lim; k++) 275 result |= (dst[k] = bitmap1[k] 275 result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); 276 if (bits % 
BITS_PER_LONG) 276 if (bits % BITS_PER_LONG) 277 result |= (dst[k] = bitmap1[k] 277 result |= (dst[k] = bitmap1[k] & ~bitmap2[k] & 278 BITMAP_LAST_WORD_MA 278 BITMAP_LAST_WORD_MASK(bits)); 279 return result != 0; 279 return result != 0; 280 } 280 } 281 EXPORT_SYMBOL(__bitmap_andnot); 281 EXPORT_SYMBOL(__bitmap_andnot); 282 282 283 void __bitmap_replace(unsigned long *dst, 283 void __bitmap_replace(unsigned long *dst, 284 const unsigned long *old 284 const unsigned long *old, const unsigned long *new, 285 const unsigned long *mas 285 const unsigned long *mask, unsigned int nbits) 286 { 286 { 287 unsigned int k; 287 unsigned int k; 288 unsigned int nr = BITS_TO_LONGS(nbits) 288 unsigned int nr = BITS_TO_LONGS(nbits); 289 289 290 for (k = 0; k < nr; k++) 290 for (k = 0; k < nr; k++) 291 dst[k] = (old[k] & ~mask[k]) | 291 dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]); 292 } 292 } 293 EXPORT_SYMBOL(__bitmap_replace); 293 EXPORT_SYMBOL(__bitmap_replace); 294 294 295 bool __bitmap_intersects(const unsigned long * 295 bool __bitmap_intersects(const unsigned long *bitmap1, 296 const unsigned long * 296 const unsigned long *bitmap2, unsigned int bits) 297 { 297 { 298 unsigned int k, lim = bits/BITS_PER_LO 298 unsigned int k, lim = bits/BITS_PER_LONG; 299 for (k = 0; k < lim; ++k) 299 for (k = 0; k < lim; ++k) 300 if (bitmap1[k] & bitmap2[k]) 300 if (bitmap1[k] & bitmap2[k]) 301 return true; 301 return true; 302 302 303 if (bits % BITS_PER_LONG) 303 if (bits % BITS_PER_LONG) 304 if ((bitmap1[k] & bitmap2[k]) 304 if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 305 return true; 305 return true; 306 return false; 306 return false; 307 } 307 } 308 EXPORT_SYMBOL(__bitmap_intersects); 308 EXPORT_SYMBOL(__bitmap_intersects); 309 309 310 bool __bitmap_subset(const unsigned long *bitm 310 bool __bitmap_subset(const unsigned long *bitmap1, 311 const unsigned long *bitm 311 const unsigned long *bitmap2, unsigned int bits) 312 { 312 { 313 unsigned int k, lim = 
bits/BITS_PER_LO 313 unsigned int k, lim = bits/BITS_PER_LONG; 314 for (k = 0; k < lim; ++k) 314 for (k = 0; k < lim; ++k) 315 if (bitmap1[k] & ~bitmap2[k]) 315 if (bitmap1[k] & ~bitmap2[k]) 316 return false; 316 return false; 317 317 318 if (bits % BITS_PER_LONG) 318 if (bits % BITS_PER_LONG) 319 if ((bitmap1[k] & ~bitmap2[k]) 319 if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits)) 320 return false; 320 return false; 321 return true; 321 return true; 322 } 322 } 323 EXPORT_SYMBOL(__bitmap_subset); 323 EXPORT_SYMBOL(__bitmap_subset); 324 324 325 #define BITMAP_WEIGHT(FETCH, bits) \ 325 #define BITMAP_WEIGHT(FETCH, bits) \ 326 ({ 326 ({ \ 327 unsigned int __bits = (bits), idx, w = 327 unsigned int __bits = (bits), idx, w = 0; \ 328 328 \ 329 for (idx = 0; idx < __bits / BITS_PER_ 329 for (idx = 0; idx < __bits / BITS_PER_LONG; idx++) \ 330 w += hweight_long(FETCH); 330 w += hweight_long(FETCH); \ 331 331 \ 332 if (__bits % BITS_PER_LONG) 332 if (__bits % BITS_PER_LONG) \ 333 w += hweight_long((FETCH) & BI 333 w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits)); \ 334 334 \ 335 w; 335 w; \ 336 }) 336 }) 337 337 338 unsigned int __bitmap_weight(const unsigned lo 338 unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits) 339 { 339 { 340 return BITMAP_WEIGHT(bitmap[idx], bits 340 return BITMAP_WEIGHT(bitmap[idx], bits); 341 } 341 } 342 EXPORT_SYMBOL(__bitmap_weight); 342 EXPORT_SYMBOL(__bitmap_weight); 343 343 344 unsigned int __bitmap_weight_and(const unsigne 344 unsigned int __bitmap_weight_and(const unsigned long *bitmap1, 345 const unsigned 345 const unsigned long *bitmap2, unsigned int bits) 346 { 346 { 347 return BITMAP_WEIGHT(bitmap1[idx] & bi 347 return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits); 348 } 348 } 349 EXPORT_SYMBOL(__bitmap_weight_and); 349 EXPORT_SYMBOL(__bitmap_weight_and); 350 350 351 unsigned int __bitmap_weight_andnot(const unsi 351 unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1, 352 
const unsigned 352 const unsigned long *bitmap2, unsigned int bits) 353 { 353 { 354 return BITMAP_WEIGHT(bitmap1[idx] & ~b 354 return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits); 355 } 355 } 356 EXPORT_SYMBOL(__bitmap_weight_andnot); 356 EXPORT_SYMBOL(__bitmap_weight_andnot); 357 357 358 void __bitmap_set(unsigned long *map, unsigned 358 void __bitmap_set(unsigned long *map, unsigned int start, int len) 359 { 359 { 360 unsigned long *p = map + BIT_WORD(star 360 unsigned long *p = map + BIT_WORD(start); 361 const unsigned int size = start + len; 361 const unsigned int size = start + len; 362 int bits_to_set = BITS_PER_LONG - (sta 362 int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); 363 unsigned long mask_to_set = BITMAP_FIR 363 unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); 364 364 365 while (len - bits_to_set >= 0) { 365 while (len - bits_to_set >= 0) { 366 *p |= mask_to_set; 366 *p |= mask_to_set; 367 len -= bits_to_set; 367 len -= bits_to_set; 368 bits_to_set = BITS_PER_LONG; 368 bits_to_set = BITS_PER_LONG; 369 mask_to_set = ~0UL; 369 mask_to_set = ~0UL; 370 p++; 370 p++; 371 } 371 } 372 if (len) { 372 if (len) { 373 mask_to_set &= BITMAP_LAST_WOR 373 mask_to_set &= BITMAP_LAST_WORD_MASK(size); 374 *p |= mask_to_set; 374 *p |= mask_to_set; 375 } 375 } 376 } 376 } 377 EXPORT_SYMBOL(__bitmap_set); 377 EXPORT_SYMBOL(__bitmap_set); 378 378 379 void __bitmap_clear(unsigned long *map, unsign 379 void __bitmap_clear(unsigned long *map, unsigned int start, int len) 380 { 380 { 381 unsigned long *p = map + BIT_WORD(star 381 unsigned long *p = map + BIT_WORD(start); 382 const unsigned int size = start + len; 382 const unsigned int size = start + len; 383 int bits_to_clear = BITS_PER_LONG - (s 383 int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); 384 unsigned long mask_to_clear = BITMAP_F 384 unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); 385 385 386 while (len - bits_to_clear >= 0) { 386 while (len - bits_to_clear >= 0) 
{ 387 *p &= ~mask_to_clear; 387 *p &= ~mask_to_clear; 388 len -= bits_to_clear; 388 len -= bits_to_clear; 389 bits_to_clear = BITS_PER_LONG; 389 bits_to_clear = BITS_PER_LONG; 390 mask_to_clear = ~0UL; 390 mask_to_clear = ~0UL; 391 p++; 391 p++; 392 } 392 } 393 if (len) { 393 if (len) { 394 mask_to_clear &= BITMAP_LAST_W 394 mask_to_clear &= BITMAP_LAST_WORD_MASK(size); 395 *p &= ~mask_to_clear; 395 *p &= ~mask_to_clear; 396 } 396 } 397 } 397 } 398 EXPORT_SYMBOL(__bitmap_clear); 398 EXPORT_SYMBOL(__bitmap_clear); 399 399 400 /** 400 /** 401 * bitmap_find_next_zero_area_off - find a con 401 * bitmap_find_next_zero_area_off - find a contiguous aligned zero area 402 * @map: The address to base the search on 402 * @map: The address to base the search on 403 * @size: The bitmap size in bits 403 * @size: The bitmap size in bits 404 * @start: The bitnumber to start searching at 404 * @start: The bitnumber to start searching at 405 * @nr: The number of zeroed bits we're lookin 405 * @nr: The number of zeroed bits we're looking for 406 * @align_mask: Alignment mask for zero area 406 * @align_mask: Alignment mask for zero area 407 * @align_offset: Alignment offset for zero ar 407 * @align_offset: Alignment offset for zero area. 408 * 408 * 409 * The @align_mask should be one less than a p 409 * The @align_mask should be one less than a power of 2; the effect is that 410 * the bit offset of all zero areas this funct 410 * the bit offset of all zero areas this function finds plus @align_offset 411 * is multiple of that power of 2. 411 * is multiple of that power of 2. 
412 */ 412 */ 413 unsigned long bitmap_find_next_zero_area_off(u 413 unsigned long bitmap_find_next_zero_area_off(unsigned long *map, 414 u 414 unsigned long size, 415 u 415 unsigned long start, 416 u 416 unsigned int nr, 417 u 417 unsigned long align_mask, 418 u 418 unsigned long align_offset) 419 { 419 { 420 unsigned long index, end, i; 420 unsigned long index, end, i; 421 again: 421 again: 422 index = find_next_zero_bit(map, size, 422 index = find_next_zero_bit(map, size, start); 423 423 424 /* Align allocation */ 424 /* Align allocation */ 425 index = __ALIGN_MASK(index + align_off 425 index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset; 426 426 427 end = index + nr; 427 end = index + nr; 428 if (end > size) 428 if (end > size) 429 return end; 429 return end; 430 i = find_next_bit(map, end, index); 430 i = find_next_bit(map, end, index); 431 if (i < end) { 431 if (i < end) { 432 start = i + 1; 432 start = i + 1; 433 goto again; 433 goto again; 434 } 434 } 435 return index; 435 return index; 436 } 436 } 437 EXPORT_SYMBOL(bitmap_find_next_zero_area_off); 437 EXPORT_SYMBOL(bitmap_find_next_zero_area_off); 438 438 439 /** 439 /** 440 * bitmap_pos_to_ord - find ordinal of set bit 440 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap 441 * @buf: pointer to a bitmap 441 * @buf: pointer to a bitmap 442 * @pos: a bit position in @buf (0 <= @po 442 * @pos: a bit position in @buf (0 <= @pos < @nbits) 443 * @nbits: number of valid bit positions 443 * @nbits: number of valid bit positions in @buf 444 * 444 * 445 * Map the bit at position @pos in @buf (of le 445 * Map the bit at position @pos in @buf (of length @nbits) to the 446 * ordinal of which set bit it is. If it is n 446 * ordinal of which set bit it is. If it is not set or if @pos 447 * is not a valid bit position, map to -1. 447 * is not a valid bit position, map to -1. 
448 * 448 * 449 * If for example, just bits 4 through 7 are s 449 * If for example, just bits 4 through 7 are set in @buf, then @pos 450 * values 4 through 7 will get mapped to 0 thr 450 * values 4 through 7 will get mapped to 0 through 3, respectively, 451 * and other @pos values will get mapped to -1 451 * and other @pos values will get mapped to -1. When @pos value 7 452 * gets mapped to (returns) @ord value 3 in th 452 * gets mapped to (returns) @ord value 3 in this example, that means 453 * that bit 7 is the 3rd (starting with 0th) s 453 * that bit 7 is the 3rd (starting with 0th) set bit in @buf. 454 * 454 * 455 * The bit positions 0 through @bits are valid 455 * The bit positions 0 through @bits are valid positions in @buf. 456 */ 456 */ 457 static int bitmap_pos_to_ord(const unsigned lo 457 static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits) 458 { 458 { 459 if (pos >= nbits || !test_bit(pos, buf 459 if (pos >= nbits || !test_bit(pos, buf)) 460 return -1; 460 return -1; 461 461 462 return bitmap_weight(buf, pos); 462 return bitmap_weight(buf, pos); 463 } 463 } 464 464 465 /** 465 /** 466 * bitmap_remap - Apply map defined by a pair 466 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap 467 * @dst: remapped result 467 * @dst: remapped result 468 * @src: subset to be remapped 468 * @src: subset to be remapped 469 * @old: defines domain of map 469 * @old: defines domain of map 470 * @new: defines range of map 470 * @new: defines range of map 471 * @nbits: number of bits in each of thes 471 * @nbits: number of bits in each of these bitmaps 472 * 472 * 473 * Let @old and @new define a mapping of bit p 473 * Let @old and @new define a mapping of bit positions, such that 474 * whatever position is held by the n-th set b 474 * whatever position is held by the n-th set bit in @old is mapped 475 * to the n-th set bit in @new. In the more g 475 * to the n-th set bit in @new. 
In the more general case, allowing 476 * for the possibility that the weight 'w' of 476 * for the possibility that the weight 'w' of @new is less than the 477 * weight of @old, map the position of the n-t 477 * weight of @old, map the position of the n-th set bit in @old to 478 * the position of the m-th set bit in @new, w 478 * the position of the m-th set bit in @new, where m == n % w. 479 * 479 * 480 * If either of the @old and @new bitmaps are 480 * If either of the @old and @new bitmaps are empty, or if @src and 481 * @dst point to the same location, then this 481 * @dst point to the same location, then this routine copies @src 482 * to @dst. 482 * to @dst. 483 * 483 * 484 * The positions of unset bits in @old are map 484 * The positions of unset bits in @old are mapped to themselves 485 * (the identity map). 485 * (the identity map). 486 * 486 * 487 * Apply the above specified mapping to @src, 487 * Apply the above specified mapping to @src, placing the result in 488 * @dst, clearing any bits previously set in @ 488 * @dst, clearing any bits previously set in @dst. 489 * 489 * 490 * For example, lets say that @old has bits 4 490 * For example, lets say that @old has bits 4 through 7 set, and 491 * @new has bits 12 through 15 set. This defi 491 * @new has bits 12 through 15 set. This defines the mapping of bit 492 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 492 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other 493 * bit positions unchanged. So if say @src co 493 * bit positions unchanged. So if say @src comes into this routine 494 * with bits 1, 5 and 7 set, then @dst should 494 * with bits 1, 5 and 7 set, then @dst should leave with bits 1, 495 * 13 and 15 set. 495 * 13 and 15 set. 
496 */ 496 */ 497 void bitmap_remap(unsigned long *dst, const un 497 void bitmap_remap(unsigned long *dst, const unsigned long *src, 498 const unsigned long *old, cons 498 const unsigned long *old, const unsigned long *new, 499 unsigned int nbits) 499 unsigned int nbits) 500 { 500 { 501 unsigned int oldbit, w; 501 unsigned int oldbit, w; 502 502 503 if (dst == src) /* following d 503 if (dst == src) /* following doesn't handle inplace remaps */ 504 return; 504 return; 505 bitmap_zero(dst, nbits); 505 bitmap_zero(dst, nbits); 506 506 507 w = bitmap_weight(new, nbits); 507 w = bitmap_weight(new, nbits); 508 for_each_set_bit(oldbit, src, nbits) { 508 for_each_set_bit(oldbit, src, nbits) { 509 int n = bitmap_pos_to_ord(old, 509 int n = bitmap_pos_to_ord(old, oldbit, nbits); 510 510 511 if (n < 0 || w == 0) 511 if (n < 0 || w == 0) 512 set_bit(oldbit, dst); 512 set_bit(oldbit, dst); /* identity map */ 513 else 513 else 514 set_bit(find_nth_bit(n 514 set_bit(find_nth_bit(new, nbits, n % w), dst); 515 } 515 } 516 } 516 } 517 EXPORT_SYMBOL(bitmap_remap); 517 EXPORT_SYMBOL(bitmap_remap); 518 518 519 /** 519 /** 520 * bitmap_bitremap - Apply map defined by a pa 520 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit 521 * @oldbit: bit position to be mapped 521 * @oldbit: bit position to be mapped 522 * @old: defines domain of map 522 * @old: defines domain of map 523 * @new: defines range of map 523 * @new: defines range of map 524 * @bits: number of bits in each of these 524 * @bits: number of bits in each of these bitmaps 525 * 525 * 526 * Let @old and @new define a mapping of bit p 526 * Let @old and @new define a mapping of bit positions, such that 527 * whatever position is held by the n-th set b 527 * whatever position is held by the n-th set bit in @old is mapped 528 * to the n-th set bit in @new. In the more g 528 * to the n-th set bit in @new. 
In the more general case, allowing 529 * for the possibility that the weight 'w' of 529 * for the possibility that the weight 'w' of @new is less than the 530 * weight of @old, map the position of the n-t 530 * weight of @old, map the position of the n-th set bit in @old to 531 * the position of the m-th set bit in @new, w 531 * the position of the m-th set bit in @new, where m == n % w. 532 * 532 * 533 * The positions of unset bits in @old are map 533 * The positions of unset bits in @old are mapped to themselves 534 * (the identity map). 534 * (the identity map). 535 * 535 * 536 * Apply the above specified mapping to bit po 536 * Apply the above specified mapping to bit position @oldbit, returning 537 * the new bit position. 537 * the new bit position. 538 * 538 * 539 * For example, lets say that @old has bits 4 539 * For example, lets say that @old has bits 4 through 7 set, and 540 * @new has bits 12 through 15 set. This defi 540 * @new has bits 12 through 15 set. This defines the mapping of bit 541 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 541 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other 542 * bit positions unchanged. So if say @oldbit 542 * bit positions unchanged. So if say @oldbit is 5, then this routine 543 * returns 13. 543 * returns 13. 
544 */ 544 */ 545 int bitmap_bitremap(int oldbit, const unsigned 545 int bitmap_bitremap(int oldbit, const unsigned long *old, 546 const unsigned 546 const unsigned long *new, int bits) 547 { 547 { 548 int w = bitmap_weight(new, bits); 548 int w = bitmap_weight(new, bits); 549 int n = bitmap_pos_to_ord(old, oldbit, 549 int n = bitmap_pos_to_ord(old, oldbit, bits); 550 if (n < 0 || w == 0) 550 if (n < 0 || w == 0) 551 return oldbit; 551 return oldbit; 552 else 552 else 553 return find_nth_bit(new, bits, 553 return find_nth_bit(new, bits, n % w); 554 } 554 } 555 EXPORT_SYMBOL(bitmap_bitremap); 555 EXPORT_SYMBOL(bitmap_bitremap); 556 556 557 #ifdef CONFIG_NUMA 557 #ifdef CONFIG_NUMA 558 /** 558 /** 559 * bitmap_onto - translate one bitmap relative 559 * bitmap_onto - translate one bitmap relative to another 560 * @dst: resulting translated bitmap 560 * @dst: resulting translated bitmap 561 * @orig: original untranslated bitmap 561 * @orig: original untranslated bitmap 562 * @relmap: bitmap relative to which tran 562 * @relmap: bitmap relative to which translated 563 * @bits: number of bits in each of these 563 * @bits: number of bits in each of these bitmaps 564 * 564 * 565 * Set the n-th bit of @dst iff there exists s 565 * Set the n-th bit of @dst iff there exists some m such that the 566 * n-th bit of @relmap is set, the m-th bit of 566 * n-th bit of @relmap is set, the m-th bit of @orig is set, and 567 * the n-th bit of @relmap is also the m-th _s 567 * the n-th bit of @relmap is also the m-th _set_ bit of @relmap. 568 * (If you understood the previous sentence th 568 * (If you understood the previous sentence the first time your 569 * read it, you're overqualified for your curr 569 * read it, you're overqualified for your current job.) 
570 * 570 * 571 * In other words, @orig is mapped onto (surje 571 * In other words, @orig is mapped onto (surjectively) @dst, 572 * using the map { <n, m> | the n-th bit of @r 572 * using the map { <n, m> | the n-th bit of @relmap is the 573 * m-th set bit of @relmap }. 573 * m-th set bit of @relmap }. 574 * 574 * 575 * Any set bits in @orig above bit number W, w 575 * Any set bits in @orig above bit number W, where W is the 576 * weight of (number of set bits in) @relmap a 576 * weight of (number of set bits in) @relmap are mapped nowhere. 577 * In particular, if for all bits m set in @or 577 * In particular, if for all bits m set in @orig, m >= W, then 578 * @dst will end up empty. In situations wher 578 * @dst will end up empty. In situations where the possibility 579 * of such an empty result is not desired, one 579 * of such an empty result is not desired, one way to avoid it is 580 * to use the bitmap_fold() operator, below, t 580 * to use the bitmap_fold() operator, below, to first fold the 581 * @orig bitmap over itself so that all its se 581 * @orig bitmap over itself so that all its set bits x are in the 582 * range 0 <= x < W. The bitmap_fold() operat 582 * range 0 <= x < W. The bitmap_fold() operator does this by 583 * setting the bit (m % W) in @dst, for each b 583 * setting the bit (m % W) in @dst, for each bit (m) set in @orig. 584 * 584 * 585 * Example [1] for bitmap_onto(): 585 * Example [1] for bitmap_onto(): 586 * Let's say @relmap has bits 30-39 set, and 586 * Let's say @relmap has bits 30-39 set, and @orig has bits 587 * 1, 3, 5, 7, 9 and 11 set. Then on return 587 * 1, 3, 5, 7, 9 and 11 set. Then on return from this routine, 588 * @dst will have bits 31, 33, 35, 37 and 39 588 * @dst will have bits 31, 33, 35, 37 and 39 set. 
589 * 589 * 590 * When bit 0 is set in @orig, it means turn 590 * When bit 0 is set in @orig, it means turn on the bit in 591 * @dst corresponding to whatever is the firs 591 * @dst corresponding to whatever is the first bit (if any) 592 * that is turned on in @relmap. Since bit 0 592 * that is turned on in @relmap. Since bit 0 was off in the 593 * above example, we leave off that bit (bit 593 * above example, we leave off that bit (bit 30) in @dst. 594 * 594 * 595 * When bit 1 is set in @orig (as in the abov 595 * When bit 1 is set in @orig (as in the above example), it 596 * means turn on the bit in @dst correspondin 596 * means turn on the bit in @dst corresponding to whatever 597 * is the second bit that is turned on in @re 597 * is the second bit that is turned on in @relmap. The second 598 * bit in @relmap that was turned on in the a 598 * bit in @relmap that was turned on in the above example was 599 * bit 31, so we turned on bit 31 in @dst. 599 * bit 31, so we turned on bit 31 in @dst. 600 * 600 * 601 * Similarly, we turned on bits 33, 35, 37 an 601 * Similarly, we turned on bits 33, 35, 37 and 39 in @dst, 602 * because they were the 4th, 6th, 8th and 10 602 * because they were the 4th, 6th, 8th and 10th set bits 603 * set in @relmap, and the 4th, 6th, 8th and 603 * set in @relmap, and the 4th, 6th, 8th and 10th bits of 604 * @orig (i.e. bits 3, 5, 7 and 9) were also 604 * @orig (i.e. bits 3, 5, 7 and 9) were also set. 605 * 605 * 606 * When bit 11 is set in @orig, it means turn 606 * When bit 11 is set in @orig, it means turn on the bit in 607 * @dst corresponding to whatever is the twel 607 * @dst corresponding to whatever is the twelfth bit that is 608 * turned on in @relmap. In the above exampl 608 * turned on in @relmap. In the above example, there were 609 * only ten bits turned on in @relmap (30..39 609 * only ten bits turned on in @relmap (30..39), so that bit 610 * 11 was set in @orig had no affect on @dst. 
610 * 11 was set in @orig had no affect on @dst. 611 * 611 * 612 * Example [2] for bitmap_fold() + bitmap_onto 612 * Example [2] for bitmap_fold() + bitmap_onto(): 613 * Let's say @relmap has these ten bits set:: 613 * Let's say @relmap has these ten bits set:: 614 * 614 * 615 * 40 41 42 43 45 48 53 61 74 95 615 * 40 41 42 43 45 48 53 61 74 95 616 * 616 * 617 * (for the curious, that's 40 plus the first 617 * (for the curious, that's 40 plus the first ten terms of the 618 * Fibonacci sequence.) 618 * Fibonacci sequence.) 619 * 619 * 620 * Further lets say we use the following code 620 * Further lets say we use the following code, invoking 621 * bitmap_fold() then bitmap_onto, as suggest 621 * bitmap_fold() then bitmap_onto, as suggested above to 622 * avoid the possibility of an empty @dst res 622 * avoid the possibility of an empty @dst result:: 623 * 623 * 624 * unsigned long *tmp; // a temporary 624 * unsigned long *tmp; // a temporary bitmap's bits 625 * 625 * 626 * bitmap_fold(tmp, orig, bitmap_weight(r 626 * bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits); 627 * bitmap_onto(dst, tmp, relmap, bits); 627 * bitmap_onto(dst, tmp, relmap, bits); 628 * 628 * 629 * Then this table shows what various values 629 * Then this table shows what various values of @dst would be, for 630 * various @orig's. I list the zero-based po 630 * various @orig's. I list the zero-based positions of each set bit. 
631 * The tmp column shows the intermediate resu 631 * The tmp column shows the intermediate result, as computed by 632 * using bitmap_fold() to fold the @orig bitm 632 * using bitmap_fold() to fold the @orig bitmap modulo ten 633 * (the weight of @relmap): 633 * (the weight of @relmap): 634 * 634 * 635 * =============== ============== ======= 635 * =============== ============== ================= 636 * @orig tmp @dst 636 * @orig tmp @dst 637 * 0 0 40 637 * 0 0 40 638 * 1 1 41 638 * 1 1 41 639 * 9 9 95 639 * 9 9 95 640 * 10 0 40 [#f1 640 * 10 0 40 [#f1]_ 641 * 1 3 5 7 1 3 5 7 41 43 4 641 * 1 3 5 7 1 3 5 7 41 43 48 61 642 * 0 1 2 3 4 0 1 2 3 4 40 41 4 642 * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 643 * 0 9 18 27 0 9 8 7 40 61 7 643 * 0 9 18 27 0 9 8 7 40 61 74 95 644 * 0 10 20 30 0 40 644 * 0 10 20 30 0 40 645 * 0 11 22 33 0 1 2 3 40 41 4 645 * 0 11 22 33 0 1 2 3 40 41 42 43 646 * 0 12 24 36 0 2 4 6 40 42 4 646 * 0 12 24 36 0 2 4 6 40 42 45 53 647 * 78 102 211 1 2 8 41 42 7 647 * 78 102 211 1 2 8 41 42 74 [#f1]_ 648 * =============== ============== ======= 648 * =============== ============== ================= 649 * 649 * 650 * .. [#f1] 650 * .. [#f1] 651 * 651 * 652 * For these marked lines, if we hadn't fi 652 * For these marked lines, if we hadn't first done bitmap_fold() 653 * into tmp, then the @dst result would ha 653 * into tmp, then the @dst result would have been empty. 654 * 654 * 655 * If either of @orig or @relmap is empty (no 655 * If either of @orig or @relmap is empty (no set bits), then @dst 656 * will be returned empty. 656 * will be returned empty. 657 * 657 * 658 * If (as explained above) the only set bits i 658 * If (as explained above) the only set bits in @orig are in positions 659 * m where m >= W, (where W is the weight of @ 659 * m where m >= W, (where W is the weight of @relmap) then @dst will 660 * once again be returned empty. 660 * once again be returned empty. 
661 * 661 * 662 * All bits in @dst not set by the above rule 662 * All bits in @dst not set by the above rule are cleared. 663 */ 663 */ 664 void bitmap_onto(unsigned long *dst, const uns 664 void bitmap_onto(unsigned long *dst, const unsigned long *orig, 665 const unsigned long *r 665 const unsigned long *relmap, unsigned int bits) 666 { 666 { 667 unsigned int n, m; /* same meanin 667 unsigned int n, m; /* same meaning as in above comment */ 668 668 669 if (dst == orig) /* following d 669 if (dst == orig) /* following doesn't handle inplace mappings */ 670 return; 670 return; 671 bitmap_zero(dst, bits); 671 bitmap_zero(dst, bits); 672 672 673 /* 673 /* 674 * The following code is a more effici 674 * The following code is a more efficient, but less 675 * obvious, equivalent to the loop: 675 * obvious, equivalent to the loop: 676 * for (m = 0; m < bitmap_weight( 676 * for (m = 0; m < bitmap_weight(relmap, bits); m++) { 677 * n = find_nth_bit(orig, 677 * n = find_nth_bit(orig, bits, m); 678 * if (test_bit(m, orig)) 678 * if (test_bit(m, orig)) 679 * set_bit(n, dst 679 * set_bit(n, dst); 680 * } 680 * } 681 */ 681 */ 682 682 683 m = 0; 683 m = 0; 684 for_each_set_bit(n, relmap, bits) { 684 for_each_set_bit(n, relmap, bits) { 685 /* m == bitmap_pos_to_ord(relm 685 /* m == bitmap_pos_to_ord(relmap, n, bits) */ 686 if (test_bit(m, orig)) 686 if (test_bit(m, orig)) 687 set_bit(n, dst); 687 set_bit(n, dst); 688 m++; 688 m++; 689 } 689 } 690 } 690 } 691 691 692 /** 692 /** 693 * bitmap_fold - fold larger bitmap into small 693 * bitmap_fold - fold larger bitmap into smaller, modulo specified size 694 * @dst: resulting smaller bitmap 694 * @dst: resulting smaller bitmap 695 * @orig: original larger bitmap 695 * @orig: original larger bitmap 696 * @sz: specified size 696 * @sz: specified size 697 * @nbits: number of bits in each of thes 697 * @nbits: number of bits in each of these bitmaps 698 * 698 * 699 * For each bit oldbit in @orig, set bit oldbi 699 * For each bit oldbit 
in @orig, set bit oldbit mod @sz in @dst. 700 * Clear all other bits in @dst. See further 700 * Clear all other bits in @dst. See further the comment and 701 * Example [2] for bitmap_onto() for why and h 701 * Example [2] for bitmap_onto() for why and how to use this. 702 */ 702 */ 703 void bitmap_fold(unsigned long *dst, const uns 703 void bitmap_fold(unsigned long *dst, const unsigned long *orig, 704 unsigned int sz, unsig 704 unsigned int sz, unsigned int nbits) 705 { 705 { 706 unsigned int oldbit; 706 unsigned int oldbit; 707 707 708 if (dst == orig) /* following d 708 if (dst == orig) /* following doesn't handle inplace mappings */ 709 return; 709 return; 710 bitmap_zero(dst, nbits); 710 bitmap_zero(dst, nbits); 711 711 712 for_each_set_bit(oldbit, orig, nbits) 712 for_each_set_bit(oldbit, orig, nbits) 713 set_bit(oldbit % sz, dst); 713 set_bit(oldbit % sz, dst); 714 } 714 } 715 #endif /* CONFIG_NUMA */ 715 #endif /* CONFIG_NUMA */ 716 716 717 unsigned long *bitmap_alloc(unsigned int nbits 717 unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags) 718 { 718 { 719 return kmalloc_array(BITS_TO_LONGS(nbi 719 return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), 720 flags); 720 flags); 721 } 721 } 722 EXPORT_SYMBOL(bitmap_alloc); 722 EXPORT_SYMBOL(bitmap_alloc); 723 723 724 unsigned long *bitmap_zalloc(unsigned int nbit 724 unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags) 725 { 725 { 726 return bitmap_alloc(nbits, flags | __G 726 return bitmap_alloc(nbits, flags | __GFP_ZERO); 727 } 727 } 728 EXPORT_SYMBOL(bitmap_zalloc); 728 EXPORT_SYMBOL(bitmap_zalloc); 729 729 730 unsigned long *bitmap_alloc_node(unsigned int 730 unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node) 731 { 731 { 732 return kmalloc_array_node(BITS_TO_LONG 732 return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long), 733 flags, node) 733 flags, node); 734 } 734 } 735 EXPORT_SYMBOL(bitmap_alloc_node); 735 
EXPORT_SYMBOL(bitmap_alloc_node); 736 736 737 unsigned long *bitmap_zalloc_node(unsigned int 737 unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node) 738 { 738 { 739 return bitmap_alloc_node(nbits, flags 739 return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node); 740 } 740 } 741 EXPORT_SYMBOL(bitmap_zalloc_node); 741 EXPORT_SYMBOL(bitmap_zalloc_node); 742 742 743 void bitmap_free(const unsigned long *bitmap) 743 void bitmap_free(const unsigned long *bitmap) 744 { 744 { 745 kfree(bitmap); 745 kfree(bitmap); 746 } 746 } 747 EXPORT_SYMBOL(bitmap_free); 747 EXPORT_SYMBOL(bitmap_free); 748 748 749 static void devm_bitmap_free(void *data) 749 static void devm_bitmap_free(void *data) 750 { 750 { 751 unsigned long *bitmap = data; 751 unsigned long *bitmap = data; 752 752 753 bitmap_free(bitmap); 753 bitmap_free(bitmap); 754 } 754 } 755 755 756 unsigned long *devm_bitmap_alloc(struct device 756 unsigned long *devm_bitmap_alloc(struct device *dev, 757 unsigned int 757 unsigned int nbits, gfp_t flags) 758 { 758 { 759 unsigned long *bitmap; 759 unsigned long *bitmap; 760 int ret; 760 int ret; 761 761 762 bitmap = bitmap_alloc(nbits, flags); 762 bitmap = bitmap_alloc(nbits, flags); 763 if (!bitmap) 763 if (!bitmap) 764 return NULL; 764 return NULL; 765 765 766 ret = devm_add_action_or_reset(dev, de 766 ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap); 767 if (ret) 767 if (ret) 768 return NULL; 768 return NULL; 769 769 770 return bitmap; 770 return bitmap; 771 } 771 } 772 EXPORT_SYMBOL_GPL(devm_bitmap_alloc); 772 EXPORT_SYMBOL_GPL(devm_bitmap_alloc); 773 773 774 unsigned long *devm_bitmap_zalloc(struct devic 774 unsigned long *devm_bitmap_zalloc(struct device *dev, 775 unsigned int 775 unsigned int nbits, gfp_t flags) 776 { 776 { 777 return devm_bitmap_alloc(dev, nbits, f 777 return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO); 778 } 778 } 779 EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); 779 EXPORT_SYMBOL_GPL(devm_bitmap_zalloc); 780 780 
#if BITS_PER_LONG == 64
/**
 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
 *	@bitmap: array of unsigned longs, the destination bitmap
 *	@buf: array of u32 (in host byte order), the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
	unsigned int i, halfwords;

	halfwords = DIV_ROUND_UP(nbits, 32);
	for (i = 0; i < halfwords; i++) {
		/* Low half from the even-indexed u32, high half (if
		 * present) from the following odd-indexed u32. */
		bitmap[i/2] = (unsigned long) buf[i];
		if (++i < halfwords)
			bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
	}

	/* Clear tail bits in last word beyond nbits. */
	if (nbits % BITS_PER_LONG)
		bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr32);

/**
 * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
 *	@buf: array of u32 (in host byte order), the dest bitmap
 *	@bitmap: array of unsigned longs, the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int i, halfwords;

	halfwords = DIV_ROUND_UP(nbits, 32);
	for (i = 0; i < halfwords; i++) {
		buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
		if (++i < halfwords)
			buf[i] = (u32) (bitmap[i/2] >> 32);
	}

	/* Clear tail bits in last element of array beyond nbits. */
	if (nbits % BITS_PER_LONG)
		buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
#endif

#if BITS_PER_LONG == 32
/**
 * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
 *	@bitmap: array of unsigned longs, the destination bitmap
 *	@buf: array of u64 (in host byte order), the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
{
	int n;

	for (n = nbits; n > 0; n -= 64) {
		u64 val = *buf++;

		*bitmap++ = val;
		if (n > 32)
			*bitmap++ = val >> 32;
	}

	/*
	 * Clear tail bits in the last word beyond nbits.
	 *
	 * Negative index is OK because here we point to the word next
	 * to the last word of the bitmap, except for nbits == 0, which
	 * is tested implicitly.
	 */
	if (nbits % BITS_PER_LONG)
		bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr64);

/**
 * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
 *	@buf: array of u64 (in host byte order), the dest bitmap
 *	@bitmap: array of unsigned longs, the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
{
	const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);

	while (bitmap < end) {
		*buf = *bitmap++;
		if (bitmap < end)
			*buf |= (u64)(*bitmap++) << 32;
		buf++;
	}

	/* Clear tail bits in the last element of array beyond nbits. */
	if (nbits % 64)
		buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.