// SPDX-License-Identifier: GPL-2.0-only
/*
 * lib/bitmap.c
 * Helper functions for bitmap.h.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/slab.h>

/**
 * DOC: bitmap introduction
 *
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs.  The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'.  The implementation makes
 * no particular effort to keep them zero.  It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * The byte ordering of bitmaps is more natural on little
 * endian architectures.  See the big-endian headers
 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
 * for the best explanations of this ordering.
 */
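
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * a 70-bit bitmap occupies BITS_TO_LONGS(70) unsigned longs, so the last word
 * is only partially used.  bitmap_fill() may also set the unused tail bits,
 * but scalar operations such as bitmap_weight() mask them out, as described
 * above.  The function name is hypothetical and exists only for illustration.
 */
static unsigned int __maybe_unused bitmap_doc_example(void)
{
        DECLARE_BITMAP(map, 70);        /* 2 words on 64-bit, 3 on 32-bit */

        bitmap_fill(map, 70);           /* tail bits beyond 69 are "don't care" */

        return bitmap_weight(map, 70);  /* always 70, tail bits are filtered out */
}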

bool __bitmap_equal(const unsigned long *bitmap1,
                    const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] != bitmap2[k])
                        return false;

        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
                        return false;

        return true;
}
EXPORT_SYMBOL(__bitmap_equal);

bool __bitmap_or_equal(const unsigned long *bitmap1,
                       const unsigned long *bitmap2,
                       const unsigned long *bitmap3,
                       unsigned int bits)
{
        unsigned int k, lim = bits / BITS_PER_LONG;
        unsigned long tmp;

        for (k = 0; k < lim; ++k) {
                if ((bitmap1[k] | bitmap2[k]) != bitmap3[k])
                        return false;
        }

        if (!(bits % BITS_PER_LONG))
                return true;

        tmp = (bitmap1[k] | bitmap2[k]) ^ bitmap3[k];
        return (tmp & BITMAP_LAST_WORD_MASK(bits)) == 0;
}

void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits)
{
        unsigned int k, lim = BITS_TO_LONGS(bits);
        for (k = 0; k < lim; ++k)
                dst[k] = ~src[k];
}
EXPORT_SYMBOL(__bitmap_complement);
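
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * __bitmap_equal() only compares the valid bits, so two bitmaps that differ
 * solely in the unused tail of the last word still compare equal.  The
 * function name is hypothetical.
 */
static bool __maybe_unused bitmap_equal_example(void)
{
        DECLARE_BITMAP(a, 70);
        DECLARE_BITMAP(b, 70);

        bitmap_zero(a, 70);
        bitmap_zero(b, 70);
        set_bit(3, a);
        set_bit(3, b);

        /* Dirty the unused tail bits of @a only. */
        a[BITS_TO_LONGS(70) - 1] |= ~BITMAP_LAST_WORD_MASK(70);

        return bitmap_equal(a, b, 70);  /* still true */
}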

/**
 * __bitmap_shift_right - logical right shift of the bits in a bitmap
 * @dst : destination bitmap
 * @src : source bitmap
 * @shift : shift by this many bits
 * @nbits : bitmap size, in bits
 *
 * Shifting right (dividing) means moving bits in the MS -> LS bit
 * direction.  Zeros are fed into the vacated MS positions and the
 * LS bits shifted off the bottom are lost.
 */
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
                        unsigned shift, unsigned nbits)
{
        unsigned k, lim = BITS_TO_LONGS(nbits);
        unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
        unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);
        for (k = 0; off + k < lim; ++k) {
                unsigned long upper, lower;

                /*
                 * If shift is not word aligned, take lower rem bits of
                 * word above and make them the top rem bits of result.
                 */
                if (!rem || off + k + 1 >= lim)
                        upper = 0;
                else {
                        upper = src[off + k + 1];
                        if (off + k + 1 == lim - 1)
                                upper &= mask;
                        upper <<= (BITS_PER_LONG - rem);
                }
                lower = src[off + k];
                if (off + k == lim - 1)
                        lower &= mask;
                lower >>= rem;
                dst[k] = lower | upper;
        }
        if (off)
                memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);


/**
 * __bitmap_shift_left - logical left shift of the bits in a bitmap
 * @dst : destination bitmap
 * @src : source bitmap
 * @shift : shift by this many bits
 * @nbits : bitmap size, in bits
 *
 * Shifting left (multiplying) means moving bits in the LS -> MS
 * direction.  Zeros are fed into the vacated LS bit positions
 * and those MS bits shifted off the top are lost.
 */

void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
                        unsigned int shift, unsigned int nbits)
{
        int k;
        unsigned int lim = BITS_TO_LONGS(nbits);
        unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
        for (k = lim - off - 1; k >= 0; --k) {
                unsigned long upper, lower;

                /*
                 * If shift is not word aligned, take upper rem bits of
                 * word below and make them the bottom rem bits of result.
                 */
                if (rem && k > 0)
                        lower = src[k - 1] >> (BITS_PER_LONG - rem);
                else
                        lower = 0;
                upper = src[k] << rem;
                dst[k + off] = lower | upper;
        }
        if (off)
                memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);
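
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * shifting right moves set bits towards bit 0 and shifting left moves them
 * towards the most significant end, exactly as the comments above describe.
 * The function name is hypothetical.
 */
static bool __maybe_unused bitmap_shift_example(void)
{
        DECLARE_BITMAP(src, 128);
        DECLARE_BITMAP(dst, 128);

        bitmap_zero(src, 128);
        set_bit(68, src);

        bitmap_shift_right(dst, src, 4, 128);   /* bit 68 moves down to bit 64 */
        bitmap_shift_left(src, dst, 4, 128);    /* and back up to bit 68 */

        return test_bit(68, src);               /* true again */
}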

/**
 * bitmap_cut() - remove bit region from bitmap and right shift remaining bits
 * @dst: destination bitmap, might overlap with src
 * @src: source bitmap
 * @first: start bit of region to be removed
 * @cut: number of bits to remove
 * @nbits: bitmap size, in bits
 *
 * Set the n-th bit of @dst iff the n-th bit of @src is set and
 * n is less than @first, or the m-th bit of @src is set for any
 * m such that @first <= n < nbits, and m = n + @cut.
 *
 * In pictures, example for a big-endian 32-bit architecture:
 *
 * The @src bitmap is::
 *
 *   31                                   63
 *   |                                    |
 *   10000000 11000001 11110010 00010101  10000000 11000001 01110010 00010101
 *                   |  |              |                                    |
 *                  16  14             0                                   32
 *
 * if @cut is 3, and @first is 14, bits 14-16 in @src are cut and @dst is::
 *
 *   31                                   63
 *   |                                    |
 *   10110000 00011000 00110010 00010101  00010000 00011000 00101110 01000010
 *                      |              |                                    |
 *                      14 (bit 17     0                                   32
 *                       from @src)
 *
 * Note that @dst and @src might overlap partially or entirely.
 *
 * This is implemented in the obvious way, with a shift and carry
 * step for each moved bit. Optimisation is left as an exercise
 * for the compiler.
 */
void bitmap_cut(unsigned long *dst, const unsigned long *src,
                unsigned int first, unsigned int cut, unsigned int nbits)
{
        unsigned int len = BITS_TO_LONGS(nbits);
        unsigned long keep = 0, carry;
        int i;

        if (first % BITS_PER_LONG) {
                keep = src[first / BITS_PER_LONG] &
                       (~0UL >> (BITS_PER_LONG - first % BITS_PER_LONG));
        }

        memmove(dst, src, len * sizeof(*dst));

        while (cut--) {
                for (i = first / BITS_PER_LONG; i < len; i++) {
                        if (i < len - 1)
                                carry = dst[i + 1] & 1UL;
                        else
                                carry = 0;

                        dst[i] = (dst[i] >> 1) | (carry << (BITS_PER_LONG - 1));
                }
        }

        dst[first / BITS_PER_LONG] &= ~0UL << (first % BITS_PER_LONG);
        dst[first / BITS_PER_LONG] |= keep;
}
EXPORT_SYMBOL(bitmap_cut);
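
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * cutting @cut bits at @first removes that region and shifts everything above
 * it down.  With bits 0, 5, 14 and 20 set, cutting 3 bits at position 14
 * drops bit 14 and moves bit 20 down to bit 17.  The function name is
 * hypothetical.
 */
static bool __maybe_unused bitmap_cut_example(void)
{
        DECLARE_BITMAP(map, 32);

        bitmap_zero(map, 32);
        set_bit(0, map);
        set_bit(5, map);
        set_bit(14, map);
        set_bit(20, map);

        bitmap_cut(map, map, 14, 3, 32);        /* dst and src may overlap */

        return test_bit(0, map) && test_bit(5, map) &&
               test_bit(17, map) && !test_bit(14, map);
}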

bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k;
        unsigned int lim = bits/BITS_PER_LONG;
        unsigned long result = 0;

        for (k = 0; k < lim; k++)
                result |= (dst[k] = bitmap1[k] & bitmap2[k]);
        if (bits % BITS_PER_LONG)
                result |= (dst[k] = bitmap1[k] & bitmap2[k] &
                           BITMAP_LAST_WORD_MASK(bits));
        return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);

void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k;
        unsigned int nr = BITS_TO_LONGS(bits);

        for (k = 0; k < nr; k++)
                dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);

void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k;
        unsigned int nr = BITS_TO_LONGS(bits);

        for (k = 0; k < nr; k++)
                dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);

bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k;
        unsigned int lim = bits/BITS_PER_LONG;
        unsigned long result = 0;

        for (k = 0; k < lim; k++)
                result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
        if (bits % BITS_PER_LONG)
                result |= (dst[k] = bitmap1[k] & ~bitmap2[k] &
                           BITMAP_LAST_WORD_MASK(bits));
        return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);

void __bitmap_replace(unsigned long *dst,
                      const unsigned long *old, const unsigned long *new,
                      const unsigned long *mask, unsigned int nbits)
{
        unsigned int k;
        unsigned int nr = BITS_TO_LONGS(nbits);

        for (k = 0; k < nr; k++)
                dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
}
EXPORT_SYMBOL(__bitmap_replace);

bool __bitmap_intersects(const unsigned long *bitmap1,
                         const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] & bitmap2[k])
                        return true;

        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
                        return true;
        return false;
}
EXPORT_SYMBOL(__bitmap_intersects);

bool __bitmap_subset(const unsigned long *bitmap1,
                     const unsigned long *bitmap2, unsigned int bits)
{
        unsigned int k, lim = bits/BITS_PER_LONG;
        for (k = 0; k < lim; ++k)
                if (bitmap1[k] & ~bitmap2[k])
                        return false;

        if (bits % BITS_PER_LONG)
                if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
                        return false;
        return true;
}
EXPORT_SYMBOL(__bitmap_subset);
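
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * the logical helpers above combine bitmaps word by word; bitmap_and() and
 * bitmap_andnot() additionally report whether the result is non-empty.  With
 * a = {1, 2} and b = {2, 3}, a & b = {2}, the two intersect, and {2} is a
 * subset of b.  The function name is hypothetical.
 */
static bool __maybe_unused bitmap_logic_example(void)
{
        DECLARE_BITMAP(a, 100);
        DECLARE_BITMAP(b, 100);
        DECLARE_BITMAP(res, 100);

        bitmap_zero(a, 100);
        bitmap_zero(b, 100);
        set_bit(1, a);
        set_bit(2, a);
        set_bit(2, b);
        set_bit(3, b);

        bitmap_and(res, a, b, 100);             /* res = {2} */

        return bitmap_intersects(a, b, 100) &&  /* true: both contain bit 2 */
               bitmap_subset(res, b, 100) &&    /* true: {2} is within b */
               !test_bit(1, res);
}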

#define BITMAP_WEIGHT(FETCH, bits)                                              \
({                                                                              \
        unsigned int __bits = (bits), idx, w = 0;                               \
                                                                                \
        for (idx = 0; idx < __bits / BITS_PER_LONG; idx++)                      \
                w += hweight_long(FETCH);                                       \
                                                                                \
        if (__bits % BITS_PER_LONG)                                             \
                w += hweight_long((FETCH) & BITMAP_LAST_WORD_MASK(__bits));     \
                                                                                \
        w;                                                                      \
})

unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
{
        return BITMAP_WEIGHT(bitmap[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight);

unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        return BITMAP_WEIGHT(bitmap1[idx] & bitmap2[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight_and);

unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
                                const unsigned long *bitmap2, unsigned int bits)
{
        return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
}
EXPORT_SYMBOL(__bitmap_weight_andnot);

void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned int size = start + len;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_set >= 0) {
                *p |= mask_to_set;
                len -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (len) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                *p |= mask_to_set;
        }
}
EXPORT_SYMBOL(__bitmap_set);

void __bitmap_clear(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + BIT_WORD(start);
        const unsigned int size = start + len;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_clear >= 0) {
                *p &= ~mask_to_clear;
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (len) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                *p &= ~mask_to_clear;
        }
}
EXPORT_SYMBOL(__bitmap_clear);
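
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * bitmap_set() and bitmap_clear() operate on a run of bits, and
 * bitmap_weight() counts the bits that remain set.  The function name is
 * hypothetical.
 */
static unsigned int __maybe_unused bitmap_set_clear_example(void)
{
        DECLARE_BITMAP(map, 128);

        bitmap_zero(map, 128);
        bitmap_set(map, 60, 10);        /* set bits 60..69, crossing a word */
        bitmap_clear(map, 62, 3);       /* clear bits 62..64 again */

        return bitmap_weight(map, 128); /* 7 */
}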

/**
 * bitmap_find_next_zero_area_off - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @align_mask: Alignment mask for zero area
 * @align_offset: Alignment offset for zero area.
 *
 * The @align_mask should be one less than a power of 2; the effect is that
 * the bit offset of all zero areas this function finds plus @align_offset
 * is multiple of that power of 2.
 */
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
                                             unsigned long size,
                                             unsigned long start,
                                             unsigned int nr,
                                             unsigned long align_mask,
                                             unsigned long align_offset)
{
        unsigned long index, end, i;
again:
        index = find_next_zero_bit(map, size, start);

        /* Align allocation */
        index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;

        end = index + nr;
        if (end > size)
                return end;
        i = find_next_bit(map, end, index);
        if (i < end) {
                start = i + 1;
                goto again;
        }
        return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
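
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * a typical allocator-style use of bitmap_find_next_zero_area(), the
 * <linux/bitmap.h> wrapper around the function above with @align_offset == 0.
 * An @align_mask of 3 asks for a region whose start is a multiple of 4.  The
 * function name is hypothetical.
 */
static unsigned long __maybe_unused bitmap_zero_area_example(unsigned long *map,
                                                             unsigned long size)
{
        unsigned long pos;

        pos = bitmap_find_next_zero_area(map, size, 0, 8, 3);
        if (pos >= size)                /* no suitably aligned free region */
                return size;

        bitmap_set(map, pos, 8);        /* claim the region */
        return pos;
}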

/**
 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
 *	@buf: pointer to a bitmap
 *	@pos: a bit position in @buf (0 <= @pos < @nbits)
 *	@nbits: number of valid bit positions in @buf
 *
 * Map the bit at position @pos in @buf (of length @nbits) to the
 * ordinal of which set bit it is.  If it is not set or if @pos
 * is not a valid bit position, map to -1.
 *
 * If for example, just bits 4 through 7 are set in @buf, then @pos
 * values 4 through 7 will get mapped to 0 through 3, respectively,
 * and other @pos values will get mapped to -1.  When @pos value 7
 * gets mapped to (returns) @ord value 3 in this example, that means
 * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
 *
 * The bit positions 0 through @nbits - 1 are valid positions in @buf.
 */
static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits)
{
        if (pos >= nbits || !test_bit(pos, buf))
                return -1;

        return bitmap_weight(buf, pos);
}

/**
 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
 *	@dst: remapped result
 *	@src: subset to be remapped
 *	@old: defines domain of map
 *	@new: defines range of map
 *	@nbits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * If either of the @old and @new bitmaps are empty, or if @src and
 * @dst point to the same location, then this routine copies @src
 * to @dst.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identity map).
 *
 * Apply the above specified mapping to @src, placing the result in
 * @dst, clearing any bits previously set in @dst.
 *
 * For example, let's say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @src comes into this routine
 * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
 * 13 and 15 set.
 */
void bitmap_remap(unsigned long *dst, const unsigned long *src,
                const unsigned long *old, const unsigned long *new,
                unsigned int nbits)
{
        unsigned int oldbit, w;

        if (dst == src)         /* following doesn't handle inplace remaps */
                return;
        bitmap_zero(dst, nbits);

        w = bitmap_weight(new, nbits);
        for_each_set_bit(oldbit, src, nbits) {
                int n = bitmap_pos_to_ord(old, oldbit, nbits);

                if (n < 0 || w == 0)
                        set_bit(oldbit, dst);   /* identity map */
                else
                        set_bit(find_nth_bit(new, nbits, n % w), dst);
        }
}
EXPORT_SYMBOL(bitmap_remap);
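
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * the example from the comment above, spelled out in code.  @old has bits 4-7
 * set and @new has bits 12-15 set, so a @src of {1, 5, 7} is remapped to
 * {1, 13, 15}.  The function name is hypothetical.
 */
static bool __maybe_unused bitmap_remap_example(void)
{
        DECLARE_BITMAP(old, 32);
        DECLARE_BITMAP(new, 32);
        DECLARE_BITMAP(src, 32);
        DECLARE_BITMAP(dst, 32);

        bitmap_zero(old, 32);
        bitmap_zero(new, 32);
        bitmap_zero(src, 32);
        bitmap_set(old, 4, 4);          /* domain: bits 4..7 */
        bitmap_set(new, 12, 4);         /* range:  bits 12..15 */
        set_bit(1, src);
        set_bit(5, src);
        set_bit(7, src);

        bitmap_remap(dst, src, old, new, 32);

        return test_bit(1, dst) && test_bit(13, dst) && test_bit(15, dst);
}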

/**
 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
 *	@oldbit: bit position to be mapped
 *	@old: defines domain of map
 *	@new: defines range of map
 *	@bits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identity map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
 *
 * For example, let's say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @oldbit is 5, then this routine
 * returns 13.
 */
int bitmap_bitremap(int oldbit, const unsigned long *old,
                                const unsigned long *new, int bits)
{
        int w = bitmap_weight(new, bits);
        int n = bitmap_pos_to_ord(old, oldbit, bits);
        if (n < 0 || w == 0)
                return oldbit;
        else
                return find_nth_bit(new, bits, n % w);
}
EXPORT_SYMBOL(bitmap_bitremap);
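
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * the single-bit variant of the remap example.  With the same @old (bits 4-7)
 * and @new (bits 12-15), bit 5 maps to 13 and an unmapped bit such as 1 is
 * returned unchanged.  The function name is hypothetical.
 */
static bool __maybe_unused bitmap_bitremap_example(void)
{
        DECLARE_BITMAP(old, 32);
        DECLARE_BITMAP(new, 32);

        bitmap_zero(old, 32);
        bitmap_zero(new, 32);
        bitmap_set(old, 4, 4);
        bitmap_set(new, 12, 4);

        return bitmap_bitremap(5, old, new, 32) == 13 &&
               bitmap_bitremap(1, old, new, 32) == 1;
}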

#ifdef CONFIG_NUMA
/**
 * bitmap_onto - translate one bitmap relative to another
 *	@dst: resulting translated bitmap
 *	@orig: original untranslated bitmap
 *	@relmap: bitmap relative to which translated
 *	@bits: number of bits in each of these bitmaps
 *
 * Set the n-th bit of @dst iff there exists some m such that the
 * n-th bit of @relmap is set, the m-th bit of @orig is set, and
 * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 * (If you understood the previous sentence the first time you
 * read it, you're overqualified for your current job.)
 *
 * In other words, @orig is mapped onto (surjectively) @dst,
 * using the map { <n, m> | the n-th bit of @relmap is the
 * m-th set bit of @relmap }.
 *
 * Any set bits in @orig above bit number W, where W is the
 * weight of (number of set bits in) @relmap are mapped nowhere.
 * In particular, if for all bits m set in @orig, m >= W, then
 * @dst will end up empty.  In situations where the possibility
 * of such an empty result is not desired, one way to avoid it is
 * to use the bitmap_fold() operator, below, to first fold the
 * @orig bitmap over itself so that all its set bits x are in the
 * range 0 <= x < W.  The bitmap_fold() operator does this by
 * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
 *
 * Example [1] for bitmap_onto():
 *  Let's say @relmap has bits 30-39 set, and @orig has bits
 *  1, 3, 5, 7, 9 and 11 set.  Then on return from this routine,
 *  @dst will have bits 31, 33, 35, 37 and 39 set.
 *
 *  When bit 0 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the first bit (if any)
 *  that is turned on in @relmap.  Since bit 0 was off in the
 *  above example, we leave off that bit (bit 30) in @dst.
 *
 *  When bit 1 is set in @orig (as in the above example), it
 *  means turn on the bit in @dst corresponding to whatever
 *  is the second bit that is turned on in @relmap.  The second
 *  bit in @relmap that was turned on in the above example was
 *  bit 31, so we turned on bit 31 in @dst.
 *
 *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
 *  because they were the 4th, 6th, 8th and 10th set bits
 *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
 *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
 *
 *  When bit 11 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the twelfth bit that is
 *  turned on in @relmap.  In the above example, there were
 *  only ten bits turned on in @relmap (30..39), so the fact
 *  that bit 11 was set in @orig had no effect on @dst.
 *
 * Example [2] for bitmap_fold() + bitmap_onto():
 *  Let's say @relmap has these ten bits set::
 *
 *	40 41 42 43 45 48 53 61 74 95
 *
 *  (for the curious, that's 40 plus the first ten terms of the
 *  Fibonacci sequence.)
 *
 *  Further, let's say we use the following code, invoking
 *  bitmap_fold() then bitmap_onto(), as suggested above to
 *  avoid the possibility of an empty @dst result::
 *
 *	unsigned long *tmp;	// a temporary bitmap's bits
 *
 *	bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
 *	bitmap_onto(dst, tmp, relmap, bits);
 *
 *  Then this table shows what various values of @dst would be, for
 *  various @orig's.  I list the zero-based positions of each set bit.
 *  The tmp column shows the intermediate result, as computed by
 *  using bitmap_fold() to fold the @orig bitmap modulo ten
 *  (the weight of @relmap):
 *
 *	=============== ============== =================
 *	@orig           tmp            @dst
 *	0                0             40
 *	1                1             41
 *	9                9             95
 *	10               0             40 [#f1]_
 *	1 3 5 7          1 3 5 7       41 43 48 61
 *	0 1 2 3 4        0 1 2 3 4     40 41 42 43 45
 *	0 9 18 27        0 9 8 7       40 61 74 95
 *	0 10 20 30       0             40
 *	0 11 22 33       0 1 2 3       40 41 42 43
 *	0 12 24 36       0 2 4 6       40 42 45 53
 *	78 102 211       1 2 8         41 42 74 [#f1]_
 *	=============== ============== =================
 *
 * .. [#f1]
 *
 *     For these marked lines, if we hadn't first done bitmap_fold()
 *     into tmp, then the @dst result would have been empty.
 *
 * If either of @orig or @relmap is empty (no set bits), then @dst
 * will be returned empty.
 *
 * If (as explained above) the only set bits in @orig are in positions
 * m where m >= W, (where W is the weight of @relmap) then @dst will
 * once again be returned empty.
 *
 * All bits in @dst not set by the above rule are cleared.
 */
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
                        const unsigned long *relmap, unsigned int bits)
{
        unsigned int n, m;      /* same meaning as in above comment */

        if (dst == orig)        /* following doesn't handle inplace mappings */
                return;
        bitmap_zero(dst, bits);

        /*
         * The following code is a more efficient, but less
         * obvious, equivalent to the loop:
         *      for (m = 0; m < bitmap_weight(relmap, bits); m++) {
         *              n = find_nth_bit(orig, bits, m);
         *              if (test_bit(m, orig))
         *                      set_bit(n, dst);
         *      }
         */

        m = 0;
        for_each_set_bit(n, relmap, bits) {
                /* m == bitmap_pos_to_ord(relmap, n, bits) */
                if (test_bit(m, orig))
                        set_bit(n, dst);
                m++;
        }
}
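
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * Example [1] from the comment above, combined with bitmap_fold() as
 * Example [2] recommends.  @relmap has bits 30-39 set and @orig has bits
 * 1, 3, 5, 7, 9 and 11 set; after folding modulo the weight (10) and
 * translating, @dst has bits 31, 33, 35, 37 and 39 set.  The function name
 * is hypothetical, and the sketch assumes the bitmap_fold()/bitmap_onto()
 * declarations from <linux/bitmap.h>.
 */
static bool __maybe_unused bitmap_onto_example(void)
{
        DECLARE_BITMAP(orig, 96);
        DECLARE_BITMAP(tmp, 96);
        DECLARE_BITMAP(relmap, 96);
        DECLARE_BITMAP(dst, 96);
        unsigned int i;

        bitmap_zero(orig, 96);
        bitmap_zero(relmap, 96);
        for (i = 1; i <= 11; i += 2)
                set_bit(i, orig);               /* 1, 3, 5, 7, 9, 11 */
        bitmap_set(relmap, 30, 10);             /* 30..39 */

        bitmap_fold(tmp, orig, bitmap_weight(relmap, 96), 96);
        bitmap_onto(dst, tmp, relmap, 96);

        return test_bit(31, dst) && test_bit(39, dst) && !test_bit(30, dst);
}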

/**
 * bitmap_fold - fold larger bitmap into smaller, modulo specified size
 *	@dst: resulting smaller bitmap
 *	@orig: original larger bitmap
 *	@sz: specified size
 *	@nbits: number of bits in each of these bitmaps
 *
 * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
 * Clear all other bits in @dst.  See further the comment and
 * Example [2] for bitmap_onto() for why and how to use this.
 */
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
                        unsigned int sz, unsigned int nbits)
{
        unsigned int oldbit;

        if (dst == orig)        /* following doesn't handle inplace mappings */
                return;
        bitmap_zero(dst, nbits);

        for_each_set_bit(oldbit, orig, nbits)
                set_bit(oldbit % sz, dst);
}
#endif /* CONFIG_NUMA */

unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
        return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
                             flags);
}
EXPORT_SYMBOL(bitmap_alloc);

unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
{
        return bitmap_alloc(nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(bitmap_zalloc);

unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node)
{
        return kmalloc_array_node(BITS_TO_LONGS(nbits), sizeof(unsigned long),
                                  flags, node);
}
EXPORT_SYMBOL(bitmap_alloc_node);

unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node)
{
        return bitmap_alloc_node(nbits, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(bitmap_zalloc_node);

void bitmap_free(const unsigned long *bitmap)
{
        kfree(bitmap);
}
EXPORT_SYMBOL(bitmap_free);

static void devm_bitmap_free(void *data)
{
        unsigned long *bitmap = data;

        bitmap_free(bitmap);
}

unsigned long *devm_bitmap_alloc(struct device *dev,
                                 unsigned int nbits, gfp_t flags)
{
        unsigned long *bitmap;
        int ret;

        bitmap = bitmap_alloc(nbits, flags);
        if (!bitmap)
                return NULL;

        ret = devm_add_action_or_reset(dev, devm_bitmap_free, bitmap);
        if (ret)
                return NULL;

        return bitmap;
}
EXPORT_SYMBOL_GPL(devm_bitmap_alloc);

unsigned long *devm_bitmap_zalloc(struct device *dev,
                                  unsigned int nbits, gfp_t flags)
{
        return devm_bitmap_alloc(dev, nbits, flags | __GFP_ZERO);
}
EXPORT_SYMBOL_GPL(devm_bitmap_zalloc);
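
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * heap-allocated bitmaps come from bitmap_zalloc() and are returned with
 * bitmap_free(); the devm_ variants above tie the lifetime to a struct device
 * instead.  The function name is hypothetical.
 */
static int __maybe_unused bitmap_alloc_example(void)
{
        unsigned long *map;

        map = bitmap_zalloc(1024, GFP_KERNEL); /* zeroed, 1024 bits */
        if (!map)
                return -ENOMEM;

        bitmap_set(map, 0, 100);
        /* ... use the bitmap ... */

        bitmap_free(map);
        return 0;
}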

#if BITS_PER_LONG == 64
/**
 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
 *	@bitmap: array of unsigned longs, the destination bitmap
 *	@buf: array of u32 (in host byte order), the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, unsigned int nbits)
{
        unsigned int i, halfwords;

        halfwords = DIV_ROUND_UP(nbits, 32);
        for (i = 0; i < halfwords; i++) {
                bitmap[i/2] = (unsigned long) buf[i];
                if (++i < halfwords)
                        bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
        }

        /* Clear tail bits in last word beyond nbits. */
        if (nbits % BITS_PER_LONG)
                bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr32);

/**
 * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
 *	@buf: array of u32 (in host byte order), the dest bitmap
 *	@bitmap: array of unsigned longs, the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
        unsigned int i, halfwords;

        halfwords = DIV_ROUND_UP(nbits, 32);
        for (i = 0; i < halfwords; i++) {
                buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
                if (++i < halfwords)
                        buf[i] = (u32) (bitmap[i/2] >> 32);
        }

        /* Clear tail bits in last element of array beyond nbits. */
        if (nbits % BITS_PER_LONG)
                buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);
#endif
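
/*
 * Illustrative sketch (editorial addition, not part of upstream lib/bitmap.c):
 * bitmap_from_arr32() and bitmap_to_arr32() (provided by <linux/bitmap.h> for
 * both word sizes) convert between a bitmap and an explicitly 32-bit-based
 * representation, e.g. for a fixed-layout interface buffer.  The function
 * name is hypothetical.
 */
static bool __maybe_unused bitmap_arr32_example(void)
{
        u32 in[2] = { 0x80000001, 0x1 };        /* bits 0, 31 and 32 */
        u32 out[2];
        DECLARE_BITMAP(map, 33);

        bitmap_from_arr32(map, in, 33);
        bitmap_to_arr32(out, map, 33);

        return test_bit(31, map) && test_bit(32, map) &&
               out[0] == in[0] && out[1] == in[1];
}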

#if BITS_PER_LONG == 32
/**
 * bitmap_from_arr64 - copy the contents of u64 array of bits to bitmap
 *	@bitmap: array of unsigned longs, the destination bitmap
 *	@buf: array of u64 (in host byte order), the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits)
{
        int n;

        for (n = nbits; n > 0; n -= 64) {
                u64 val = *buf++;

                *bitmap++ = val;
                if (n > 32)
                        *bitmap++ = val >> 32;
        }

        /*
         * Clear tail bits in the last word beyond nbits.
         *
         * Negative index is OK because here we point to the word next
         * to the last word of the bitmap, except for nbits == 0, which
         * is tested implicitly.
         */
        if (nbits % BITS_PER_LONG)
                bitmap[-1] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr64);

/**
 * bitmap_to_arr64 - copy the contents of bitmap to a u64 array of bits
 *	@buf: array of u64 (in host byte order), the dest bitmap
 *	@bitmap: array of unsigned longs, the source bitmap
 *	@nbits: number of bits in @bitmap
 */
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
{
        const unsigned long *end = bitmap + BITS_TO_LONGS(nbits);

        while (bitmap < end) {
                *buf = *bitmap++;
                if (bitmap < end)
                        *buf |= (u64)(*bitmap++) << 32;
                buf++;
        }

        /* Clear tail bits in the last element of array beyond nbits. */
        if (nbits % 64)
                buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
}
EXPORT_SYMBOL(bitmap_to_arr64);
#endif