/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
 */

#ifndef _LINUX_BITFIELD_H
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <asm/byteorder.h>

/*
 * Bitfield access macros
 *
 * The FIELD_{GET,PREP} macros take a shifted mask as their first parameter
 * and extract the base mask and shift amount from it.
 * The mask must be a compile-time constant.
 *
 * Example:
 *
 *  #include <linux/bitfield.h>
 *  #include <linux/bits.h>
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
 *  #define REG_FIELD_D  GENMASK(31, 16)
 *
 * Get:
 *  a = FIELD_GET(REG_FIELD_A, reg);
 *  b = FIELD_GET(REG_FIELD_B, reg);
 *
 * Set:
 *  reg = FIELD_PREP(REG_FIELD_A, 1) |
 *        FIELD_PREP(REG_FIELD_B, 0) |
 *        FIELD_PREP(REG_FIELD_C, c) |
 *        FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *  reg &= ~REG_FIELD_C;
 *  reg |= FIELD_PREP(REG_FIELD_C, c);
 */

#define __bf_shf(x) (__builtin_ffsll(x) - 1)

#define __scalar_type_to_unsigned_cases(type)				\
		unsigned type:	(unsigned type)0,			\
		signed type:	(unsigned type)0

#define __unsigned_scalar_typeof(x) typeof(				\
		_Generic((x),						\
			char:	(unsigned char)0,			\
			__scalar_type_to_unsigned_cases(char),		\
			__scalar_type_to_unsigned_cases(short),		\
			__scalar_type_to_unsigned_cases(int),		\
			__scalar_type_to_unsigned_cases(long),		\
			__scalar_type_to_unsigned_cases(long long),	\
			default: (x)))

#define __bf_cast_unsigned(type, x)	((__unsigned_scalar_typeof(type))(x))

#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx)			\
	({								\
		BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask),		\
				 _pfx "mask is not constant");		\
		BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero");	\
		BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ?		\
				 ~((_mask) >> __bf_shf(_mask)) &	\
					(0 + (_val)) : 0,		\
				 _pfx "value too large for the field"); \
		BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) >	\
				 __bf_cast_unsigned(_reg, ~0ull),	\
				 _pfx "type of reg too small for mask"); \
		__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) +			\
					      (1ULL << __bf_shf(_mask))); \
	})

/**
 * FIELD_MAX() - produce the maximum value representable by a field
 * @_mask: shifted mask defining the field's length and position
 *
 * FIELD_MAX() returns the maximum value that can be held in the field
 * specified by @_mask.
 */
#define FIELD_MAX(_mask)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: ");	\
		(typeof(_mask))((_mask) >> __bf_shf(_mask));		\
	})

/**
 * FIELD_FIT() - check if value fits in the field
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to test against the field
 *
 * Return: true if @_val can fit inside @_mask, false if @_val is too big.
 */
#define FIELD_FIT(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");	\
		!((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
	})
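
/*
 * Illustrative sketch (added for clarity, not part of the original header):
 * FIELD_MAX() and FIELD_FIT() are typically used to range-check a value
 * before encoding it.  REG_FIELD_C refers to the example definitions at the
 * top of this file, and "c" stands for an arbitrary caller-supplied value.
 *
 * Range check against the widest value the field can hold:
 *  if (c > FIELD_MAX(REG_FIELD_C))
 *          return -EINVAL;
 *
 * Equivalent check, then encode:
 *  if (FIELD_FIT(REG_FIELD_C, c))
 *          reg |= FIELD_PREP(REG_FIELD_C, c);
 */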

/**
 * FIELD_PREP() - prepare a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to put in the field
 *
 * FIELD_PREP() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using bitwise OR.
 */
#define FIELD_PREP(_mask, _val)						\
	({								\
		__BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: ");	\
		((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask);	\
	})

#define __BF_CHECK_POW2(n)	BUILD_BUG_ON_ZERO(((n) & ((n) - 1)) != 0)

/**
 * FIELD_PREP_CONST() - prepare a constant bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_val: value to put in the field
 *
 * FIELD_PREP_CONST() masks and shifts up the value.  The result should
 * be combined with other fields of the bitfield using bitwise OR.
 *
 * Unlike FIELD_PREP() this is a constant expression and can therefore
 * be used in initializers.  Error checking is more limited for this
 * version, and non-constant masks cannot be used.
 */
#define FIELD_PREP_CONST(_mask, _val)					\
	(								\
		/* mask must be non-zero */				\
		BUILD_BUG_ON_ZERO((_mask) == 0) +			\
		/* check if value fits */				\
		BUILD_BUG_ON_ZERO(~((_mask) >> __bf_shf(_mask)) & (_val)) + \
		/* check if mask is contiguous */			\
		__BF_CHECK_POW2((_mask) + (1ULL << __bf_shf(_mask))) +	\
		/* and create the value */				\
		(((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask))	\
	)

/**
 * FIELD_GET() - extract a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg: value of entire bitfield
 *
 * FIELD_GET() extracts the field specified by @_mask from the
 * bitfield passed in as @_reg by masking and shifting it down.
 */
#define FIELD_GET(_mask, _reg)						\
	({								\
		__BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: ");	\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
	})
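
/*
 * The definitions below generate typed bitfield helpers.  For u8 and for
 * each of the u/le/be 16-, 32- and 64-bit types, ____MAKE_OP()/__MAKE_OP()
 * emit <type>_encode_bits(), <type>_replace_bits(), <type>p_replace_bits()
 * and <type>_get_bits().  These behave like FIELD_PREP()/FIELD_GET(), but
 * take the mask as a function argument (it must still be a contiguous
 * bitmask) and perform the byte-order conversion for the le/be variants.
 *
 * Illustrative sketch (added for clarity, not part of the original header;
 * "desc" is a hypothetical descriptor with a __le32 member "word0", and
 * REG_FIELD_C is the example field defined at the top of this file):
 *
 *  u32 c = le32_get_bits(desc->word0, REG_FIELD_C);
 *  le32p_replace_bits(&desc->word0, c + 1, REG_FIELD_C);
 */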
extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
static __always_inline u64 field_multiplier(u64 field)
{
	if ((field | (field - 1)) & ((field | (field - 1)) + 1))
		__bad_mask();
	return field & -field;
}
static __always_inline u64 field_mask(u64 field)
{
	return field / field_multiplier(field);
}
#define field_max(field)	((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from)					\
static __always_inline __##type type##_encode_bits(base v, base field)	\
{									\
	if (__builtin_constant_p(v) && (v & ~field_mask(field)))	\
		__field_overflow();					\
	return to((v & field_mask(field)) * field_multiplier(field));	\
}									\
static __always_inline __##type type##_replace_bits(__##type old,	\
					base val, base field)		\
{									\
	return (old & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline void type##p_replace_bits(__##type *p,		\
					base val, base field)		\
{									\
	*p = (*p & ~to(field)) | type##_encode_bits(val, field);	\
}									\
static __always_inline base type##_get_bits(__##type v, base field)	\
{									\
	return (from(v) & field)/field_multiplier(field);		\
}
#define __MAKE_OP(size)							\
	____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
	____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
	____MAKE_OP(u##size,u##size,,)
____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
#undef __MAKE_OP
#undef ____MAKE_OP

#endif