// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/log2.h>

/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
/*
 * __div64_32 - 64bit/32bit division, 64bit quotient and 32bit remainder
 * @n:    on entry the 64bit dividend; on exit the 64bit quotient
 * @base: the 32bit divisor
 *
 * Returns the 32bit remainder of *n / base.  Weak so an arch can
 * provide a faster assembly replacement (see file header).
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/*
	 * Reduce the thing a bit first: divide the top 32 bits
	 * separately so the shift-subtract loop below has less work.
	 */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/*
	 * Classic shift-subtract long division: scale the divisor up
	 * to just below the remainder.  The (int64_t)b > 0 test stops
	 * the doubling before b's top bit would be shifted out.
	 */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* Subtract back down, accumulating quotient bits in res. */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: 64bit dividend
 * @divisor: 32bit divisor
 * @remainder: 32bit remainder
 *
 * Implemented on top of the unsigned div_u64_rem(); the quotient is
 * negated when operand signs differ and the remainder takes the sign
 * of the dividend (C truncated-division semantics).
 * NOTE(review): negating S64_MIN here relies on two's-complement
 * wraparound, as elsewhere in the kernel.
 */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 * @remainder: 64bit remainder
 *
 * This implementation is a comparable to algorithm used by div64_u64.
 * But this operation, which includes math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		/* Divisor fits in 32 bits: use the cheap 64/32 path. */
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		/*
		 * Shift both operands right so the divisor fits in 32
		 * bits, then estimate the quotient.  The estimate is
		 * either exact or one too large, so decrement it and
		 * correct upward if the remainder proves too big.
		 */
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * by the book 'Hacker's Delight'.  The original source and full proof
 * can be found here and is available for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		/*
		 * Same estimate-and-correct scheme as div64_u64_rem(),
		 * minus the remainder bookkeeping: the shifted estimate
		 * is at most one too large.
		 */
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: 64bit dividend
 * @divisor: 64bit divisor
 */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	/*
	 * t is 0 when the signs agree and -1 (all ones) when they
	 * differ; (quot ^ t) - t then conditionally negates quot.
	 * Relies on arithmetic right shift of negative values, as the
	 * kernel does throughout.
	 */
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif

#endif /* BITS_PER_LONG == 32 */
179 */ 189 */ 180 u32 iter_div_u64_rem(u64 dividend, u32 divisor 190 u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) 181 { 191 { 182 return __iter_div_u64_rem(dividend, di 192 return __iter_div_u64_rem(dividend, divisor, remainder); 183 } 193 } 184 EXPORT_SYMBOL(iter_div_u64_rem); 194 EXPORT_SYMBOL(iter_div_u64_rem); 185 195 186 #ifndef mul_u64_u64_div_u64 196 #ifndef mul_u64_u64_div_u64 187 u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) 197 u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c) 188 { 198 { 189 u64 res = 0, div, rem; 199 u64 res = 0, div, rem; 190 int shift; 200 int shift; 191 201 192 /* can a * b overflow ? */ 202 /* can a * b overflow ? */ 193 if (ilog2(a) + ilog2(b) > 62) { 203 if (ilog2(a) + ilog2(b) > 62) { 194 /* << 195 * Note that the algorithm aft << 196 * some precision and the resu << 197 * exchange a and b if a is bi << 198 * << 199 * For example with a = 439804 << 200 * the below calculation doesn << 201 * and then shift becomes 45 + << 202 * becomes 4398035251080. Howe << 203 * result is calculated (i.e. << 204 */ << 205 if (a > b) << 206 swap(a, b); << 207 << 208 /* 204 /* 209 * (b * a) / c is equal to 205 * (b * a) / c is equal to 210 * 206 * 211 * (b / c) * a + 207 * (b / c) * a + 212 * (b % c) * a / c 208 * (b % c) * a / c 213 * 209 * 214 * if nothing overflows. Can t 210 * if nothing overflows. Can the 1st multiplication 215 * overflow? Yes, but we do no 211 * overflow? Yes, but we do not care: this can only 216 * happen if the end result ca 212 * happen if the end result can't fit in u64 anyway. 
217 * 213 * 218 * So the code below does 214 * So the code below does 219 * 215 * 220 * res = (b / c) * a; 216 * res = (b / c) * a; 221 * b = b % c; 217 * b = b % c; 222 */ 218 */ 223 div = div64_u64_rem(b, c, &rem 219 div = div64_u64_rem(b, c, &rem); 224 res = div * a; 220 res = div * a; 225 b = rem; 221 b = rem; 226 222 227 shift = ilog2(a) + ilog2(b) - 223 shift = ilog2(a) + ilog2(b) - 62; 228 if (shift > 0) { 224 if (shift > 0) { 229 /* drop precision */ 225 /* drop precision */ 230 b >>= shift; 226 b >>= shift; 231 c >>= shift; 227 c >>= shift; 232 if (!c) 228 if (!c) 233 return res; 229 return res; 234 } 230 } 235 } 231 } 236 232 237 return res + div64_u64(a * b, c); 233 return res + div64_u64(a * b, c); 238 } 234 } 239 EXPORT_SYMBOL(mul_u64_u64_div_u64); 235 EXPORT_SYMBOL(mul_u64_u64_div_u64); 240 #endif 236 #endif 241 237
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.