
TOMOYO Linux Cross Reference
Linux/include/vdso/math64.h


Diff markup

Differences between /include/vdso/math64.h (Version linux-6.11-rc3) and /include/vdso/math64.h (Version linux-6.10.4): the file is identical in both versions, so the common content is listed once below.


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* The following asm() prevents the compiler from
                   optimising this loop into a modulo operation.  */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}
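For context, not part of the cross-referenced file: __iter_div_u64_rem divides by repeated subtraction, which is cheap when the quotient is known to be small, as when splitting a nanosecond count into whole seconds plus a remainder. The empty asm() with the "+rm" constraint makes dividend opaque to the optimiser, so the loop cannot be collapsed back into a hardware divide. A minimal userspace sketch, with stdint types standing in for the kernel's u32/u64 and a hypothetical main() added purely for illustration:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t
iter_div_u64_rem(uint64_t dividend, uint32_t divisor, uint64_t *remainder)
{
        uint32_t ret = 0;

        while (dividend >= divisor) {
                /* GNU C extended asm, as in the kernel helper: keeps
                 * 'dividend' opaque so the loop is not rewritten as a
                 * divide/modulo by the compiler. */
                asm("" : "+rm"(dividend));
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        uint64_t rem;
        /* Split 1000000007 ns into whole seconds plus leftover ns. */
        uint32_t sec = iter_div_u64_rem(1000000007ULL, 1000000000U, &rem);

        printf("%u s, %llu ns\n", sec, (unsigned long long)rem);
        return 0;
}

Compiled with gcc or clang, this prints "1 s, 7 ns": a single subtraction settles the division because the quotient is 1.
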
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_add_u64_shr
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */

#else

#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        u32 ah = a >> 32, al = a;
        bool ovf;
        u64 ret;

        ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
        ret >>= shift;
        if (ovf && shift)
                ret += 1ULL << (64 - shift);
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif

#endif /* __VDSO_MATH64_H */
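Also for context, not kernel code: the fallback branch computes (a * mul + b) >> shift using only 64-bit arithmetic by splitting a into 32-bit halves. With a = (ah << 32) + al, a * mul + b = ((ah * mul) << 32) + (al * mul + b); a carry out of the low 64-bit sum is worth 2^64, i.e. 1ULL << (64 - shift) once the shift has been applied, and the high product is shifted left by the remaining 32 - shift bits (so the helper appears to assume shift <= 32 and a result that fits in 64 bits). The sketch below cross-checks that decomposition against the __int128 expression from the first branch; the input values and main() are illustrative assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
        return (uint64_t)a * b;
}

/* Fallback path: a = (ah << 32) + al, with the carry out of the
 * low 64-bit sum restored explicitly after the shift. */
static uint64_t mul_add_shr_fallback(uint64_t a, uint32_t mul, uint64_t b,
                                     unsigned int shift)
{
        uint32_t ah = a >> 32, al = a;
        uint64_t ret;
        bool ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);

        ret >>= shift;
        if (ovf && shift)
                ret += 1ULL << (64 - shift);    /* carry == 2^64 before the shift */
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);
        return ret;
}

/* Reference path, as used when CONFIG_ARCH_SUPPORTS_INT128 is set. */
static uint64_t mul_add_shr_int128(uint64_t a, uint32_t mul, uint64_t b,
                                   unsigned int shift)
{
        return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
}

int main(void)
{
        uint64_t a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;
        uint32_t mul = 0xdeadbeefU;
        unsigned int shift = 32;

        /* Both paths should agree whenever the true result fits in 64 bits. */
        printf("fallback: %#llx\n",
               (unsigned long long)mul_add_shr_fallback(a, mul, b, shift));
        printf("int128:   %#llx\n",
               (unsigned long long)mul_add_shr_int128(a, mul, b, shift));
        return 0;
}

Preferring the __int128 expression where CONFIG_ARCH_SUPPORTS_INT128 is set typically lets the compiler emit a single widening multiply; the split version exists only for targets without usable 128-bit arithmetic.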

