
TOMOYO Linux Cross Reference
Linux/include/vdso/math64.h


Diff markup

Differences between /include/vdso/math64.h (Version linux-6.12-rc7) and /include/vdso/math64.h (Version linux-4.19.323)


(/include/vdso/math64.h does not exist in linux-4.19.323, so only the linux-6.12-rc7 contents are shown.)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                /* The following asm() prevents the compiler from
                   optimising this loop into a modulo operation.  */
                asm("" : "+rm"(dividend));

                dividend -= divisor;
                ret++;
        }

        *remainder = dividend;

        return ret;
}

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_add_u64_shr
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */

#else

#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
static inline u64 mul_u32_u32(u32 a, u32 b)
{
        return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        u32 ah = a >> 32, al = a;
        bool ovf;
        u64 ret;

        ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
        ret >>= shift;
        if (ovf && shift)
                ret += 1ULL << (64 - shift);
        if (ah)
                ret += mul_u32_u32(ah, mul) << (32 - shift);

        return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif

#endif /* __VDSO_MATH64_H */
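Below is a small userspace sketch (not from the kernel tree) illustrating the two helpers above. The names ref_mul_u64_u32_add_u64_shr, fallback_mul_u64_u32_add_u64_shr and iter_div_u64_rem are local copies introduced here for illustration; uint32_t/uint64_t stand in for the kernel's u32/u64, and a GCC/Clang-style compiler providing __int128 and __builtin_add_overflow is assumed. It checks that the non-INT128 fallback agrees with the 128-bit expression for shifts in 1..32, and shows the iterative divide splitting nanoseconds into seconds.

/*
 * Userspace sketch: cross-check the fallback mul_u64_u32_add_u64_shr() path
 * against the __int128 expression used when CONFIG_ARCH_SUPPORTS_INT128 is
 * set, and demonstrate an __iter_div_u64_rem-style split of nanoseconds.
 * All names and inputs below are made up for illustration.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Mirror of the INT128 branch: (a * mul + b) >> shift done in 128 bits. */
static u64 ref_mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}

/* Mirror of the fallback branch: split a into 32-bit halves and re-add the
 * carry lost when (al * mul) + b overflows 64 bits.  Valid for 1 <= shift <= 32. */
static u64 fallback_mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        u32 ah = a >> 32, al = a;
        bool ovf;
        u64 ret;

        ovf = __builtin_add_overflow((u64)al * mul, b, &ret);
        ret >>= shift;
        if (ovf && shift)
                ret += 1ULL << (64 - shift);
        if (ah)
                ret += ((u64)ah * mul) << (32 - shift);

        return ret;
}

/* Local copy of __iter_div_u64_rem(): divide by repeated subtraction so no
 * runtime 64-bit division is needed; only sensible for small quotients. */
static u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
        u32 ret = 0;

        while (dividend >= divisor) {
                __asm__("" : "+rm"(dividend));  /* keep the loop a loop */
                dividend -= divisor;
                ret++;
        }
        *remainder = dividend;
        return ret;
}

int main(void)
{
        /* Made-up clocksource-style inputs: cycle counts, multiplier, shift. */
        const u64 cycles[] = { 0, 1, 123456789ULL, 0xffffffffffffULL };
        const unsigned int shifts[] = { 1, 8, 24, 32 };
        const u32 mult = 0x01406f40;
        const u64 frac = 0x12345678ULL;
        u64 rem;

        for (unsigned int i = 0; i < sizeof(cycles) / sizeof(cycles[0]); i++)
                for (unsigned int j = 0; j < sizeof(shifts) / sizeof(shifts[0]); j++)
                        assert(fallback_mul_u64_u32_add_u64_shr(cycles[i], mult, frac, shifts[j]) ==
                               ref_mul_u64_u32_add_u64_shr(cycles[i], mult, frac, shifts[j]));

        /* 3.25 s worth of nanoseconds -> 3 whole seconds, 250 ms left over. */
        assert(iter_div_u64_rem(3250000000ULL, 1000000000U, &rem) == 3 && rem == 250000000ULL);

        printf("fallback path matches the __int128 reference\n");
        return 0;
}

The fallback computes a * mul + b in 32-bit halves so that everything stays in 64-bit arithmetic: the low product plus b is shifted first, the carry lost on overflow is re-added as 1 << (64 - shift), and the high product is added back shifted by (32 - shift). The iterative divide avoids a runtime 64-bit division entirely, presumably because the vDSO cannot rely on the kernel's division helpers and its quotients (e.g. seconds carried out of a nanosecond accumulator) are known to be small.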

