
TOMOYO Linux Cross Reference
Linux/kernel/time/vsyscall.c


Diff markup

Differences between /kernel/time/vsyscall.c (Version linux-6.12-rc7) and /kernel/time/vsyscall.c (Version linux-5.3.18)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * Copyright 2019 ARM Ltd.                          3  * Copyright 2019 ARM Ltd.
  4  *                                                  4  *
  5  * Generic implementation of update_vsyscall a      5  * Generic implementation of update_vsyscall and update_vsyscall_tz.
  6  *                                                  6  *
  7  * Based on the x86 specific implementation.        7  * Based on the x86 specific implementation.
  8  */                                                 8  */
  9                                                     9 
 10 #include <linux/hrtimer.h>                         10 #include <linux/hrtimer.h>
 11 #include <linux/timekeeper_internal.h>             11 #include <linux/timekeeper_internal.h>
 12 #include <vdso/datapage.h>                         12 #include <vdso/datapage.h>
 13 #include <vdso/helpers.h>                          13 #include <vdso/helpers.h>
 14 #include <vdso/vsyscall.h>                         14 #include <vdso/vsyscall.h>
 15                                                    15 
 16 #include "timekeeping_internal.h"              << 
 17                                                << 
 18 static inline void update_vdso_data(struct vds     16 static inline void update_vdso_data(struct vdso_data *vdata,
 19                                     struct tim     17                                     struct timekeeper *tk)
 20 {                                                  18 {
 21         struct vdso_timestamp *vdso_ts;            19         struct vdso_timestamp *vdso_ts;
 22         u64 nsec, sec;                             20         u64 nsec, sec;
 23                                                    21 
 24         vdata[CS_HRES_COARSE].cycle_last           22         vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
 25 #ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT    << 
 26         vdata[CS_HRES_COARSE].max_cycles        = tk->tkr_mono.clock->max_cycles;   << 
 27 #endif                                         << 
 28         vdata[CS_HRES_COARSE].mask                 23         vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
 29         vdata[CS_HRES_COARSE].mult                 24         vdata[CS_HRES_COARSE].mult              = tk->tkr_mono.mult;
 30         vdata[CS_HRES_COARSE].shift                25         vdata[CS_HRES_COARSE].shift             = tk->tkr_mono.shift;
 31         vdata[CS_RAW].cycle_last                   26         vdata[CS_RAW].cycle_last                = tk->tkr_raw.cycle_last;
 32 #ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT    << 
 33         vdata[CS_RAW].max_cycles                = tk->tkr_raw.clock->max_cycles;     << 
 34 #endif                                         << 
 35         vdata[CS_RAW].mask                         27         vdata[CS_RAW].mask                      = tk->tkr_raw.mask;
 36         vdata[CS_RAW].mult                         28         vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
 37         vdata[CS_RAW].shift                        29         vdata[CS_RAW].shift                     = tk->tkr_raw.shift;
 38                                                    30 
                                                   >>  31         /* CLOCK_REALTIME */
                                                   >>  32         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
                                                   >>  33         vdso_ts->sec    = tk->xtime_sec;
                                                   >>  34         vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
                                                   >>  35 
 39         /* CLOCK_MONOTONIC */                      36         /* CLOCK_MONOTONIC */
 40         vdso_ts         = &vdata[CS_HRES_COARS     37         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
 41         vdso_ts->sec    = tk->xtime_sec + tk->     38         vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
 42                                                    39 
 43         nsec = tk->tkr_mono.xtime_nsec;            40         nsec = tk->tkr_mono.xtime_nsec;
 44         nsec += ((u64)tk->wall_to_monotonic.tv     41         nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
 45         while (nsec >= (((u64)NSEC_PER_SEC) <<     42         while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
 46                 nsec -= (((u64)NSEC_PER_SEC) <     43                 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
 47                 vdso_ts->sec++;                    44                 vdso_ts->sec++;
 48         }                                          45         }
 49         vdso_ts->nsec   = nsec;                    46         vdso_ts->nsec   = nsec;
 50                                                    47 
 51         /* Copy MONOTONIC time for BOOTTIME */     48         /* Copy MONOTONIC time for BOOTTIME */
 52         sec     = vdso_ts->sec;                    49         sec     = vdso_ts->sec;
 53         /* Add the boot offset */                  50         /* Add the boot offset */
 54         sec     += tk->monotonic_to_boot.tv_se     51         sec     += tk->monotonic_to_boot.tv_sec;
 55         nsec    += (u64)tk->monotonic_to_boot.     52         nsec    += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
 56                                                    53 
 57         /* CLOCK_BOOTTIME */                       54         /* CLOCK_BOOTTIME */
 58         vdso_ts         = &vdata[CS_HRES_COARS     55         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
 59         vdso_ts->sec    = sec;                     56         vdso_ts->sec    = sec;
 60                                                    57 
 61         while (nsec >= (((u64)NSEC_PER_SEC) <<     58         while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
 62                 nsec -= (((u64)NSEC_PER_SEC) <     59                 nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
 63                 vdso_ts->sec++;                    60                 vdso_ts->sec++;
 64         }                                          61         }
 65         vdso_ts->nsec   = nsec;                    62         vdso_ts->nsec   = nsec;
 66                                                    63 
 67         /* CLOCK_MONOTONIC_RAW */                  64         /* CLOCK_MONOTONIC_RAW */
 68         vdso_ts         = &vdata[CS_RAW].baset     65         vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
 69         vdso_ts->sec    = tk->raw_sec;             66         vdso_ts->sec    = tk->raw_sec;
 70         vdso_ts->nsec   = tk->tkr_raw.xtime_ns     67         vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;
 71                                                    68 
 72         /* CLOCK_TAI */                            69         /* CLOCK_TAI */
 73         vdso_ts         = &vdata[CS_HRES_COARS     70         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
 74         vdso_ts->sec    = tk->xtime_sec + (s64     71         vdso_ts->sec    = tk->xtime_sec + (s64)tk->tai_offset;
 75         vdso_ts->nsec   = tk->tkr_mono.xtime_n     72         vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
                                                   >>  73 
                                                   >>  74         /*
                                                   >>  75          * Read without the seqlock held by clock_getres().
                                                   >>  76          * Note: No need to have a second copy.
                                                   >>  77          */
                                                   >>  78         WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
 76 }                                                  79 }
 77                                                    80 
 78 void update_vsyscall(struct timekeeper *tk)        81 void update_vsyscall(struct timekeeper *tk)
 79 {                                                  82 {
 80         struct vdso_data *vdata = __arch_get_k     83         struct vdso_data *vdata = __arch_get_k_vdso_data();
 81         struct vdso_timestamp *vdso_ts;            84         struct vdso_timestamp *vdso_ts;
 82         s32 clock_mode;                        << 
 83         u64 nsec;                                  85         u64 nsec;
 84                                                    86 
                                                   >>  87         if (__arch_update_vdso_data()) {
                                                   >>  88                 /*
                                                   >>  89                  * Some architectures might want to skip the update of the
                                                   >>  90                  * data page.
                                                   >>  91                  */
                                                   >>  92                 return;
                                                   >>  93         }
                                                   >>  94 
 85         /* copy vsyscall data */                   95         /* copy vsyscall data */
 86         vdso_write_begin(vdata);                   96         vdso_write_begin(vdata);
 87                                                    97 
 88         clock_mode = tk->tkr_mono.clock->vdso_clock_mode;                  !!  98         vdata[CS_HRES_COARSE].clock_mode        = __arch_get_clock_mode(tk);
 89         vdata[CS_HRES_COARSE].clock_mode        = clock_mode;              !!  99         vdata[CS_RAW].clock_mode                = __arch_get_clock_mode(tk);
 90         vdata[CS_RAW].clock_mode                = clock_mode;              << 
 91                                                                            << 
 92         /* CLOCK_REALTIME also required for time() */                      << 
 93         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; << 
 94         vdso_ts->sec    = tk->xtime_sec;                                   << 
 95         vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;                         << 
 96                                                   100 
 97         /* CLOCK_REALTIME_COARSE */               101         /* CLOCK_REALTIME_COARSE */
 98         vdso_ts         = &vdata[CS_HRES_COARS    102         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
 99         vdso_ts->sec    = tk->xtime_sec;          103         vdso_ts->sec    = tk->xtime_sec;
100         vdso_ts->nsec   = tk->tkr_mono.xtime_n    104         vdso_ts->nsec   = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
101                                                   105 
102         /* CLOCK_MONOTONIC_COARSE */              106         /* CLOCK_MONOTONIC_COARSE */
103         vdso_ts         = &vdata[CS_HRES_COARS    107         vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
104         vdso_ts->sec    = tk->xtime_sec + tk->    108         vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
105         nsec            = tk->tkr_mono.xtime_n    109         nsec            = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
106         nsec            = nsec + tk->wall_to_m    110         nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
107         vdso_ts->sec    += __iter_div_u64_rem(    111         vdso_ts->sec    += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
108                                                   112 
109         /*                                                                 !! 113         update_vdso_data(vdata, tk);
110          * Read without the seqlock held by clock_getres().                << 
111          * Note: No need to have a second copy.                            << 
112          */                                                                << 
113         WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); << 
114                                                                            << 
115         /*                                                                 << 
116          * If the current clocksource is not VDSO capable, then spare the  << 
117          * update of the high resolution parts.                            << 
118          */                                                                << 
119         if (clock_mode != VDSO_CLOCKMODE_NONE)                             << 
120                 update_vdso_data(vdata, tk);                               << 
121                                                   114 
122         __arch_update_vsyscall(vdata, tk);        115         __arch_update_vsyscall(vdata, tk);
123                                                   116 
124         vdso_write_end(vdata);                    117         vdso_write_end(vdata);
125                                                   118 
126         __arch_sync_vdso_data(vdata);             119         __arch_sync_vdso_data(vdata);
127 }                                                 120 }
128                                                   121 
129 void update_vsyscall_tz(void)                     122 void update_vsyscall_tz(void)
130 {                                                 123 {
131         struct vdso_data *vdata = __arch_get_k    124         struct vdso_data *vdata = __arch_get_k_vdso_data();
132                                                   125 
133         vdata[CS_HRES_COARSE].tz_minuteswest =    126         vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
134         vdata[CS_HRES_COARSE].tz_dsttime = sys    127         vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;
135                                                   128 
136         __arch_sync_vdso_data(vdata);             129         __arch_sync_vdso_data(vdata);
137 }                                              << 
138                                                << 
139 /**                                            << 
140  * vdso_update_begin - Start of a VDSO update section                      << 
141  *                                                                         << 
142  * Allows architecture code to safely update the architecture specific VDSO << 
143  * data. Disables interrupts, acquires timekeeper lock to serialize against << 
144  * concurrent updates from timekeeping and invalidates the VDSO data       << 
145  * sequence counter to prevent concurrent readers from accessing           << 
146  * inconsistent data.                                                      << 
147  *                                                                         << 
148  * Returns: Saved interrupt flags which need to be handed in to            << 
149  * vdso_update_end().                                                      << 
150  */                                                                        << 
151 unsigned long vdso_update_begin(void)                                      << 
152 {                                                                          << 
153         struct vdso_data *vdata = __arch_get_k_vdso_data();                << 
154         unsigned long flags;                                               << 
155                                                                            << 
156         raw_spin_lock_irqsave(&timekeeper_lock, flags);                    << 
157         vdso_write_begin(vdata);                                           << 
158         return flags;                                                      << 
159 }                                                                          << 
160                                                                            << 
161 /**                                                                        << 
162  * vdso_update_end - End of a VDSO update section                          << 
163  * @flags:      Interrupt flags as returned from vdso_update_begin()       << 
164  *                                                                         << 
165  * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data << 
166  * synchronization if the architecture requires it, drops timekeeper lock  << 
167  * and restores interrupt flags.                                           << 
168  */                                                                        << 
169 void vdso_update_end(unsigned long flags)                                  << 
170 {                                                                          << 
171         struct vdso_data *vdata = __arch_get_k_vdso_data();                << 
172                                                                            << 
173         vdso_write_end(vdata);                                             << 
174         __arch_sync_vdso_data(vdata);                                      << 
175         raw_spin_unlock_irqrestore(&timekeeper_lock, flags);               << 
176 }                                                 130 }
177                                                   131 
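
The kernel-doc for vdso_update_begin()/vdso_update_end() in the newer version describes a small protocol for architecture code that needs to publish architecture specific data to the vDSO outside of update_vsyscall(): take the timekeeper lock with interrupts disabled, mark the vdso sequence counter as in-update, write the data, then make it consistent again and sync. The sketch below only illustrates that pairing and is not code from either kernel version shown above: example_arch_update_vdso() and the arch_data field it writes are hypothetical, and the header from which the vdso_update_begin()/vdso_update_end() prototypes are picked up is assumed here rather than taken from the diff.

    // Minimal sketch of the vdso_update_begin()/vdso_update_end() pairing.
    // Assumptions: example_arch_update_vdso() and vdso_data::arch_data are
    // invented for illustration; prototypes are assumed visible via the vdso
    // headers the file above already includes.
    #include <linux/types.h>
    #include <vdso/datapage.h>
    #include <vdso/vsyscall.h>

    void example_arch_update_vdso(u64 new_value)
    {
            struct vdso_data *vdata = __arch_get_k_vdso_data();
            unsigned long flags;

            /* IRQs off, timekeeper_lock held, vdso seqcount marked in-update */
            flags = vdso_update_begin();

            vdata[CS_HRES_COARSE].arch_data = new_value;    /* hypothetical field */

            /* seqcount consistent again, data synced, lock dropped, IRQs restored */
            vdso_update_end(flags);
    }

Concurrent vDSO readers that sample the sequence counter while it is marked in-update retry, so they never observe a half-written arch_data value.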
