TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/msr.h


  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _ASM_X86_MSR_H
  3 #define _ASM_X86_MSR_H
  4 
  5 #include "msr-index.h"
  6 
  7 #ifndef __ASSEMBLY__
  8 
  9 #include <asm/asm.h>
 10 #include <asm/errno.h>
 11 #include <asm/cpumask.h>
 12 #include <uapi/asm/msr.h>
 13 #include <asm/shared/msr.h>
 14 
 15 #include <linux/percpu.h>
 16 
 17 struct msr_info {
 18         u32                     msr_no;
 19         struct msr              reg;
 20         struct msr __percpu     *msrs;
 21         int                     err;
 22 };
 23 
 24 struct msr_regs_info {
 25         u32 *regs;
 26         int err;
 27 };
 28 
 29 struct saved_msr {
 30         bool valid;
 31         struct msr_info info;
 32 };
 33 
 34 struct saved_msrs {
 35         unsigned int num;
 36         struct saved_msr *array;
 37 };
 38 
 39 /*
 40  * both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 41  * constraint has different meanings. For i386, "A" means exactly
 42  * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 43  * it means rax *or* rdx.
 44  */
 45 #ifdef CONFIG_X86_64
 46 /* Using 64-bit values saves one instruction clearing the high half of low */
 47 #define DECLARE_ARGS(val, low, high)    unsigned long low, high
 48 #define EAX_EDX_VAL(val, low, high)     ((low) | (high) << 32)
 49 #define EAX_EDX_RET(val, low, high)     "=a" (low), "=d" (high)
 50 #else
 51 #define DECLARE_ARGS(val, low, high)    unsigned long long val
 52 #define EAX_EDX_VAL(val, low, high)     (val)
 53 #define EAX_EDX_RET(val, low, high)     "=A" (val)
 54 #endif
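
To make the macro pair concrete, here is a hedged sketch of what each variant reduces to; the helper names are hypothetical, and the real accessor is __rdmsr() below, which additionally installs an exception-table fixup:

/*
 * x86_64: RDMSR zero-extends EAX and EDX into RAX and RDX, so 64-bit
 * temporaries need no extra instruction to clear their upper halves
 * before being combined.
 */
static inline u64 sketch_rdmsr_x86_64(u32 msr)
{
        unsigned long low, high;                /* DECLARE_ARGS() */

        asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
        return low | (high << 32);              /* EAX_EDX_VAL() */
}

/* i386: the "A" constraint names the edx:eax register pair directly. */
static inline u64 sketch_rdmsr_i386(u32 msr)
{
        unsigned long long val;                 /* DECLARE_ARGS() */

        asm volatile("rdmsr" : "=A" (val) : "c" (msr));
        return val;                             /* EAX_EDX_VAL() */
}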
 55 
 56 /*
 57  * Be very careful with includes. This header is prone to include loops.
 58  */
 59 #include <asm/atomic.h>
 60 #include <linux/tracepoint-defs.h>
 61 
 62 #ifdef CONFIG_TRACEPOINTS
 63 DECLARE_TRACEPOINT(read_msr);
 64 DECLARE_TRACEPOINT(write_msr);
 65 DECLARE_TRACEPOINT(rdpmc);
 66 extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
 67 extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
 68 extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
 69 #else
 70 static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
 71 static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
 72 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
 73 #endif
 74 
 75 /*
 76  * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 77  * accessors and should not have any tracing or other functionality piggybacking
 78  * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 79  * think of extending them - you will be slapped with a stinking trout or a frozen
 80  * shark will reach you, wherever you are! You've been warned.
 81  */
 82 static __always_inline unsigned long long __rdmsr(unsigned int msr)
 83 {
 84         DECLARE_ARGS(val, low, high);
 85 
 86         asm volatile("1: rdmsr\n"
 87                      "2:\n"
 88                      _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
 89                      : EAX_EDX_RET(val, low, high) : "c" (msr));
 90 
 91         return EAX_EDX_VAL(val, low, high);
 92 }
 93 
 94 static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 95 {
 96         asm volatile("1: wrmsr\n"
 97                      "2:\n"
 98                      _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
 99                      : : "c" (msr), "a"(low), "d" (high) : "memory");
100 }
101 
102 /*
103  * WRMSRNS behaves exactly like WRMSR with the only difference being
104  * that it is not a serializing instruction by default.
105  */
106 static __always_inline void __wrmsrns(u32 msr, u32 low, u32 high)
107 {
108         /* Instruction opcode for WRMSRNS; supported in binutils >= 2.40. */
109         asm volatile("1: .byte 0x0f,0x01,0xc6\n"
110                      "2:\n"
111                      _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
112                      : : "c" (msr), "a"(low), "d" (high));
113 }
114 
115 #define native_rdmsr(msr, val1, val2)                   \
116 do {                                                    \
117         u64 __val = __rdmsr((msr));                     \
118         (void)((val1) = (u32)__val);                    \
119         (void)((val2) = (u32)(__val >> 32));            \
120 } while (0)
121 
122 #define native_wrmsr(msr, low, high)                    \
123         __wrmsr(msr, low, high)
124 
125 #define native_wrmsrl(msr, val)                         \
126         __wrmsr((msr), (u32)((u64)(val)),               \
127                        (u32)((u64)(val) >> 32))
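
As a hedged illustration of where the untraced wrappers matter: code that runs before tracing is usable, or code reachable from the tracing machinery itself, goes through native_rdmsr()/native_wrmsr() rather than the traced rdmsr()/wrmsr() defined further down. MSR_EFER and EFER_NX come from msr-index.h; the function itself is only a sketch:

static inline void sketch_set_nx_untraced(void)
{
        u32 lo, hi;

        native_rdmsr(MSR_EFER, lo, hi);         /* no tracepoint, no paravirt hook */
        lo |= EFER_NX;                          /* EFER.NX lives in the low half */
        native_wrmsr(MSR_EFER, lo, hi);
}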
128 
129 static inline unsigned long long native_read_msr(unsigned int msr)
130 {
131         unsigned long long val;
132 
133         val = __rdmsr(msr);
134 
135         if (tracepoint_enabled(read_msr))
136                 do_trace_read_msr(msr, val, 0);
137 
138         return val;
139 }
140 
141 static inline unsigned long long native_read_msr_safe(unsigned int msr,
142                                                       int *err)
143 {
144         DECLARE_ARGS(val, low, high);
145 
146         asm volatile("1: rdmsr ; xor %[err],%[err]\n"
147                      "2:\n\t"
148                      _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
149                      : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
150                      : "c" (msr));
151         if (tracepoint_enabled(read_msr))
152                 do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
153         return EAX_EDX_VAL(val, low, high);
154 }
155 
156 /* Can be uninlined because referenced by paravirt */
157 static inline void notrace
158 native_write_msr(unsigned int msr, u32 low, u32 high)
159 {
160         __wrmsr(msr, low, high);
161 
162         if (tracepoint_enabled(write_msr))
163                 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
164 }
165 
166 /* Can be uninlined because referenced by paravirt */
167 static inline int notrace
168 native_write_msr_safe(unsigned int msr, u32 low, u32 high)
169 {
170         int err;
171 
172         asm volatile("1: wrmsr ; xor %[err],%[err]\n"
173                      "2:\n\t"
174                      _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
175                      : [err] "=a" (err)
176                      : "c" (msr), "0" (low), "d" (high)
177                      : "memory");
178         if (tracepoint_enabled(write_msr))
179                 do_trace_write_msr(msr, ((u64)high << 32 | low), err);
180         return err;
181 }
182 
183 extern int rdmsr_safe_regs(u32 regs[8]);
184 extern int wrmsr_safe_regs(u32 regs[8]);
185 
186 /**
187  * rdtsc() - returns the current TSC without ordering constraints
188  *
189  * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
190  * only ordering constraint it supplies is the ordering implied by
191  * "asm volatile": it will put the RDTSC in the place you expect.  The
192  * CPU can and will speculatively execute that RDTSC, though, so the
193  * results can be non-monotonic if compared on different CPUs.
194  */
195 static __always_inline unsigned long long rdtsc(void)
196 {
197         DECLARE_ARGS(val, low, high);
198 
199         asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));
200 
201         return EAX_EDX_VAL(val, low, high);
202 }
203 
204 /**
205  * rdtsc_ordered() - read the current TSC in program order
206  *
207  * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
208  * It is ordered like a load to a global in-memory counter.  It should
209  * be impossible to observe non-monotonic rdtsc_unordered() behavior
210  * across multiple CPUs as long as the TSC is synced.
211  */
212 static __always_inline unsigned long long rdtsc_ordered(void)
213 {
214         DECLARE_ARGS(val, low, high);
215 
216         /*
217          * The RDTSC instruction is not ordered relative to memory
218          * access.  The Intel SDM and the AMD APM are both vague on this
219          * point, but empirically an RDTSC instruction can be
220          * speculatively executed before prior loads.  An RDTSC
221          * immediately after an appropriate barrier appears to be
222          * ordered as a normal load, that is, it provides the same
223          * ordering guarantees as reading from a global memory location
224          * that some other imaginary CPU is updating continuously with a
225          * time stamp.
226          *
227          * Thus, use the preferred barrier on the respective CPU, aiming for
228          * RDTSCP as the default.
229          */
230         asm volatile(ALTERNATIVE_2("rdtsc",
231                                    "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
232                                    "rdtscp", X86_FEATURE_RDTSCP)
233                         : EAX_EDX_RET(val, low, high)
234                         /* RDTSCP clobbers ECX with MSR_TSC_AUX. */
235                         :: "ecx");
236 
237         return EAX_EDX_VAL(val, low, high);
238 }
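
A minimal sketch of the practical difference: plain rdtsc() can be speculated past earlier work, so delta measurements around a code region are usually taken with rdtsc_ordered() on both sides. The measured callee here is hypothetical:

static inline u64 sketch_measure_cycles(void (*region)(void))
{
        u64 start, end;

        start = rdtsc_ordered();        /* ordered like a load, not hoisted */
        region();
        end = rdtsc_ordered();

        return end - start;             /* TSC ticks, not nanoseconds */
}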
239 
240 static inline unsigned long long native_read_pmc(int counter)
241 {
242         DECLARE_ARGS(val, low, high);
243 
244         asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
245         if (tracepoint_enabled(rdpmc))
246                 do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
247         return EAX_EDX_VAL(val, low, high);
248 }
249 
250 #ifdef CONFIG_PARAVIRT_XXL
251 #include <asm/paravirt.h>
252 #else
253 #include <linux/errno.h>
254 /*
255  * Access to machine-specific registers (available on 586 and better only)
256  * Note: the rd* operations modify the parameters directly (without using
257  * pointer indirection), this allows gcc to optimize better
258  */
259 
260 #define rdmsr(msr, low, high)                                   \
261 do {                                                            \
262         u64 __val = native_read_msr((msr));                     \
263         (void)((low) = (u32)__val);                             \
264         (void)((high) = (u32)(__val >> 32));                    \
265 } while (0)
266 
267 static inline void wrmsr(unsigned int msr, u32 low, u32 high)
268 {
269         native_write_msr(msr, low, high);
270 }
271 
272 #define rdmsrl(msr, val)                        \
273         ((val) = native_read_msr((msr)))
274 
275 static inline void wrmsrl(unsigned int msr, u64 val)
276 {
277         native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
278 }
279 
280 /* wrmsr with exception handling */
281 static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
282 {
283         return native_write_msr_safe(msr, low, high);
284 }
285 
286 /* rdmsr with exception handling */
287 #define rdmsr_safe(msr, low, high)                              \
288 ({                                                              \
289         int __err;                                              \
290         u64 __val = native_read_msr_safe((msr), &__err);        \
291         (*low) = (u32)__val;                                    \
292         (*high) = (u32)(__val >> 32);                           \
293         __err;                                                  \
294 })
295 
296 static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
297 {
298         int err;
299 
300         *p = native_read_msr_safe(msr, &err);
301         return err;
302 }
303 
304 #define rdpmc(counter, low, high)                       \
305 do {                                                    \
306         u64 _l = native_read_pmc((counter));            \
307         (low)  = (u32)_l;                               \
308         (high) = (u32)(_l >> 32);                       \
309 } while (0)
310 
311 #define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
312 
313 #endif  /* !CONFIG_PARAVIRT_XXL */
314 
315 static __always_inline void wrmsrns(u32 msr, u64 val)
316 {
317         __wrmsrns(msr, val, val >> 32);
318 }
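
A hedged sketch of using the non-serializing form, assuming the caller gates it on the WRMSRNS CPUID feature (X86_FEATURE_WRMSRNS in cpufeatures.h), since __wrmsrns() above emits the opcode unconditionally:

static inline void sketch_wrmsr_fast(u32 msr, u64 val)
{
        if (cpu_feature_enabled(X86_FEATURE_WRMSRNS))
                wrmsrns(msr, val);      /* non-serializing write */
        else
                wrmsrl(msr, val);       /* ordinary, serializing WRMSR */
}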
319 
320 /*
321  * 64-bit version of wrmsr_safe():
322  */
323 static inline int wrmsrl_safe(u32 msr, u64 val)
324 {
325         return wrmsr_safe(msr, (u32)val,  (u32)(val >> 32));
326 }
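
A usage sketch tying the plain and exception-handling interfaces together; MSR_IA32_MISC_ENABLE is used purely as an example of an architectural MSR:

static inline int sketch_msr_roundtrip(void)
{
        u64 val;
        int err;

        /* Plain accessors: for MSRs known to exist on this CPU. */
        rdmsrl(MSR_IA32_MISC_ENABLE, val);
        wrmsrl(MSR_IA32_MISC_ENABLE, val);

        /* _safe() variants: report failure to the caller if the access faults. */
        err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &val);
        if (err)
                return err;

        return wrmsrl_safe(MSR_IA32_MISC_ENABLE, val);
}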
327 
328 struct msr __percpu *msrs_alloc(void);
329 void msrs_free(struct msr __percpu *msrs);
330 int msr_set_bit(u32 msr, u8 bit);
331 int msr_clear_bit(u32 msr, u8 bit);
332 
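A short sketch of the bit helpers declared just above; the MSR and bit number are chosen purely for illustration, and a negative return value means the underlying MSR access failed:

static inline int sketch_msr_bit_toggle(void)
{
        int ret;

        ret = msr_set_bit(MSR_IA32_MISC_ENABLE, 0);     /* illustrative bit */
        if (ret < 0)
                return ret;

        return msr_clear_bit(MSR_IA32_MISC_ENABLE, 0);
}
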
333 #ifdef CONFIG_SMP
334 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
335 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
336 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
337 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
338 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
339 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
340 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
341 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
342 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
343 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
344 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
345 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
346 #else  /*  CONFIG_SMP  */
347 static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
348 {
349         rdmsr(msr_no, *l, *h);
350         return 0;
351 }
352 static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
353 {
354         wrmsr(msr_no, l, h);
355         return 0;
356 }
357 static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
358 {
359         rdmsrl(msr_no, *q);
360         return 0;
361 }
362 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
363 {
364         wrmsrl(msr_no, q);
365         return 0;
366 }
367 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
368                                 struct msr __percpu *msrs)
369 {
370         rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
371 }
372 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
373                                 struct msr __percpu *msrs)
374 {
375         wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
376 }
377 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
378                                     u32 *l, u32 *h)
379 {
380         return rdmsr_safe(msr_no, l, h);
381 }
382 static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
383 {
384         return wrmsr_safe(msr_no, l, h);
385 }
386 static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
387 {
388         return rdmsrl_safe(msr_no, q);
389 }
390 static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
391 {
392         return wrmsrl_safe(msr_no, q);
393 }
394 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
395 {
396         return rdmsr_safe_regs(regs);
397 }
398 static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
399 {
400         return wrmsr_safe_regs(regs);
401 }
402 #endif  /* CONFIG_SMP */
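
Finally, a hedged sketch of the cross-CPU interface: on SMP kernels the access is executed on the requested CPU (typically routed there by smp_call_function_single() in arch/x86/lib/msr-smp.c), while the UP stubs above simply touch the local CPU. MSR_IA32_MPERF is used only as an example:

static inline int sketch_read_mperf_on_cpu(unsigned int cpu, u64 *val)
{
        /* Returns 0 on success; the RDMSR itself runs on @cpu. */
        return rdmsrl_on_cpu(cpu, MSR_IA32_MPERF, val);
}
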
403 #endif /* __ASSEMBLY__ */
404 #endif /* _ASM_X86_MSR_H */
405 
