TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/kmsan.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * x86 KMSAN support.
 *
 * Copyright (C) 2022, Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 */

#ifndef _ASM_X86_KMSAN_H
#define _ASM_X86_KMSAN_H

#ifndef MODULE

#include <asm/cpu_entry_area.h>
#include <asm/processor.h>
#include <linux/mmzone.h>

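/*
 * KMSAN pairs every byte of kernel memory with two kinds of metadata: a
 * shadow byte tracking whether the byte is initialized, and an origin
 * value identifying where an uninitialized value came from. The CPU entry
 * area is not backed by struct page, so its metadata cannot be found via
 * the usual page-based lookup; the per-CPU arrays below hold it instead.
 */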
DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);

/*
 * The functions below are declared in the header to make sure they are
 * inlined. They are all called from kmsan_get_metadata() for every memory
 * access in the kernel, so speed is important here.
 */

/*
 * Compute metadata addresses for the CPU entry area on x86.
 */
static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
{
        unsigned long addr64 = (unsigned long)addr;
        char *metadata_array;
        unsigned long off;
        int cpu;

        if ((addr64 < CPU_ENTRY_AREA_BASE) ||
            (addr64 >= (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE)))
                return NULL;
        cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
        off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
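        /*
         * get_cpu_entry_area() points past the read-only IDT mapping in
         * the first page of the region, so for addresses inside that page
         * the subtraction above wraps around; the size check below then
         * fails and NULL is returned, as that page has no metadata here.
         */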
        if (off >= CPU_ENTRY_AREA_SIZE)
                return NULL;
        metadata_array = is_origin ? cpu_entry_area_origin :
                                     cpu_entry_area_shadow;
        return &per_cpu(metadata_array[off], cpu);
}
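
/*
 * Usage sketch (illustrative, not part of this header): kmsan_get_metadata()
 * is expected to try this arch hook first and fall back to the generic
 * page-based lookup when it returns NULL. The fallback helper named below
 * is a hypothetical placeholder.
 */
#if 0	/* example only, not compiled */
static void *kmsan_get_metadata_sketch(void *addr, bool is_origin)
{
        void *meta = arch_kmsan_get_meta_or_null(addr, is_origin);

        if (meta)	/* addr lies in the CPU entry area */
                return meta;
        return page_based_metadata_lookup(addr, is_origin);	/* hypothetical */
}
#endif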

/*
 * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
 */
static inline bool kmsan_phys_addr_valid(unsigned long addr)
{
        if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
                return !(addr >> boot_cpu_data.x86_phys_bits);
        else
                return true;
}
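
/*
 * Worked example for the check above (illustrative; 46 physical address
 * bits is an assumption, the real value is reported by CPUID): shifting
 * right by x86_phys_bits leaves nonzero bits only for addresses that do
 * not fit into the physical address space.
 */
#if 0	/* example only, not compiled */
        /* With boot_cpu_data.x86_phys_bits == 46: */
        kmsan_phys_addr_valid((1UL << 46) - 1);	/* true:  addr >> 46 == 0 */
        kmsan_phys_addr_valid(1UL << 46);	/* false: addr >> 46 == 1 */
#endif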

/*
 * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
 */
static inline bool kmsan_virt_addr_valid(void *addr)
{
        unsigned long x = (unsigned long)addr;
        unsigned long y = x - __START_KERNEL_map;
        bool ret;

        /* use the carry flag to determine if x was < __START_KERNEL_map */
        if (unlikely(x > y)) {
                x = y + phys_base;

                if (y >= KERNEL_IMAGE_SIZE)
                        return false;
        } else {
                x = y + (__START_KERNEL_map - PAGE_OFFSET);

                /* carry flag will be set if starting x was >= PAGE_OFFSET */
                if ((x > y) || !kmsan_phys_addr_valid(x))
                        return false;
        }

        /*
         * pfn_valid() relies on RCU, and may call into the scheduler on
         * exiting the critical section. However, this would result in
         * recursion with KMSAN. Therefore, disable preemption here, and
         * re-enable preemption below while suppressing reschedules to
         * avoid recursion.
         *
         * Note that this may occasionally break scheduling guarantees.
         * However, a kernel compiled with KMSAN has already given up any
         * performance guarantees due to being heavily instrumented.
         */
        preempt_disable();
        ret = pfn_valid(x >> PAGE_SHIFT);
        preempt_enable_no_resched();

        return ret;
}
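
/*
 * The unsigned-wraparound trick above, in isolation (illustrative): after
 * y = x - base, the comparison x > y is true exactly when x >= base (for
 * nonzero base), because subtracting a larger value wraps y around to a
 * huge number. The compiler can derive the result from the carry flag of
 * the subtraction, which is what the comments above refer to.
 */
#if 0	/* example only, not compiled */
        unsigned long base = __START_KERNEL_map;
        unsigned long a = base + 1, b = base - 1;

        (void)(a > (a - base));	/* true:  a >= base, no borrow          */
        (void)(b > (b - base));	/* false: b - base wrapped to ULONG_MAX */
#endif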

#endif /* !MODULE */

#endif /* _ASM_X86_KMSAN_H */
