
TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/mte.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_H
#define __ASM_MTE_H

#include <asm/compiler.h>
#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/bitfield.h>
#include <linux/kasan-enabled.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <asm/pgtable-types.h>

void mte_clear_page_tags(void *addr);
unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
                                      unsigned long n);
unsigned long mte_copy_tags_to_user(void __user *to, void *from,
                                    unsigned long n);
int mte_save_tags(struct page *page);
void mte_save_page_tags(const void *page_addr, void *tag_storage);
void mte_restore_tags(swp_entry_t entry, struct page *page);
void mte_restore_page_tags(void *page_addr, const void *tag_storage);
void mte_invalidate_tags(int type, pgoff_t offset);
void mte_invalidate_tags_area(int type);
void *mte_allocate_tag_storage(void);
void mte_free_tag_storage(char *storage);

#ifdef CONFIG_ARM64_MTE

/* track which pages have valid allocation tags */
#define PG_mte_tagged   PG_arch_2
/* simple lock to avoid multiple threads tagging the same page */
#define PG_mte_lock     PG_arch_3

static inline void set_page_mte_tagged(struct page *page)
{
        /*
         * Ensure that the tags written prior to this function are visible
         * before the page flags update.
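         * (Explanatory note, not in the original source: this write barrier
         * pairs with the smp_rmb() in page_mte_tagged() below, so that a
         * reader which observes PG_mte_tagged also observes the tags.)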
         */
        smp_wmb();
        set_bit(PG_mte_tagged, &page->flags);
}

static inline bool page_mte_tagged(struct page *page)
{
        bool ret = test_bit(PG_mte_tagged, &page->flags);

        /*
         * If the page is tagged, ensure ordering with a likely subsequent
         * read of the tags.
         */
        if (ret)
                smp_rmb();
        return ret;
}

/*
 * Lock the page for tagging and return 'true' if the page can be tagged,
 * 'false' if already tagged. PG_mte_tagged is never cleared and therefore the
 * locking only happens once for page initialisation.
 *
 * The page MTE lock state:
 *
 *   Locked:    PG_mte_lock && !PG_mte_tagged
 *   Unlocked:  !PG_mte_lock || PG_mte_tagged
 *
 * Acquire semantics only if the page is tagged (returning 'false').
 */
static inline bool try_page_mte_tagging(struct page *page)
{
        if (!test_and_set_bit(PG_mte_lock, &page->flags))
                return true;

        /*
         * The tags are either being initialised or may have been initialised
         * already. Check if the PG_mte_tagged flag has been set or wait
         * otherwise.
         */
        smp_cond_load_acquire(&page->flags, VAL & (1UL << PG_mte_tagged));

        return false;
}
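
/*
 * Illustrative sketch (not part of the original header): one typical way the
 * three helpers above might be combined when initialising the tags of a page,
 * assuming the caller already holds a reference to 'page':
 *
 *        if (try_page_mte_tagging(page)) {
 *                mte_clear_page_tags(page_address(page));
 *                set_page_mte_tagged(page);
 *        }
 *
 * The winner of try_page_mte_tagging() initialises the tags and publishes
 * them with set_page_mte_tagged(); any concurrent caller loses the race and
 * waits inside try_page_mte_tagging() until the tags are visible.
 */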

void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t pte, unsigned int nr_pages);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_cpu_setup(void);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
                         unsigned long addr, unsigned long data);
size_t mte_probe_user_range(const char __user *uaddr, size_t size);

#else /* CONFIG_ARM64_MTE */

/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged   0

static inline void set_page_mte_tagged(struct page *page)
{
}
static inline bool page_mte_tagged(struct page *page)
{
        return false;
}
static inline bool try_page_mte_tagging(struct page *page)
{
        return false;
}
static inline void mte_zero_clear_page_tags(void *addr)
{
}
static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)
{
}
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
        return 0;
}
static inline long get_mte_ctrl(struct task_struct *task)
{
        return 0;
}
static inline int mte_ptrace_copy_tags(struct task_struct *child,
                                       long request, unsigned long addr,
                                       unsigned long data)
{
        return -EIO;
}

#endif /* CONFIG_ARM64_MTE */

static inline void mte_disable_tco_entry(struct task_struct *task)
{
        if (!system_supports_mte())
                return;

        /*
         * Re-enable tag checking (TCO set on exception entry). This is only
         * necessary if MTE is enabled in either the kernel or the userspace
         * task in synchronous or asymmetric mode (SCTLR_EL1.TCF0 bit 0 is set
         * for both). With MTE disabled in the kernel and disabled or
         * asynchronous in userspace, tag check faults (including in uaccesses)
         * are not reported, therefore there is no need to re-enable checking.
         * This is beneficial on microarchitectures where re-enabling TCO is
         * expensive.
         */
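        /*
         * Explanatory note (not in the original source): the SCTLR_EL1.TCF0
         * field encodings are 0b00 (checks disabled), 0b01 (synchronous),
         * 0b10 (asynchronous) and 0b11 (asymmetric), so testing bit 0 of the
         * field below selects exactly the synchronous and asymmetric modes
         * mentioned above.
         */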
        if (kasan_hw_tags_enabled() ||
            (task->thread.sctlr_user & (1UL << SCTLR_EL1_TCF0_SHIFT)))
                asm volatile(SET_PSTATE_TCO(0));
}

#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void);

static inline void mte_check_tfsr_entry(void)
{
        if (!kasan_hw_tags_enabled())
                return;

        mte_check_tfsr_el1();
}

static inline void mte_check_tfsr_exit(void)
{
        if (!kasan_hw_tags_enabled())
                return;

        /*
         * The asynchronous faults are sync'ed automatically with
         * TFSR_EL1 on kernel entry but for exit an explicit dsb()
         * is required.
         */
        dsb(nsh);
        isb();

        mte_check_tfsr_el1();
}
#else
static inline void mte_check_tfsr_el1(void)
{
}
static inline void mte_check_tfsr_entry(void)
{
}
static inline void mte_check_tfsr_exit(void)
{
}
#endif /* CONFIG_KASAN_HW_TAGS */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H  */