TOMOYO Linux Cross Reference
Linux/arch/riscv/kernel/patch.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
#include <asm/sections.h>

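/*
 * Payload handed to patch_text_cb() via stop_machine(): 'len' bytes from
 * 'insns' are written to 'addr' by one CPU while the other CPUs spin on
 * 'cpu_count' until the write is complete.
 */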
struct patch_insn {
        void *addr;
        u32 *insns;
        size_t len;
        atomic_t cpu_count;
};

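/*
 * Set while patch_text() runs under stop_machine() so that
 * patch_insn_write() knows to skip its lockdep assertion on text_mutex.
 */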
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU

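/*
 * Exit-section text can be patched during boot (e.g. by ftrace) but is
 * discarded along with init memory afterwards, so it only counts as
 * patchable text before the system is fully running.
 */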
static inline bool is_kernel_exittext(uintptr_t addr)
{
        return system_state < SYSTEM_RUNNING &&
                addr >= (uintptr_t)__exittext_begin &&
                addr < (uintptr_t)__exittext_end;
}

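/*
 * Map the page containing 'addr' at a text-poke fixmap slot so that
 * otherwise read-only kernel text can be written through a writable alias.
 */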
/*
 * fix_to_virt(idx) requires a compile-time constant (not a value passed at
 * run time in a register), otherwise the BUILD_BUG_ON fires with
 * "idx >= __end_of_fixed_addresses". Hence the '__always_inline' and the
 * 'const unsigned int fixmap' parameter here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
        uintptr_t uintaddr = (uintptr_t) addr;
        struct page *page;

        if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
                page = phys_to_page(__pa_symbol(addr));
        else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
                page = vmalloc_to_page(addr);
        else
                return addr;

        BUG_ON(!page);

        return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
                                         offset_in_page(addr));
}

static void patch_unmap(int fixmap)
{
        clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

static int __patch_insn_set(void *addr, u8 c, size_t len)
{
        bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
        void *waddr = addr;

        /*
         * Only two pages can be mapped at a time for writing.
         */
        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                return -EINVAL;
        /*
         * The caller is expected to hold text_mutex already, so no extra
         * locking is needed here to keep the patching safe across cores.
         */
        lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

        waddr = patch_map(addr, FIX_TEXT_POKE0);

        memset(waddr, c, len);

        /*
         * We could have just patched a function that is about to be
         * called, so make sure we don't execute partially patched
         * instructions by flushing the icache as soon as possible.
         */
        local_flush_icache_range((unsigned long)waddr,
                                 (unsigned long)waddr + len);

        patch_unmap(FIX_TEXT_POKE0);

        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
        void *waddr = addr;
        int ret;

        /*
         * Only two pages can be mapped at a time for writing.
         */
        if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
                return -EINVAL;

        /*
         * The caller is expected to hold text_mutex already, so no extra
         * locking is needed here to keep the patching safe across cores.
         *
         * We're currently using stop_machine() for ftrace & kprobes, and while
         * that ensures text_mutex is held before installing the mappings it
         * does not ensure text_mutex is held by the calling thread.  That's
         * safe but triggers a lockdep failure, so just elide the assertion
         * for that specific case.
         */
        if (!riscv_patch_in_stop_machine)
                lockdep_assert_held(&text_mutex);

        preempt_disable();

        if (across_pages)
                patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

        waddr = patch_map(addr, FIX_TEXT_POKE0);

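        /*
         * copy_to_kernel_nofault() returns -EFAULT instead of oopsing if
         * the write through the temporary mapping faults.
         */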
        ret = copy_to_kernel_nofault(waddr, insn, len);

        /*
         * We could have just patched a function that is about to be
         * called, so make sure we don't execute partially patched
         * instructions by flushing the icache as soon as possible.
         */
        local_flush_icache_range((unsigned long)waddr,
                                 (unsigned long)waddr + len);

        patch_unmap(FIX_TEXT_POKE0);

        if (across_pages)
                patch_unmap(FIX_TEXT_POKE1);

        preempt_enable();

        return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
        memset(addr, c, len);

        return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
        return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */

static int patch_insn_set(void *addr, u8 c, size_t len)
{
        size_t size;
        int ret;

        /*
         * __patch_insn_set() can only work on 2 pages at a time so call it in a
         * loop with len <= 2 * PAGE_SIZE.
         */
        while (len) {
                size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
                ret = __patch_insn_set(addr, c, size);
                if (ret)
                        return ret;

                addr += size;
                len -= size;
        }

        return 0;
}
NOKPROBE_SYMBOL(patch_insn_set);

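/*
 * The *_nosync variants below patch without stop_machine(): the caller is
 * responsible for ensuring no other hart can concurrently execute the range
 * being patched; only the icache is flushed here.
 */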
int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
        int ret;

        ret = patch_insn_set(addr, c, len);
        if (!ret)
                flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

        return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);

int patch_insn_write(void *addr, const void *insn, size_t len)
{
        size_t size;
        int ret;

        /*
         * Copy the instructions to the destination address, two pages at a time
         * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
         */
        while (len) {
                size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
                ret = __patch_insn_write(addr, insn, size);
                if (ret)
                        return ret;

                addr += size;
                insn += size;
                len -= size;
        }

        return 0;
}
NOKPROBE_SYMBOL(patch_insn_write);

int patch_text_nosync(void *addr, const void *insns, size_t len)
{
        int ret;

        ret = patch_insn_write(addr, insns, len);
        if (!ret)
                flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

        return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

static int patch_text_cb(void *data)
{
        struct patch_insn *patch = data;
        int ret = 0;

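        /*
         * The last CPU to arrive does the patching; every other CPU spins
         * below until the patching CPU's release increment lets it proceed.
         */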
        if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
                ret = patch_insn_write(patch->addr, patch->insns, patch->len);
                /*
                 * Make sure the patching store is effective *before* we
                 * increment the counter which releases all waiting CPUs
                 * by using the release variant of atomic increment. The
                 * release pairs with the call to local_flush_icache_all()
                 * on the waiting CPU.
                 */
                atomic_inc_return_release(&patch->cpu_count);
        } else {
                while (atomic_read(&patch->cpu_count) <= num_online_cpus())
                        cpu_relax();

                local_flush_icache_all();
        }

        return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

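/*
 * Patch live kernel text via stop_machine(): all online CPUs rendezvous in
 * patch_text_cb(), so none can be executing the instructions being replaced.
 */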
int patch_text(void *addr, u32 *insns, size_t len)
{
        int ret;
        struct patch_insn patch = {
                .addr = addr,
                .insns = insns,
                .len = len,
                .cpu_count = ATOMIC_INIT(0),
        };

        /*
         * kprobes takes text_mutex before calling patch_text(), but since we
         * then call stop_machine(), the lockdep assertion in
         * patch_insn_write() gets confused by the context in which the lock
         * is taken. Instead, ensure the lock is held before calling
         * stop_machine(), and set riscv_patch_in_stop_machine to skip the
         * check in patch_insn_write().
         */
        lockdep_assert_held(&text_mutex);
        riscv_patch_in_stop_machine = true;
        ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
        riscv_patch_in_stop_machine = false;
        return ret;
}
NOKPROBE_SYMBOL(patch_text);

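For context, a minimal sketch of how an in-kernel caller might drive these
helpers. The function name and instruction value are hypothetical and for
illustration only; real callers such as ftrace and kprobes compute the
address and replacement instructions from the code being patched:

/*
 * Hypothetical example: replace one 32-bit instruction at 'addr' with a
 * NOP while holding text_mutex, as the lockdep assertions above require.
 */
static int example_patch_nop(void *addr)
{
        u32 nop = 0x00000013;   /* RISC-V "addi x0, x0, 0", i.e. nop */
        int ret;

        mutex_lock(&text_mutex);
        /* stop_machine() variant: safe even if the code may be running */
        ret = patch_text(addr, &nop, sizeof(nop));
        mutex_unlock(&text_mutex);

        return ret;
}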
