TOMOYO Linux Cross Reference
Linux/arch/s390/kernel/ftrace.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kmsan-checks.h>
#include <linux/kprobes.h>
#include <linux/execmem.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"

/*
 * The function prologue is generated either by gcc's hotpatch feature
 * (since gcc 4.8) or by the combination of the -pg -mrecord-mcount
 * -mnop-mcount -mfentry flags (since gcc 9 / clang 10).
 * In both cases the original as well as the disabled function prologue
 * contain only a single six byte instruction and look like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the instruction gets patched and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump to the ftrace caller directly, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
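
/*
 * Illustrative byte-level view (an added sketch, not part of the original
 * file; the encodings follow from the constants used below, e.g. orig[] in
 * ftrace_init_nop() and the 0xc015 brasl opcode):
 *
 * >	brcl	0,0		-> c0 04 00 00 00 00	# mask 0: never taken
 * >	brasl	%r0,target	-> c0 05 <disp>		# disp: signed 32-bit
 * >							# offset in halfwords
 *
 * Only the second byte and the 32-bit displacement differ between the
 * disabled and the enabled state.
 */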

void *ftrace_func __read_mostly = ftrace_stub;
struct ftrace_insn {
        u16 opc;
        s32 disp;
} __packed;
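
/*
 * Added compile-time check (illustrative, not in the original file): the
 * struct above must map exactly onto one six byte brcl/brasl instruction,
 * i.e. a 16-bit opcode part followed by a 32-bit signed displacement.
 */
static_assert(sizeof(struct ftrace_insn) == 6);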

#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

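/*
 * Added description (not in the original file): select the shared
 * trampoline body. With expolines active the exrl based variant is used,
 * so that the final jump back into the traced function avoids an indirect
 * branch via br %r1.
 */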
static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
        const char *tstart, *tend;

        tstart = ftrace_shared_hotpatch_trampoline_br;
        tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
        if (!nospec_disable) {
                tstart = ftrace_shared_hotpatch_trampoline_exrl;
                tend = ftrace_shared_hotpatch_trampoline_exrl_end;
        }
#endif /* CONFIG_EXPOLINE */
        if (end)
                *end = tend;
        return tstart;
}

/*
 * Tell the ftrace core to call ftrace_init_nop() below for every mcount
 * location instead of patching in a generic nop itself.
 */
bool ftrace_need_init_nop(void)
{
        return true;
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
                __ftrace_hotpatch_trampolines_start;
        static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
        static struct ftrace_hotpatch_trampoline *trampoline;
        struct ftrace_hotpatch_trampoline **next_trampoline;
        struct ftrace_hotpatch_trampoline *trampolines_end;
        struct ftrace_hotpatch_trampoline tmp;
        struct ftrace_insn *insn;
        const char *shared;
        s32 disp;

        BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
                     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

        next_trampoline = &next_vmlinux_trampoline;
        trampolines_end = __ftrace_hotpatch_trampolines_end;
        shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
        if (mod) {
                next_trampoline = &mod->arch.next_trampoline;
                trampolines_end = mod->arch.trampolines_end;
                shared = ftrace_plt;
        }
#endif

        if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
                return -ENOMEM;
        trampoline = (*next_trampoline)++;

        /* Check for the compiler-generated fentry nop (brcl 0, .). */
        if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
                return -EINVAL;

        /* Generate the trampoline. */
        tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
        tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
        tmp.interceptor = FTRACE_ADDR;
        tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
        s390_kernel_write(trampoline, &tmp, sizeof(tmp));

        /* Generate a jump to the trampoline. */
        disp = ((char *)trampoline - (char *)rec->ip) / 2;
        insn = (struct ftrace_insn *)rec->ip;
        s390_kernel_write(&insn->disp, &disp, sizeof(disp));

        return 0;
}
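
/*
 * Added sketch (not part of the original file): after ftrace_init_nop()
 * a call site and its per-function trampoline look roughly like this:
 *
 *   func:	brcl	0,<trampoline>		# still disabled, mask is 0
 *   ...
 *   trampoline:
 *	brasl	%r1,<shared trampoline>		# opcode 0xc015
 *	.quad	FTRACE_ADDR			# interceptor
 *	.quad	<func + 6>			# rest_of_intercepted_function
 *
 * Enabling tracing later only flips the brcl mask from 0 to 15, see
 * ftrace_make_call() below.
 */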
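/*
 * Added description (not in the original file): read the brcl/brasl at
 * rec->ip, follow its displacement (counted in halfwords, hence the
 * multiplication by two) to the per-function trampoline, and verify the
 * expected brasl opcode 0xc015 before returning it.
 */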
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
        struct ftrace_hotpatch_trampoline *trampoline;
        struct ftrace_insn insn;
        s64 disp;
        u16 opc;

        if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
                return ERR_PTR(-EFAULT);
        disp = (s64)insn.disp * 2;
        trampoline = (void *)(rec->ip + disp);
        if (get_kernel_nofault(opc, &trampoline->brasl_opc))
                return ERR_PTR(-EFAULT);
        if (opc != 0xc015)
                return ERR_PTR(-EINVAL);
        return trampoline;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
                       unsigned long addr)
{
        struct ftrace_hotpatch_trampoline *trampoline;
        u64 old;

        trampoline = ftrace_get_trampoline(rec);
        if (IS_ERR(trampoline))
                return PTR_ERR(trampoline);
        if (get_kernel_nofault(old, &trampoline->interceptor))
                return -EFAULT;
        if (old != old_addr)
                return -EINVAL;
        s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        return 0;
}

static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
        u16 old;
        u8 op;

        if (get_kernel_nofault(old, addr))
                return -EFAULT;
        if (old != expected)
                return -EINVAL;
        /* set mask field to all ones or zeroes */
        op = enable ? 0xf4 : 0x04;
        s390_kernel_write((char *)addr + 1, &op, sizeof(op));
        return 0;
}
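
/*
 * Added example (not in the original file): for a six byte site
 * "c0 04 xx xx xx xx" (brcl 0,target) the helper above rewrites only the
 * second byte:
 *
 *   enable:  0x04 -> 0xf4	brcl 15,target	# branch always taken
 *   disable: 0xf4 -> 0x04	brcl 0,target	# never taken, a nop
 *
 * so a single byte store is all that is needed to switch states.
 */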

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        /* Expect brcl 0xf,... */
        return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        struct ftrace_hotpatch_trampoline *trampoline;

        trampoline = ftrace_get_trampoline(rec);
        if (IS_ERR(trampoline))
                return PTR_ERR(trampoline);
        s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
        /* Expect brcl 0x0,... */
        return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_func = func;
        return 0;
}

void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
        /*
         * Flush any pre-fetched instructions on all
         * CPUs to make the new code visible.
         */
        text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

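/*
 * Added description (not in the original file): as the header comment
 * explains, module code can't jump to the ftrace caller directly. Instead
 * it branches via ftrace_plt, a copy of the shared trampoline body placed
 * in executable memory at boot.
 */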
static int __init ftrace_plt_init(void)
{
        const char *start, *end;

        ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE);
        if (!ftrace_plt)
                panic("cannot allocate ftrace plt\n");

        start = ftrace_shared_hotpatch_trampoline(&end);
        memcpy(ftrace_plt, start, end - start);
        set_memory_rox((unsigned long)ftrace_plt, 1);
        return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it onto the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
                                    unsigned long ip)
{
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip -= MCOUNT_INSN_SIZE;
        if (!function_graph_enter(ra, ip, 0, (void *) sp))
                ra = (unsigned long) return_to_handler;
out:
        return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
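
/*
 * Added note (not in the original file): if function_graph_enter() accepts
 * the entry, the traced function returns to return_to_handler() instead of
 * its real caller, which is what lets the graph tracer record the exit.
 */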

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition (brc). To enable the
 * ftrace graph code block, the mask field of the instruction is patched to
 * zero, which turns the instruction into a nop.
 * To disable the ftrace graph code the mask field is patched to all ones,
 * which turns the instruction into an unconditional branch that skips the
 * block.
 */
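
/*
 * Added encoding example (not in the original file): the four byte brc
 * carries its mask in the high nibble of the second byte:
 *
 *   brc	15,target	-> a7 f4 <disp>		# always taken
 *   brc	0,target	-> a7 04 <disp>		# never taken, a nop
 *
 * which matches the 0xa7f4 / 0xa704 values expected below.
 */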
int ftrace_enable_ftrace_graph_caller(void)
{
        int rc;

        /* Expect brc 0xf,... */
        rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
        if (rc)
                return rc;
        text_poke_sync_lock();
        return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
        int rc;

        /* Expect brc 0x0,... */
        rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
        if (rc)
                return rc;
        text_poke_sync_lock();
        return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        struct kprobe_ctlblk *kcb;
        struct pt_regs *regs;
        struct kprobe *p;
        int bit;

        if (unlikely(kprobe_ftrace_disabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        kmsan_unpoison_memory(fregs, sizeof(*fregs));
        regs = ftrace_get_regs(fregs);
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (!regs || unlikely(!p) || kprobe_disabled(p))
                goto out;

        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
                goto out;
        }

        __this_cpu_write(current_kprobe, p);

        kcb = get_kprobe_ctlblk();
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        instruction_pointer_set(regs, ip);

        if (!p->pre_handler || !p->pre_handler(p, regs)) {

                instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

                if (unlikely(p->post_handler)) {
                        kcb->kprobe_status = KPROBE_HIT_SSDONE;
                        p->post_handler(p, regs, 0);
                }
        }
        __this_cpu_write(current_kprobe, NULL);
out:
        ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
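
/*
 * Added note (not in the original file): the handler above emulates a
 * breakpoint hit for a kprobe placed on an ftrace location. The pre handler
 * sees the probe address as instruction pointer; the post handler then runs
 * with the instruction pointer advanced past the six byte prologue
 * instruction, as if it had been single-stepped.
 */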

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        /* An ftrace based kprobe needs no out-of-line instruction slot. */
        p->ainsn.insn = NULL;
        return 0;
}
#endif

