// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

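/*
 * Offsets (in instructions) of the patch sites within the detour buffer
 * template; the optprobe_template_* symbols are defined in the assembly
 * template in optprobes_head.S.
 */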
#define TMPL_CALL_HDLR_IDX      (optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX        (optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX            (optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX             (optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX           (optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX            (optprobe_template_end - optprobe_template_entry)

static bool insn_page_in_use;

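/*
 * A single detour-buffer slot (optinsn_slot, reserved in optprobes_head.S)
 * is handed out at a time. Keeping it in kernel text helps the branch-range
 * checks below succeed for probed kernel addresses.
 */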
void *alloc_optinsn_page(void)
{
        if (insn_page_in_use)
                return NULL;
        insn_page_in_use = true;
        return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
        insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
        struct pt_regs regs;
        struct instruction_op op;
        unsigned long nip = 0;
        unsigned long addr = (unsigned long)p->addr;

        /*
         * A kprobe placed on the kretprobe trampoline at boot time
         * sits on a 'nop' instruction, which can always be emulated,
         * so further checks can be skipped.
         */
        if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
                return addr + sizeof(kprobe_opcode_t);

        /*
         * We only support optimizing kernel addresses, not
         * module addresses.
         *
         * FIXME: Optimize kprobes placed in module addresses.
         */
        if (!is_kernel_addr(addr))
                return 0;

        memset(&regs, 0, sizeof(struct pt_regs));
        regs.nip = addr;
        regs.trap = 0x0;
        regs.msr = MSR_KERNEL;

        /*
         * Kprobes placed on conditional branch instructions are not
         * optimized, as we can't predict the nip ahead of time with a
         * dummy pt_regs and so can't ensure that the branch back from
         * the detour buffer falls within the branch range (i.e. 32MB).
         * A branch back from the trampoline to the nip returned by
         * analyse_instr() here is set up in the detour buffer.
         *
         * Ensure that the instruction is not a conditional branch and
         * that it can be emulated.
         */
        if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
            analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
                emulate_update_regs(&regs, &op);
                nip = regs.nip;
        }

        return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
{
        /* This is possible if op is under delayed unoptimization */
        if (kprobe_disabled(&op->kp))
                return;

        preempt_disable();

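        /* A kprobe is already active on this CPU: count a missed hit rather than recursing. */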
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
                __this_cpu_write(current_kprobe, &op->kp);
                regs_set_return_ip(regs, (unsigned long)op->kp.addr);
                get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }

        preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

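/*
 * Free the detour buffer slot. The '1' (dirty) argument tells the generic
 * kprobes slot allocator to defer reuse of the slot, since it may still be
 * live.
 */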
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
        if (op->optinsn.insn) {
                free_optinsn_slot(op->optinsn.insn, 1);
                op->optinsn.insn = NULL;
        }
}

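/*
 * Generate instructions to load the provided 32-bit immediate value
 * into register 'reg' (lis + ori) and patch them at 'addr'.
 */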
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
        patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
        patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate value
 * into register 'reg' and patch these instructions at 'addr'.
 */
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
        patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
        patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
        patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
        patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
        patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

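/*
 * Patch a register-width immediate load: five instructions on 64-bit,
 * two on 32-bit. The detour template is assumed to reserve room for
 * the longer sequence.
 */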
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
        if (IS_ENABLED(CONFIG_PPC64))
                patch_imm64_load_insns(val, reg, addr);
        else
                patch_imm32_load_insns(val, reg, addr);
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
        ppc_inst_t branch_op_callback, branch_emulate_step, temp;
        unsigned long op_callback_addr, emulate_step_addr;
        kprobe_opcode_t *buff;
        long b_offset;
        unsigned long nip, size;
        int rc, i;

        nip = can_optimize(p);
        if (!nip)
                return -EILSEQ;

        /* Allocate instruction slot for detour buffer */
        buff = get_optinsn_slot();
        if (!buff)
                return -ENOMEM;

        /*
         * OPTPROBE uses the 'b' instruction to branch to optinsn.insn.
         *
         * The target address has to be relatively nearby to permit the
         * use of a branch instruction on powerpc, because the address
         * is specified in an immediate field of the instruction opcode
         * itself, i.e. 24 bits of the opcode specify the address.
         * Therefore the target should be within 32MB on either side of
         * the current instruction.
         */
        b_offset = (unsigned long)buff - (unsigned long)p->addr;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Check if the return address is also within 32MB range */
        b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
        if (!is_offset_in_branch_range(b_offset))
                goto error;

        /* Setup template */
        /* We can optimize this via patch_instruction_window later */
        size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
        pr_devel("Copying template to %p, size %lu\n", buff, size);
        for (i = 0; i < size; i++) {
                rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
                if (rc < 0)
                        goto error;
        }

        /*
         * Fix up the template with instructions to:
         * 1. load the address of the actual probepoint
         */
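        /*
         * 'op' is loaded into r3, the first function-argument register
         * in the powerpc ELF ABI, so it arrives as optimized_callback()'s
         * first parameter.
         */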
        patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

        /*
         * 2. branch to optimized_callback() and emulate_step()
         */
        op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
        emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
        if (!op_callback_addr || !emulate_step_addr) {
                WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
                goto error;
        }

        rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
                           op_callback_addr, BRANCH_SET_LINK);

        rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
                            emulate_step_addr, BRANCH_SET_LINK);

        if (rc)
                goto error;

        patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
        patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

        /*
         * 3. load instruction to be emulated into relevant register, and
         */
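        /*
         * The instruction is loaded into r4, which emulate_step() takes
         * as its second parameter.
         */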
        temp = ppc_inst_read(p->ainsn.insn);
        patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

        /*
         * 4. branch back from trampoline
         */
        patch_branch(buff + TMPL_RET_IDX, nip, 0);

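        /* Make the freshly patched detour buffer visible to instruction fetch. */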
        flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

        op->optinsn.insn = buff;

        return 0;

error:
        free_optinsn_slot(buff, 0);
        return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
        return optinsn->insn != NULL;
}

/*
 * On powerpc, an optprobe always replaces a single instruction
 * (4 bytes long and 4-byte aligned), so it is impossible to
 * encounter another kprobe in this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
        return 0;
}

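/*
 * Swap the instruction at each probe point for a branch into its
 * detour buffer.
 */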
void arch_optimize_kprobes(struct list_head *oplist)
{
        ppc_inst_t instr;
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                /*
                 * Back up the instruction that will be replaced
                 * by the branch to the detour buffer.
                 */
                memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
                create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
                patch_instruction(op->kp.addr, instr);
                list_del_init(&op->list);
        }
}

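/* Fall back to the regular kprobe: re-arm the trap at the probe address. */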
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
        arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
        struct optimized_kprobe *op;
        struct optimized_kprobe *tmp;

        list_for_each_entry_safe(op, tmp, oplist, list) {
                arch_unoptimize_kprobe(op);
                list_move(&op->list, done_list);
        }
}

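/*
 * An address falls within this optprobe if it lies inside the
 * instruction replaced by the branch (a single 4-byte instruction
 * on powerpc).
 */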
int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
        return (op->kp.addr <= addr &&
                op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}