// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

#define MIN_STACK_SIZE(addr)	min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Attempt to probe at unaligned address */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Address should not be in exception handling code */

	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
	p->opcode = *p->addr;

	return 0;
}
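
/*
 * Arming replaces the first 16-bit slot of the probed instruction
 * with UNIMP_S, whose execution raises an instruction error that
 * reaches us via the DIE_IERR notifier chain. The I-cache is flushed
 * so no core keeps executing the stale original opcode.
 */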
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = UNIMP_S_INSTRUCTION;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	arch_disarm_kprobe(p);

	/* Can we remove the kprobe in the middle of kprobe handling? */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
				       struct pt_regs *regs)
{
	/* Remove the trap instructions inserted for single stepping and
	 * restore the original instructions
	 */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}
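
/*
 * Single stepping here is done with traps rather than a hardware
 * single-step mode: the original opcode is copied back over the
 * probe site, a TRAP_S 2 is planted at the next PC (t1) and, if the
 * stepped instruction is a branch, at the branch target too (t2).
 * The resulting DIE_TRAP notification drives arc_post_kprobe_handler().
 */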
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location so the original
	 * instruction gets executed. Because of this we cannot hit the
	 * same kprobe again until this one is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction
	 * to single step. If it is a branch we insert the trap at possible
	 * branch targets
	 */

	bta = regs->bta;

	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}
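
/*
 * Probe-hit entry point, reached from kprobe_exceptions_notify() on
 * DIE_IERR. Returns 1 if the trap belonged to a kprobe.
 */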
static int
__kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have reentered the kprobe_handler because another probe
		 * was hit while within the handler. In that case we save the
		 * original kprobe, single step on the instruction of the new
		 * probe and skip any user handlers, to avoid recursive
		 * kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero - which means the user handler set up registers to
		 * exit to another instruction - we must skip the single
		 * stepping.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		} else {
			reset_current_kprobe();
			preempt_enable_no_resched();
		}

		return 1;
	}

	/* no_kprobe: */
	preempt_enable_no_resched();
	return 0;
}
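
/*
 * Runs on the DIE_TRAP raised by a TRAP_S 2 planted in
 * setup_singlestep(): the original instruction has now executed, so
 * remove the step traps, re-arm the probe and resume at 'addr'.
 */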
static int
__kprobes arc_post_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from the trap instruction we go to the next
	 * instruction. We restored the actual instruction in
	 * resume_execution(), so we return to the same address and
	 * execute it.
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

/*
 * The fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable to applications like user probes, where we have
 * the probe in user space and the handlers in the kernel.
 */

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and let the
		 * exception handler continue as if it were a regular
		 * exception. In our case it doesn't matter because the
		 * system will be halted.
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because an instruction in the pre/post handler
		 * caused the fault.
		 */

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}
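
/*
 * Single notifier hook for both kprobe exceptions: DIE_IERR is an
 * armed probe firing, DIE_TRAP is the post-single-step trap (also
 * raised via trap_is_kprobe() at the end of this file).
 */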
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	unsigned long addr = args->err;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_IERR:
		if (arc_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	case DIE_TRAP:
		if (arc_post_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	default:
		break;
	}

	return ret;
}
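
/*
 * The kretprobe trampoline is just the global label
 * __kretprobe_trampoline placed on a 'nop'. arch_prepare_kretprobe()
 * saves the real return address and redirects 'blink' (the ARC
 * return-address register) to this label, so the probed function
 * "returns" into the trampoline probe registered below.
 */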
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global __kretprobe_trampoline\n"
			     "__kretprobe_trampoline:\n"
			     "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&__kretprobe_trampoline;
}
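
/*
 * Hit when a hijacked return lands on the trampoline 'nop': the
 * generic __kretprobe_trampoline_handler() runs the user handlers
 * and hands back the original return address.
 */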
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	regs->ret = __kretprobe_trampoline_handler(regs, NULL);

	/* By returning a non-zero value, we are telling the kprobe handler
	 * that we don't want the post_handler to run
	 */
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Register the trampoline code for the kretprobe */
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &__kretprobe_trampoline)
		return 1;

	return 0;
}

void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}