// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
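/*
 * Layout served by gpr_get()/gpr_set() for NT_PRSTATUS: the 32 GPRs,
 * then orig_a0, csr_era and csr_badvaddr, each 8 bytes wide.
 *
 * A minimal userspace sketch of reading it (illustrative only; assumes
 * "pid" is a traced, stopped task and the uapi struct user_pt_regs):
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("pc = %#llx\n", (unsigned long long)uregs.csr_era);
 */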
/*
 * Get the general floating-point registers.
 */
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
	return membuf_write(to, &target->thread.fpu.fpr,
			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}

static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	save_fpu_regs(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}

static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
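/*
 * Resulting NT_PRFPREG wire format: 32 doubleword FPRs first, then the
 * 64-bit FCC block, then the 32-bit FCSR; e.g. fcsr sits at byte offset
 * 32 * 8 + 8 = 264 of the regset buffer.
 */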
static int cfg_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int i, r;
	u32 cfg_val;

	i = 0;
	while (to.left > 0) {
		cfg_val = read_cpucfg(i++);
		r = membuf_write(&to, &cfg_val, sizeof(u32));
	}

	return r;
}

/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}

#ifdef CONFIG_CPU_HAS_LSX

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}

static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_LSX */

#ifdef CONFIG_CPU_HAS_LBT
static int lbt_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));

	return r;
}
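/*
 * The NT_LOONGARCH_LBT regset packs five doublewords: scr0-scr3, then
 * the 32-bit eflags and the 32-bit ftop sharing the final u64 (matching
 * .n = 5 in loongarch64_regsets[] below).
 */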
static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
#endif /* CONFIG_CPU_HAS_LBT */

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
		if (current->thread.hbp_break[i] == bp)
			break;

	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
		if (current->thread.hbp_watch[i] == bp)
			break;

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		bp = tsk->thread.hbp_break[idx];
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		bp = tsk->thread.hbp_watch[idx];
		break;
	default:
		/* Unknown note type: don't return bp uninitialized */
		return ERR_PTR(-EINVAL);
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		tsk->thread.hbp_break[idx] = bp;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		tsk->thread.hbp_watch[idx] = bp;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
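/*
 * Note-type mapping used throughout: NT_LOONGARCH_HW_BREAK addresses the
 * instruction breakpoint slots (thread.hbp_break), NT_LOONGARCH_HW_WATCH
 * the data watchpoint slots (thread.hbp_watch).
 */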
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	attr->bp_len = len;
	attr->bp_type = type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}

static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}
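/*
 * The ptrace_hbp_get_*() helpers report 0 for a slot whose perf event
 * has not been created yet, so an unused slot reads back as "nothing
 * programmed" rather than as an error.
 */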
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct thread_info *ti = task_thread_info(tsk);

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case NT_LOONGARCH_HW_WATCH:
		decode_ctrl_reg(uctrl, &ctrl);
		break;
	default:
		return -EINVAL;
	}

	if (uctrl & CTRL_PLV_ENABLE) {
		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
		if (err)
			return err;
		attr.disabled = 0;
		set_ti_thread_flag(ti, TIF_LOAD_WATCH);
	} else {
		attr.disabled = 1;
		clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
	}

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	/* Kernel-space address cannot be monitored by user-space */
	if ((unsigned long)addr >= XKPRANGE)
		return -EINVAL;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
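/*
 * Wire format of the NT_LOONGARCH_HW_BREAK/HW_WATCH regsets, matching
 * struct user_watch_state from the uapi header: one u64 of resource
 * info (slot count in the low bits), then {addr, mask, ctrl, pad} per
 * slot. A minimal userspace sketch (illustrative only; assumes "pid"
 * is a traced, stopped task):
 *
 *	struct user_watch_state ws;
 *	struct iovec iov = { .iov_base = &ws, .iov_len = sizeof(ws) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov) == 0)
 *		printf("%d watchpoint slots\n", (int)(ws.dbg_info & 0xff));
 */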
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}

#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
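/*
 * Example: regs_query_register_offset("csr_era") below returns
 * offsetof(struct pt_regs, csr_era); consumers such as kprobe event
 * parsing can use this to translate a register name into a byte
 * offset within pt_regs.
 */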
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type	= NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type	= NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type	= NT_LOONGARCH_LBT,
		.n		= 5,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type	= NT_LOONGARCH_HW_BREAK,
		.n		= sizeof(struct user_watch_state) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type	= NT_LOONGARCH_HW_WATCH,
		.n		= sizeof(struct user_watch_state) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}

static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;

	switch (addr) {
	case 0 ... 31:
		tmp = task_pt_regs(target)->regs[addr];
		break;
	case ARG0:
		tmp = task_pt_regs(target)->orig_a0;
		break;
	case PC:
		tmp = task_pt_regs(target)->csr_era;
		break;
	case BADVADDR:
		tmp = task_pt_regs(target)->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}
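/*
 * PTRACE_PEEKUSR/POKEUSR address map used by read_user()/write_user():
 * 0-31 index the GPRs directly; ARG0, PC and BADVADDR (see asm/reg.h)
 * select orig_a0, csr_era and csr_badvaddr respectively.
 */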
static inline int write_user(struct task_struct *target, unsigned long addr,
			     unsigned long data)
{
	switch (addr) {
	case 0 ... 31:
		task_pt_regs(target)->regs[addr] = data;
		break;
	case ARG0:
		task_pt_regs(target)->orig_a0 = data;
		break;
	case PC:
		task_pt_regs(target)->csr_era = data;
		break;
	case BADVADDR:
		task_pt_regs(target)->csr_badvaddr = data;
		break;
	default:
		return -EIO;
	}

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif