// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/rseq.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long tpidr2_offset;
	unsigned long za_offset;
	unsigned long zt_offset;
	unsigned long fpmr_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}
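
/*
 * Usage sketch (struct foo_context is hypothetical, for illustration
 * only; the real callers are setup_sigframe_layout() and
 * setup_sigframe() below):
 *
 *	unsigned long foo_offset;
 *
 *	err = sigframe_alloc(user, &foo_offset, sizeof(struct foo_context));
 *	if (err)
 *		return err;
 *	...
 *	struct foo_context __user *ctx =
 *		apply_user_offset(user, foo_offset);
 *
 * The round_up(size, 16) in __sigframe_alloc() keeps every record
 * 16-byte aligned, matching the alignment checks in
 * parse_user_sigframe().
 */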

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	u32 fpsimd_size;
	struct sve_context __user *sve;
	u32 sve_size;
	struct tpidr2_context __user *tpidr2;
	u32 tpidr2_size;
	struct za_context __user *za;
	u32 za_size;
	struct zt_context __user *zt;
	u32 zt_size;
	struct fpmr_context __user *fpmr;
	u32 fpmr_size;
};

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct user_ctxs *user)
{
	struct user_fpsimd_state fpsimd;
	int err = 0;

	/* check the size information */
	if (user->fpsimd_size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);

	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

static int preserve_fpmr_context(struct fpmr_context __user *ctx)
{
	int err = 0;

	current->thread.uw.fpmr = read_sysreg_s(SYS_FPMR);

	__put_user_error(FPMR_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.uw.fpmr, &ctx->fpmr, err);

	return err;
}

static int restore_fpmr_context(struct user_ctxs *user)
{
	u64 fpmr;
	int err = 0;

	if (user->fpmr_size != sizeof(*user->fpmr))
		return -EINVAL;

	__get_user_error(fpmr, &user->fpmr->fpmr, err);
	if (!err)
		write_sysreg_s(fpmr, SYS_FPMR);

	return err;
}

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (current->thread.fp_type == FP_STATE_SVE) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	u16 user_vl, flags;

	if (user->sve_size < sizeof(*user->sve))
		return -EINVAL;

	__get_user_error(user_vl, &(user->sve->vl), err);
	__get_user_error(flags, &(user->sve->flags), err);
	if (err)
		return err;

	if (flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (user_vl != vl)
		return -EINVAL;

	if (user->sve_size == sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(vl);

	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
{
	int err = 0;

	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);

	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);

	return err;
}

static int restore_tpidr2_context(struct user_ctxs *user)
{
	u64 tpidr2_el0;
	int err = 0;

	if (user->tpidr2_size != sizeof(*user->tpidr2))
		return -EINVAL;

	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
	if (!err)
		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);

	return err;
}

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.sme_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err = 0;
	unsigned int vq;
	u16 user_vl;

	if (user->za_size < sizeof(*user->za))
		return -EINVAL;

	__get_user_error(user_vl, &(user->za->vl), err);
	if (err)
		return err;

	if (user_vl != task_get_sme_vl(current))
		return -EINVAL;

	if (user->za_size == sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(user_vl);

	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sme_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sme_state */

	sme_alloc(current, true);
	if (!current->thread.sme_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sme_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}

static int preserve_zt_context(struct zt_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];

	if (WARN_ON(!thread_za_enabled(&current->thread)))
		return -EINVAL;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
			 &ctx->head.size, err);
	__put_user_error(1, &ctx->nregs, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	/*
	 * This assumes that the ZT state has already been saved to
	 * the task struct by calling the function
	 * fpsimd_signal_preserve_current_state().
	 */
	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
			      thread_zt_state(&current->thread),
			      ZT_SIG_REGS_SIZE(1));

	return err ? -EFAULT : 0;
}

static int restore_zt_context(struct user_ctxs *user)
{
	int err;
	u16 nregs;

	/* ZA must be restored first for this check to be valid */
	if (!thread_za_enabled(&current->thread))
		return -EINVAL;

	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
		return -EINVAL;

	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
		return -EFAULT;

	if (nregs != 1)
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.zt_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */

	err = __copy_from_user(thread_zt_state(&current->thread),
			       (char __user const *)user->zt +
					ZT_SIG_REGS_OFFSET,
			       ZT_SIG_REGS_SIZE(1));
	if (err)
		return -EFAULT;

	return 0;
}

#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_tpidr2_context(void __user *ctx);
extern int restore_tpidr2_context(struct user_ctxs *user);
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);
extern int preserve_zt_context(void __user *ctx);
extern int restore_zt_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */
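
/*
 * The __reserved[] area of struct sigcontext holds a chain of 16-byte
 * aligned { magic, size } records.  An illustrative layout (the sizes
 * depend on configuration and are not ABI beyond the record format
 * itself):
 *
 *	0x000	{ FPSIMD_MAGIC, 0x210 }	struct fpsimd_context
 *	0x210	{ ESR_MAGIC,    0x010 }	struct esr_context
 *	0x220	{ SVE_MAGIC,    ...   }	struct sve_context (if present)
 *	...
 *	end	{ 0, 0 }		terminator
 *
 * An EXTRA_MAGIC record, when present, points at a further record chain
 * beyond the base frame.  parse_user_sigframe() below consumes this
 * format.
 */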

static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->tpidr2 = NULL;
	user->za = NULL;
	user->zt = NULL;
	user->fpmr = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			user->fpsimd_size = size;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			user->sve_size = size;
			break;

		case TPIDR2_MAGIC:
			if (!system_supports_tpidr2())
				goto invalid;

			if (user->tpidr2)
				goto invalid;

			user->tpidr2 = (struct tpidr2_context __user *)head;
			user->tpidr2_size = size;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			user->za = (struct za_context __user *)head;
			user->za_size = size;
			break;

		case ZT_MAGIC:
			if (!system_supports_sme2())
				goto invalid;

			if (user->zt)
				goto invalid;

			user->zt = (struct zt_context __user *)head;
			user->zt_size = size;
			break;

		case FPMR_MAGIC:
			if (!system_supports_fpmr())
				goto invalid;

			if (user->fpmr)
				goto invalid;

			user->fpmr = (struct fpmr_context __user *)head;
			user->fpmr_size = size;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}
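
/*
 * For reference, userspace can walk the same record chain from a signal
 * handler.  A minimal sketch (userspace C, assuming the uapi
 * <asm/sigcontext.h> definitions; error checking omitted):
 *
 *	static void handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *		struct _aarch64_ctx *head =
 *			(struct _aarch64_ctx *)&uc->uc_mcontext.__reserved;
 *
 *		while (head->magic) {
 *			if (head->magic == FPSIMD_MAGIC) {
 *				struct fpsimd_context *f = (void *)head;
 *				// f->vregs[], f->fpsr, f->fpcr
 *			}
 *			head = (void *)head + head->size;
 *		}
 *	}
 *
 * See Documentation/arch/arm64/signal.rst for the authoritative
 * description of this format.
 */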

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(&user);
	}

	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
		err = restore_tpidr2_context(&user);

	if (err == 0 && system_supports_fpmr() && user.fpmr)
		err = restore_fpmr_context(&user);

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	if (err == 0 && system_supports_sme2() && user.zt)
		err = restore_zt_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 16-byte aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}
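
/*
 * Because restore_sigframe() reloads pt_regs from uc_mcontext, a handler
 * may edit the saved state before returning and the edit takes effect at
 * sigreturn.  A classic sketch (userspace C) that skips over a faulting
 * A64 instruction:
 *
 *	static void segv_handler(int sig, siginfo_t *info, void *ucp)
 *	{
 *		ucontext_t *uc = ucp;
 *
 *		uc->uc_mcontext.pc += 4;	// every A64 insn is 4 bytes
 *	}
 *
 * valid_user_regs() above still sanitises the restored pstate, so a
 * handler cannot smuggle privileged state back in this way.
 */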

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_tpidr2()) {
		err = sigframe_alloc(user, &user->tpidr2_offset,
				     sizeof(struct tpidr2_context));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme2()) {
		if (add_all || thread_za_enabled(&current->thread)) {
			err = sigframe_alloc(user, &user->zt_offset,
					     ZT_SIG_CONTEXT_SIZE(1));
			if (err)
				return err;
		}
	}

	if (system_supports_fpmr()) {
		err = sigframe_alloc(user, &user->fpmr_offset,
				     sizeof(struct fpmr_context));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}
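
/*
 * sve_vq_from_vl() is plain arithmetic: one VQ is a 128-bit quadword,
 * so a vector length of vl bytes gives vq = vl / 16, e.g. a 256-bit VL
 * (vl = 32) gives vq = 2.  With add_all set (as minsigstksz_setup()
 * does), the layout above instead assumes the maximum possible vector
 * lengths, so the advertised frame size covers any later vector length
 * change.
 */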

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* TPIDR2 if supported */
	if (system_supports_tpidr2() && err == 0) {
		struct tpidr2_context __user *tpidr2_ctx =
			apply_user_offset(user, user->tpidr2_offset);
		err |= preserve_tpidr2_context(tpidr2_ctx);
	}

	/* FPMR if supported */
	if (system_supports_fpmr() && err == 0) {
		struct fpmr_context __user *fpmr_ctx =
			apply_user_offset(user, user->fpmr_offset);
		err |= preserve_fpmr_context(fpmr_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	/* ZT state if present */
	if (system_supports_sme2() && err == 0 && user->zt_offset) {
		struct zt_context __user *zt_ctx =
			apply_user_offset(user, user->zt_offset);
		err |= preserve_zt_context(zt_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}
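
/*
 * Resulting stack layout (stack grows down; illustrative):
 *
 *	sp_top ->	original sp, or the alternate signal stack
 *			[ struct frame_record ]	<- user->next_frame
 *			[ struct rt_sigframe  ]	<- user->sigframe == new sp
 *
 * Both blocks are rounded down to 16 bytes, preserving the AAPCS64
 * requirement that sp be 16-byte aligned on entry to the handler.
 */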

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}
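
/*
 * With PSTATE.BTYPE set to BTYPE_C by setup_return(), a handler located
 * in a PROT_BTI guarded page must begin with a BTI landing pad, as
 * emitted by compilers under -mbranch-protection=bti (illustrative):
 *
 *	handler:
 *		bti	c	// accepts the simulated indirect call
 *		...
 */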

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we came from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call.  As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 10);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);
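
/*
 * Userspace consumes signal_minsigstksz via AT_MINSIGSTKSZ.  A sizing
 * sketch (userspace C):
 *
 *	#include <sys/auxv.h>
 *
 *	unsigned long min = getauxval(AT_MINSIGSTKSZ);
 *	// size sigaltstack() buffers as min plus the handler's own needs
 */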