1 // SPDX-License-Identifier: GPL-2.0-or-later << 2 /* 1 /* 3 * Copyright (C) 2009 Sunplus Core Technology !! 2 * This file is subject to the terms and conditions of the GNU General Public 4 * Chen Liqin <liqin.chen@sunplusct.com> !! 3 * License. See the file "COPYING" in the main directory of this archive 5 * Lennox Wu <lennox.wu@sunplusct.com> !! 4 * for more details. 6 * Copyright (C) 2012 Regents of the Universit !! 5 * 7 * Copyright (C) 2017 SiFive !! 6 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. >> 7 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) >> 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. >> 9 * Copyright (C) 2004 Thiemo Seufer >> 10 * Copyright (C) 2013 Imagination Technologies Ltd. 8 */ 11 */ 9 !! 12 #include <linux/errno.h> 10 #include <linux/cpu.h> << 11 #include <linux/kernel.h> << 12 #include <linux/sched.h> 13 #include <linux/sched.h> 13 #include <linux/sched/debug.h> << 14 #include <linux/sched/task_stack.h> << 15 #include <linux/tick.h> 14 #include <linux/tick.h> >> 15 #include <linux/kernel.h> >> 16 #include <linux/mm.h> >> 17 #include <linux/stddef.h> >> 18 #include <linux/unistd.h> >> 19 #include <linux/export.h> 16 #include <linux/ptrace.h> 20 #include <linux/ptrace.h> 17 #include <linux/uaccess.h> !! 21 #include <linux/mman.h> 18 #include <linux/personality.h> 22 #include <linux/personality.h> 19 !! 23 #include <linux/sys.h> 20 #include <asm/unistd.h> !! 24 #include <linux/init.h> >> 25 #include <linux/completion.h> >> 26 #include <linux/kallsyms.h> >> 27 #include <linux/random.h> >> 28 #include <linux/prctl.h> >> 29 >> 30 #include <asm/asm.h> >> 31 #include <asm/bootinfo.h> >> 32 #include <asm/cpu.h> >> 33 #include <asm/dsp.h> >> 34 #include <asm/fpu.h> >> 35 #include <asm/irq.h> >> 36 #include <asm/msa.h> >> 37 #include <asm/pgtable.h> >> 38 #include <asm/mipsregs.h> 21 #include <asm/processor.h> 39 #include <asm/processor.h> 22 #include <asm/csr.h> !! 40 #include <asm/reg.h> >> 41 #include <asm/uaccess.h> >> 42 #include <asm/io.h> >> 43 #include <asm/elf.h> >> 44 #include <asm/isadep.h> >> 45 #include <asm/inst.h> 23 #include <asm/stacktrace.h> 46 #include <asm/stacktrace.h> 24 #include <asm/string.h> !! 47 #include <asm/irq_regs.h> 25 #include <asm/switch_to.h> << 26 #include <asm/thread_info.h> << 27 #include <asm/cpuidle.h> << 28 #include <asm/vector.h> << 29 #include <asm/cpufeature.h> << 30 #include <asm/exec.h> << 31 48 32 #if defined(CONFIG_STACKPROTECTOR) && !defined !! 49 #ifdef CONFIG_HOTPLUG_CPU 33 #include <linux/stackprotector.h> !! 50 void arch_cpu_idle_dead(void) 34 unsigned long __stack_chk_guard __read_mostly; !! 51 { 35 EXPORT_SYMBOL(__stack_chk_guard); !! 52 play_dead(); >> 53 } 36 #endif 54 #endif 37 55 38 extern asmlinkage void ret_from_fork(void); !! 56 asmlinkage void ret_from_fork(void); >> 57 asmlinkage void ret_from_kernel_thread(void); 39 58 40 void noinstr arch_cpu_idle(void) !! 59 void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) 41 { 60 { 42 cpu_do_idle(); !! 61 unsigned long status; >> 62 >> 63 /* New thread loses kernel privileges. */ >> 64 status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK); >> 65 status |= KU_USER; >> 66 regs->cp0_status = status; >> 67 clear_used_math(); >> 68 clear_fpu_owner(); >> 69 init_dsp(); >> 70 clear_thread_flag(TIF_USEDMSA); >> 71 clear_thread_flag(TIF_MSA_CTX_LIVE); >> 72 disable_msa(); >> 73 regs->cp0_epc = pc; >> 74 regs->regs[29] = sp; 43 } 75 } 44 76 45 int set_unalign_ctl(struct task_struct *tsk, u !! 
77 void exit_thread(void) 46 { 78 { 47 if (!unaligned_ctl_available()) !! 79 } 48 return -EINVAL; !! 80 >> 81 void flush_thread(void) >> 82 { >> 83 } >> 84 >> 85 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) >> 86 { >> 87 /* >> 88 * Save any process state which is live in hardware registers to the >> 89 * parent context prior to duplication. This prevents the new child >> 90 * state becoming stale if the parent is preempted before copy_thread() >> 91 * gets a chance to save the parent's live hardware registers to the >> 92 * child context. >> 93 */ >> 94 preempt_disable(); >> 95 >> 96 if (is_msa_enabled()) >> 97 save_msa(current); >> 98 else if (is_fpu_owner()) >> 99 _save_fp(current); 49 100 50 tsk->thread.align_ctl = val; !! 101 save_dsp(current); >> 102 >> 103 preempt_enable(); >> 104 >> 105 *dst = *src; 51 return 0; 106 return 0; 52 } 107 } 53 108 54 int get_unalign_ctl(struct task_struct *tsk, u !! 109 /* >> 110 * Copy architecture-specific thread state >> 111 */ >> 112 int copy_thread(unsigned long clone_flags, unsigned long usp, >> 113 unsigned long kthread_arg, struct task_struct *p) 55 { 114 { 56 if (!unaligned_ctl_available()) !! 115 struct thread_info *ti = task_thread_info(p); 57 return -EINVAL; !! 116 struct pt_regs *childregs, *regs = current_pt_regs(); >> 117 unsigned long childksp; >> 118 >> 119 childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; >> 120 >> 121 /* set up new TSS. */ >> 122 childregs = (struct pt_regs *) childksp - 1; >> 123 /* Put the stack after the struct pt_regs. */ >> 124 childksp = (unsigned long) childregs; >> 125 p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); >> 126 if (unlikely(p->flags & PF_KTHREAD)) { >> 127 /* kernel thread */ >> 128 unsigned long status = p->thread.cp0_status; >> 129 memset(childregs, 0, sizeof(struct pt_regs)); >> 130 ti->addr_limit = KERNEL_DS; >> 131 p->thread.reg16 = usp; /* fn */ >> 132 p->thread.reg17 = kthread_arg; >> 133 p->thread.reg29 = childksp; >> 134 p->thread.reg31 = (unsigned long) ret_from_kernel_thread; >> 135 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) >> 136 status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | >> 137 ((status & (ST0_KUC | ST0_IEC)) << 2); >> 138 #else >> 139 status |= ST0_EXL; >> 140 #endif >> 141 childregs->cp0_status = status; >> 142 return 0; >> 143 } >> 144 >> 145 /* user thread */ >> 146 *childregs = *regs; >> 147 childregs->regs[7] = 0; /* Clear error flag */ >> 148 childregs->regs[2] = 0; /* Child gets zero as return value */ >> 149 if (usp) >> 150 childregs->regs[29] = usp; >> 151 ti->addr_limit = USER_DS; 58 152 59 return put_user(tsk->thread.align_ctl, !! 153 p->thread.reg29 = (unsigned long) childregs; >> 154 p->thread.reg31 = (unsigned long) ret_from_fork; >> 155 >> 156 /* >> 157 * New tasks lose permission to use the fpu. This accelerates context >> 158 * switching for most programs since they don't use the fpu. >> 159 */ >> 160 childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); >> 161 >> 162 clear_tsk_thread_flag(p, TIF_USEDFPU); >> 163 clear_tsk_thread_flag(p, TIF_USEDMSA); >> 164 clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); >> 165 >> 166 #ifdef CONFIG_MIPS_MT_FPAFF >> 167 clear_tsk_thread_flag(p, TIF_FPUBOUND); >> 168 #endif /* CONFIG_MIPS_MT_FPAFF */ >> 169 >> 170 if (clone_flags & CLONE_SETTLS) >> 171 ti->tp_value = regs->regs[7]; >> 172 >> 173 return 0; 60 } 174 } 61 175 62 void __show_regs(struct pt_regs *regs) !! 
176 #ifdef CONFIG_CC_STACKPROTECTOR >> 177 #include <linux/stackprotector.h> >> 178 unsigned long __stack_chk_guard __read_mostly; >> 179 EXPORT_SYMBOL(__stack_chk_guard); >> 180 #endif >> 181 >> 182 struct mips_frame_info { >> 183 void *func; >> 184 unsigned long func_size; >> 185 int frame_size; >> 186 int pc_offset; >> 187 }; >> 188 >> 189 #define J_TARGET(pc,target) \ >> 190 (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) >> 191 >> 192 static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) 63 { 193 { 64 show_regs_print_info(KERN_DEFAULT); !! 194 #ifdef CONFIG_CPU_MICROMIPS >> 195 /* >> 196 * swsp ra,offset >> 197 * swm16 reglist,offset(sp) >> 198 * swm32 reglist,offset(sp) >> 199 * sw32 ra,offset(sp) >> 200 * jradiussp - NOT SUPPORTED >> 201 * >> 202 * microMIPS is way more fun... >> 203 */ >> 204 if (mm_insn_16bit(ip->halfword[1])) { >> 205 switch (ip->mm16_r5_format.opcode) { >> 206 case mm_swsp16_op: >> 207 if (ip->mm16_r5_format.rt != 31) >> 208 return 0; >> 209 >> 210 *poff = ip->mm16_r5_format.imm; >> 211 *poff = (*poff << 2) / sizeof(ulong); >> 212 return 1; >> 213 >> 214 case mm_pool16c_op: >> 215 switch (ip->mm16_m_format.func) { >> 216 case mm_swm16_op: >> 217 *poff = ip->mm16_m_format.imm; >> 218 *poff += 1 + ip->mm16_m_format.rlist; >> 219 *poff = (*poff << 2) / sizeof(ulong); >> 220 return 1; >> 221 >> 222 default: >> 223 return 0; >> 224 } >> 225 >> 226 default: >> 227 return 0; >> 228 } >> 229 } >> 230 >> 231 switch (ip->i_format.opcode) { >> 232 case mm_sw32_op: >> 233 if (ip->i_format.rs != 29) >> 234 return 0; >> 235 if (ip->i_format.rt != 31) >> 236 return 0; >> 237 >> 238 *poff = ip->i_format.simmediate / sizeof(ulong); >> 239 return 1; >> 240 >> 241 case mm_pool32b_op: >> 242 switch (ip->mm_m_format.func) { >> 243 case mm_swm32_func: >> 244 if (ip->mm_m_format.rd < 0x10) >> 245 return 0; >> 246 if (ip->mm_m_format.base != 29) >> 247 return 0; >> 248 >> 249 *poff = ip->mm_m_format.simmediate; >> 250 *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); >> 251 *poff /= sizeof(ulong); >> 252 return 1; >> 253 default: >> 254 return 0; >> 255 } 65 256 66 if (!user_mode(regs)) { !! 257 default: 67 pr_cont("epc : %pS\n", (void * !! 258 return 0; 68 pr_cont(" ra : %pS\n", (void * !! 259 } >> 260 #else >> 261 /* sw / sd $ra, offset($sp) */ >> 262 if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && >> 263 ip->i_format.rs == 29 && ip->i_format.rt == 31) { >> 264 *poff = ip->i_format.simmediate / sizeof(ulong); >> 265 return 1; 69 } 266 } 70 267 71 pr_cont("epc : " REG_FMT " ra : " REG_ !! 268 return 0; 72 regs->epc, regs->ra, regs->sp) !! 269 #endif 73 pr_cont(" gp : " REG_FMT " tp : " REG_ !! 270 } 74 regs->gp, regs->tp, regs->t0); << 75 pr_cont(" t1 : " REG_FMT " t2 : " REG_ << 76 regs->t1, regs->t2, regs->s0); << 77 pr_cont(" s1 : " REG_FMT " a0 : " REG_ << 78 regs->s1, regs->a0, regs->a1); << 79 pr_cont(" a2 : " REG_FMT " a3 : " REG_ << 80 regs->a2, regs->a3, regs->a4); << 81 pr_cont(" a5 : " REG_FMT " a6 : " REG_ << 82 regs->a5, regs->a6, regs->a7); << 83 pr_cont(" s2 : " REG_FMT " s3 : " REG_ << 84 regs->s2, regs->s3, regs->s4); << 85 pr_cont(" s5 : " REG_FMT " s6 : " REG_ << 86 regs->s5, regs->s6, regs->s7); << 87 pr_cont(" s8 : " REG_FMT " s9 : " REG_ << 88 regs->s8, regs->s9, regs->s10) << 89 pr_cont(" s11: " REG_FMT " t3 : " REG_ << 90 regs->s11, regs->t3, regs->t4) << 91 pr_cont(" t5 : " REG_FMT " t6 : " REG_ << 92 regs->t5, regs->t6); << 93 271 94 pr_cont("status: " REG_FMT " badaddr: !! 
272 static inline int is_jump_ins(union mips_instruction *ip) 95 regs->status, regs->badaddr, r !! 273 { >> 274 #ifdef CONFIG_CPU_MICROMIPS >> 275 /* >> 276 * jr16,jrc,jalr16,jalr16 >> 277 * jal >> 278 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb >> 279 * jraddiusp - NOT SUPPORTED >> 280 * >> 281 * microMIPS is kind of more fun... >> 282 */ >> 283 if (mm_insn_16bit(ip->halfword[1])) { >> 284 if ((ip->mm16_r5_format.opcode == mm_pool16c_op && >> 285 (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) >> 286 return 1; >> 287 return 0; >> 288 } >> 289 >> 290 if (ip->j_format.opcode == mm_j32_op) >> 291 return 1; >> 292 if (ip->j_format.opcode == mm_jal32_op) >> 293 return 1; >> 294 if (ip->r_format.opcode != mm_pool32a_op || >> 295 ip->r_format.func != mm_pool32axf_op) >> 296 return 0; >> 297 return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op; >> 298 #else >> 299 if (ip->j_format.opcode == j_op) >> 300 return 1; >> 301 if (ip->j_format.opcode == jal_op) >> 302 return 1; >> 303 if (ip->r_format.opcode != spec_op) >> 304 return 0; >> 305 return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; >> 306 #endif 96 } 307 } 97 void show_regs(struct pt_regs *regs) !! 308 >> 309 static inline int is_sp_move_ins(union mips_instruction *ip) 98 { 310 { 99 __show_regs(regs); !! 311 #ifdef CONFIG_CPU_MICROMIPS 100 if (!user_mode(regs)) !! 312 /* 101 dump_backtrace(regs, NULL, KER !! 313 * addiusp -imm >> 314 * addius5 sp,-imm >> 315 * addiu32 sp,sp,-imm >> 316 * jradiussp - NOT SUPPORTED >> 317 * >> 318 * microMIPS is not more fun... >> 319 */ >> 320 if (mm_insn_16bit(ip->halfword[1])) { >> 321 return (ip->mm16_r3_format.opcode == mm_pool16d_op && >> 322 ip->mm16_r3_format.simmediate && mm_addiusp_func) || >> 323 (ip->mm16_r5_format.opcode == mm_pool16d_op && >> 324 ip->mm16_r5_format.rt == 29); >> 325 } >> 326 >> 327 return ip->mm_i_format.opcode == mm_addiu32_op && >> 328 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29; >> 329 #else >> 330 /* addiu/daddiu sp,sp,-imm */ >> 331 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) >> 332 return 0; >> 333 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) >> 334 return 1; >> 335 #endif >> 336 return 0; 102 } 337 } 103 338 104 unsigned long arch_align_stack(unsigned long s !! 339 static int get_frame_info(struct mips_frame_info *info) 105 { 340 { 106 if (!(current->personality & ADDR_NO_R !! 341 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); 107 sp -= get_random_u32_below(PAG !! 342 union mips_instruction insn, *ip; 108 return sp & ~0xf; !! 
343 const unsigned int max_insns = 128;
>> 344 unsigned int last_insn_size = 0;
>> 345 unsigned int i;
>> 346
>> 347 info->pc_offset = -1;
>> 348 info->frame_size = 0;
>> 349
>> 350 ip = (void *)msk_isa16_mode((ulong)info->func);
>> 351 if (!ip)
>> 352 goto err;
>> 353
>> 354 for (i = 0; i < max_insns; i++) {
>> 355 ip = (void *)ip + last_insn_size;
>> 356
>> 357 if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
>> 358 insn.halfword[0] = 0;
>> 359 insn.halfword[1] = ip->halfword[0];
>> 360 last_insn_size = 2;
>> 361 } else if (is_mmips) {
>> 362 insn.halfword[0] = ip->halfword[1];
>> 363 insn.halfword[1] = ip->halfword[0];
>> 364 last_insn_size = 4;
>> 365 } else {
>> 366 insn.word = ip->word;
>> 367 last_insn_size = 4;
>> 368 }
>> 369
>> 370 if (is_jump_ins(&insn))
>> 371 break;
>> 372
>> 373 if (!info->frame_size) {
>> 374 if (is_sp_move_ins(&insn))
>> 375 {
>> 376 #ifdef CONFIG_CPU_MICROMIPS
>> 377 if (mm_insn_16bit(ip->halfword[0]))
>> 378 {
>> 379 unsigned short tmp;
>> 380
>> 381 if (ip->halfword[0] & mm_addiusp_func)
>> 382 {
>> 383 tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
>> 384 info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
>> 385 } else {
>> 386 tmp = (ip->halfword[0] >> 1);
>> 387 info->frame_size = -(signed short)(tmp & 0xf);
>> 388 }
>> 389 } else
>> 390 #endif
>> 391 info->frame_size = - ip->i_format.simmediate;
>> 392 }
>> 393 continue;
>> 394 }
>> 395 if (info->pc_offset == -1 &&
>> 396 is_ra_save_ins(&insn, &info->pc_offset))
>> 397 break;
>> 398 }
>> 399 if (info->frame_size && info->pc_offset >= 0) /* nested */
>> 400 return 0;
>> 401 if (info->pc_offset < 0) /* leaf */
>> 402 return 1;
>> 403 /* prologue seems bogus... */
>> 404 err:
>> 405 return -1;
109 } 406 }
110 407
111 #ifdef CONFIG_COMPAT !! 408 static struct mips_frame_info schedule_mfi __read_mostly;
112 static bool compat_mode_supported __read_mostl <<
113 409
114 bool compat_elf_check_arch(Elf32_Ehdr *hdr) !! 410 #ifdef CONFIG_KALLSYMS
>> 411 static unsigned long get___schedule_addr(void)
115 { 412 {
116 return compat_mode_supported && !! 413 return kallsyms_lookup_name("__schedule");
117 hdr->e_machine == EM_RISCV && <<
118 hdr->e_ident[EI_CLASS] == ELFCL <<
119 } 414 }
>> 415 #else
>> 416 static unsigned long get___schedule_addr(void)
>> 417 {
>> 418 union mips_instruction *ip = (void *)schedule;
>> 419 int max_insns = 8;
>> 420 int i;
120 421
121 static int __init compat_mode_detect(void) !! 422 for (i = 0; i < max_insns; i++, ip++) {
>> 423 if (ip->j_format.opcode == j_op)
>> 424 return J_TARGET(ip, ip->j_format.target);
>> 425 }
>> 426 return 0;
>> 427 }
>> 428 #endif
>> 429
>> 430 static int __init frame_info_init(void)
122 { 431 {
123 unsigned long tmp = csr_read(CSR_STATU !! 432 unsigned long size = 0;
>> 433 #ifdef CONFIG_KALLSYMS
>> 434 unsigned long ofs;
>> 435 #endif
>> 436 unsigned long addr;
124 437
125 csr_write(CSR_STATUS, (tmp & ~SR_UXL) !! 438 addr = get___schedule_addr();
126 compat_mode_supported = !! 439 if (!addr)
127 (csr_read(CSR_STATUS) !! 440 addr = (unsigned long)schedule;
128 441
129 csr_write(CSR_STATUS, tmp); !! 442 #ifdef CONFIG_KALLSYMS
>> 443 kallsyms_lookup_size_offset(addr, &size, &ofs);
>> 444 #endif
>> 445 schedule_mfi.func = (void *)addr;
>> 446 schedule_mfi.func_size = size;
130 447
131 pr_info("riscv: ELF compat mode %s", !! 448 get_frame_info(&schedule_mfi);
132 compat_mode_supported !! 449
>> 450 /*
>> 451 * Without schedule() frame info, the results given by
>> 452 * thread_saved_pc() and get_wchan() are not reliable.
>> 453 */
>> 454 if (schedule_mfi.pc_offset < 0)
>> 455 printk("Can't analyze schedule() prologue at %p\n", schedule);
133 456
134 return 0; 457 return 0;
135 } 458 }
136 early_initcall(compat_mode_detect); <<
137 #endif <<
138 459
139 void start_thread(struct pt_regs *regs, unsign !! 460 arch_initcall(frame_info_init);
140 unsigned long sp) !! 461
>> 462 /*
>> 463 * Return saved PC of a blocked thread.
>> 464 */
>> 465 unsigned long thread_saved_pc(struct task_struct *tsk)
141 { 466 {
142 regs->status = SR_PIE; !! 467 struct thread_struct *t = &tsk->thread;
143 if (has_fpu()) { !! 468
144 regs->status |= SR_FS_INITIAL; !! 469 /* Newborn processes are a special case */
>> 470 if (t->reg31 == (unsigned long) ret_from_fork)
>> 471 return t->reg31;
>> 472 if (schedule_mfi.pc_offset < 0)
>> 473 return 0;
>> 474 return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
>> 475 }
>> 476
>> 477
>> 478 #ifdef CONFIG_KALLSYMS
>> 479 /* generic stack unwinding function */
>> 480 unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
>> 481 unsigned long *sp,
>> 482 unsigned long pc,
>> 483 unsigned long *ra)
>> 484 {
>> 485 unsigned long low, high, irq_stack_high;
>> 486 struct mips_frame_info info;
>> 487 unsigned long size, ofs;
>> 488 struct pt_regs *regs;
>> 489 int leaf;
>> 490
>> 491 if (!stack_page)
>> 492 return 0;
>> 493
>> 494 /*
>> 495 * IRQ stacks start at IRQ_STACK_START
>> 496 * task stacks at THREAD_SIZE - 32
>> 497 */
>> 498 low = stack_page;
>> 499 if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
>> 500 high = stack_page + IRQ_STACK_START;
>> 501 irq_stack_high = high;
>> 502 } else {
>> 503 high = stack_page + THREAD_SIZE - 32;
>> 504 irq_stack_high = 0;
>> 505 }
>> 506
>> 507 /*
>> 508 * If we reached the top of the interrupt stack, start unwinding
>> 509 * the interrupted task stack.
>> 510 */
>> 511 if (unlikely(*sp == irq_stack_high)) {
>> 512 unsigned long task_sp = *(unsigned long *)*sp;
>> 513
>> 514 /*
>> 515 * Check that the pointer saved in the IRQ stack head points to
>> 516 * something within the stack of the current task
>> 517 */
>> 518 if (!object_is_on_stack((void *)task_sp))
>> 519 return 0;
>> 520
145 /* 521 /*
146 * Restore the initial value t !! 522 * Follow the pointer to the task's kernel stack frame where the
147 * before starting the user pr !! 523 * interrupted state was saved.
148 */ 524 */
149 fstate_restore(current, regs); !! 525 regs = (struct pt_regs *)task_sp;
>> 526 pc = regs->cp0_epc;
>> 527 if (!user_mode(regs) && __kernel_text_address(pc)) {
>> 528 *sp = regs->regs[29];
>> 529 *ra = regs->regs[31];
>> 530 return pc;
>> 531 }
>> 532 return 0;
150 } 533 }
151 regs->epc = pc; !! 534 if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
152 regs->sp = sp; !! 535 return 0;
>> 536 /*
>> 537 * Return ra if an exception occurred at the first instruction
>> 538 */
>> 539 if (unlikely(ofs == 0)) {
>> 540 pc = *ra;
>> 541 *ra = 0;
>> 542 return pc;
>> 543 }
>> 544
>> 545 info.func = (void *)(pc - ofs);
>> 546 info.func_size = ofs; /* analyze from start to ofs */
>> 547 leaf = get_frame_info(&info);
>> 548 if (leaf < 0)
>> 549 return 0;
153 550
154 #ifdef CONFIG_64BIT !! 551 if (*sp < low || *sp + info.frame_size > high)
155 regs->status &= ~SR_UXL; !! 552 return 0;
156 553
157 if (is_compat_task()) !! 554 if (leaf)
158 regs->status |= SR_UXL_32; !! 555 /*
>> 556 * In some extreme cases, get_frame_info() can
>> 557 * wrongly consider a nested function to be a
>> 558 * leaf one. In those cases, avoid always
>> 559 * returning the same value.
>> 560 */ >> 561 pc = pc != *ra ? *ra : 0; 159 else 562 else 160 regs->status |= SR_UXL_64; !! 563 pc = ((unsigned long *)(*sp))[info.pc_offset]; 161 #endif !! 564 >> 565 *sp += info.frame_size; >> 566 *ra = 0; >> 567 return __kernel_text_address(pc) ? pc : 0; >> 568 } >> 569 EXPORT_SYMBOL(unwind_stack_by_address); >> 570 >> 571 /* used by show_backtrace() */ >> 572 unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, >> 573 unsigned long pc, unsigned long *ra) >> 574 { >> 575 unsigned long stack_page = 0; >> 576 int cpu; >> 577 >> 578 for_each_possible_cpu(cpu) { >> 579 if (on_irq_stack(cpu, *sp)) { >> 580 stack_page = (unsigned long)irq_stack[cpu]; >> 581 break; >> 582 } >> 583 } >> 584 >> 585 if (!stack_page) >> 586 stack_page = (unsigned long)task_stack_page(task); >> 587 >> 588 return unwind_stack_by_address(stack_page, sp, pc, ra); 162 } 589 } >> 590 #endif 163 591 164 void flush_thread(void) !! 592 /* >> 593 * get_wchan - a maintenance nightmare^W^Wpain in the ass ... >> 594 */ >> 595 unsigned long get_wchan(struct task_struct *task) 165 { 596 { 166 #ifdef CONFIG_FPU !! 597 unsigned long pc = 0; 167 /* !! 598 #ifdef CONFIG_KALLSYMS 168 * Reset FPU state and context !! 599 unsigned long sp; 169 * frm: round to nearest, ties to !! 600 unsigned long ra = 0; 170 * fflags: accrued exceptions cle << 171 */ << 172 fstate_off(current, task_pt_regs(curre << 173 memset(¤t->thread.fstate, 0, siz << 174 #endif 601 #endif 175 #ifdef CONFIG_RISCV_ISA_V !! 602 176 /* Reset vector state */ !! 603 if (!task || task == current || task->state == TASK_RUNNING) 177 riscv_v_vstate_ctrl_init(current); !! 604 goto out; 178 riscv_v_vstate_off(task_pt_regs(curren !! 605 if (!task_stack_page(task)) 179 kfree(current->thread.vstate.datap); !! 606 goto out; 180 memset(¤t->thread.vstate, 0, siz !! 607 181 clear_tsk_thread_flag(current, TIF_RIS !! 608 pc = thread_saved_pc(task); >> 609 >> 610 #ifdef CONFIG_KALLSYMS >> 611 sp = task->thread.reg29 + schedule_mfi.frame_size; >> 612 >> 613 while (in_sched_functions(pc)) >> 614 pc = unwind_stack(task, &sp, pc, &ra); 182 #endif 615 #endif >> 616 >> 617 out: >> 618 return pc; 183 } 619 } 184 620 185 void arch_release_task_struct(struct task_stru !! 621 /* >> 622 * Don't forget that the stack pointer must be aligned on a 8 bytes >> 623 * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. >> 624 */ >> 625 unsigned long arch_align_stack(unsigned long sp) 186 { 626 { 187 /* Free the vector context of datap. * !! 627 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) 188 if (has_vector()) !! 628 sp -= get_random_int() & ~PAGE_MASK; 189 riscv_v_thread_free(tsk); !! 629 >> 630 return sp & ALMASK; 190 } 631 } 191 632 192 int arch_dup_task_struct(struct task_struct *d !! 633 static DEFINE_PER_CPU(struct call_single_data, backtrace_csd); >> 634 static struct cpumask backtrace_csd_busy; >> 635 >> 636 static void arch_dump_stack(void *info) 193 { 637 { 194 fstate_save(src, task_pt_regs(src)); !! 638 struct pt_regs *regs; 195 *dst = *src; !! 639 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED; 196 /* clear entire V context, including d << 197 memset(&dst->thread.vstate, 0, sizeof( << 198 memset(&dst->thread.kernel_vstate, 0, << 199 clear_tsk_thread_flag(dst, TIF_RISCV_V << 200 640 201 return 0; !! 
641 arch_spin_lock(&lock); >> 642 regs = get_irq_regs(); >> 643 >> 644 if (regs) >> 645 show_regs(regs); >> 646 else >> 647 dump_stack(); >> 648 arch_spin_unlock(&lock); >> 649 >> 650 cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy); 202 } 651 } 203 652 204 int copy_thread(struct task_struct *p, const s !! 653 void arch_trigger_all_cpu_backtrace(bool include_self) 205 { 654 { 206 unsigned long clone_flags = args->flag !! 655 struct call_single_data *csd; 207 unsigned long usp = args->stack; !! 656 int cpu; 208 unsigned long tls = args->tls; << 209 struct pt_regs *childregs = task_pt_re << 210 657 211 memset(&p->thread.s, 0, sizeof(p->thre !! 658 for_each_cpu(cpu, cpu_online_mask) { >> 659 /* >> 660 * If we previously sent an IPI to the target CPU & it hasn't >> 661 * cleared its bit in the busy cpumask then it didn't handle >> 662 * our previous IPI & it's not safe for us to reuse the >> 663 * call_single_data_t. >> 664 */ >> 665 if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { >> 666 pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", >> 667 cpu); >> 668 continue; >> 669 } >> 670 >> 671 csd = &per_cpu(backtrace_csd, cpu); >> 672 csd->func = arch_dump_stack; >> 673 smp_call_function_single_async(cpu, csd); >> 674 } >> 675 } 212 676 213 /* p->thread holds context to be resto !! 677 int mips_get_process_fp_mode(struct task_struct *task) 214 if (unlikely(args->fn)) { !! 678 { 215 /* Kernel thread */ !! 679 int value = 0; 216 memset(childregs, 0, sizeof(st << 217 /* Supervisor/Machine, irqs on << 218 childregs->status = SR_PP | SR << 219 680 220 p->thread.s[0] = (unsigned lon !! 681 if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) 221 p->thread.s[1] = (unsigned lon !! 682 value |= PR_FP_MODE_FR; 222 } else { !! 683 if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) 223 *childregs = *(current_pt_regs !! 684 value |= PR_FP_MODE_FRE; 224 /* Turn off status.VS */ !! 685 225 riscv_v_vstate_off(childregs); !! 686 return value; 226 if (usp) /* User fork */ << 227 childregs->sp = usp; << 228 if (clone_flags & CLONE_SETTLS << 229 childregs->tp = tls; << 230 childregs->a0 = 0; /* Return v << 231 p->thread.s[0] = 0; << 232 } << 233 p->thread.riscv_v_flags = 0; << 234 if (has_vector()) << 235 riscv_v_thread_alloc(p); << 236 p->thread.ra = (unsigned long)ret_from << 237 p->thread.sp = (unsigned long)childreg << 238 return 0; << 239 } 687 } 240 688 241 void __init arch_task_cache_init(void) !! 689 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) 242 { 690 { 243 riscv_v_setup_ctx_cache(); !! 691 const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; >> 692 unsigned long switch_count; >> 693 struct task_struct *t; >> 694 >> 695 /* If nothing to change, return right away, successfully. */ >> 696 if (value == mips_get_process_fp_mode(task)) >> 697 return 0; >> 698 >> 699 /* Only accept a mode change if 64-bit FP enabled for o32. */ >> 700 if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) >> 701 return -EOPNOTSUPP; >> 702 >> 703 /* And only for o32 tasks. */ >> 704 if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) >> 705 return -EOPNOTSUPP; >> 706 >> 707 /* Check the value is valid */ >> 708 if (value & ~known_bits) >> 709 return -EOPNOTSUPP; >> 710 >> 711 /* Setting FRE without FR is not supported. 
*/
>> 712 if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
>> 713 return -EOPNOTSUPP;
>> 714
>> 715 /* Avoid inadvertently triggering emulation */
>> 716 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
>> 717 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
>> 718 return -EOPNOTSUPP;
>> 719 if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
>> 720 return -EOPNOTSUPP;
>> 721
>> 722 /* FR = 0 not supported in MIPS R6 */
>> 723 if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
>> 724 return -EOPNOTSUPP;
>> 725
>> 726 /* Proceed with the mode switch */
>> 727 preempt_disable();
>> 728
>> 729 /* Save FP & vector context, then disable FPU & MSA */
>> 730 if (task->signal == current->signal)
>> 731 lose_fpu(1);
>> 732
>> 733 /* Prevent any threads from obtaining live FP context */
>> 734 atomic_set(&task->mm->context.fp_mode_switching, 1);
>> 735 smp_mb__after_atomic();
>> 736
>> 737 /*
>> 738 * If there are multiple online CPUs then wait until all threads whose
>> 739 * FP mode is about to change have been context switched. This approach
>> 740 * allows us to only worry about whether an FP mode switch is in
>> 741 * progress when FP is first used in a task's time slice. Pretty much all
>> 742 * of the mode switch overhead can thus be confined to cases where mode
>> 743 * switches are actually occurring. That is, to here. However for the
>> 744 * thread performing the mode switch it may take a while...
>> 745 */
>> 746 if (num_online_cpus() > 1) {
>> 747 spin_lock_irq(&task->sighand->siglock);
>> 748
>> 749 for_each_thread(task, t) {
>> 750 if (t == current)
>> 751 continue;
>> 752
>> 753 switch_count = t->nvcsw + t->nivcsw;
>> 754
>> 755 do {
>> 756 spin_unlock_irq(&task->sighand->siglock);
>> 757 cond_resched();
>> 758 spin_lock_irq(&task->sighand->siglock);
>> 759 } while ((t->nvcsw + t->nivcsw) == switch_count);
>> 760 }
>> 761
>> 762 spin_unlock_irq(&task->sighand->siglock);
>> 763 }
>> 764
>> 765 /*
>> 766 * There are now no threads of the process with live FP context, so it
>> 767 * is safe to proceed with the FP mode switch.
>> 768 */
>> 769 for_each_thread(task, t) {
>> 770 /* Update desired FP register width */
>> 771 if (value & PR_FP_MODE_FR) {
>> 772 clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
>> 773 } else {
>> 774 set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
>> 775 clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
>> 776 }
>> 777
>> 778 /* Update desired FP single layout */
>> 779 if (value & PR_FP_MODE_FRE)
>> 780 set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
>> 781 else
>> 782 clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
>> 783 }
>> 784
>> 785 /* Allow threads to use FP again */
>> 786 atomic_set(&task->mm->context.fp_mode_switching, 0);
>> 787 preempt_enable();
>> 788
>> 789 return 0;
244 } 790 }
245 791
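The FR/FRE mode switch implemented by mips_set_process_fp_mode() above is reached from userspace through the generic prctl() interface. A minimal sketch, assuming a MIPS target; PR_SET_FP_MODE, PR_GET_FP_MODE and the PR_FP_MODE_* bits come from <linux/prctl.h>, and the fallback definitions below (matching the upstream values) are only for older userspace headers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR  (1 << 0)
# define PR_FP_MODE_FRE (1 << 1)
#endif

int main(void)
{
	/* Ask for 64-bit FP registers (FR=1); the kernel may refuse with
	 * EOPNOTSUPP for any of the reasons checked in
	 * mips_set_process_fp_mode() above. */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
		perror("PR_SET_FP_MODE");

	/* Read back the mode bits currently in effect for this process */
	printf("FP mode: %d\n", prctl(PR_GET_FP_MODE));
	return 0;
}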
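For the classic (non-microMIPS) encodings, the prologue analysis in get_frame_info() above boils down to matching two I-format instructions: "addiu/daddiu sp,sp,-imm" for the frame size and "sw/sd ra,offset(sp)" for the return-address slot. A self-contained sketch of the frame-size half, assuming the MIPS32 I-format field layout (opcode[31:26], rs[25:21], rt[20:16], simmediate[15:0]); frame_size_from_prologue() is an illustrative helper, not part of the file, and the daddiu case used by 64-bit kernels is omitted:

#include <stdint.h>
#include <stdio.h>

#define ADDIU_OP 0x09	/* addiu rt,rs,imm */
#define REG_SP	 29

static int frame_size_from_prologue(uint32_t word)
{
	uint32_t opcode = word >> 26;
	uint32_t rs = (word >> 21) & 0x1f;
	uint32_t rt = (word >> 16) & 0x1f;
	int16_t imm = (int16_t)(word & 0xffff);

	/* addiu sp,sp,-imm: the negated immediate is the frame size */
	if (opcode == ADDIU_OP && rs == REG_SP && rt == REG_SP && imm < 0)
		return -imm;
	return 0;
}

int main(void)
{
	/* 0x27bdffe0 encodes "addiu sp,sp,-32" */
	printf("frame size: %d bytes\n", frame_size_from_prologue(0x27bdffe0));
	return 0;
}

Feeding it 0x27bdffe0 prints a frame size of 32 bytes, the value the kernel's scanner would record in info->frame_size for the same prologue.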
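Taken together, get_frame_info(), unwind_stack_by_address() and unwind_stack() give get_wchan() a frame-by-frame walk of a blocked task's kernel stack, and the same loop can print a whole backtrace. A hedged sketch, assuming CONFIG_KALLSYMS and a task that is not currently running; print_task_backtrace() is a hypothetical helper that would have to live in this file, since it relies on the file-local schedule_mfi:

static void print_task_backtrace(struct task_struct *task)
{
	/* Start from the frame schedule() saved for the blocked task,
	 * exactly as get_wchan() does above. */
	unsigned long sp = task->thread.reg29 + schedule_mfi.frame_size;
	unsigned long pc = thread_saved_pc(task);
	unsigned long ra = 0;

	/* unwind_stack() returns 0 once it can make no further progress */
	while (pc) {
		printk("  [<%08lx>] %pS\n", pc, (void *)pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	}
}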