/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

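/*
 * Release per-thread state that is not freed along with the task struct.
 * For user threads the only such state is the branch delay slot emulation
 * frame tracked by dsemul (see asm/dsemul.h); kernel threads never
 * allocate one, hence the PF_KTHREAD check below.
 */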
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

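/*
 * Description of a function prologue, as recovered by get_frame_info()
 * below and consumed by the stack unwinder:
 *
 *   func:       address of the first instruction of the function
 *   func_size:  size of the function in bytes (0 means "scan a default
 *               window of 512 bytes from func")
 *   frame_size: stack frame size in bytes, taken from the
 *               addiu/daddiu sp,sp,-imm in the prologue
 *   pc_offset:  index, in ulongs from the post-prologue stack pointer,
 *               of the slot where $ra is saved, or -1 if none was found
 */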
struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

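/*
 * Return non-zero if @ip is a jump or call instruction (j/jal/jr/jalr or
 * their microMIPS counterparts).  The prologue scanner treats a jump seen
 * after the frame setup as the end of the prologue, allowing one more
 * instruction for an $ra save sitting in the delay slot.
 */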
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

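/*
 * Analyse the prologue of @info->func and fill in @info->frame_size and
 * @info->pc_offset.
 *
 * Returns 0 for a "nested" function that allocates a frame and saves $ra,
 * 1 for what looks like a leaf function (no $ra save found), or -1 if the
 * prologue could not be analysed.
 */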
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

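/*
 * Pre-compute the frame layout of schedule()/__schedule() at boot, so that
 * thread_saved_pc() and get_wchan() know how large the scheduler's frame
 * is and where it saves $ra for a blocked task.
 */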
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, result given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* New born processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

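/*
 * The unwinder below re-derives each caller from the callee's prologue
 * rather than following frame pointers.  A caller normally loops until it
 * gets 0 back, roughly along these lines (an illustrative sketch, not the
 * exact code used by show_backtrace() or get_wchan()):
 *
 *	unsigned long sp = regs->regs[29];
 *	unsigned long ra = regs->regs[31];
 *	unsigned long pc = regs->cp0_epc;
 *
 *	do {
 *		printk(" %pS\n", (void *)pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */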
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow pointer to tasks kernel stack frame where interrupted
		 * state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * consider wrongly a nested function as a leaf
		 * one. In that cases avoid to return always the
		 * same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task_is_running(task))
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

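/*
 * Highest address usable as the base of the user stack.  Everything the
 * kernel maps above the stack at execve() time - the branch delay slot
 * emulation page, the VDSO and its data page, the optional GIC user page,
 * cache colouring slack and the VDSO randomisation window - is subtracted
 * from TASK_SIZE here so the stack cannot collide with it.
 */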
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on a 8 bytes
 * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

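/*
 * Report the task's floating point mode as a PR_FP_MODE_* bitmask:
 * PR_FP_MODE_FR when the full 64-bit FP register file is in use
 * (TIF_32BIT_FPREGS clear), PR_FP_MODE_FRE when the hybrid FR=1/FRE
 * register model is active.  This backs prctl(PR_GET_FP_MODE).
 */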
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

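/*
 * Switch every thread of @task's process to the FP mode requested by
 * @value (a PR_FP_MODE_* bitmask).  This backs prctl(PR_SET_FP_MODE);
 * userspace (typically the dynamic loader, when an o32 process pulls in
 * FP64 objects) would request the switch roughly as follows - an
 * illustrative sketch, not code from this file:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
 *		perror("PR_SET_FP_MODE");
 *
 * The switch only succeeds for o32 tasks on kernels built with
 * CONFIG_MIPS_O32_FP64_SUPPORT and on FPUs implementing the requested
 * register model; everything else fails with -EOPNOTSUPP.
 */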
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	get_online_cpus();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	put_online_cpus();

	return 0;
}

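/*
 * Fill a user-visible register dump (the ELF core dump / ptrace regset
 * layout).  $k0 and $k1 are reported as zero: they are clobbered by the
 * kernel's exception handlers, so their values carry no meaning for
 * userspace.
 */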
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */