// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
#include <timetravel.h>

void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
	[SIGTRAP] = relay_signal,
	[SIGFPE] = relay_signal,
	[SIGILL] = relay_signal,
	[SIGWINCH] = winch,
	[SIGBUS] = bus_handler,
	[SIGSEGV] = segv_handler,
	[SIGIO] = sigio_handler,
};

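/*
 * Common dispatcher for signals taken on the IRQ stack: build a
 * struct uml_pt_regs (including fault information for SIGSEGV),
 * re-enable signal delivery for everything except the IRQ signals,
 * call the handler registered in sig_info[], and restore errno on
 * the way out.
 */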
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH))
		unblock_signals_trace();

	(*sig_info[sig])(sig, si, &r);

	errno = save_errno;
}

/*
 * These are the asynchronous signals.  SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections.  If
 * profiling is not thread-safe, then that is not my problem.  We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)

int signals_enabled;
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
static int signals_blocked, signals_blocked_pending;
#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;

static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int enabled = signals_enabled;

#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
	if ((signals_blocked ||
	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
	    (sig == SIGIO)) {
		/* increment so unblock will do another round */
		__atomic_add_fetch(&signals_blocked_pending, 1,
				   __ATOMIC_SEQ_CST);
		return;
	}
#endif

	if (!enabled && (sig == SIGIO)) {
		/*
		 * In TT_MODE_EXTERNAL, need to still call time-travel
		 * handlers. This will mark signals_pending by itself
		 * (only if necessary.)
		 * Note we won't get here if signals are hard-blocked
		 * (which is handled above), in that case the hard-
		 * unblock will handle things.
		 */
		if (time_travel_mode == TT_MODE_EXTERNAL)
			sigio_run_timetravel_handlers();
		else
			signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals_trace();

	sig_handler_common(sig, si, mc);

	um_set_signals_trace(enabled);
}

static void timer_real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	else
		memset(&regs, 0, sizeof(regs));
	timer_handler(SIGALRM, NULL, &regs);
}

static void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGALRM_MASK;
		return;
	}

	block_signals_trace();

	signals_active |= SIGALRM_MASK;

	timer_real_alarm_handler(mc);

	signals_active &= ~SIGALRM_MASK;

	um_set_signals_trace(enabled);
}

void deliver_alarm(void) {
	timer_alarm_handler(SIGALRM, NULL, NULL);
}

void timer_set_signal_handler(void)
{
	set_handler(SIGALRM);
}

void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = {
		.ss_flags = 0,
		.ss_sp = sig_stack,
		.ss_size = size
	};

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}

static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	uml_pm_wake();
}

void register_pm_wake_signal(void)
{
	set_handler(SIGUSR1);
}

static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	[SIGALRM] = timer_alarm_handler,

	[SIGUSR1] = sigusr1_handler,
};

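/*
 * Low-level handler installed by set_handler() for every signal in
 * handlers[].  It runs on the alternate signal stack, switches to the
 * IRQ stack via to_irq_stack(), and dispatches each signal recorded
 * as pending to its handlers[] entry.
 */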
static void hard_handler(int sig, siginfo_t *si, void *p)
{
	ucontext_t *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;
	unsigned long pending = 1UL << sig;

	do {
		int nested, bail;

		/*
		 * pending comes back with one bit set for each
		 * interrupt that arrived while setting up the stack,
		 * plus a bit for this interrupt, plus the zero bit is
		 * set if this is a nested interrupt.
		 * If bail is true, then we interrupted another
		 * handler setting up the stack.  In this case, we
		 * have to return, and the upper handler will deal
		 * with this interrupt.
		 */
		bail = to_irq_stack(&pending);
		if (bail)
			return;

		nested = pending & 1;
		pending &= ~1;

		while ((sig = ffs(pending)) != 0){
			sig--;
			pending &= ~(1 << sig);
			(*handlers[sig])(sig, (struct siginfo *)si, mc);
		}

		/*
		 * Again, pending comes back with a mask of signals
		 * that arrived while tearing down the stack.  If this
		 * is non-zero, we just go back, set up the stack
		 * again, and handle the new interrupts.
		 */
		if (!nested)
			pending = from_irq_stack(nested);
	} while (pending);
}

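/*
 * Install hard_handler() as the host sigaction() handler for 'sig'.
 * The handler runs on the alternate signal stack (SA_ONSTACK) with the
 * IRQ signals (SIGIO, SIGWINCH, SIGALRM) blocked while it runs; SIGSEGV
 * additionally gets SA_NODEFER and the IRQ signals get SA_RESTART.
 * Finally, 'sig' is unblocked in the process signal mask.
 */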
void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block irq ones */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);
	sigaddset(&action.sa_mask, SIGALRM);

	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}

void send_sigio_to_self(void)
{
	kill(os_getpid(), SIGIO);
}

int change_sig(int signal, int on)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
		return -errno;

	return 0;
}

void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}

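/*
 * Re-enable signal delivery and replay any interrupts that were
 * recorded in signals_pending while signals were soft-blocked.  SIGIO
 * is replayed before SIGALRM because the alarm handler may schedule;
 * a pending alarm is skipped if the timer handler is already active
 * (SIGALRM_MASK set in signals_active).
 */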
void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	signals_enabled = 1;
#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
	deliver_time_travel_irqs();
#endif

	/*
	 * We loop because the IRQ handler returns with interrupts off.  So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals.  This
		 * way, signals_pending won't be changed while we're reading it.
		 *
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order, so have the barrier here.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called.  They will
		 * be enabled again above. We need to trace this, as we're
		 * expected to be enabling interrupts already, but any more
		 * tracing that happens inside the handlers we call for the
		 * pending signals will mess up the tracing state.
		 */
		signals_enabled = 0;
		um_trace_signals_off();

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		/* Do not reenter the handler */

		if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
			timer_real_alarm_handler(NULL);

		/* Rerun the loop only if there is still pending SIGIO and not in TIMER handler */

		if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
			return;

		/* Re-enable signals and trace that we're doing so. */
		um_trace_signals_on();
		signals_enabled = 1;
	}
}

int um_set_signals(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals();
	else block_signals();

	return ret;
}

int um_set_signals_trace(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals_trace();
	else
		block_signals_trace();

	return ret;
}

#ifdef UML_CONFIG_UML_TIME_TRAVEL_SUPPORT
void mark_sigio_pending(void)
{
	/*
	 * It would seem that this should be atomic so
	 * it isn't a read-modify-write with a signal
	 * that could happen in the middle, losing the
	 * value set by the signal.
	 *
	 * However, this function is only called when in
	 * time-travel=ext simulation mode, in which case
	 * the only signal ever pending is SIGIO, which
	 * is blocked while this can be called, and the
	 * timer signal (SIGALRM) cannot happen.
	 */
	signals_pending |= SIGIO_MASK;
}

void block_signals_hard(void)
{
	signals_blocked++;
	barrier();
}

void unblock_signals_hard(void)
{
	static bool unblocking;

	if (!signals_blocked)
		panic("unblocking signals while not blocked");

	if (--signals_blocked)
		return;
	/*
	 * Must be set to 0 before we check pending so the
	 * SIGIO handler will run as normal unless we're still
	 * going to process signals_blocked_pending.
	 */
	barrier();

	/*
	 * Note that block_signals_hard()/unblock_signals_hard() can be called
	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
	 * This would still be prone to race conditions since it's actually a
	 * call _within_ e.g. vu_req_read_message(), where we observed this
	 * issue, which loops. Thus, if the inner call handles the recorded
	 * pending signals, we can get out of the inner call with the real
	 * signal handler no longer blocked, and still have a race. Thus don't
	 * handle unblocking in the inner call, if it happens, but only in
	 * the outermost call - 'unblocking' serves as an ownership for the
	 * signals_blocked_pending decrement.
	 */
	if (unblocking)
		return;
	unblocking = true;

	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
		if (signals_enabled) {
			/* signals are enabled so we can touch this */
			signals_pending |= SIGIO_MASK;
			/*
			 * this is a bit inefficient, but that's
			 * not really important
			 */
			block_signals();
			unblock_signals();
		} else {
			/*
			 * we need to run time-travel handlers even
			 * if not enabled
			 */
			sigio_run_timetravel_handlers();
		}

		/*
		 * The decrement of signals_blocked_pending must be atomic so
		 * that the signal handler will either happen before or after
		 * the decrement, not during a read-modify-write:
		 * - If it happens before, it can increment it and we'll
		 *   decrement it and do another round in the loop.
		 * - If it happens after it'll see 0 for both signals_blocked
		 *   and signals_blocked_pending and thus run the handler as
		 *   usual (subject to signals_enabled, but that's unrelated.)
		 *
		 * Note that a call to unblock_signals_hard() within the calls
		 * to unblock_signals() or sigio_run_timetravel_handlers() above
		 * will do nothing due to the 'unblocking' state, so this cannot
		 * underflow as the only one decrementing will be the outermost
		 * one.
		 */
		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
				       __ATOMIC_SEQ_CST) < 0)
			panic("signals_blocked_pending underflow");
	}

	unblocking = false;
}
#endif

int os_is_signal_stack(void)
{
	stack_t ss;
	sigaltstack(NULL, &ss);

	return ss.ss_flags & SS_ONSTACK;
}