// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>	/* for audit_free() */
#include <linux/resource.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sysfs.h>
#include <linux/user_events.h>
#include <linux/uaccess.h>

#include <uapi/linux/wait.h>

#include <asm/unistd.h>
#include <asm/mmu_context.h>

#include "exit.h"

/*
 * The default value should be high enough to not crash a system that randomly
 * crashes its kernel from time to time, but low enough to at least not permit
 * overflowing 32-bit refcounts or the ldsem writer count.
 */
static unsigned int oops_limit = 10000;

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_exit_table[] = {
	{
		.procname	= "oops_limit",
		.data		= &oops_limit,
		.maxlen		= sizeof(oops_limit),
		.mode		= 0644,
		.proc_handler	= proc_douintvec,
	},
};

static __init int kernel_exit_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_exit_table);
	return 0;
}
late_initcall(kernel_exit_sysctls_init);
#endif

static atomic_t oops_count = ATOMIC_INIT(0);

#ifdef CONFIG_SYSFS
static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *page)
{
	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
}

static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);

static __init int kernel_exit_sysfs_init(void)
{
	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
	return 0;
}
late_initcall(kernel_exit_sysfs_init);
#endif

static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exec_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));
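	/* The exact value of sum_exec_runtime at exit is unpredictable. */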

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	kprobe_flush_task(tsk);
	rethook_flush_task(tsk);
	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void __weak release_thread(struct task_struct *dead_task)
{
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
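	/* The exiting task's slot in its user's RLIMIT_NPROC count is gone. */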
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
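
/*
 * Wake the task sleeping in rcuwait_wait_event(), if any. Returns the
 * result of wake_up_process(), i.e. nonzero if a sleeper was woken.
 */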
int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current	  [S] cond = true
	 *        MB (A)	      MB (B)
	 *    [L] cond		  [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

static void coredump_task_exit(struct task_struct *tsk)
{
	struct core_state *core_state;

	/*
	 * Serialize with any possible pending coredump.
	 * We must hold siglock around checking core_state
	 * and setting PF_POSTCOREDUMP.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group without PF_POSTCOREDUMP set.
	 */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);
	if (core_state) {
		struct core_thread self;

		self.task = current;
		if (self.task->flags & PF_SIGNALED)
			self.next = xchg(&core_state->dumper.next, &self);
		else
			self.task = NULL;
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
}

#ifdef CONFIG_MEMCG
/* drops tasklist_lock if succeeds */
static bool __try_to_set_owner(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = false;

	task_lock(tsk);
	if (likely(tsk->mm == mm)) {
		/* tsk can't pass exit_mm/exec_mmap and exit */
		read_unlock(&tasklist_lock);
		WRITE_ONCE(mm->owner, tsk);
		lru_gen_migrate_mm(mm);
		ret = true;
	}
	task_unlock(tsk);
	return ret;
}

static bool try_to_set_owner(struct task_struct *g, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(g, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm == mm) {
			if (__try_to_set_owner(t, mm))
				return true;
		} else if (t_mm)
			break;
	}

	return false;
}

/*
 * A task is exiting.   If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *g, *p = current;

	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(g, &p->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search in the siblings
	 */
	list_for_each_entry(g, &p->real_parent->children, sibling) {
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (atomic_read(&mm->mm_users) <= 1)
			break;
		if (g->flags & PF_KTHREAD)
			continue;
		if (try_to_set_owner(g, mm))
			goto ret;
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
ret:
	return;

}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	mmap_read_lock(mm);
	mmgrab_lazy_tlb(mm);
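	/*
	 * The lazy-TLB grab above keeps the mm alive: it remains our
	 * active_mm after current->mm is cleared below, until the final
	 * context switch away from it.
	 */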
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	/*
	 * When a thread stops operating on an address space, the loop
	 * in membarrier_private_expedited() may not observe that
	 * tsk->mm, and the loop in membarrier_global_expedited() may
	 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
	 * rq->membarrier_state, so those would not issue an IPI.
	 * Membarrier requires a memory barrier after accessing
	 * user-space memory, before clearing tsk->mm or the
	 * rq->membarrier_state.
	 */
	smp_mb__after_spinlock();
	local_irq_disable();
	current->mm = NULL;
	membarrier_update_current_mm(NULL);
	enter_lazy_tlb(mm, current);
	local_irq_enable();
	task_unlock(current);
	mmap_read_unlock(mm);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);
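	/*
	 * release_task() retakes tasklist_lock and zap_pid_ns_processes()
	 * sleeps, so both must run with the lock dropped.
	 */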

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;
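	/* An EXIT_DEAD child is already being released; leave it alone. */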

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
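/*
 * On return tsk is either EXIT_ZOMBIE, waiting to be reaped by wait(),
 * or already EXIT_DEAD and queued for release when nobody needs to be
 * notified (autoreap).
 */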
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	/*
	 * sub-thread or delay_group_leader(), wake up the
	 * PIDFD_THREAD waiters.
	 */
	if (!thread_group_empty(tsk))
		do_notify_pidfd(tsk);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exec_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}

/* Count the maximum pages reached in kernel stacks */
static inline void kstack_histogram(unsigned long used_stack)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
	if (used_stack <= 1024)
		count_vm_event(KSTACK_1K);
#if THREAD_SIZE > 1024
	else if (used_stack <= 2048)
		count_vm_event(KSTACK_2K);
#endif
#if THREAD_SIZE > 2048
	else if (used_stack <= 4096)
		count_vm_event(KSTACK_4K);
#endif
#if THREAD_SIZE > 4096
	else if (used_stack <= 8192)
		count_vm_event(KSTACK_8K);
#endif
#if THREAD_SIZE > 8192
	else if (used_stack <= 16384)
		count_vm_event(KSTACK_16K);
#endif
#if THREAD_SIZE > 16384
	else if (used_stack <= 32768)
		count_vm_event(KSTACK_32K);
#endif
#if THREAD_SIZE > 32768
	else if (used_stack <= 65536)
		count_vm_event(KSTACK_64K);
#endif
#if THREAD_SIZE > 65536
	else
		count_vm_event(KSTACK_REST);
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
}

static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);
	kstack_histogram(THREAD_SIZE - free);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

static void synchronize_group_exit(struct task_struct *tsk, long code)
{
	struct sighand_struct *sighand = tsk->sighand;
	struct signal_struct *signal = tsk->signal;

	spin_lock_irq(&sighand->siglock);
	signal->quick_threads--;
	if ((signal->quick_threads == 0) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT)) {
		signal->flags = SIGNAL_GROUP_EXIT;
		signal->group_exit_code = code;
		signal->group_stop_count = 0;
	}
	spin_unlock_irq(&sighand->siglock);
}

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	WARN_ON(irqs_disabled());

	synchronize_group_exit(tsk, code);

	WARN_ON(tsk->plug);

	kcov_task_exit(tsk);
	kmsan_task_exit(tsk);

	coredump_task_exit(tsk);
	ptrace_event(PTRACE_EVENT_EXIT, code);
	user_events_exit(tsk);

	io_uring_files_cancel();
	exit_signals(tsk);	/* sets PF_EXITING */

	seccomp_filter_release(tsk);

	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a useable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	exit_task_stack_account(tsk);
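	/* The kernel-stack accounting charged at fork time is released above. */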

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}

void __noreturn make_task_dead(int signr)
{
	/*
	 * Take the task off the cpu after something catastrophic has
	 * happened.
	 *
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */
	struct task_struct *tsk = current;
	unsigned int limit;

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	if (unlikely(irqs_disabled())) {
		pr_info("note: %s[%d] exited with irqs disabled\n",
			current->comm, task_pid_nr(current));
		local_irq_enable();
	}
	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	/*
	 * Every time the system oopses, if the oopsing task held a lock or
	 * a reference to an object, that lock or reference is leaked.
	 * If the oops doesn't also leak memory, repeated oopsing can cause
	 * reference counters to wrap around (if they are not otherwise capped).
	 * This means that repeated oopsing can make unexploitable-looking bugs
	 * exploitable through repeated oopsing.
	 * To make sure this can't happen, place an upper bound on how often the
	 * kernel may oops without panic().
	 */
	limit = READ_ONCE(oops_limit);
	if (atomic_inc_return(&oops_count) >= limit && limit)
		panic("Oopsed too often (kernel.oops_limit is %d)", limit);

	/*
	 * We're taking recursive faults here in make_task_dead. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		tsk->exit_state = EXIT_DEAD;
		refcount_inc(&tsk->rcu_users);
		do_task_dead();
	}

	do_exit(signr);
}

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void __noreturn
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		exit_code = sig->group_exit_code;
	else if (sig->group_exec_task)
		exit_code = 0;
	else {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else if (sig->group_exec_task)
			exit_code = 0;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
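	/*
	 * E.g. a child started via clone() with an exit signal other than
	 * SIGCHLD is a "clone" child here: plain wait4(..., 0, ...) skips
	 * it, while wait4(..., __WCLONE, ...) reaps it.
	 */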
1130 */ 1045 */ 1131 if ((p->exit_signal != SIGCHLD) ^ !!( 1046 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) 1132 return 0; 1047 return 0; 1133 1048 1134 return 1; 1049 return 1; 1135 } 1050 } 1136 1051 1137 /* 1052 /* 1138 * Handle sys_wait4 work for one task in stat 1053 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold 1139 * read_lock(&tasklist_lock) on entry. If we 1054 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1140 * the lock and this task is uninteresting. 1055 * the lock and this task is uninteresting. If we return nonzero, we have 1141 * released the lock and the system call shou 1056 * released the lock and the system call should return. 1142 */ 1057 */ 1143 static int wait_task_zombie(struct wait_opts 1058 static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p) 1144 { 1059 { 1145 int state, status; 1060 int state, status; 1146 pid_t pid = task_pid_vnr(p); 1061 pid_t pid = task_pid_vnr(p); 1147 uid_t uid = from_kuid_munged(current_ 1062 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1148 struct waitid_info *infop; 1063 struct waitid_info *infop; 1149 1064 1150 if (!likely(wo->wo_flags & WEXITED)) 1065 if (!likely(wo->wo_flags & WEXITED)) 1151 return 0; 1066 return 0; 1152 1067 1153 if (unlikely(wo->wo_flags & WNOWAIT)) 1068 if (unlikely(wo->wo_flags & WNOWAIT)) { 1154 status = (p->signal->flags & !! 1069 status = p->exit_code; 1155 ? p->signal->group_ex << 1156 get_task_struct(p); 1070 get_task_struct(p); 1157 read_unlock(&tasklist_lock); 1071 read_unlock(&tasklist_lock); 1158 sched_annotate_sleep(); 1072 sched_annotate_sleep(); 1159 if (wo->wo_rusage) 1073 if (wo->wo_rusage) 1160 getrusage(p, RUSAGE_B 1074 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1161 put_task_struct(p); 1075 put_task_struct(p); 1162 goto out_info; 1076 goto out_info; 1163 } 1077 } 1164 /* 1078 /* 1165 * Move the task's state to DEAD/TRAC 1079 * Move the task's state to DEAD/TRACE, only one thread can do this. 1166 */ 1080 */ 1167 state = (ptrace_reparented(p) && thre 1081 state = (ptrace_reparented(p) && thread_group_leader(p)) ? 1168 EXIT_TRACE : EXIT_DEAD; 1082 EXIT_TRACE : EXIT_DEAD; 1169 if (cmpxchg(&p->exit_state, EXIT_ZOMB 1083 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) 1170 return 0; 1084 return 0; 1171 /* 1085 /* 1172 * We own this thread, nobody else ca 1086 * We own this thread, nobody else can reap it. 1173 */ 1087 */ 1174 read_unlock(&tasklist_lock); 1088 read_unlock(&tasklist_lock); 1175 sched_annotate_sleep(); 1089 sched_annotate_sleep(); 1176 1090 1177 /* 1091 /* 1178 * Check thread_group_leader() to exc 1092 * Check thread_group_leader() to exclude the traced sub-threads. 1179 */ 1093 */ 1180 if (state == EXIT_DEAD && thread_grou 1094 if (state == EXIT_DEAD && thread_group_leader(p)) { 1181 struct signal_struct *sig = p 1095 struct signal_struct *sig = p->signal; 1182 struct signal_struct *psig = 1096 struct signal_struct *psig = current->signal; 1183 unsigned long maxrss; 1097 unsigned long maxrss; 1184 u64 tgutime, tgstime; 1098 u64 tgutime, tgstime; 1185 1099 1186 /* 1100 /* 1187 * The resource counters for 1101 * The resource counters for the group leader are in its 1188 * own task_struct. Those fo 1102 * own task_struct. Those for dead threads in the group 1189 * are in its signal_struct, 1103 * are in its signal_struct, as are those for the child 1190 * processes it has previousl 1104 * processes it has previously reaped. 
All these 1191 * accumulate in the parent's 1105 * accumulate in the parent's signal_struct c* fields. 1192 * 1106 * 1193 * We don't bother to take a 1107 * We don't bother to take a lock here to protect these 1194 * p->signal fields because t 1108 * p->signal fields because the whole thread group is dead 1195 * and nobody can change them 1109 * and nobody can change them. 1196 * 1110 * 1197 * psig->stats_lock also prot !! 1111 * psig->stats_lock also protects us from our sub-theads 1198 * which can reap other child !! 1112 * which can reap other children at the same time. Until >> 1113 * we change k_getrusage()-like users to rely on this lock >> 1114 * we have to take ->siglock as well. 1199 * 1115 * 1200 * We use thread_group_cputim 1116 * We use thread_group_cputime_adjusted() to get times for 1201 * the thread group, which co 1117 * the thread group, which consolidates times for all threads 1202 * in the group including the 1118 * in the group including the group leader. 1203 */ 1119 */ 1204 thread_group_cputime_adjusted 1120 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1205 write_seqlock_irq(&psig->stat !! 1121 spin_lock_irq(¤t->sighand->siglock); >> 1122 write_seqlock(&psig->stats_lock); 1206 psig->cutime += tgutime + sig 1123 psig->cutime += tgutime + sig->cutime; 1207 psig->cstime += tgstime + sig 1124 psig->cstime += tgstime + sig->cstime; 1208 psig->cgtime += task_gtime(p) 1125 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; 1209 psig->cmin_flt += 1126 psig->cmin_flt += 1210 p->min_flt + sig->min 1127 p->min_flt + sig->min_flt + sig->cmin_flt; 1211 psig->cmaj_flt += 1128 psig->cmaj_flt += 1212 p->maj_flt + sig->maj 1129 p->maj_flt + sig->maj_flt + sig->cmaj_flt; 1213 psig->cnvcsw += 1130 psig->cnvcsw += 1214 p->nvcsw + sig->nvcsw 1131 p->nvcsw + sig->nvcsw + sig->cnvcsw; 1215 psig->cnivcsw += 1132 psig->cnivcsw += 1216 p->nivcsw + sig->nivc 1133 p->nivcsw + sig->nivcsw + sig->cnivcsw; 1217 psig->cinblock += 1134 psig->cinblock += 1218 task_io_get_inblock(p 1135 task_io_get_inblock(p) + 1219 sig->inblock + sig->c 1136 sig->inblock + sig->cinblock; 1220 psig->coublock += 1137 psig->coublock += 1221 task_io_get_oublock(p 1138 task_io_get_oublock(p) + 1222 sig->oublock + sig->c 1139 sig->oublock + sig->coublock; 1223 maxrss = max(sig->maxrss, sig 1140 maxrss = max(sig->maxrss, sig->cmaxrss); 1224 if (psig->cmaxrss < maxrss) 1141 if (psig->cmaxrss < maxrss) 1225 psig->cmaxrss = maxrs 1142 psig->cmaxrss = maxrss; 1226 task_io_accounting_add(&psig- 1143 task_io_accounting_add(&psig->ioac, &p->ioac); 1227 task_io_accounting_add(&psig- 1144 task_io_accounting_add(&psig->ioac, &sig->ioac); 1228 write_sequnlock_irq(&psig->st !! 1145 write_sequnlock(&psig->stats_lock); >> 1146 spin_unlock_irq(¤t->sighand->siglock); 1229 } 1147 } 1230 1148 1231 if (wo->wo_rusage) 1149 if (wo->wo_rusage) 1232 getrusage(p, RUSAGE_BOTH, wo- 1150 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1233 status = (p->signal->flags & SIGNAL_G 1151 status = (p->signal->flags & SIGNAL_GROUP_EXIT) 1234 ? p->signal->group_exit_code 1152 ? 
p->signal->group_exit_code : p->exit_code; 1235 wo->wo_stat = status; 1153 wo->wo_stat = status; 1236 1154 1237 if (state == EXIT_TRACE) { 1155 if (state == EXIT_TRACE) { 1238 write_lock_irq(&tasklist_lock 1156 write_lock_irq(&tasklist_lock); 1239 /* We dropped tasklist, ptrac 1157 /* We dropped tasklist, ptracer could die and untrace */ 1240 ptrace_unlink(p); 1158 ptrace_unlink(p); 1241 1159 1242 /* If parent wants a zombie, 1160 /* If parent wants a zombie, don't release it now */ 1243 state = EXIT_ZOMBIE; 1161 state = EXIT_ZOMBIE; 1244 if (do_notify_parent(p, p->ex 1162 if (do_notify_parent(p, p->exit_signal)) 1245 state = EXIT_DEAD; 1163 state = EXIT_DEAD; 1246 p->exit_state = state; 1164 p->exit_state = state; 1247 write_unlock_irq(&tasklist_lo 1165 write_unlock_irq(&tasklist_lock); 1248 } 1166 } 1249 if (state == EXIT_DEAD) 1167 if (state == EXIT_DEAD) 1250 release_task(p); 1168 release_task(p); 1251 1169 1252 out_info: 1170 out_info: 1253 infop = wo->wo_info; 1171 infop = wo->wo_info; 1254 if (infop) { 1172 if (infop) { 1255 if ((status & 0x7f) == 0) { 1173 if ((status & 0x7f) == 0) { 1256 infop->cause = CLD_EX 1174 infop->cause = CLD_EXITED; 1257 infop->status = statu 1175 infop->status = status >> 8; 1258 } else { 1176 } else { 1259 infop->cause = (statu 1177 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; 1260 infop->status = statu 1178 infop->status = status & 0x7f; 1261 } 1179 } 1262 infop->pid = pid; 1180 infop->pid = pid; 1263 infop->uid = uid; 1181 infop->uid = uid; 1264 } 1182 } 1265 1183 1266 return pid; 1184 return pid; 1267 } 1185 } 1268 1186 1269 static int *task_stopped_code(struct task_str 1187 static int *task_stopped_code(struct task_struct *p, bool ptrace) 1270 { 1188 { 1271 if (ptrace) { 1189 if (ptrace) { 1272 if (task_is_traced(p) && !(p- 1190 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) 1273 return &p->exit_code; 1191 return &p->exit_code; 1274 } else { 1192 } else { 1275 if (p->signal->flags & SIGNAL 1193 if (p->signal->flags & SIGNAL_STOP_STOPPED) 1276 return &p->signal->gr 1194 return &p->signal->group_exit_code; 1277 } 1195 } 1278 return NULL; 1196 return NULL; 1279 } 1197 } 1280 1198 1281 /** 1199 /** 1282 * wait_task_stopped - Wait for %TASK_STOPPED 1200 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED 1283 * @wo: wait options 1201 * @wo: wait options 1284 * @ptrace: is the wait for ptrace 1202 * @ptrace: is the wait for ptrace 1285 * @p: task to wait for 1203 * @p: task to wait for 1286 * 1204 * 1287 * Handle sys_wait4() work for %p in state %T 1205 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED. 1288 * 1206 * 1289 * CONTEXT: 1207 * CONTEXT: 1290 * read_lock(&tasklist_lock), which is releas 1208 * read_lock(&tasklist_lock), which is released if return value is 1291 * non-zero. Also, grabs and releases @p->si 1209 * non-zero. Also, grabs and releases @p->sighand->siglock. 1292 * 1210 * 1293 * RETURNS: 1211 * RETURNS: 1294 * 0 if wait condition didn't exist and searc 1212 * 0 if wait condition didn't exist and search for other wait conditions 1295 * should continue. Non-zero return, -errno 1213 * should continue. Non-zero return, -errno on failure and @p's pid on 1296 * success, implies that tasklist_lock is rel 1214 * success, implies that tasklist_lock is released and wait condition 1297 * search should terminate. 1215 * search should terminate. 
1298 */ 1216 */ 1299 static int wait_task_stopped(struct wait_opts 1217 static int wait_task_stopped(struct wait_opts *wo, 1300 int ptrace, s 1218 int ptrace, struct task_struct *p) 1301 { 1219 { 1302 struct waitid_info *infop; 1220 struct waitid_info *infop; 1303 int exit_code, *p_code, why; 1221 int exit_code, *p_code, why; 1304 uid_t uid = 0; /* unneeded, required 1222 uid_t uid = 0; /* unneeded, required by compiler */ 1305 pid_t pid; 1223 pid_t pid; 1306 1224 1307 /* 1225 /* 1308 * Traditionally we see ptrace'd stop 1226 * Traditionally we see ptrace'd stopped tasks regardless of options. 1309 */ 1227 */ 1310 if (!ptrace && !(wo->wo_flags & WUNTR 1228 if (!ptrace && !(wo->wo_flags & WUNTRACED)) 1311 return 0; 1229 return 0; 1312 1230 1313 if (!task_stopped_code(p, ptrace)) 1231 if (!task_stopped_code(p, ptrace)) 1314 return 0; 1232 return 0; 1315 1233 1316 exit_code = 0; 1234 exit_code = 0; 1317 spin_lock_irq(&p->sighand->siglock); 1235 spin_lock_irq(&p->sighand->siglock); 1318 1236 1319 p_code = task_stopped_code(p, ptrace) 1237 p_code = task_stopped_code(p, ptrace); 1320 if (unlikely(!p_code)) 1238 if (unlikely(!p_code)) 1321 goto unlock_sig; 1239 goto unlock_sig; 1322 1240 1323 exit_code = *p_code; 1241 exit_code = *p_code; 1324 if (!exit_code) 1242 if (!exit_code) 1325 goto unlock_sig; 1243 goto unlock_sig; 1326 1244 1327 if (!unlikely(wo->wo_flags & WNOWAIT) 1245 if (!unlikely(wo->wo_flags & WNOWAIT)) 1328 *p_code = 0; 1246 *p_code = 0; 1329 1247 1330 uid = from_kuid_munged(current_user_n 1248 uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1331 unlock_sig: 1249 unlock_sig: 1332 spin_unlock_irq(&p->sighand->siglock) 1250 spin_unlock_irq(&p->sighand->siglock); 1333 if (!exit_code) 1251 if (!exit_code) 1334 return 0; 1252 return 0; 1335 1253 1336 /* 1254 /* 1337 * Now we are pretty sure this task i 1255 * Now we are pretty sure this task is interesting. 1338 * Make sure it doesn't get reaped ou 1256 * Make sure it doesn't get reaped out from under us while we 1339 * give up the lock and then examine 1257 * give up the lock and then examine it below. We don't want to 1340 * keep holding onto the tasklist_loc 1258 * keep holding onto the tasklist_lock while we call getrusage and 1341 * possibly take page faults for user 1259 * possibly take page faults for user memory. 1342 */ 1260 */ 1343 get_task_struct(p); 1261 get_task_struct(p); 1344 pid = task_pid_vnr(p); 1262 pid = task_pid_vnr(p); 1345 why = ptrace ? CLD_TRAPPED : CLD_STOP 1263 why = ptrace ? CLD_TRAPPED : CLD_STOPPED; 1346 read_unlock(&tasklist_lock); 1264 read_unlock(&tasklist_lock); 1347 sched_annotate_sleep(); 1265 sched_annotate_sleep(); 1348 if (wo->wo_rusage) 1266 if (wo->wo_rusage) 1349 getrusage(p, RUSAGE_BOTH, wo- 1267 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1350 put_task_struct(p); 1268 put_task_struct(p); 1351 1269 1352 if (likely(!(wo->wo_flags & WNOWAIT)) 1270 if (likely(!(wo->wo_flags & WNOWAIT))) 1353 wo->wo_stat = (exit_code << 8 1271 wo->wo_stat = (exit_code << 8) | 0x7f; 1354 1272 1355 infop = wo->wo_info; 1273 infop = wo->wo_info; 1356 if (infop) { 1274 if (infop) { 1357 infop->cause = why; 1275 infop->cause = why; 1358 infop->status = exit_code; 1276 infop->status = exit_code; 1359 infop->pid = pid; 1277 infop->pid = pid; 1360 infop->uid = uid; 1278 infop->uid = uid; 1361 } 1279 } 1362 return pid; 1280 return pid; 1363 } 1281 } 1364 1282 1365 /* 1283 /* 1366 * Handle do_wait work for one task in a live 1284 * Handle do_wait work for one task in a live, non-stopped state. 
1367 * read_lock(&tasklist_lock) on entry. If we 1285 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1368 * the lock and this task is uninteresting. 1286 * the lock and this task is uninteresting. If we return nonzero, we have 1369 * released the lock and the system call shou 1287 * released the lock and the system call should return. 1370 */ 1288 */ 1371 static int wait_task_continued(struct wait_op 1289 static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) 1372 { 1290 { 1373 struct waitid_info *infop; 1291 struct waitid_info *infop; 1374 pid_t pid; 1292 pid_t pid; 1375 uid_t uid; 1293 uid_t uid; 1376 1294 1377 if (!unlikely(wo->wo_flags & WCONTINU 1295 if (!unlikely(wo->wo_flags & WCONTINUED)) 1378 return 0; 1296 return 0; 1379 1297 1380 if (!(p->signal->flags & SIGNAL_STOP_ 1298 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1381 return 0; 1299 return 0; 1382 1300 1383 spin_lock_irq(&p->sighand->siglock); 1301 spin_lock_irq(&p->sighand->siglock); 1384 /* Re-check with the lock held. */ 1302 /* Re-check with the lock held. */ 1385 if (!(p->signal->flags & SIGNAL_STOP_ 1303 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { 1386 spin_unlock_irq(&p->sighand-> 1304 spin_unlock_irq(&p->sighand->siglock); 1387 return 0; 1305 return 0; 1388 } 1306 } 1389 if (!unlikely(wo->wo_flags & WNOWAIT) 1307 if (!unlikely(wo->wo_flags & WNOWAIT)) 1390 p->signal->flags &= ~SIGNAL_S 1308 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1391 uid = from_kuid_munged(current_user_n 1309 uid = from_kuid_munged(current_user_ns(), task_uid(p)); 1392 spin_unlock_irq(&p->sighand->siglock) 1310 spin_unlock_irq(&p->sighand->siglock); 1393 1311 1394 pid = task_pid_vnr(p); 1312 pid = task_pid_vnr(p); 1395 get_task_struct(p); 1313 get_task_struct(p); 1396 read_unlock(&tasklist_lock); 1314 read_unlock(&tasklist_lock); 1397 sched_annotate_sleep(); 1315 sched_annotate_sleep(); 1398 if (wo->wo_rusage) 1316 if (wo->wo_rusage) 1399 getrusage(p, RUSAGE_BOTH, wo- 1317 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); 1400 put_task_struct(p); 1318 put_task_struct(p); 1401 1319 1402 infop = wo->wo_info; 1320 infop = wo->wo_info; 1403 if (!infop) { 1321 if (!infop) { 1404 wo->wo_stat = 0xffff; 1322 wo->wo_stat = 0xffff; 1405 } else { 1323 } else { 1406 infop->cause = CLD_CONTINUED; 1324 infop->cause = CLD_CONTINUED; 1407 infop->pid = pid; 1325 infop->pid = pid; 1408 infop->uid = uid; 1326 infop->uid = uid; 1409 infop->status = SIGCONT; 1327 infop->status = SIGCONT; 1410 } 1328 } 1411 return pid; 1329 return pid; 1412 } 1330 } 1413 1331 1414 /* 1332 /* 1415 * Consider @p for a wait by @parent. 1333 * Consider @p for a wait by @parent. 1416 * 1334 * 1417 * -ECHILD should be in ->notask_error before 1335 * -ECHILD should be in ->notask_error before the first call. 1418 * Returns nonzero for a final return, when w 1336 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1419 * Returns zero if the search for a child sho 1337 * Returns zero if the search for a child should continue; 1420 * then ->notask_error is 0 if @p is an eligi 1338 * then ->notask_error is 0 if @p is an eligible child, 1421 * or still -ECHILD. 1339 * or still -ECHILD. 1422 */ 1340 */ 1423 static int wait_consider_task(struct wait_opt 1341 static int wait_consider_task(struct wait_opts *wo, int ptrace, 1424 struct task_s 1342 struct task_struct *p) 1425 { 1343 { 1426 /* 1344 /* 1427 * We can race with wait_task_zombie( 1345 * We can race with wait_task_zombie() from another thread. 
1428 * Ensure that EXIT_ZOMBIE -> EXIT_DE 1346 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition 1429 * can't confuse the checks below. 1347 * can't confuse the checks below. 1430 */ 1348 */ 1431 int exit_state = READ_ONCE(p->exit_st 1349 int exit_state = READ_ONCE(p->exit_state); 1432 int ret; 1350 int ret; 1433 1351 1434 if (unlikely(exit_state == EXIT_DEAD) 1352 if (unlikely(exit_state == EXIT_DEAD)) 1435 return 0; 1353 return 0; 1436 1354 1437 ret = eligible_child(wo, ptrace, p); 1355 ret = eligible_child(wo, ptrace, p); 1438 if (!ret) 1356 if (!ret) 1439 return ret; 1357 return ret; 1440 1358 1441 if (unlikely(exit_state == EXIT_TRACE 1359 if (unlikely(exit_state == EXIT_TRACE)) { 1442 /* 1360 /* 1443 * ptrace == 0 means we are t 1361 * ptrace == 0 means we are the natural parent. In this case 1444 * we should clear notask_err 1362 * we should clear notask_error, debugger will notify us. 1445 */ 1363 */ 1446 if (likely(!ptrace)) 1364 if (likely(!ptrace)) 1447 wo->notask_error = 0; 1365 wo->notask_error = 0; 1448 return 0; 1366 return 0; 1449 } 1367 } 1450 1368 1451 if (likely(!ptrace) && unlikely(p->pt 1369 if (likely(!ptrace) && unlikely(p->ptrace)) { 1452 /* 1370 /* 1453 * If it is traced by its rea 1371 * If it is traced by its real parent's group, just pretend 1454 * the caller is ptrace_do_wa 1372 * the caller is ptrace_do_wait() and reap this child if it 1455 * is zombie. 1373 * is zombie. 1456 * 1374 * 1457 * This also hides group stop 1375 * This also hides group stop state from real parent; otherwise 1458 * a single stop can be repor 1376 * a single stop can be reported twice as group and ptrace stop. 1459 * If a ptracer wants to dist 1377 * If a ptracer wants to distinguish these two events for its 1460 * own children it should cre 1378 * own children it should create a separate process which takes 1461 * the role of real parent. 1379 * the role of real parent. 1462 */ 1380 */ 1463 if (!ptrace_reparented(p)) 1381 if (!ptrace_reparented(p)) 1464 ptrace = 1; 1382 ptrace = 1; 1465 } 1383 } 1466 1384 1467 /* slay zombie? */ 1385 /* slay zombie? */ 1468 if (exit_state == EXIT_ZOMBIE) { 1386 if (exit_state == EXIT_ZOMBIE) { 1469 /* we don't reap group leader 1387 /* we don't reap group leaders with subthreads */ 1470 if (!delay_group_leader(p)) { 1388 if (!delay_group_leader(p)) { 1471 /* 1389 /* 1472 * A zombie ptracee i 1390 * A zombie ptracee is only visible to its ptracer. 1473 * Notification and r 1391 * Notification and reaping will be cascaded to the 1474 * real parent when t 1392 * real parent when the ptracer detaches. 1475 */ 1393 */ 1476 if (unlikely(ptrace) 1394 if (unlikely(ptrace) || likely(!p->ptrace)) 1477 return wait_t 1395 return wait_task_zombie(wo, p); 1478 } 1396 } 1479 1397 1480 /* 1398 /* 1481 * Allow access to stopped/co 1399 * Allow access to stopped/continued state via zombie by 1482 * falling through. Clearing 1400 * falling through. Clearing of notask_error is complex. 1483 * 1401 * 1484 * When !@ptrace: 1402 * When !@ptrace: 1485 * 1403 * 1486 * If WEXITED is set, notask_ 1404 * If WEXITED is set, notask_error should naturally be 1487 * cleared. If not, subset o 1405 * cleared. If not, subset of WSTOPPED|WCONTINUED is set, 1488 * so, if there are live subt 1406 * so, if there are live subthreads, there are events to 1489 * wait for. If all subthrea 1407 * wait for. 
If all subthreads are dead, it's still safe 1490 * to clear - this function w 1408 * to clear - this function will be called again in finite 1491 * amount time once all the s 1409 * amount time once all the subthreads are released and 1492 * will then return without c 1410 * will then return without clearing. 1493 * 1411 * 1494 * When @ptrace: 1412 * When @ptrace: 1495 * 1413 * 1496 * Stopped state is per-task 1414 * Stopped state is per-task and thus can't change once the 1497 * target task dies. Only co 1415 * target task dies. Only continued and exited can happen. 1498 * Clear notask_error if WCON 1416 * Clear notask_error if WCONTINUED | WEXITED. 1499 */ 1417 */ 1500 if (likely(!ptrace) || (wo->w 1418 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) 1501 wo->notask_error = 0; 1419 wo->notask_error = 0; 1502 } else { 1420 } else { 1503 /* 1421 /* 1504 * @p is alive and it's gonna 1422 * @p is alive and it's gonna stop, continue or exit, so 1505 * there always is something 1423 * there always is something to wait for. 1506 */ 1424 */ 1507 wo->notask_error = 0; 1425 wo->notask_error = 0; 1508 } 1426 } 1509 1427 1510 /* 1428 /* 1511 * Wait for stopped. Depending on @p 1429 * Wait for stopped. Depending on @ptrace, different stopped state 1512 * is used and the two don't interact 1430 * is used and the two don't interact with each other. 1513 */ 1431 */ 1514 ret = wait_task_stopped(wo, ptrace, p 1432 ret = wait_task_stopped(wo, ptrace, p); 1515 if (ret) 1433 if (ret) 1516 return ret; 1434 return ret; 1517 1435 1518 /* 1436 /* 1519 * Wait for continued. There's only 1437 * Wait for continued. There's only one continued state and the 1520 * ptracer can consume it which can c 1438 * ptracer can consume it which can confuse the real parent. Don't 1521 * use WCONTINUED from ptracer. You 1439 * use WCONTINUED from ptracer. You don't need or want it. 1522 */ 1440 */ 1523 return wait_task_continued(wo, p); 1441 return wait_task_continued(wo, p); 1524 } 1442 } 1525 1443 1526 /* 1444 /* 1527 * Do the work of do_wait() for one thread in 1445 * Do the work of do_wait() for one thread in the group, @tsk. 1528 * 1446 * 1529 * -ECHILD should be in ->notask_error before 1447 * -ECHILD should be in ->notask_error before the first call. 1530 * Returns nonzero for a final return, when w 1448 * Returns nonzero for a final return, when we have unlocked tasklist_lock. 1531 * Returns zero if the search for a child sho 1449 * Returns zero if the search for a child should continue; then 1532 * ->notask_error is 0 if there were any elig 1450 * ->notask_error is 0 if there were any eligible children, 1533 * or still -ECHILD. 1451 * or still -ECHILD. 
1534 */ 1452 */ 1535 static int do_wait_thread(struct wait_opts *w 1453 static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk) 1536 { 1454 { 1537 struct task_struct *p; 1455 struct task_struct *p; 1538 1456 1539 list_for_each_entry(p, &tsk->children 1457 list_for_each_entry(p, &tsk->children, sibling) { 1540 int ret = wait_consider_task( 1458 int ret = wait_consider_task(wo, 0, p); 1541 1459 1542 if (ret) 1460 if (ret) 1543 return ret; 1461 return ret; 1544 } 1462 } 1545 1463 1546 return 0; 1464 return 0; 1547 } 1465 } 1548 1466 1549 static int ptrace_do_wait(struct wait_opts *w 1467 static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk) 1550 { 1468 { 1551 struct task_struct *p; 1469 struct task_struct *p; 1552 1470 1553 list_for_each_entry(p, &tsk->ptraced, 1471 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { 1554 int ret = wait_consider_task( 1472 int ret = wait_consider_task(wo, 1, p); 1555 1473 1556 if (ret) 1474 if (ret) 1557 return ret; 1475 return ret; 1558 } 1476 } 1559 1477 1560 return 0; 1478 return 0; 1561 } 1479 } 1562 1480 1563 bool pid_child_should_wake(struct wait_opts * << 1564 { << 1565 if (!eligible_pid(wo, p)) << 1566 return false; << 1567 << 1568 if ((wo->wo_flags & __WNOTHREAD) && w << 1569 return false; << 1570 << 1571 return true; << 1572 } << 1573 << 1574 static int child_wait_callback(wait_queue_ent 1481 static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, 1575 int sync, voi 1482 int sync, void *key) 1576 { 1483 { 1577 struct wait_opts *wo = container_of(w 1484 struct wait_opts *wo = container_of(wait, struct wait_opts, 1578 1485 child_wait); 1579 struct task_struct *p = key; 1486 struct task_struct *p = key; 1580 1487 1581 if (pid_child_should_wake(wo, p)) !! 1488 if (!eligible_pid(wo, p)) 1582 return default_wake_function( !! 1489 return 0; 1583 1490 1584 return 0; !! 1491 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) >> 1492 return 0; >> 1493 >> 1494 return default_wake_function(wait, mode, sync, key); 1585 } 1495 } 1586 1496 1587 void __wake_up_parent(struct task_struct *p, 1497 void __wake_up_parent(struct task_struct *p, struct task_struct *parent) 1588 { 1498 { 1589 __wake_up_sync_key(&parent->signal->w 1499 __wake_up_sync_key(&parent->signal->wait_chldexit, 1590 TASK_INTERRUPTIBLE !! 1500 TASK_INTERRUPTIBLE, 1, p); 1591 } << 1592 << 1593 static bool is_effectively_child(struct wait_ << 1594 struct task_ << 1595 { << 1596 struct task_struct *parent = << 1597 !ptrace ? target->real_parent << 1598 << 1599 return current == parent || (!(wo->wo << 1600 same_thr << 1601 } 1501 } 1602 1502 1603 /* !! 1503 static long do_wait(struct wait_opts *wo) 1604 * Optimization for waiting on PIDTYPE_PID. N << 1605 * and tracee lists to find the target task. << 1606 */ << 1607 static int do_wait_pid(struct wait_opts *wo) << 1608 { 1504 { 1609 bool ptrace; !! 1505 struct task_struct *tsk; 1610 struct task_struct *target; << 1611 int retval; 1506 int retval; 1612 1507 1613 ptrace = false; !! 
1508 trace_sched_process_wait(wo->wo_pid); 1614 target = pid_task(wo->wo_pid, PIDTYPE << 1615 if (target && is_effectively_child(wo << 1616 retval = wait_consider_task(w << 1617 if (retval) << 1618 return retval; << 1619 } << 1620 << 1621 ptrace = true; << 1622 target = pid_task(wo->wo_pid, PIDTYPE << 1623 if (target && target->ptrace && << 1624 is_effectively_child(wo, ptrace, << 1625 retval = wait_consider_task(w << 1626 if (retval) << 1627 return retval; << 1628 } << 1629 << 1630 return 0; << 1631 } << 1632 << 1633 long __do_wait(struct wait_opts *wo) << 1634 { << 1635 long retval; << 1636 1509 >> 1510 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); >> 1511 wo->child_wait.private = current; >> 1512 add_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); >> 1513 repeat: 1637 /* 1514 /* 1638 * If there is nothing that can match 1515 * If there is nothing that can match our criteria, just get out. 1639 * We will clear ->notask_error to ze 1516 * We will clear ->notask_error to zero if we see any child that 1640 * might later match our criteria, ev 1517 * might later match our criteria, even if we are not able to reap 1641 * it yet. 1518 * it yet. 1642 */ 1519 */ 1643 wo->notask_error = -ECHILD; 1520 wo->notask_error = -ECHILD; 1644 if ((wo->wo_type < PIDTYPE_MAX) && 1521 if ((wo->wo_type < PIDTYPE_MAX) && 1645 (!wo->wo_pid || !pid_has_task(wo-> !! 1522 (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type]))) 1646 goto notask; 1523 goto notask; 1647 1524 >> 1525 set_current_state(TASK_INTERRUPTIBLE); 1648 read_lock(&tasklist_lock); 1526 read_lock(&tasklist_lock); 1649 !! 1527 tsk = current; 1650 if (wo->wo_type == PIDTYPE_PID) { !! 1528 do { 1651 retval = do_wait_pid(wo); !! 1529 retval = do_wait_thread(wo, tsk); 1652 if (retval) 1530 if (retval) 1653 return retval; !! 1531 goto end; 1654 } else { << 1655 struct task_struct *tsk = cur << 1656 1532 1657 do { !! 1533 retval = ptrace_do_wait(wo, tsk); 1658 retval = do_wait_thre !! 1534 if (retval) 1659 if (retval) !! 1535 goto end; 1660 return retval << 1661 << 1662 retval = ptrace_do_wa << 1663 if (retval) << 1664 return retval << 1665 1536 1666 if (wo->wo_flags & __ !! 1537 if (wo->wo_flags & __WNOTHREAD) 1667 break; !! 1538 break; 1668 } while_each_thread(current, !! 1539 } while_each_thread(current, tsk); 1669 } << 1670 read_unlock(&tasklist_lock); 1540 read_unlock(&tasklist_lock); 1671 1541 1672 notask: 1542 notask: 1673 retval = wo->notask_error; 1543 retval = wo->notask_error; 1674 if (!retval && !(wo->wo_flags & WNOHA !! 1544 if (!retval && !(wo->wo_flags & WNOHANG)) { 1675 return -ERESTARTSYS; !! 1545 retval = -ERESTARTSYS; 1676 !! 1546 if (!signal_pending(current)) { 1677 return retval; !! 1547 schedule(); 1678 } !! 1548 goto repeat; 1679 !! 1549 } 1680 static long do_wait(struct wait_opts *wo) !! 1550 } 1681 { !! 
1551 end: 1682 int retval; << 1683 << 1684 trace_sched_process_wait(wo->wo_pid); << 1685 << 1686 init_waitqueue_func_entry(&wo->child_ << 1687 wo->child_wait.private = current; << 1688 add_wait_queue(¤t->signal->wait << 1689 << 1690 do { << 1691 set_current_state(TASK_INTERR << 1692 retval = __do_wait(wo); << 1693 if (retval != -ERESTARTSYS) << 1694 break; << 1695 if (signal_pending(current)) << 1696 break; << 1697 schedule(); << 1698 } while (1); << 1699 << 1700 __set_current_state(TASK_RUNNING); 1552 __set_current_state(TASK_RUNNING); 1701 remove_wait_queue(¤t->signal->w 1553 remove_wait_queue(¤t->signal->wait_chldexit, &wo->child_wait); 1702 return retval; 1554 return retval; 1703 } 1555 } 1704 1556 1705 int kernel_waitid_prepare(struct wait_opts *w !! 1557 static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop, 1706 struct waitid_info !! 1558 int options, struct rusage *ru) 1707 struct rusage *ru) << 1708 { 1559 { 1709 unsigned int f_flags = 0; !! 1560 struct wait_opts wo; 1710 struct pid *pid = NULL; 1561 struct pid *pid = NULL; 1711 enum pid_type type; 1562 enum pid_type type; >> 1563 long ret; 1712 1564 1713 if (options & ~(WNOHANG|WNOWAIT|WEXIT 1565 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED| 1714 __WNOTHREAD|__WCLONE| 1566 __WNOTHREAD|__WCLONE|__WALL)) 1715 return -EINVAL; 1567 return -EINVAL; 1716 if (!(options & (WEXITED|WSTOPPED|WCO 1568 if (!(options & (WEXITED|WSTOPPED|WCONTINUED))) 1717 return -EINVAL; 1569 return -EINVAL; 1718 1570 1719 switch (which) { 1571 switch (which) { 1720 case P_ALL: 1572 case P_ALL: 1721 type = PIDTYPE_MAX; 1573 type = PIDTYPE_MAX; 1722 break; 1574 break; 1723 case P_PID: 1575 case P_PID: 1724 type = PIDTYPE_PID; 1576 type = PIDTYPE_PID; 1725 if (upid <= 0) 1577 if (upid <= 0) 1726 return -EINVAL; 1578 return -EINVAL; 1727 << 1728 pid = find_get_pid(upid); << 1729 break; 1579 break; 1730 case P_PGID: 1580 case P_PGID: 1731 type = PIDTYPE_PGID; 1581 type = PIDTYPE_PGID; 1732 if (upid < 0) !! 1582 if (upid <= 0) 1733 return -EINVAL; << 1734 << 1735 if (upid) << 1736 pid = find_get_pid(up << 1737 else << 1738 pid = get_task_pid(cu << 1739 break; << 1740 case P_PIDFD: << 1741 type = PIDTYPE_PID; << 1742 if (upid < 0) << 1743 return -EINVAL; 1583 return -EINVAL; 1744 << 1745 pid = pidfd_get_pid(upid, &f_ << 1746 if (IS_ERR(pid)) << 1747 return PTR_ERR(pid); << 1748 << 1749 break; 1584 break; 1750 default: 1585 default: 1751 return -EINVAL; 1586 return -EINVAL; 1752 } 1587 } 1753 1588 1754 wo->wo_type = type; !! 1589 if (type < PIDTYPE_MAX) 1755 wo->wo_pid = pid; !! 1590 pid = find_get_pid(upid); 1756 wo->wo_flags = options; << 1757 wo->wo_info = infop; << 1758 wo->wo_rusage = ru; << 1759 if (f_flags & O_NONBLOCK) << 1760 wo->wo_flags |= WNOHANG; << 1761 << 1762 return 0; << 1763 } << 1764 << 1765 static long kernel_waitid(int which, pid_t up << 1766 int options, struct << 1767 { << 1768 struct wait_opts wo; << 1769 long ret; << 1770 << 1771 ret = kernel_waitid_prepare(&wo, whic << 1772 if (ret) << 1773 return ret; << 1774 1591 >> 1592 wo.wo_type = type; >> 1593 wo.wo_pid = pid; >> 1594 wo.wo_flags = options; >> 1595 wo.wo_info = infop; >> 1596 wo.wo_rusage = ru; 1775 ret = do_wait(&wo); 1597 ret = do_wait(&wo); 1776 if (!ret && !(options & WNOHANG) && ( << 1777 ret = -EAGAIN; << 1778 1598 1779 put_pid(wo.wo_pid); !! 
1599 put_pid(pid); 1780 return ret; 1600 return ret; 1781 } 1601 } 1782 1602 1783 SYSCALL_DEFINE5(waitid, int, which, pid_t, up 1603 SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, 1784 infop, int, options, struct r 1604 infop, int, options, struct rusage __user *, ru) 1785 { 1605 { 1786 struct rusage r; 1606 struct rusage r; 1787 struct waitid_info info = {.status = 1607 struct waitid_info info = {.status = 0}; 1788 long err = kernel_waitid(which, upid, 1608 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL); 1789 int signo = 0; 1609 int signo = 0; 1790 1610 1791 if (err > 0) { 1611 if (err > 0) { 1792 signo = SIGCHLD; 1612 signo = SIGCHLD; 1793 err = 0; 1613 err = 0; 1794 if (ru && copy_to_user(ru, &r 1614 if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) 1795 return -EFAULT; 1615 return -EFAULT; 1796 } 1616 } 1797 if (!infop) 1617 if (!infop) 1798 return err; 1618 return err; 1799 1619 1800 if (!user_write_access_begin(infop, s !! 1620 if (!user_access_begin(infop, sizeof(*infop))) 1801 return -EFAULT; 1621 return -EFAULT; 1802 1622 1803 unsafe_put_user(signo, &infop->si_sig 1623 unsafe_put_user(signo, &infop->si_signo, Efault); 1804 unsafe_put_user(0, &infop->si_errno, 1624 unsafe_put_user(0, &infop->si_errno, Efault); 1805 unsafe_put_user(info.cause, &infop->s 1625 unsafe_put_user(info.cause, &infop->si_code, Efault); 1806 unsafe_put_user(info.pid, &infop->si_ 1626 unsafe_put_user(info.pid, &infop->si_pid, Efault); 1807 unsafe_put_user(info.uid, &infop->si_ 1627 unsafe_put_user(info.uid, &infop->si_uid, Efault); 1808 unsafe_put_user(info.status, &infop-> 1628 unsafe_put_user(info.status, &infop->si_status, Efault); 1809 user_write_access_end(); !! 1629 user_access_end(); 1810 return err; 1630 return err; 1811 Efault: 1631 Efault: 1812 user_write_access_end(); !! 
1632 user_access_end(); 1813 return -EFAULT; 1633 return -EFAULT; 1814 } 1634 } 1815 1635 1816 long kernel_wait4(pid_t upid, int __user *sta 1636 long kernel_wait4(pid_t upid, int __user *stat_addr, int options, 1817 struct rusage *ru) 1637 struct rusage *ru) 1818 { 1638 { 1819 struct wait_opts wo; 1639 struct wait_opts wo; 1820 struct pid *pid = NULL; 1640 struct pid *pid = NULL; 1821 enum pid_type type; 1641 enum pid_type type; 1822 long ret; 1642 long ret; 1823 1643 1824 if (options & ~(WNOHANG|WUNTRACED|WCO 1644 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1825 __WNOTHREAD|__WCLONE| 1645 __WNOTHREAD|__WCLONE|__WALL)) 1826 return -EINVAL; 1646 return -EINVAL; 1827 1647 1828 /* -INT_MIN is not defined */ 1648 /* -INT_MIN is not defined */ 1829 if (upid == INT_MIN) 1649 if (upid == INT_MIN) 1830 return -ESRCH; 1650 return -ESRCH; 1831 1651 1832 if (upid == -1) 1652 if (upid == -1) 1833 type = PIDTYPE_MAX; 1653 type = PIDTYPE_MAX; 1834 else if (upid < 0) { 1654 else if (upid < 0) { 1835 type = PIDTYPE_PGID; 1655 type = PIDTYPE_PGID; 1836 pid = find_get_pid(-upid); 1656 pid = find_get_pid(-upid); 1837 } else if (upid == 0) { 1657 } else if (upid == 0) { 1838 type = PIDTYPE_PGID; 1658 type = PIDTYPE_PGID; 1839 pid = get_task_pid(current, P 1659 pid = get_task_pid(current, PIDTYPE_PGID); 1840 } else /* upid > 0 */ { 1660 } else /* upid > 0 */ { 1841 type = PIDTYPE_PID; 1661 type = PIDTYPE_PID; 1842 pid = find_get_pid(upid); 1662 pid = find_get_pid(upid); 1843 } 1663 } 1844 1664 1845 wo.wo_type = type; 1665 wo.wo_type = type; 1846 wo.wo_pid = pid; 1666 wo.wo_pid = pid; 1847 wo.wo_flags = options | WEXITED; 1667 wo.wo_flags = options | WEXITED; 1848 wo.wo_info = NULL; 1668 wo.wo_info = NULL; 1849 wo.wo_stat = 0; 1669 wo.wo_stat = 0; 1850 wo.wo_rusage = ru; 1670 wo.wo_rusage = ru; 1851 ret = do_wait(&wo); 1671 ret = do_wait(&wo); 1852 put_pid(pid); 1672 put_pid(pid); 1853 if (ret > 0 && stat_addr && put_user( 1673 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr)) 1854 ret = -EFAULT; 1674 ret = -EFAULT; 1855 1675 1856 return ret; 1676 return ret; 1857 } 1677 } 1858 1678 1859 int kernel_wait(pid_t pid, int *stat) << 1860 { << 1861 struct wait_opts wo = { << 1862 .wo_type = PIDTYPE_PID << 1863 .wo_pid = find_get_pi << 1864 .wo_flags = WEXITED, << 1865 }; << 1866 int ret; << 1867 << 1868 ret = do_wait(&wo); << 1869 if (ret > 0 && wo.wo_stat) << 1870 *stat = wo.wo_stat; << 1871 put_pid(wo.wo_pid); << 1872 return ret; << 1873 } << 1874 << 1875 SYSCALL_DEFINE4(wait4, pid_t, upid, int __use 1679 SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, 1876 int, options, struct rusage _ 1680 int, options, struct rusage __user *, ru) 1877 { 1681 { 1878 struct rusage r; 1682 struct rusage r; 1879 long err = kernel_wait4(upid, stat_ad 1683 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL); 1880 1684 1881 if (err > 0) { 1685 if (err > 0) { 1882 if (ru && copy_to_user(ru, &r 1686 if (ru && copy_to_user(ru, &r, sizeof(struct rusage))) 1883 return -EFAULT; 1687 return -EFAULT; 1884 } 1688 } 1885 return err; 1689 return err; 1886 } 1690 } 1887 1691 1888 #ifdef __ARCH_WANT_SYS_WAITPID 1692 #ifdef __ARCH_WANT_SYS_WAITPID 1889 1693 1890 /* 1694 /* 1891 * sys_waitpid() remains for compatibility. w 1695 * sys_waitpid() remains for compatibility. waitpid() should be 1892 * implemented by calling sys_wait4() from li 1696 * implemented by calling sys_wait4() from libc.a. 
1893 */ 1697 */ 1894 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __us 1698 SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options) 1895 { 1699 { 1896 return kernel_wait4(pid, stat_addr, o 1700 return kernel_wait4(pid, stat_addr, options, NULL); 1897 } 1701 } 1898 1702 1899 #endif 1703 #endif 1900 1704 1901 #ifdef CONFIG_COMPAT 1705 #ifdef CONFIG_COMPAT 1902 COMPAT_SYSCALL_DEFINE4(wait4, 1706 COMPAT_SYSCALL_DEFINE4(wait4, 1903 compat_pid_t, pid, 1707 compat_pid_t, pid, 1904 compat_uint_t __user *, stat_addr, 1708 compat_uint_t __user *, stat_addr, 1905 int, options, 1709 int, options, 1906 struct compat_rusage __user *, ru) 1710 struct compat_rusage __user *, ru) 1907 { 1711 { 1908 struct rusage r; 1712 struct rusage r; 1909 long err = kernel_wait4(pid, stat_add 1713 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL); 1910 if (err > 0) { 1714 if (err > 0) { 1911 if (ru && put_compat_rusage(& 1715 if (ru && put_compat_rusage(&r, ru)) 1912 return -EFAULT; 1716 return -EFAULT; 1913 } 1717 } 1914 return err; 1718 return err; 1915 } 1719 } 1916 1720 1917 COMPAT_SYSCALL_DEFINE5(waitid, 1721 COMPAT_SYSCALL_DEFINE5(waitid, 1918 int, which, compat_pid_t, pid 1722 int, which, compat_pid_t, pid, 1919 struct compat_siginfo __user 1723 struct compat_siginfo __user *, infop, int, options, 1920 struct compat_rusage __user * 1724 struct compat_rusage __user *, uru) 1921 { 1725 { 1922 struct rusage ru; 1726 struct rusage ru; 1923 struct waitid_info info = {.status = 1727 struct waitid_info info = {.status = 0}; 1924 long err = kernel_waitid(which, pid, 1728 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL); 1925 int signo = 0; 1729 int signo = 0; 1926 if (err > 0) { 1730 if (err > 0) { 1927 signo = SIGCHLD; 1731 signo = SIGCHLD; 1928 err = 0; 1732 err = 0; 1929 if (uru) { 1733 if (uru) { 1930 /* kernel_waitid() ov 1734 /* kernel_waitid() overwrites everything in ru */ 1931 if (COMPAT_USE_64BIT_ 1735 if (COMPAT_USE_64BIT_TIME) 1932 err = copy_to 1736 err = copy_to_user(uru, &ru, sizeof(ru)); 1933 else 1737 else 1934 err = put_com 1738 err = put_compat_rusage(&ru, uru); 1935 if (err) 1739 if (err) 1936 return -EFAUL 1740 return -EFAULT; 1937 } 1741 } 1938 } 1742 } 1939 1743 1940 if (!infop) 1744 if (!infop) 1941 return err; 1745 return err; 1942 1746 1943 if (!user_write_access_begin(infop, s !! 1747 if (!user_access_begin(infop, sizeof(*infop))) 1944 return -EFAULT; 1748 return -EFAULT; 1945 1749 1946 unsafe_put_user(signo, &infop->si_sig 1750 unsafe_put_user(signo, &infop->si_signo, Efault); 1947 unsafe_put_user(0, &infop->si_errno, 1751 unsafe_put_user(0, &infop->si_errno, Efault); 1948 unsafe_put_user(info.cause, &infop->s 1752 unsafe_put_user(info.cause, &infop->si_code, Efault); 1949 unsafe_put_user(info.pid, &infop->si_ 1753 unsafe_put_user(info.pid, &infop->si_pid, Efault); 1950 unsafe_put_user(info.uid, &infop->si_ 1754 unsafe_put_user(info.uid, &infop->si_uid, Efault); 1951 unsafe_put_user(info.status, &infop-> 1755 unsafe_put_user(info.status, &infop->si_status, Efault); 1952 user_write_access_end(); !! 1756 user_access_end(); 1953 return err; 1757 return err; 1954 Efault: 1758 Efault: 1955 user_write_access_end(); !! 1759 user_access_end(); 1956 return -EFAULT; 1760 return -EFAULT; 1957 } 1761 } 1958 #endif 1762 #endif 1959 1763 1960 /* !! 1764 __weak void abort(void) 1961 * This needs to be __function_aligned as GCC << 1962 * implementation of abort() cold and drops a << 1963 * -falign-functions=N. 
<< 1964 * << 1965 * See https://gcc.gnu.org/bugzilla/show_bug. << 1966 */ << 1967 __weak __function_aligned void abort(void) << 1968 { 1765 { 1969 BUG(); 1766 BUG(); 1970 1767 1971 /* if that doesn't kill us, halt */ 1768 /* if that doesn't kill us, halt */ 1972 panic("Oops failed to kill thread"); 1769 panic("Oops failed to kill thread"); 1973 } 1770 } 1974 EXPORT_SYMBOL(abort); 1771 EXPORT_SYMBOL(abort); 1975 1772
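
As the comment above sys_exit_group() says, do_group_exit() takes down every thread in the group, while plain sys_exit() reaches do_exit() and ends only the calling thread. A minimal userspace sketch of the difference (assumes glibc on Linux; invoking SYS_exit directly skips libc's thread cleanup, which is acceptable only for a demo). Build with -pthread.

#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void *thread_fn(void *arg)
{
	(void)arg;
	syscall(SYS_exit, 0);	/* do_exit(): terminates this thread only */
	return NULL;		/* never reached */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thread_fn, NULL);
	pthread_join(t, NULL);	/* joins: the sibling died alone */
	puts("process survived SYS_exit in a sibling thread");
	return 0;		/* returning from main ends in exit_group() */
}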
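
sys_exit() stores the user's code as (error_code & 0xff) << 8, and the out_info block of wait_task_zombie() decodes the same word: low seven bits zero means a normal exit, otherwise they hold the fatal signal, with 0x80 marking a core dump. The standard wait macros mirror this exactly; a small sketch (WCOREDUMP() is a common glibc/BSD extension):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);			/* kernel stores (42 & 0xff) << 8 */

	if (waitpid(pid, &status, 0) < 0) {
		perror("waitpid");
		return 1;
	}
	if (WIFEXITED(status))			/* (status & 0x7f) == 0 */
		printf("exited, code %d\n", WEXITSTATUS(status)); /* status >> 8 */
	else if (WIFSIGNALED(status))		/* fatal signal in status & 0x7f */
		printf("killed by signal %d%s\n", WTERMSIG(status),
		       WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}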
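
kernel_waitid() reports through struct waitid_info, which sys_waitid() copies out as siginfo fields; si_code carries the CLD_* cause computed above. WNOWAIT takes the early branch of wait_task_zombie() and leaves the child in EXIT_ZOMBIE, so the same child can be waited on twice. A sketch using the POSIX wrapper:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info;
	pid_t pid = fork();

	if (pid == 0)
		_exit(7);

	/* Peek without reaping: the WNOWAIT branch. */
	if (waitid(P_PID, pid, &info, WEXITED | WNOWAIT) < 0) {
		perror("waitid");
		return 1;
	}
	printf("peek: pid %d cause %d status %d\n",
	       (int)info.si_pid,
	       info.si_code,		/* CLD_EXITED here */
	       info.si_status);

	/* Second call actually reaps the zombie. */
	if (waitid(P_PID, pid, &info, WEXITED) < 0)
		perror("waitid (reap)");
	return 0;
}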
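
wait_task_stopped() reports a stopped child as (exit_code << 8) | 0x7f and wait_task_continued() reports the fixed word 0xffff; WIFSTOPPED() and WIFCONTINUED() test exactly those patterns. A sketch exercising both paths:

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		pause();			/* sleep until killed */
		_exit(0);
	}

	kill(pid, SIGSTOP);
	waitpid(pid, &status, WUNTRACED);
	if (WIFSTOPPED(status))			/* (status & 0xff) == 0x7f */
		printf("stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);
	waitpid(pid, &status, WCONTINUED);
	if (WIFCONTINUED(status))		/* status == 0xffff */
		printf("continued\n");

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);		/* reap */
	return 0;
}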
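
kernel_wait4() decodes upid the classic way: -1 means any child, 0 means the caller's own process group, less than -1 means process group -upid, and positive means one specific pid. A sketch of the process-group case; calling setpgid() in both parent and child is the usual idiom for closing the startup race, and its failure in the parent is deliberately ignored:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* move into a group of our own */
		_exit(3);
	}
	setpgid(pid, pid);		/* belt and braces; may fail harmlessly */

	pid_t got = waitpid(-pid, &status, 0);	/* wait on pgid == pid */
	printf("reaped %d from group %d, exit code %d\n",
	       (int)got, (int)pid, WEXITSTATUS(status));
	return 0;
}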
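
The P_PIDFD branch of kernel_waitid_prepare() above resolves the id through pidfd_get_pid(), so a file descriptor from pidfd_open(2) can name the child. A hedged sketch, assuming a Linux 5.4+ kernel; the fallback constants match the kernel UAPI values but are defined by hand in case older libc headers lack them:

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef P_PIDFD
#define P_PIDFD 3		/* from <uapi/linux/wait.h> */
#endif
#ifndef SYS_pidfd_open
#define SYS_pidfd_open 434
#endif

int main(void)
{
	siginfo_t info;
	pid_t pid = fork();

	if (pid == 0)
		_exit(0);

	int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}
	if (waitid((idtype_t)P_PIDFD, pidfd, &info, WEXITED) < 0) {
		perror("waitid(P_PIDFD)");
		return 1;
	}
	printf("reaped pid %d via pidfd\n", (int)info.si_pid);
	close(pidfd);
	return 0;
}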
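
wait_task_zombie() folds the dead child's counters into the parent's signal_struct c* fields, and both wait paths call getrusage(p, RUSAGE_BOTH, ...) when the caller asked for it; wait4() is the userspace entry point that receives the result. A sketch (wait4() is nonstandard but exposed by glibc under _DEFAULT_SOURCE):

#include <stdio.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	struct rusage ru;
	pid_t pid = fork();

	if (pid == 0) {
		/* burn a little CPU so the numbers are nonzero */
		for (volatile unsigned long i = 0; i < 50000000UL; i++)
			;
		_exit(0);
	}

	if (wait4(pid, &status, 0, &ru) < 0) {
		perror("wait4");
		return 1;
	}
	printf("child utime %ld.%06lds, maxrss %ld kB\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       ru.ru_maxrss);
	return 0;
}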