TOMOYO Linux Cross Reference
Linux/include/linux/sched/task.h


  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _LINUX_SCHED_TASK_H
  3 #define _LINUX_SCHED_TASK_H
  4 
  5 /*
  6  * Interface between the scheduler and various task lifetime (fork()/exit())
  7  * functionality:
  8  */
  9 
 10 #include <linux/rcupdate.h>
 11 #include <linux/refcount.h>
 12 #include <linux/sched.h>
 13 #include <linux/uaccess.h>
 14 
 15 struct task_struct;
 16 struct rusage;
 17 union thread_union;
 18 struct css_set;
 19 
 20 /* All the bits taken by the old clone syscall. */
 21 #define CLONE_LEGACY_FLAGS 0xffffffffULL
 22 
 23 struct kernel_clone_args {
 24         u64 flags;
 25         int __user *pidfd;
 26         int __user *child_tid;
 27         int __user *parent_tid;
 28         const char *name;
 29         int exit_signal;
 30         u32 kthread:1;
 31         u32 io_thread:1;
 32         u32 user_worker:1;
 33         u32 no_files:1;
 34         unsigned long stack;
 35         unsigned long stack_size;
 36         unsigned long tls;
 37         pid_t *set_tid;
 38         /* Number of elements in *set_tid */
 39         size_t set_tid_size;
 40         int cgroup;
 41         int idle;
 42         int (*fn)(void *);
 43         void *fn_arg;
 44         struct cgroup *cgrp;
 45         struct css_set *cset;
 46 };
 47 
 48 /*
 49  * This serializes "schedule()" and also protects
 50  * the run-queue from deletions/modifications (but
 51  * _adding_ to the beginning of the run-queue has
 52  * a separate lock).
 53  */
 54 extern rwlock_t tasklist_lock;
 55 extern spinlock_t mmlist_lock;
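
/*
 * Illustrative sketch (not part of this header): read-side users take
 * tasklist_lock with read_lock() while walking the task list, for example
 * via the for_each_process() iterator from <linux/sched/signal.h>. The
 * helper name below is hypothetical.
 */
static inline int count_tasks_example(void)
{
        struct task_struct *p;
        int nr = 0;

        read_lock(&tasklist_lock);
        for_each_process(p)             /* visits each thread-group leader */
                nr++;
        read_unlock(&tasklist_lock);

        return nr;
}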
 56 
 57 extern union thread_union init_thread_union;
 58 extern struct task_struct init_task;
 59 
 60 extern int lockdep_tasklist_lock_is_held(void);
 61 
 62 extern asmlinkage void schedule_tail(struct task_struct *prev);
 63 extern void init_idle(struct task_struct *idle, int cpu);
 64 
 65 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 66 extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
 67 extern void sched_post_fork(struct task_struct *p);
 68 extern void sched_dead(struct task_struct *p);
 69 
 70 void __noreturn do_task_dead(void);
 71 void __noreturn make_task_dead(int signr);
 72 
 73 extern void mm_cache_init(void);
 74 extern void proc_caches_init(void);
 75 
 76 extern void fork_init(void);
 77 
 78 extern void release_task(struct task_struct * p);
 79 
 80 extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);
 81 
 82 extern void flush_thread(void);
 83 
 84 #ifdef CONFIG_HAVE_EXIT_THREAD
 85 extern void exit_thread(struct task_struct *tsk);
 86 #else
 87 static inline void exit_thread(struct task_struct *tsk)
 88 {
 89 }
 90 #endif
 91 extern __noreturn void do_group_exit(int);
 92 
 93 extern void exit_files(struct task_struct *);
 94 extern void exit_itimers(struct task_struct *);
 95 
 96 extern pid_t kernel_clone(struct kernel_clone_args *kargs);
 97 struct task_struct *copy_process(struct pid *pid, int trace, int node,
 98                                  struct kernel_clone_args *args);
 99 struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
100 struct task_struct *fork_idle(int);
101 extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
102                             unsigned long flags);
103 extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
104 extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
105 int kernel_wait(pid_t pid, int *stat);
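
/*
 * Illustrative sketch (not part of this header): callers build a
 * kernel_clone_args on the stack and hand it to kernel_clone(). This is
 * roughly how kernel_thread() is implemented in kernel/fork.c; the real
 * implementation may differ in detail.
 */
static inline pid_t kernel_thread_example(int (*fn)(void *), void *arg,
                                          const char *name, unsigned long flags)
{
        struct kernel_clone_args args = {
                .flags          = ((lower_32_bits(flags) | CLONE_VM |
                                    CLONE_UNTRACED) & ~CSIGNAL),
                .exit_signal    = (lower_32_bits(flags) & CSIGNAL),
                .fn             = fn,           /* entry point of the new thread */
                .fn_arg         = arg,
                .name           = name,         /* comm name for the new task */
                .kthread        = 1,            /* mark the child as a kernel thread */
        };

        return kernel_clone(&args);
}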
106 
107 extern void free_task(struct task_struct *tsk);
108 
109 /* sched_exec is called by processes performing an exec */
110 #ifdef CONFIG_SMP
111 extern void sched_exec(void);
112 #else
113 #define sched_exec()   {}
114 #endif
115 
116 static inline struct task_struct *get_task_struct(struct task_struct *t)
117 {
118         refcount_inc(&t->usage);
119         return t;
120 }
121 
122 extern void __put_task_struct(struct task_struct *t);
123 extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);
124 
125 static inline void put_task_struct(struct task_struct *t)
126 {
127         if (!refcount_dec_and_test(&t->usage))
128                 return;
129 
130         /*
131          * In !RT, it is always safe to call __put_task_struct().
132          * Under RT, we can only call it in preemptible context.
133          */
134         if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
135                 static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);
136 
137                 lock_map_acquire_try(&put_task_map);
138                 __put_task_struct(t);
139                 lock_map_release(&put_task_map);
140                 return;
141         }
142 
143         /*
144          * Under PREEMPT_RT, we can't call __put_task_struct()
145          * in atomic context because it will indirectly
146          * acquire sleeping locks.
147          *
148          * call_rcu() will schedule __put_task_struct_rcu_cb()
149          * to be called in process context.
150          *
151          * __put_task_struct() is called when
152          * refcount_dec_and_test(&t->usage) succeeds.
153          *
154          * This means that it can't "conflict" with
155          * put_task_struct_rcu_user() which abuses ->rcu the same
156          * way; rcu_users has a reference so task->usage can't be
157          * zero after rcu_users 1 -> 0 transition.
158          *
159          * delayed_free_task() also uses ->rcu, but it is only called
160          * when it fails to fork a process. Therefore, there is no
161          * way it can conflict with put_task_struct().
162          */
163         call_rcu(&t->rcu, __put_task_struct_rcu_cb);
164 }
165 
166 DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
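
/*
 * Illustrative sketch (not part of this header): a task looked up under RCU
 * is pinned with get_task_struct() before the RCU section ends and released
 * later with put_task_struct() (or automatically via "__free(put_task)"
 * using the cleanup class defined above). find_task_by_vpid() is the lookup
 * helper declared in <linux/sched.h>; the function name below is
 * hypothetical.
 */
static inline struct task_struct *grab_task_example(pid_t nr)
{
        struct task_struct *p;

        rcu_read_lock();
        p = find_task_by_vpid(nr);      /* RCU-protected lookup */
        if (p)
                get_task_struct(p);     /* pin the task beyond the RCU section */
        rcu_read_unlock();

        return p;                       /* caller drops it with put_task_struct() */
}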
167 
168 static inline void put_task_struct_many(struct task_struct *t, int nr)
169 {
170         if (refcount_sub_and_test(nr, &t->usage))
171                 __put_task_struct(t);
172 }
173 
174 void put_task_struct_rcu_user(struct task_struct *task);
175 
176 /* Free all architecture-specific resources held by a thread. */
177 void release_thread(struct task_struct *dead_task);
178 
179 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
180 extern int arch_task_struct_size __read_mostly;
181 #else
182 # define arch_task_struct_size (sizeof(struct task_struct))
183 #endif
184 
185 #ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
186 /*
187  * If an architecture has not declared a thread_struct whitelist, we
188  * must assume something there may need to be copied to userspace.
189  */
190 static inline void arch_thread_struct_whitelist(unsigned long *offset,
191                                                 unsigned long *size)
192 {
193         *offset = 0;
194         /* Handle dynamically sized thread_struct. */
195         *size = arch_task_struct_size - offsetof(struct task_struct, thread);
196 }
197 #endif
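
/*
 * Illustrative, hypothetical sketch (not part of this header): an
 * architecture selecting CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST supplies
 * its own arch_thread_struct_whitelist(), exposing only the region of
 * thread_struct that may legitimately reach userspace. The "fpu" field
 * below is an assumption for illustration only.
 */
#if 0   /* example only, not compiled */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
                                                unsigned long *size)
{
        /* Whitelist only the FPU register state for usercopy checks. */
        *offset = offsetof(struct thread_struct, fpu);
        *size   = sizeof_field(struct thread_struct, fpu);
}
#endif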
198 
199 #ifdef CONFIG_VMAP_STACK
200 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
201 {
202         return t->stack_vm_area;
203 }
204 #else
205 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
206 {
207         return NULL;
208 }
209 #endif
210 
211 /*
212  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
213  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
214  * pins the final release of task.io_context.  Also protects ->cpuset and
215  * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
216  *
217  * Nests both inside and outside of read_lock(&tasklist_lock).
218  * It must not be nested with write_lock_irq(&tasklist_lock),
219  * neither inside nor outside.
220  */
221 static inline void task_lock(struct task_struct *p)
222 {
223         spin_lock(&p->alloc_lock);
224 }
225 
226 static inline void task_unlock(struct task_struct *p)
227 {
228         spin_unlock(&p->alloc_lock);
229 }
230 
231 DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
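
/*
 * Illustrative sketch (not part of this header): task_lock() stabilises
 * fields such as ->comm, and the task_lock guard defined above drops the
 * lock automatically at end of scope. The helper name below is
 * hypothetical; buf is assumed to hold at least sizeof(p->comm) bytes.
 */
static inline void copy_comm_example(struct task_struct *p, char *buf)
{
        guard(task_lock)(p);                    /* task_unlock() runs on scope exit */
        memcpy(buf, p->comm, sizeof(p->comm));  /* ->comm is stable under task_lock */
}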
232 
233 #endif /* _LINUX_SCHED_TASK_H */
234 
