TOMOYO Linux Cross Reference
Linux/kernel/sched/stats.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()           static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)            do { var++; } while (0)
#define   schedstat_inc(var)            do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)       do { var += (amt); } while (0)
#define   schedstat_add(var, amt)       do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)       do { var = (val); } while (0)
#define   schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)            (var)
#define   schedstat_val_or_zero(var)    ((schedstat_enabled()) ? (var) : 0)
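
/*
 * Illustrative sketch (editorial example, not part of this header): the
 * schedstat_*() wrappers fold in the static-key check themselves, while the
 * __schedstat_*() forms assume the caller has already tested
 * schedstat_enabled(); checking the key once is the usual pattern when
 * several counters are updated together. The helper function below is
 * hypothetical; the fields are from struct sched_statistics.
 */
#if 0
static inline void example_account_wait(struct sched_statistics *stats, u64 delta)
{
        /* One-off update: the wrapper performs the enabled check itself. */
        schedstat_inc(stats->wait_count);

        /* Batched updates: test the static key once, then use raw forms. */
        if (schedstat_enabled()) {
                __schedstat_add(stats->wait_sum, delta);
                __schedstat_set(stats->wait_start, 0);
        }
}
#endif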

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
                               struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
                             struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
                                    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
        if (schedstat_enabled())
                return;

        /* Force schedstat enabled if a dependent tracepoint is active */
        if (trace_sched_stat_wait_enabled()    ||
            trace_sched_stat_sleep_enabled()   ||
            trace_sched_stat_iowait_enabled()  ||
            trace_sched_stat_blocked_enabled() ||
            trace_sched_stat_runtime_enabled())
                printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()          0
# define __schedstat_inc(var)           do { } while (0)
# define   schedstat_inc(var)           do { } while (0)
# define __schedstat_add(var, amt)      do { } while (0)
# define   schedstat_add(var, amt)      do { } while (0)
# define __schedstat_set(var, val)      do { } while (0)
# define   schedstat_set(var, val)      do { } while (0)
# define   schedstat_val(var)           0
# define   schedstat_val_or_zero(var)   0

# define __update_stats_wait_start(rq, p, stats)       do { } while (0)
# define __update_stats_wait_end(rq, p, stats)         do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)  do { } while (0)
# define check_schedstat_required()                    do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
        struct sched_entity     se;
        struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        if (!entity_is_task(se))
                return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
        return &task_of(se)->stats;
}
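
/*
 * Illustrative sketch (editorial example, not part of this header):
 * __schedstats_from_se() hides where the statistics live: in task_struct for
 * a task entity, or in the enclosing sched_entity_stats wrapper for a group
 * entity. A caller can therefore read stats without caring which kind of
 * entity it holds. The helper below is hypothetical.
 */
#if 0
static inline u64 example_wait_sum_of(struct sched_entity *se)
{
        struct sched_statistics *stats = __schedstats_from_se(se);

        /* wait_sum is only maintained while schedstats is enabled. */
        return schedstat_val_or_zero(stats->wait_sum);
}
#endif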

#ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                     bool sleep);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
#else
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
                                       struct task_struct *prev) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
        int clear = 0, set = TSK_RUNNING;

        if (static_branch_likely(&psi_disabled))
                return;

        if (p->in_memstall)
                set |= TSK_MEMSTALL_RUNNING;

        if (!wakeup) {
                if (p->in_memstall)
                        set |= TSK_MEMSTALL;
        } else {
                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
        }

        psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
        if (static_branch_likely(&psi_disabled))
                return;

        /*
         * A voluntary sleep is a dequeue followed by a task switch. To
         * avoid walking all ancestors twice, psi_task_switch() handles
         * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
         * Do nothing here.
         */
        if (sleep)
                return;

        psi_task_change(p, p->psi_flags, 0);
}
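
/*
 * Illustrative sketch (editorial example, not part of this header): the
 * wakeup/sleep flags are what let PSI tell a real state change from a mere
 * requeue. A requeue passes wakeup=false and sleep=false: the old runqueue
 * drops all of the task's PSI flags and the new one re-establishes
 * TSK_RUNNING (and TSK_MEMSTALL if applicable), so sleep-persistent state
 * follows the task; a genuine wakeup passes wakeup=true and clears
 * TSK_IOWAIT. The schematic below ignores runqueue locking and is purely
 * hypothetical.
 */
#if 0
static inline void example_requeue_runnable_task(struct task_struct *p)
{
        /* Dequeue for migration: not a sleep, so all PSI flags are shed ... */
        psi_dequeue(p, false);
        /* ... and re-established when the destination runqueue enqueues. */
        psi_enqueue(p, false);
}
#endif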

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
        if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Make sure to
         * deregister its sleep-persistent psi states from the old
         * queue, and let psi_enqueue() know it has to requeue.
         */
        if (unlikely(p->psi_flags)) {
                struct rq_flags rf;
                struct rq *rq;

                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, p->psi_flags, 0);
                __task_rq_unlock(rq, &rf);
        }
}

static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep)
{
        if (static_branch_likely(&psi_disabled))
                return;

        psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep) {}
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
                                       struct task_struct *prev) {}
#endif /* CONFIG_PSI */
#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across CPUs. The
 * delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = 0;

        if (!t->sched_info.last_queued)
                return;

        delta = rq_clock(rq) - t->sched_info.last_queued;
        t->sched_info.last_queued = 0;
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeue(rq, delta);
}
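
/*
 * Worked example (editorial note, not part of this header): a task is
 * stamped last_queued at 100us on CPU0's rq_clock and dequeued there for
 * migration at 130us, so 30us of run_delay is charged using CPU0's clock
 * alone. When it is re-enqueued on CPU1, last_queued is re-stamped from
 * CPU1's clock. Every delta is therefore taken start-to-end on a single
 * CPU, so any constant offset between the two clocks cancels out, which is
 * why the skew "annuls".
 */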

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its time-slice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now, delta = 0;

        if (!t->sched_info.last_queued)
                return;

        now = rq_clock(rq);
        delta = now - t->sched_info.last_queued;
        t->sched_info.last_queued = 0;
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
        if (!t->sched_info.last_queued)
                t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (task_is_running(t))
                sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the CPU.  It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}
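
/*
 * Illustrative sketch (editorial example, not part of this header): the
 * sched_info hooks pair up over a task's runnable lifetime. Enqueueing
 * stamps last_queued; being picked converts that stamp into run_delay and
 * records last_arrival; being switched out while still runnable re-stamps
 * last_queued so the next wait is measured too. The walk-through below
 * calls the hooks directly for clarity and is purely hypothetical.
 */
#if 0
static inline void example_one_wait_run_cycle(struct rq *rq, struct task_struct *t)
{
        sched_info_enqueue(rq, t);      /* stamps sched_info.last_queued        */
        sched_info_arrive(rq, t);       /* run_delay += wait, pcount++          */
        sched_info_depart(rq, t);       /* rq_cpu_time += time since arrival;   */
                                        /* re-stamps last_queued if runnable    */
}
#endif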

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)      do { } while (0)
# define sched_info_dequeue(rq, t)      do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */
