TOMOYO Linux Cross Reference
Linux/kernel/sched/stats.c


// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
                               struct sched_statistics *stats)
{
        u64 wait_start, prev_wait_start;

        wait_start = rq_clock(rq);
        prev_wait_start = schedstat_val(stats->wait_start);

        if (p && likely(wait_start > prev_wait_start))
                wait_start -= prev_wait_start;

        __schedstat_set(stats->wait_start, wait_start);
}

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
                             struct sched_statistics *stats)
{
        u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

        if (p) {
                if (task_on_rq_migrating(p)) {
                        /*
                         * Preserve migrating task's wait time so wait_start
                         * time stamp can be adjusted to accumulate wait time
                         * prior to migration.
                         */
                        __schedstat_set(stats->wait_start, delta);

                        return;
                }

                trace_sched_stat_wait(p, delta);
        }

        __schedstat_set(stats->wait_max,
                        max(schedstat_val(stats->wait_max), delta));
        __schedstat_inc(stats->wait_count);
        __schedstat_add(stats->wait_sum, delta);
        __schedstat_set(stats->wait_start, 0);
}
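
/*
 * Illustrative walkthrough (not part of the kernel source): how the two
 * helpers above cooperate to accumulate wait time across a migration. The
 * clock values are invented for the example and pretend both runqueue
 * clocks share one timeline.
 *
 *   t=100  task enqueued on rq0:
 *          __update_stats_wait_start() sets wait_start = 100.
 *   t=130  task dequeued for migration:
 *          __update_stats_wait_end() sees task_on_rq_migrating() and stores
 *          the accumulated wait, delta = 130 - 100 = 30, in wait_start.
 *   t=200  task enqueued on rq1:
 *          __update_stats_wait_start() finds prev_wait_start = 30 and sets
 *          wait_start = 200 - 30 = 170, backdating the time stamp by the
 *          wait already served on rq0.
 *   t=220  task is picked to run:
 *          __update_stats_wait_end() records delta = 220 - 170 = 50, i.e.
 *          30 (before migration) + 20 (after migration).
 */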

void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
                                    struct sched_statistics *stats)
{
        u64 sleep_start, block_start;

        sleep_start = schedstat_val(stats->sleep_start);
        block_start = schedstat_val(stats->block_start);

        if (sleep_start) {
                u64 delta = rq_clock(rq) - sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->sleep_max)))
                        __schedstat_set(stats->sleep_max, delta);

                __schedstat_set(stats->sleep_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);

                if (p) {
                        account_scheduler_latency(p, delta >> 10, 1);
                        trace_sched_stat_sleep(p, delta);
                }
        }

        if (block_start) {
                u64 delta = rq_clock(rq) - block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > schedstat_val(stats->block_max)))
                        __schedstat_set(stats->block_max, delta);

                __schedstat_set(stats->block_start, 0);
                __schedstat_add(stats->sum_sleep_runtime, delta);
                __schedstat_add(stats->sum_block_runtime, delta);

                if (p) {
                        if (p->in_iowait) {
                                __schedstat_add(stats->iowait_sum, delta);
                                __schedstat_inc(stats->iowait_count);
                                trace_sched_stat_iowait(p, delta);
                        }

                        trace_sched_stat_blocked(p, delta);

                        account_scheduler_latency(p, delta >> 10, 0);
                }
        }
}
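
/*
 * Illustrative note (not part of the kernel source): sleep_start is armed
 * when the task was dequeued in an interruptible sleep and block_start when
 * it was dequeued in an uninterruptible sleep, so normally only one of the
 * two branches above fires per wakeup. The (s64)delta < 0 checks guard
 * against small clock discrepancies when the task wakes on a different
 * runqueue. delta >> 10 is a cheap nanoseconds-to-roughly-microseconds
 * conversion (divide by 1024) for the latencytop hook
 * account_scheduler_latency(), whose last argument distinguishes
 * interruptible (1) from uninterruptible (0) delays.
 */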

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * field, so that tools can adapt (or abort).
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;

        if (v == (void *)1) {
                seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
                seq_printf(seq, "timestamp %lu\n", jiffies);
        } else {
                struct rq *rq;
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcount = 0;
#endif
                cpu = (unsigned long)(v - 2);
                rq = cpu_rq(cpu);

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
                    cpu, rq->yld_count,
                    rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                rcu_read_lock();
                for_each_domain(cpu, sd) {
                        enum cpu_idle_type itype;

                        seq_printf(seq, "domain%d %*pb", dcount++,
                                   cpumask_pr_args(sched_domain_span(sd)));
                        for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) {
                                seq_printf(seq, " %u %u %u %u %u %u %u %u",
                                    sd->lb_count[itype],
                                    sd->lb_balanced[itype],
                                    sd->lb_failed[itype],
                                    sd->lb_imbalance[itype],
                                    sd->lb_gained[itype],
                                    sd->lb_hot_gained[itype],
                                    sd->lb_nobusyq[itype],
                                    sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq,
                                   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
                            sd->alb_count, sd->alb_failed, sd->alb_pushed,
                            sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
                            sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                            sd->ttwu_wake_remote, sd->ttwu_move_affine,
                            sd->ttwu_move_balance);
                }
                rcu_read_unlock();
#endif
        }
        return 0;
}
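
/*
 * Example output (illustrative, counter values invented) in the format
 * produced by show_schedstat() above:
 *
 *   version 15
 *   timestamp 4295222400
 *   cpu0 360 0 40372 12418 52115 27451 4265657950 2331586 46806
 *   domain0 03 266 250 10 6 4 0 0 266 ...
 *
 * The header is followed by one "cpu%d" line per online CPU and, under
 * CONFIG_SMP, one "domain%d" line per scheduling domain of that CPU: the
 * domain's CPU mask, eight load-balance counters for each idle type, and
 * the twelve alb/sbe/sbf/ttwu counters.
 */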

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
        unsigned long n = *offset;

        if (n == 0)
                return (void *) 1;

        n--;

        if (n > 0)
                n = cpumask_next(n - 1, cpu_online_mask);
        else
                n = cpumask_first(cpu_online_mask);

        *offset = n + 1;

        if (n < nr_cpu_ids)
                return (void *)(unsigned long)(n + 2);

        return NULL;
}
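
/*
 * Illustrative trace of one read (not part of the kernel source), assuming
 * CPUs 0, 2 and 3 are online. Each line is one schedstat_start() call;
 * schedstat_next() increments *offset between calls:
 *
 *   *offset == 0: return (void *)1                            header lines
 *   *offset == 1: cpumask_first() = 0, *offset = 1, return (void *)2  cpu0
 *   *offset == 2: cpumask_next(0) = 2, *offset = 3, return (void *)4  cpu2
 *   *offset == 4: cpumask_next(2) = 3, *offset = 4, return (void *)5  cpu3
 *   *offset == 5: cpumask_next(3) >= nr_cpu_ids, return NULL          done
 *
 * Storing n + 1 in *offset re-anchors the position to CPU ids, which is why
 * *offset == 3 never occurs and the hole at offline CPU 1 is skipped;
 * show_schedstat() recovers the CPU number as v - 2.
 */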

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;

        return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
        .start = schedstat_start,
        .next  = schedstat_next,
        .stop  = schedstat_stop,
        .show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
        proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
        return 0;
}
subsys_initcall(proc_schedstat_init);
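
/*
 * Minimal userspace sketch (not part of the kernel source) showing how a
 * tool might consume the file registered above. The parsing follows the
 * seq_printf() format in show_schedstat(); the run_delay and pcount names
 * mirror the rq_sched_info fields printed there.
 */
#if 0   /* example only, build separately as a userspace program */
#include <stdio.h>

int main(void)
{
        char line[4096];
        FILE *f = fopen("/proc/schedstat", "r");

        if (!f) {
                perror("/proc/schedstat");
                return 1;
        }

        while (fgets(line, sizeof(line), f)) {
                int cpu;
                unsigned long long run_delay;
                unsigned long pcount;

                /* cpu%d %u 0 %u %u %u %u %llu %llu %lu */
                if (sscanf(line,
                           "cpu%d %*u %*u %*u %*u %*u %*u %*llu %llu %lu",
                           &cpu, &run_delay, &pcount) == 3)
                        printf("cpu%d: %llu ns spent waiting over %lu timeslices\n",
                               cpu, run_delay, pcount);
        }

        fclose(f);
        return 0;
}
#endif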
