
TOMOYO Linux Cross Reference
Linux/kernel/sched/psi.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Pressure stall information for CPU, memory and IO
  4  *
  5  * Copyright (c) 2018 Facebook, Inc.
  6  * Author: Johannes Weiner <hannes@cmpxchg.org>
  7  *
  8  * Polling support by Suren Baghdasaryan <surenb@google.com>
  9  * Copyright (c) 2018 Google, Inc.
 10  *
 11  * When CPU, memory and IO are contended, tasks experience delays that
 12  * reduce throughput and introduce latencies into the workload. Memory
 13  * and IO contention, in addition, can cause a full loss of forward
 14  * progress in which the CPU goes idle.
 15  *
 16  * This code aggregates individual task delays into resource pressure
 17  * metrics that indicate problems with both workload health and
 18  * resource utilization.
 19  *
 20  *                      Model
 21  *
 22  * The time in which a task can execute on a CPU is our baseline for
 23  * productivity. Pressure expresses the amount of time in which this
 24  * potential cannot be realized due to resource contention.
 25  *
 26  * This concept of productivity has two components: the workload and
 27  * the CPU. To measure the impact of pressure on both, we define two
 28  * contention states for a resource: SOME and FULL.
 29  *
 30  * In the SOME state of a given resource, one or more tasks are
 31  * delayed on that resource. This affects the workload's ability to
 32  * perform work, but the CPU may still be executing other tasks.
 33  *
 34  * In the FULL state of a given resource, all non-idle tasks are
 35  * delayed on that resource such that nobody is advancing and the CPU
 36  * goes idle. This leaves both workload and CPU unproductive.
 37  *
 38  *      SOME = nr_delayed_tasks != 0
 39  *      FULL = nr_delayed_tasks != 0 && nr_productive_tasks == 0
 40  *
 41  * What it means for a task to be productive is defined differently
 42  * for each resource. For IO, productive means a running task. For
 43  * memory, productive means a running task that isn't a reclaimer. For
 44  * CPU, productive means an on-CPU task.
 45  *
 46  * Naturally, the FULL state doesn't exist for the CPU resource at the
 47  * system level, but exists at the cgroup level. At the cgroup level,
 48  * FULL means all non-idle tasks in the cgroup are delayed on the CPU
 49  * resource which is being used by others outside of the cgroup or
 50  * throttled by the cgroup cpu.max configuration.
 51  *
 52  * The percentage of wall clock time spent in those compound stall
 53  * states gives pressure numbers between 0 and 100 for each resource,
 54  * where the SOME percentage indicates workload slowdowns and the FULL
 55  * percentage indicates reduced CPU utilization:
 56  *
 57  *      %SOME = time(SOME) / period
 58  *      %FULL = time(FULL) / period
 59  *
 60  *                      Multiple CPUs
 61  *
 62  * The more tasks and available CPUs there are, the more work can be
 63  * performed concurrently. This means that the potential that can go
 64  * unrealized due to resource contention *also* scales with non-idle
 65  * tasks and CPUs.
 66  *
 67  * Consider a scenario where 257 number crunching tasks are trying to
 68  * run concurrently on 256 CPUs. If we simply aggregated the task
 69  * states, we would have to conclude a CPU SOME pressure number of
 70  * 100%, since *somebody* is waiting on a runqueue at all
 71  * times. However, that is clearly not the amount of contention the
 72  * workload is experiencing: only one out of 256 possible execution
 73  * threads will be contended at any given time, or about 0.4%.
 74  *
 75  * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 76  * given time *one* of the tasks is delayed due to a lack of memory.
 77  * Again, looking purely at the task state would yield a memory FULL
 78  * pressure number of 0%, since *somebody* is always making forward
 79  * progress. But again this wouldn't capture the amount of execution
 80  * potential lost, which is 1 out of 4 CPUs, or 25%.
 81  *
 82  * To calculate wasted potential (pressure) with multiple processors,
 83  * we have to base our calculation on the number of non-idle tasks in
 84  * conjunction with the number of available CPUs, which is the number
 85  * of potential execution threads. SOME then becomes the proportion of
 86  * delayed tasks to possible threads, and FULL is the share of possible
 87  * threads that are unproductive due to delays:
 88  *
 89  *      threads = min(nr_nonidle_tasks, nr_cpus)
 90  *         SOME = min(nr_delayed_tasks / threads, 1)
 91  *         FULL = (threads - min(nr_productive_tasks, threads)) / threads
 92  *
 93  * For the 257 number crunchers on 256 CPUs, this yields:
 94  *
 95  *      threads = min(257, 256)
 96  *         SOME = min(1 / 256, 1)             = 0.4%
 97  *         FULL = (256 - min(256, 256)) / 256 = 0%
 98  *
 99  * For the 1 out of 4 memory-delayed tasks, this yields:
100  *
101  *      threads = min(4, 4)
102  *         SOME = min(1 / 4, 1)               = 25%
103  *         FULL = (4 - min(3, 4)) / 4         = 25%
104  *
105  * [ Substitute nr_cpus with 1, and you can see that it's a natural
106  *   extension of the single-CPU model. ]
107  *
108  *                      Implementation
109  *
110  * To assess the precise time spent in each such state, we would have
111  * to freeze the system on task changes and start/stop the state
112  * clocks accordingly. Obviously that doesn't scale in practice.
113  *
114  * Because the scheduler aims to distribute the compute load evenly
115  * among the available CPUs, we can track task state locally to each
116  * CPU and, at much lower frequency, extrapolate the global state for
117  * the cumulative stall times and the running averages.
118  *
119  * For each runqueue, we track:
120  *
121  *         tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
122  *         tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_productive_tasks[cpu])
123  *      tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
124  *
125  * and then periodically aggregate:
126  *
127  *      tNONIDLE = sum(tNONIDLE[i])
128  *
129  *         tSOME = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
130  *         tFULL = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
131  *
132  *         %SOME = tSOME / period
133  *         %FULL = tFULL / period
134  *
135  * This gives us an approximation of pressure that is practical
136  * cost-wise, yet way more sensitive and accurate than periodic
137  * sampling of the aggregate task states would be.
138  */
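
To make the model arithmetic above concrete, here is a standalone sketch (plain userspace C, not part of this file; psi_share() and its inputs are invented for illustration) that evaluates the two scenarios from the comment using the SOME/FULL formulas:

        #include <stdio.h>

        /* Evaluate the multi-CPU SOME/FULL shares from the model above. */
        static void psi_share(unsigned int nr_cpus, unsigned int nr_nonidle,
                              unsigned int nr_delayed, unsigned int nr_productive)
        {
                unsigned int threads = nr_nonidle < nr_cpus ? nr_nonidle : nr_cpus;
                unsigned int prod = nr_productive < threads ? nr_productive : threads;
                double some = (double)nr_delayed / threads;
                double full = (double)(threads - prod) / threads;

                if (some > 1.0)
                        some = 1.0;
                printf("threads=%u SOME=%.1f%% FULL=%.1f%%\n",
                       threads, some * 100.0, full * 100.0);
        }

        int main(void)
        {
                psi_share(256, 257, 1, 256);    /* 257 crunchers on 256 CPUs: 0.4% and 0% */
                psi_share(4, 4, 1, 3);          /* 1 of 4 tasks memory-delayed: 25% and 25% */
                return 0;
        }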
139 
140 static int psi_bug __read_mostly;
141 
142 DEFINE_STATIC_KEY_FALSE(psi_disabled);
143 static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);
144 
145 #ifdef CONFIG_PSI_DEFAULT_DISABLED
146 static bool psi_enable;
147 #else
148 static bool psi_enable = true;
149 #endif
150 static int __init setup_psi(char *str)
151 {
152         return kstrtobool(str, &psi_enable) == 0;
153 }
154 __setup("psi=", setup_psi);
155 
156 /* Running averages - we need to be higher-res than loadavg */
157 #define PSI_FREQ        (2*HZ+1)        /* 2 sec intervals */
158 #define EXP_10s         1677            /* 1/exp(2s/10s) as fixed-point */
159 #define EXP_60s         1981            /* 1/exp(2s/60s) */
160 #define EXP_300s        2034            /* 1/exp(2s/300s) */
161 
162 /* PSI trigger definitions */
163 #define WINDOW_MAX_US 10000000  /* Max window size is 10s */
164 #define UPDATES_PER_WINDOW 10   /* 10 updates per window */
165 
166 /* Sampling frequency in nanoseconds */
167 static u64 psi_period __read_mostly;
168 
169 /* System-level pressure and stall tracking */
170 static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
171 struct psi_group psi_system = {
172         .pcpu = &system_group_pcpu,
173 };
174 
175 static void psi_avgs_work(struct work_struct *work);
176 
177 static void poll_timer_fn(struct timer_list *t);
178 
179 static void group_init(struct psi_group *group)
180 {
181         int cpu;
182 
183         group->enabled = true;
184         for_each_possible_cpu(cpu)
185                 seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
186         group->avg_last_update = sched_clock();
187         group->avg_next_update = group->avg_last_update + psi_period;
188         mutex_init(&group->avgs_lock);
189 
190         /* Init avg trigger-related members */
191         INIT_LIST_HEAD(&group->avg_triggers);
192         memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
193         INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
194 
195         /* Init rtpoll trigger-related members */
196         atomic_set(&group->rtpoll_scheduled, 0);
197         mutex_init(&group->rtpoll_trigger_lock);
198         INIT_LIST_HEAD(&group->rtpoll_triggers);
199         group->rtpoll_min_period = U32_MAX;
200         group->rtpoll_next_update = ULLONG_MAX;
201         init_waitqueue_head(&group->rtpoll_wait);
202         timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
203         rcu_assign_pointer(group->rtpoll_task, NULL);
204 }
205 
206 void __init psi_init(void)
207 {
208         if (!psi_enable) {
209                 static_branch_enable(&psi_disabled);
210                 static_branch_disable(&psi_cgroups_enabled);
211                 return;
212         }
213 
214         if (!cgroup_psi_enabled())
215                 static_branch_disable(&psi_cgroups_enabled);
216 
217         psi_period = jiffies_to_nsecs(PSI_FREQ);
218         group_init(&psi_system);
219 }
220 
221 static u32 test_states(unsigned int *tasks, u32 state_mask)
222 {
223         const bool oncpu = state_mask & PSI_ONCPU;
224 
225         if (tasks[NR_IOWAIT]) {
226                 state_mask |= BIT(PSI_IO_SOME);
227                 if (!tasks[NR_RUNNING])
228                         state_mask |= BIT(PSI_IO_FULL);
229         }
230 
231         if (tasks[NR_MEMSTALL]) {
232                 state_mask |= BIT(PSI_MEM_SOME);
233                 if (tasks[NR_RUNNING] == tasks[NR_MEMSTALL_RUNNING])
234                         state_mask |= BIT(PSI_MEM_FULL);
235         }
236 
237         if (tasks[NR_RUNNING] > oncpu)
238                 state_mask |= BIT(PSI_CPU_SOME);
239 
240         if (tasks[NR_RUNNING] && !oncpu)
241                 state_mask |= BIT(PSI_CPU_FULL);
242 
243         if (tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] || tasks[NR_RUNNING])
244                 state_mask |= BIT(PSI_NONIDLE);
245 
246         return state_mask;
247 }
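
A quick worked example (hypothetical task counts, not taken from the source): with tasks[NR_IOWAIT] = 1, tasks[NR_RUNNING] = 0 and PSI_ONCPU clear, test_states() returns PSI_IO_SOME | PSI_IO_FULL | PSI_NONIDLE. Add one runnable task and the IO state drops back to SOME only, but PSI_CPU_SOME and PSI_CPU_FULL are now set, since a task is runnable while nothing from this group occupies the CPU.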
248 
249 static void get_recent_times(struct psi_group *group, int cpu,
250                              enum psi_aggregators aggregator, u32 *times,
251                              u32 *pchanged_states)
252 {
253         struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
254         int current_cpu = raw_smp_processor_id();
255         unsigned int tasks[NR_PSI_TASK_COUNTS];
256         u64 now, state_start;
257         enum psi_states s;
258         unsigned int seq;
259         u32 state_mask;
260 
261         *pchanged_states = 0;
262 
263         /* Snapshot a coherent view of the CPU state */
264         do {
265                 seq = read_seqcount_begin(&groupc->seq);
266                 now = cpu_clock(cpu);
267                 memcpy(times, groupc->times, sizeof(groupc->times));
268                 state_mask = groupc->state_mask;
269                 state_start = groupc->state_start;
270                 if (cpu == current_cpu)
271                         memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
272         } while (read_seqcount_retry(&groupc->seq, seq));
273 
274         /* Calculate state time deltas against the previous snapshot */
275         for (s = 0; s < NR_PSI_STATES; s++) {
276                 u32 delta;
277                 /*
278                  * In addition to already concluded states, we also
279                  * incorporate currently active states on the CPU,
280                  * since states may last for many sampling periods.
281                  *
282                  * This way we keep our delta sampling buckets small
283                  * (u32) and our reported pressure close to what's
284                  * actually happening.
285                  */
286                 if (state_mask & (1 << s))
287                         times[s] += now - state_start;
288 
289                 delta = times[s] - groupc->times_prev[aggregator][s];
290                 groupc->times_prev[aggregator][s] = times[s];
291 
292                 times[s] = delta;
293                 if (delta)
294                         *pchanged_states |= (1 << s);
295         }
296 
297         /*
298          * When collect_percpu_times() is called from avgs_work, we don't
299          * want to re-arm avgs_work when all CPUs are IDLE. But the CPU
300          * running this avgs_work is never IDLE, because avgs_work can't be
301          * shut off. So for the current CPU we need to re-arm avgs_work only
302          * when (NR_RUNNING > 1 || NR_IOWAIT > 0 || NR_MEMSTALL > 0); for
303          * other CPUs we can just check the PSI_NONIDLE delta.
304          */
305         if (current_work() == &group->avgs_work.work) {
306                 bool reschedule;
307 
308                 if (cpu == current_cpu)
309                         reschedule = tasks[NR_RUNNING] +
310                                      tasks[NR_IOWAIT] +
311                                      tasks[NR_MEMSTALL] > 1;
312                 else
313                         reschedule = *pchanged_states & (1 << PSI_NONIDLE);
314 
315                 if (reschedule)
316                         *pchanged_states |= PSI_STATE_RESCHEDULE;
317         }
318 }
319 
320 static void calc_avgs(unsigned long avg[3], int missed_periods,
321                       u64 time, u64 period)
322 {
323         unsigned long pct;
324 
325         /* Fill in zeroes for periods of no activity */
326         if (missed_periods) {
327                 avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
328                 avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
329                 avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
330         }
331 
332         /* Sample the most recent active period */
333         pct = div_u64(time * 100, period);
334         pct *= FIXED_1;
335         avg[0] = calc_load(avg[0], EXP_10s, pct);
336         avg[1] = calc_load(avg[1], EXP_60s, pct);
337         avg[2] = calc_load(avg[2], EXP_300s, pct);
338 }
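
The EXP_* constants consumed here are fixed-point decay factors for calc_load(); assuming the usual FIXED_1 = 2048 from the loadavg code, each one is 2048/e^(2s/T) for its averaging horizon T, and calc_load() essentially computes avg = (avg * exp + pct * (FIXED_1 - exp)) / FIXED_1:

        EXP_10s  = 2048 / e^(2/10)  ≈ 2048 * 0.8187 ≈ 1677
        EXP_60s  = 2048 / e^(2/60)  ≈ 2048 * 0.9672 ≈ 1981
        EXP_300s = 2048 / e^(2/300) ≈ 2048 * 0.9934 ≈ 2034

So one idle 2s period decays the 10s average to roughly 82% of its previous value, which is what the missed_periods path above applies repeatedly through calc_load_n().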
339 
340 static void collect_percpu_times(struct psi_group *group,
341                                  enum psi_aggregators aggregator,
342                                  u32 *pchanged_states)
343 {
344         u64 deltas[NR_PSI_STATES - 1] = { 0, };
345         unsigned long nonidle_total = 0;
346         u32 changed_states = 0;
347         int cpu;
348         int s;
349 
350         /*
351          * Collect the per-cpu time buckets and average them into a
352          * single time sample that is normalized to wall clock time.
353          *
354          * For averaging, each CPU is weighted by its non-idle time in
355          * the sampling period. This eliminates artifacts from uneven
356          * loading, or even entirely idle CPUs.
357          */
358         for_each_possible_cpu(cpu) {
359                 u32 times[NR_PSI_STATES];
360                 u32 nonidle;
361                 u32 cpu_changed_states;
362 
363                 get_recent_times(group, cpu, aggregator, times,
364                                 &cpu_changed_states);
365                 changed_states |= cpu_changed_states;
366 
367                 nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
368                 nonidle_total += nonidle;
369 
370                 for (s = 0; s < PSI_NONIDLE; s++)
371                         deltas[s] += (u64)times[s] * nonidle;
372         }
373 
374         /*
375          * Integrate the sample into the running statistics that are
376          * reported to userspace: the cumulative stall times and the
377          * decaying averages.
378          *
379          * Pressure percentages are sampled at PSI_FREQ. We might be
380          * called more often when the user polls more frequently than
381          * that; we might be called less often when there is no task
382          * activity, thus no data, and clock ticks are sporadic. The
383          * below handles both.
384          */
385 
386         /* total= */
387         for (s = 0; s < NR_PSI_STATES - 1; s++)
388                 group->total[aggregator][s] +=
389                                 div_u64(deltas[s], max(nonidle_total, 1UL));
390 
391         if (pchanged_states)
392                 *pchanged_states = changed_states;
393 }
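
To illustrate the non-idle weighting with made-up numbers: if CPU0 was non-idle for 2s of the period with 1s of that in SOME, and CPU1 was non-idle for only 0.5s, all of it in SOME, the aggregate is (1s*2 + 0.5s*0.5) / (2 + 0.5) = 0.9s of SOME time, rather than the 0.75s a naive per-CPU average would report; a mostly idle CPU contributes correspondingly little to the global numbers.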
394 
395 /* Trigger tracking window manipulations */
396 static void window_reset(struct psi_window *win, u64 now, u64 value,
397                          u64 prev_growth)
398 {
399         win->start_time = now;
400         win->start_value = value;
401         win->prev_growth = prev_growth;
402 }
403 
404 /*
405  * PSI growth tracking window update and growth calculation routine.
406  *
407  * This approximates a sliding tracking window by interpolating
408  * partially elapsed windows using historical growth data from the
409  * previous intervals. This minimizes memory requirements (by not storing
410  * all the intermediate values in the previous window) and simplifies
411  * the calculations. It works well because the PSI signal changes only in
412  * the positive direction and, over relatively small window sizes, the
413  * growth is close to linear.
414  */
415 static u64 window_update(struct psi_window *win, u64 now, u64 value)
416 {
417         u64 elapsed;
418         u64 growth;
419 
420         elapsed = now - win->start_time;
421         growth = value - win->start_value;
422         /*
423          * After each tracking window passes, win->start_value and
424          * win->start_time get reset and win->prev_growth stores
425          * the average per-window growth of the previous window.
426          * win->prev_growth is then used to interpolate additional
427          * growth from the previous window assuming it was linear.
428          */
429         if (elapsed > win->size)
430                 window_reset(win, now, value, growth);
431         else {
432                 u32 remaining;
433 
434                 remaining = win->size - elapsed;
435                 growth += div64_u64(win->prev_growth * remaining, win->size);
436         }
437 
438         return growth;
439 }
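
A worked example with invented numbers: take a 1s window whose previous window accumulated 100ms of growth (win->prev_growth). Halfway through the current window (elapsed = 500ms), 60ms of new stall has been observed, so the reported growth is 60ms + 100ms * 500ms/1s = 110ms, which update_triggers() compares against the trigger threshold. Once elapsed exceeds the window size, the window is reset and the growth observed over it becomes the new prev_growth.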
440 
441 static void update_triggers(struct psi_group *group, u64 now,
442                                                    enum psi_aggregators aggregator)
443 {
444         struct psi_trigger *t;
445         u64 *total = group->total[aggregator];
446         struct list_head *triggers;
447         u64 *aggregator_total;
448 
449         if (aggregator == PSI_AVGS) {
450                 triggers = &group->avg_triggers;
451                 aggregator_total = group->avg_total;
452         } else {
453                 triggers = &group->rtpoll_triggers;
454                 aggregator_total = group->rtpoll_total;
455         }
456 
457         /*
458          * On subsequent updates, calculate growth deltas and let
459          * watchers know when their specified thresholds are exceeded.
460          */
461         list_for_each_entry(t, triggers, node) {
462                 u64 growth;
463                 bool new_stall;
464 
465                 new_stall = aggregator_total[t->state] != total[t->state];
466 
467                 /* Check for stall activity or a previous threshold breach */
468                 if (!new_stall && !t->pending_event)
469                         continue;
470                 /*
471                  * Check for new stall activity, as well as deferred
472                  * events that occurred in the last window after the
473                  * trigger had already fired (we want to ratelimit
474                  * events without dropping any).
475                  */
476                 if (new_stall) {
477                         /* Calculate growth since last update */
478                         growth = window_update(&t->win, now, total[t->state]);
479                         if (!t->pending_event) {
480                                 if (growth < t->threshold)
481                                         continue;
482 
483                                 t->pending_event = true;
484                         }
485                 }
486                 /* Limit event signaling to once per window */
487                 if (now < t->last_event_time + t->win.size)
488                         continue;
489 
490                 /* Generate an event */
491                 if (cmpxchg(&t->event, 0, 1) == 0) {
492                         if (t->of)
493                                 kernfs_notify(t->of->kn);
494                         else
495                                 wake_up_interruptible(&t->event_wait);
496                 }
497                 t->last_event_time = now;
498                 /* Reset threshold breach flag once event got generated */
499                 t->pending_event = false;
500         }
501 }
502 
503 static u64 update_averages(struct psi_group *group, u64 now)
504 {
505         unsigned long missed_periods = 0;
506         u64 expires, period;
507         u64 avg_next_update;
508         int s;
509 
510         /* avgX= */
511         expires = group->avg_next_update;
512         if (now - expires >= psi_period)
513                 missed_periods = div_u64(now - expires, psi_period);
514 
515         /*
516          * The periodic clock tick can get delayed for various
517          * reasons, especially on loaded systems. To avoid clock
518          * drift, we schedule the clock in fixed psi_period intervals.
519          * But the deltas we sample out of the per-cpu buckets above
520          * are based on the actual time elapsing between clock ticks.
521          */
522         avg_next_update = expires + ((1 + missed_periods) * psi_period);
523         period = now - (group->avg_last_update + (missed_periods * psi_period));
524         group->avg_last_update = now;
525 
526         for (s = 0; s < NR_PSI_STATES - 1; s++) {
527                 u32 sample;
528 
529                 sample = group->total[PSI_AVGS][s] - group->avg_total[s];
530                 /*
531                  * Due to the lockless sampling of the time buckets,
532                  * recorded time deltas can slip into the next period,
533                  * which under full pressure can result in samples in
534                  * excess of the period length.
535                  *
536                  * We don't want to report nonsensical pressures in
537                  * excess of 100%, nor do we want to drop such events
538                  * on the floor. Instead we punt any overage into the
539                  * future until pressure subsides. By doing this we
540                  * don't underreport the occurring pressure curve, we
541                  * just report it delayed by one period length.
542                  *
543                  * The error isn't cumulative. As soon as another
544                  * delta slips from a period P to P+1, by definition
545                  * it frees up its time T in P.
546                  */
547                 if (sample > period)
548                         sample = period;
549                 group->avg_total[s] += sample;
550                 calc_avgs(group->avg[s], missed_periods, sample, period);
551         }
552 
553         return avg_next_update;
554 }
555 
556 static void psi_avgs_work(struct work_struct *work)
557 {
558         struct delayed_work *dwork;
559         struct psi_group *group;
560         u32 changed_states;
561         u64 now;
562 
563         dwork = to_delayed_work(work);
564         group = container_of(dwork, struct psi_group, avgs_work);
565 
566         mutex_lock(&group->avgs_lock);
567 
568         now = sched_clock();
569 
570         collect_percpu_times(group, PSI_AVGS, &changed_states);
571         /*
572          * If there is task activity, periodically fold the per-cpu
573          * times and feed samples into the running averages. If things
574          * are idle and there is no data to process, stop the clock.
575          * Once restarted, we'll catch up the running averages in one
576          * go - see calc_avgs() and missed_periods.
577          */
578         if (now >= group->avg_next_update) {
579                 update_triggers(group, now, PSI_AVGS);
580                 group->avg_next_update = update_averages(group, now);
581         }
582 
583         if (changed_states & PSI_STATE_RESCHEDULE) {
584                 schedule_delayed_work(dwork, nsecs_to_jiffies(
585                                 group->avg_next_update - now) + 1);
586         }
587 
588         mutex_unlock(&group->avgs_lock);
589 }
590 
591 static void init_rtpoll_triggers(struct psi_group *group, u64 now)
592 {
593         struct psi_trigger *t;
594 
595         list_for_each_entry(t, &group->rtpoll_triggers, node)
596                 window_reset(&t->win, now,
597                                 group->total[PSI_POLL][t->state], 0);
598         memcpy(group->rtpoll_total, group->total[PSI_POLL],
599                    sizeof(group->rtpoll_total));
600         group->rtpoll_next_update = now + group->rtpoll_min_period;
601 }
602 
603 /* Schedule rtpolling if it's not already scheduled or forced. */
604 static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
605                                    bool force)
606 {
607         struct task_struct *task;
608 
609         /*
610          * atomic_xchg should be called even when !force to provide a
611          * full memory barrier (see the comment inside psi_rtpoll_work).
612          */
613         if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
614                 return;
615 
616         rcu_read_lock();
617 
618         task = rcu_dereference(group->rtpoll_task);
619         /*
620          * kworker might be NULL in case psi_trigger_destroy races with
621          * psi_task_change (hotpath) which can't use locks
622          */
623         if (likely(task))
624                 mod_timer(&group->rtpoll_timer, jiffies + delay);
625         else
626                 atomic_set(&group->rtpoll_scheduled, 0);
627 
628         rcu_read_unlock();
629 }
630 
631 static void psi_rtpoll_work(struct psi_group *group)
632 {
633         bool force_reschedule = false;
634         u32 changed_states;
635         u64 now;
636 
637         mutex_lock(&group->rtpoll_trigger_lock);
638 
639         now = sched_clock();
640 
641         if (now > group->rtpoll_until) {
642                 /*
643                  * We are either about to start or might stop rtpolling if no
644                  * state change was recorded. Resetting rtpoll_scheduled leaves
645                  * a small window for psi_group_change to sneak in and schedule
646                  * an immediate rtpoll_work before we get to rescheduling. One
647                  * potential extra wakeup at the end of the rtpolling window
648                  * should be negligible and rtpoll_next_update still keeps
649                  * updates correctly on schedule.
650                  */
651                 atomic_set(&group->rtpoll_scheduled, 0);
652                 /*
653                  * A task change can race with the rtpoll worker that is supposed to
654                  * report on it. To avoid missing events, ensure ordering between
655                  * rtpoll_scheduled and the task state accesses, such that if the
656                  * rtpoll worker misses the state update, the task change is
657                  * guaranteed to reschedule the rtpoll worker:
658                  *
659                  * rtpoll worker:
660                  *   atomic_set(rtpoll_scheduled, 0)
661                  *   smp_mb()
662                  *   LOAD states
663                  *
664                  * task change:
665                  *   STORE states
666                  *   if atomic_xchg(rtpoll_scheduled, 1) == 0:
667                  *     schedule rtpoll worker
668                  *
669                  * The atomic_xchg() implies a full barrier.
670                  */
671                 smp_mb();
672         } else {
673                 /* The rtpolling window is not over, keep rescheduling */
674                 force_reschedule = true;
675         }
676 
677 
678         collect_percpu_times(group, PSI_POLL, &changed_states);
679 
680         if (changed_states & group->rtpoll_states) {
681                 /* Initialize trigger windows when entering rtpolling mode */
682                 if (now > group->rtpoll_until)
683                         init_rtpoll_triggers(group, now);
684 
685                 /*
686                  * Keep the monitor active for at least the duration of the
687                  * minimum tracking window as long as monitor states are
688                  * changing.
689                  */
690                 group->rtpoll_until = now +
691                         group->rtpoll_min_period * UPDATES_PER_WINDOW;
692         }
693 
694         if (now > group->rtpoll_until) {
695                 group->rtpoll_next_update = ULLONG_MAX;
696                 goto out;
697         }
698 
699         if (now >= group->rtpoll_next_update) {
700                 if (changed_states & group->rtpoll_states) {
701                         update_triggers(group, now, PSI_POLL);
702                         memcpy(group->rtpoll_total, group->total[PSI_POLL],
703                                    sizeof(group->rtpoll_total));
704                 }
705                 group->rtpoll_next_update = now + group->rtpoll_min_period;
706         }
707 
708         psi_schedule_rtpoll_work(group,
709                 nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
710                 force_reschedule);
711 
712 out:
713         mutex_unlock(&group->rtpoll_trigger_lock);
714 }
715 
716 static int psi_rtpoll_worker(void *data)
717 {
718         struct psi_group *group = (struct psi_group *)data;
719 
720         sched_set_fifo_low(current);
721 
722         while (true) {
723                 wait_event_interruptible(group->rtpoll_wait,
724                                 atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
725                                 kthread_should_stop());
726                 if (kthread_should_stop())
727                         break;
728 
729                 psi_rtpoll_work(group);
730         }
731         return 0;
732 }
733 
734 static void poll_timer_fn(struct timer_list *t)
735 {
736         struct psi_group *group = from_timer(group, t, rtpoll_timer);
737 
738         atomic_set(&group->rtpoll_wakeup, 1);
739         wake_up_interruptible(&group->rtpoll_wait);
740 }
741 
742 static void record_times(struct psi_group_cpu *groupc, u64 now)
743 {
744         u32 delta;
745 
746         delta = now - groupc->state_start;
747         groupc->state_start = now;
748 
749         if (groupc->state_mask & (1 << PSI_IO_SOME)) {
750                 groupc->times[PSI_IO_SOME] += delta;
751                 if (groupc->state_mask & (1 << PSI_IO_FULL))
752                         groupc->times[PSI_IO_FULL] += delta;
753         }
754 
755         if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
756                 groupc->times[PSI_MEM_SOME] += delta;
757                 if (groupc->state_mask & (1 << PSI_MEM_FULL))
758                         groupc->times[PSI_MEM_FULL] += delta;
759         }
760 
761         if (groupc->state_mask & (1 << PSI_CPU_SOME)) {
762                 groupc->times[PSI_CPU_SOME] += delta;
763                 if (groupc->state_mask & (1 << PSI_CPU_FULL))
764                         groupc->times[PSI_CPU_FULL] += delta;
765         }
766 
767         if (groupc->state_mask & (1 << PSI_NONIDLE))
768                 groupc->times[PSI_NONIDLE] += delta;
769 }
770 
771 static void psi_group_change(struct psi_group *group, int cpu,
772                              unsigned int clear, unsigned int set,
773                              bool wake_clock)
774 {
775         struct psi_group_cpu *groupc;
776         unsigned int t, m;
777         u32 state_mask;
778         u64 now;
779 
780         lockdep_assert_rq_held(cpu_rq(cpu));
781         groupc = per_cpu_ptr(group->pcpu, cpu);
782 
783         /*
784          * First we update the task counts according to the state
785          * change requested through the @clear and @set bits.
786          *
787          * Then if the cgroup PSI stats accounting enabled, we
788          * Then, if cgroup PSI stats accounting is enabled, we
789          * have been in since the last change, and account any
790          * SOME and FULL time these may have resulted in.
791          */
792         write_seqcount_begin(&groupc->seq);
793         now = cpu_clock(cpu);
794 
795         /*
796          * Start with TSK_ONCPU, which doesn't have a corresponding
797          * task count - it's just a boolean flag directly encoded in
798          * the state mask. Clear, set, or carry the current state if
799          * no changes are requested.
800          */
801         if (unlikely(clear & TSK_ONCPU)) {
802                 state_mask = 0;
803                 clear &= ~TSK_ONCPU;
804         } else if (unlikely(set & TSK_ONCPU)) {
805                 state_mask = PSI_ONCPU;
806                 set &= ~TSK_ONCPU;
807         } else {
808                 state_mask = groupc->state_mask & PSI_ONCPU;
809         }
810 
811         /*
812          * The rest of the state mask is calculated based on the task
813          * counts. Update those first, then construct the mask.
814          */
815         for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
816                 if (!(m & (1 << t)))
817                         continue;
818                 if (groupc->tasks[t]) {
819                         groupc->tasks[t]--;
820                 } else if (!psi_bug) {
821                         printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
822                                         cpu, t, groupc->tasks[0],
823                                         groupc->tasks[1], groupc->tasks[2],
824                                         groupc->tasks[3], clear, set);
825                         psi_bug = 1;
826                 }
827         }
828 
829         for (t = 0; set; set &= ~(1 << t), t++)
830                 if (set & (1 << t))
831                         groupc->tasks[t]++;
832 
833         if (!group->enabled) {
834                 /*
835                  * On the first group change after disabling PSI, conclude
836                  * the current state and flush its time. This is unlikely
837                  * to matter to the user, but aggregation (get_recent_times)
838                  * may have already incorporated the live state into times_prev;
839                  * avoid a delta sample underflow when PSI is later re-enabled.
840                  */
841                 if (unlikely(groupc->state_mask & (1 << PSI_NONIDLE)))
842                         record_times(groupc, now);
843 
844                 groupc->state_mask = state_mask;
845 
846                 write_seqcount_end(&groupc->seq);
847                 return;
848         }
849 
850         state_mask = test_states(groupc->tasks, state_mask);
851 
852         /*
853          * Since we care about lost potential, a memstall is FULL
854          * when there are no other working tasks, but also when
855          * the CPU is actively reclaiming and nothing productive
856          * could run even if it were runnable. So when the current
857          * task in a cgroup is in_memstall, the corresponding groupc
858          * on that cpu is in PSI_MEM_FULL state.
859          */
860         if (unlikely((state_mask & PSI_ONCPU) && cpu_curr(cpu)->in_memstall))
861                 state_mask |= (1 << PSI_MEM_FULL);
862 
863         record_times(groupc, now);
864 
865         groupc->state_mask = state_mask;
866 
867         write_seqcount_end(&groupc->seq);
868 
869         if (state_mask & group->rtpoll_states)
870                 psi_schedule_rtpoll_work(group, 1, false);
871 
872         if (wake_clock && !delayed_work_pending(&group->avgs_work))
873                 schedule_delayed_work(&group->avgs_work, PSI_FREQ);
874 }
875 
876 static inline struct psi_group *task_psi_group(struct task_struct *task)
877 {
878 #ifdef CONFIG_CGROUPS
879         if (static_branch_likely(&psi_cgroups_enabled))
880                 return cgroup_psi(task_dfl_cgroup(task));
881 #endif
882         return &psi_system;
883 }
884 
885 static void psi_flags_change(struct task_struct *task, int clear, int set)
886 {
887         if (((task->psi_flags & set) ||
888              (task->psi_flags & clear) != clear) &&
889             !psi_bug) {
890                 printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
891                                 task->pid, task->comm, task_cpu(task),
892                                 task->psi_flags, clear, set);
893                 psi_bug = 1;
894         }
895 
896         task->psi_flags &= ~clear;
897         task->psi_flags |= set;
898 }
899 
900 void psi_task_change(struct task_struct *task, int clear, int set)
901 {
902         int cpu = task_cpu(task);
903         struct psi_group *group;
904 
905         if (!task->pid)
906                 return;
907 
908         psi_flags_change(task, clear, set);
909 
910         group = task_psi_group(task);
911         do {
912                 psi_group_change(group, cpu, clear, set, true);
913         } while ((group = group->parent));
914 }
915 
916 void psi_task_switch(struct task_struct *prev, struct task_struct *next,
917                      bool sleep)
918 {
919         struct psi_group *group, *common = NULL;
920         int cpu = task_cpu(prev);
921 
922         if (next->pid) {
923                 psi_flags_change(next, 0, TSK_ONCPU);
924                 /*
925                  * Set TSK_ONCPU on @next's cgroups. If @next shares any
926                  * ancestors with @prev, those will already have @prev's
927                  * TSK_ONCPU bit set, and we can stop the iteration there.
928                  */
929                 group = task_psi_group(next);
930                 do {
931                         if (per_cpu_ptr(group->pcpu, cpu)->state_mask &
932                             PSI_ONCPU) {
933                                 common = group;
934                                 break;
935                         }
936 
937                         psi_group_change(group, cpu, 0, TSK_ONCPU, true);
938                 } while ((group = group->parent));
939         }
940 
941         if (prev->pid) {
942                 int clear = TSK_ONCPU, set = 0;
943                 bool wake_clock = true;
944 
945                 /*
946                  * When we're going to sleep, psi_dequeue() lets us
947                  * handle TSK_RUNNING, TSK_MEMSTALL_RUNNING and
948                  * TSK_IOWAIT here, where we can combine it with
949                  * TSK_ONCPU and save walking common ancestors twice.
950                  */
951                 if (sleep) {
952                         clear |= TSK_RUNNING;
953                         if (prev->in_memstall)
954                                 clear |= TSK_MEMSTALL_RUNNING;
955                         if (prev->in_iowait)
956                                 set |= TSK_IOWAIT;
957 
958                         /*
959                          * Periodic aggregation shuts off if there is a period of no
960                          * task changes, so we wake it back up if necessary. However,
961                          * don't do this if the task change is the aggregation worker
962                          * itself going to sleep, or we'll ping-pong forever.
963                          */
964                         if (unlikely((prev->flags & PF_WQ_WORKER) &&
965                                      wq_worker_last_func(prev) == psi_avgs_work))
966                                 wake_clock = false;
967                 }
968 
969                 psi_flags_change(prev, clear, set);
970 
971                 group = task_psi_group(prev);
972                 do {
973                         if (group == common)
974                                 break;
975                         psi_group_change(group, cpu, clear, set, wake_clock);
976                 } while ((group = group->parent));
977 
978                 /*
979                  * TSK_ONCPU is handled up to the common ancestor. If there are
980                  * any other differences between the two tasks (e.g. prev goes
981                  * to sleep, or only one task is memstall), finish propagating
982                  * those differences all the way up to the root.
983                  */
984                 if ((prev->psi_flags ^ next->psi_flags) & ~TSK_ONCPU) {
985                         clear &= ~TSK_ONCPU;
986                         for (; group; group = group->parent)
987                                 psi_group_change(group, cpu, clear, set, wake_clock);
988                 }
989         }
990 }
991 
992 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
993 void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
994 {
995         int cpu = task_cpu(curr);
996         struct psi_group *group;
997         struct psi_group_cpu *groupc;
998         s64 delta;
999         u64 irq;
1000 
1001         if (static_branch_likely(&psi_disabled))
1002                 return;
1003 
1004         if (!curr->pid)
1005                 return;
1006 
1007         lockdep_assert_rq_held(rq);
1008         group = task_psi_group(curr);
1009         if (prev && task_psi_group(prev) == group)
1010                 return;
1011 
1012         irq = irq_time_read(cpu);
1013         delta = (s64)(irq - rq->psi_irq_time);
1014         if (delta < 0)
1015                 return;
1016         rq->psi_irq_time = irq;
1017 
1018         do {
1019                 u64 now;
1020 
1021                 if (!group->enabled)
1022                         continue;
1023 
1024                 groupc = per_cpu_ptr(group->pcpu, cpu);
1025 
1026                 write_seqcount_begin(&groupc->seq);
1027                 now = cpu_clock(cpu);
1028 
1029                 record_times(groupc, now);
1030                 groupc->times[PSI_IRQ_FULL] += delta;
1031 
1032                 write_seqcount_end(&groupc->seq);
1033 
1034                 if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
1035                         psi_schedule_rtpoll_work(group, 1, false);
1036         } while ((group = group->parent));
1037 }
1038 #endif
1039 
1040 /**
1041  * psi_memstall_enter - mark the beginning of a memory stall section
1042  * @flags: flags to handle nested sections
1043  *
1044  * Marks the calling task as being stalled due to a lack of memory,
1045  * such as waiting for a refault or performing reclaim.
1046  */
1047 void psi_memstall_enter(unsigned long *flags)
1048 {
1049         struct rq_flags rf;
1050         struct rq *rq;
1051 
1052         if (static_branch_likely(&psi_disabled))
1053                 return;
1054 
1055         *flags = current->in_memstall;
1056         if (*flags)
1057                 return;
1058         /*
1059          * in_memstall setting & accounting needs to be atomic wrt
1060          * changes to the task's scheduling state, otherwise we can
1061          * race with CPU migration.
1062          */
1063         rq = this_rq_lock_irq(&rf);
1064 
1065         current->in_memstall = 1;
1066         psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
1067 
1068         rq_unlock_irq(rq, &rf);
1069 }
1070 EXPORT_SYMBOL_GPL(psi_memstall_enter);
1071 
1072 /**
1073  * psi_memstall_leave - mark the end of a memory stall section
1074  * @flags: flags to handle nested memdelay sections
1075  *
1076  * Marks the calling task as no longer stalled due to lack of memory.
1077  */
1078 void psi_memstall_leave(unsigned long *flags)
1079 {
1080         struct rq_flags rf;
1081         struct rq *rq;
1082 
1083         if (static_branch_likely(&psi_disabled))
1084                 return;
1085 
1086         if (*flags)
1087                 return;
1088         /*
1089          * in_memstall clearing & accounting needs to be atomic wrt
1090          * changes to the task's scheduling state, otherwise we could
1091          * race with CPU migration.
1092          */
1093         rq = this_rq_lock_irq(&rf);
1094 
1095         current->in_memstall = 0;
1096         psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
1097 
1098         rq_unlock_irq(rq, &rf);
1099 }
1100 EXPORT_SYMBOL_GPL(psi_memstall_leave);
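
Kernel callers bracket a stalled section with this pair; a minimal usage sketch (hypothetical caller, mirroring how reclaim and refault paths typically use it):

        unsigned long pflags;

        psi_memstall_enter(&pflags);
        /* ... wait for a refaulting page, or perform direct reclaim ... */
        psi_memstall_leave(&pflags);

Because psi_memstall_enter() records whether the task was already in a memstall section and psi_memstall_leave() bails out if so, nesting is safe: only the outermost pair actually toggles current->in_memstall.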
1101 
1102 #ifdef CONFIG_CGROUPS
1103 int psi_cgroup_alloc(struct cgroup *cgroup)
1104 {
1105         if (!static_branch_likely(&psi_cgroups_enabled))
1106                 return 0;
1107 
1108         cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
1109         if (!cgroup->psi)
1110                 return -ENOMEM;
1111 
1112         cgroup->psi->pcpu = alloc_percpu(struct psi_group_cpu);
1113         if (!cgroup->psi->pcpu) {
1114                 kfree(cgroup->psi);
1115                 return -ENOMEM;
1116         }
1117         group_init(cgroup->psi);
1118         cgroup->psi->parent = cgroup_psi(cgroup_parent(cgroup));
1119         return 0;
1120 }
1121 
1122 void psi_cgroup_free(struct cgroup *cgroup)
1123 {
1124         if (!static_branch_likely(&psi_cgroups_enabled))
1125                 return;
1126 
1127         cancel_delayed_work_sync(&cgroup->psi->avgs_work);
1128         free_percpu(cgroup->psi->pcpu);
1129         /* All triggers must be removed by now */
1130         WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
1131         kfree(cgroup->psi);
1132 }
1133 
1134 /**
1135  * cgroup_move_task - move task to a different cgroup
1136  * @task: the task
1137  * @to: the target css_set
1138  *
1139  * Move task to a new cgroup and safely migrate its associated stall
1140  * state between the different groups.
1141  *
1142  * This function acquires the task's rq lock to lock out concurrent
1143  * changes to the task's scheduling state and - in case the task is
1144  * running - concurrent changes to its stall state.
1145  */
1146 void cgroup_move_task(struct task_struct *task, struct css_set *to)
1147 {
1148         unsigned int task_flags;
1149         struct rq_flags rf;
1150         struct rq *rq;
1151 
1152         if (!static_branch_likely(&psi_cgroups_enabled)) {
1153                 /*
1154                  * Lame to do this here, but the scheduler cannot be locked
1155                  * from the outside, so we move cgroups from inside sched/.
1156                  */
1157                 rcu_assign_pointer(task->cgroups, to);
1158                 return;
1159         }
1160 
1161         rq = task_rq_lock(task, &rf);
1162 
1163         /*
1164          * We may race with schedule() dropping the rq lock between
1165          * deactivating prev and switching to next. Because the psi
1166          * updates from the deactivation are deferred to the switch
1167          * callback to save cgroup tree updates, the task's scheduling
1168          * state here is not coherent with its psi state:
1169          *
1170          * schedule()                   cgroup_move_task()
1171          *   rq_lock()
1172          *   deactivate_task()
1173          *     p->on_rq = 0
1174          *     psi_dequeue() // defers TSK_RUNNING & TSK_IOWAIT updates
1175          *   pick_next_task()
1176          *     rq_unlock()
1177          *                                rq_lock()
1178          *                                psi_task_change() // old cgroup
1179          *                                task->cgroups = to
1180          *                                psi_task_change() // new cgroup
1181          *                                rq_unlock()
1182          *     rq_lock()
1183          *   psi_sched_switch() // does deferred updates in new cgroup
1184          *
1185          * Don't rely on the scheduling state. Use psi_flags instead.
1186          */
1187         task_flags = task->psi_flags;
1188 
1189         if (task_flags)
1190                 psi_task_change(task, task_flags, 0);
1191 
1192         /* See comment above */
1193         rcu_assign_pointer(task->cgroups, to);
1194 
1195         if (task_flags)
1196                 psi_task_change(task, 0, task_flags);
1197 
1198         task_rq_unlock(rq, task, &rf);
1199 }
1200 
1201 void psi_cgroup_restart(struct psi_group *group)
1202 {
1203         int cpu;
1204 
1205         /*
1206          * After psi_group->enabled is cleared, we don't actually
1207          * stop per-cpu task accounting in each psi_group_cpu; we
1208          * only stop the test_states() loop, record_times() and the
1209          * averaging worker, see psi_group_change() for details.
1210          *
1211          * When cgroup PSI is disabled, this function has nothing to sync
1212          * since the cgroup pressure files are hidden and the percpu
1213          * psi_group_cpu would see !psi_group->enabled and only do task accounting.
1214          *
1215          * When cgroup PSI is re-enabled, this function uses psi_group_change()
1216          * to get the correct state mask from the test_states() loop on tasks[],
1217          * and restarts groupc->state_start from now; use .clear = .set = 0
1218          * here since no task state really changed.
1219          */
1220         if (!group->enabled)
1221                 return;
1222 
1223         for_each_possible_cpu(cpu) {
1224                 struct rq *rq = cpu_rq(cpu);
1225                 struct rq_flags rf;
1226 
1227                 rq_lock_irq(rq, &rf);
1228                 psi_group_change(group, cpu, 0, 0, true);
1229                 rq_unlock_irq(rq, &rf);
1230         }
1231 }
1232 #endif /* CONFIG_CGROUPS */
1233 
1234 int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
1235 {
1236         bool only_full = false;
1237         int full;
1238         u64 now;
1239 
1240         if (static_branch_likely(&psi_disabled))
1241                 return -EOPNOTSUPP;
1242 
1243         /* Update averages before reporting them */
1244         mutex_lock(&group->avgs_lock);
1245         now = sched_clock();
1246         collect_percpu_times(group, PSI_AVGS, NULL);
1247         if (now >= group->avg_next_update)
1248                 group->avg_next_update = update_averages(group, now);
1249         mutex_unlock(&group->avgs_lock);
1250 
1251 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1252         only_full = res == PSI_IRQ;
1253 #endif
1254 
1255         for (full = 0; full < 2 - only_full; full++) {
1256                 unsigned long avg[3] = { 0, };
1257                 u64 total = 0;
1258                 int w;
1259 
1260                 /* CPU FULL is undefined at the system level */
1261                 if (!(group == &psi_system && res == PSI_CPU && full)) {
1262                         for (w = 0; w < 3; w++)
1263                                 avg[w] = group->avg[res * 2 + full][w];
1264                         total = div_u64(group->total[PSI_AVGS][res * 2 + full],
1265                                         NSEC_PER_USEC);
1266                 }
1267 
1268                 seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
1269                            full || only_full ? "full" : "some",
1270                            LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
1271                            LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
1272                            LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
1273                            total);
1274         }
1275 
1276         return 0;
1277 }
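
The resulting records are what userspace reads from /proc/pressure/{io,memory,cpu} and the cgroup pressure files; illustrative output (numbers made up):

        some avg10=0.00 avg60=0.11 avg300=0.32 total=123456
        full avg10=0.00 avg60=0.05 avg300=0.12 total=45678

total is the cumulative stall time in microseconds, per the NSEC_PER_USEC division above.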
1278 
1279 struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
1280                                        enum psi_res res, struct file *file,
1281                                        struct kernfs_open_file *of)
1282 {
1283         struct psi_trigger *t;
1284         enum psi_states state;
1285         u32 threshold_us;
1286         bool privileged;
1287         u32 window_us;
1288 
1289         if (static_branch_likely(&psi_disabled))
1290                 return ERR_PTR(-EOPNOTSUPP);
1291 
1292         /*
1293          * Checking the privilege here on file->f_cred implies that a privileged user
1294          * could open the file and delegate the write to an unprivileged one.
1295          */
1296         privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
1297 
1298         if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
1299                 state = PSI_IO_SOME + res * 2;
1300         else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
1301                 state = PSI_IO_FULL + res * 2;
1302         else
1303                 return ERR_PTR(-EINVAL);
1304 
1305 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1306         if (res == PSI_IRQ && --state != PSI_IRQ_FULL)
1307                 return ERR_PTR(-EINVAL);
1308 #endif
1309 
1310         if (state >= PSI_NONIDLE)
1311                 return ERR_PTR(-EINVAL);
1312 
1313         if (window_us == 0 || window_us > WINDOW_MAX_US)
1314                 return ERR_PTR(-EINVAL);
1315 
1316         /*
1317          * Unprivileged users can only use window sizes that are multiples of 2s,
1318          * so that the averages aggregation work is used and no RT threads need to be spawned.
1319          */
1320         if (!privileged && window_us % 2000000)
1321                 return ERR_PTR(-EINVAL);
1322 
1323         /* Check threshold */
1324         if (threshold_us == 0 || threshold_us > window_us)
1325                 return ERR_PTR(-EINVAL);
1326 
1327         t = kmalloc(sizeof(*t), GFP_KERNEL);
1328         if (!t)
1329                 return ERR_PTR(-ENOMEM);
1330 
1331         t->group = group;
1332         t->state = state;
1333         t->threshold = threshold_us * NSEC_PER_USEC;
1334         t->win.size = window_us * NSEC_PER_USEC;
1335         window_reset(&t->win, sched_clock(),
1336                         group->total[PSI_POLL][t->state], 0);
1337 
1338         t->event = 0;
1339         t->last_event_time = 0;
1340         t->of = of;
1341         if (!of)
1342                 init_waitqueue_head(&t->event_wait);
1343         t->pending_event = false;
1344         t->aggregator = privileged ? PSI_POLL : PSI_AVGS;
1345 
1346         if (privileged) {
1347                 mutex_lock(&group->rtpoll_trigger_lock);
1348 
1349                 if (!rcu_access_pointer(group->rtpoll_task)) {
1350                         struct task_struct *task;
1351 
1352                         task = kthread_create(psi_rtpoll_worker, group, "psimon");
1353                         if (IS_ERR(task)) {
1354                                 kfree(t);
1355                                 mutex_unlock(&group->rtpoll_trigger_lock);
1356                                 return ERR_CAST(task);
1357                         }
1358                         atomic_set(&group->rtpoll_wakeup, 0);
1359                         wake_up_process(task);
1360                         rcu_assign_pointer(group->rtpoll_task, task);
1361                 }
1362 
1363                 list_add(&t->node, &group->rtpoll_triggers);
1364                 group->rtpoll_min_period = min(group->rtpoll_min_period,
1365                         div_u64(t->win.size, UPDATES_PER_WINDOW));
1366                 group->rtpoll_nr_triggers[t->state]++;
1367                 group->rtpoll_states |= (1 << t->state);
1368 
1369                 mutex_unlock(&group->rtpoll_trigger_lock);
1370         } else {
1371                 mutex_lock(&group->avgs_lock);
1372 
1373                 list_add(&t->node, &group->avg_triggers);
1374                 group->avg_nr_triggers[t->state]++;
1375 
1376                 mutex_unlock(&group->avgs_lock);
1377         }
1378         return t;
1379 }
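
/*
 * Example trigger strings accepted by psi_trigger_create() above (editor's
 * sketch; the values are illustrative only):
 *
 *   "some 150000 1000000"  - notify when SOME stall time exceeds 150ms within
 *                            any 1s window; requires CAP_SYS_RESOURCE since
 *                            the window is not a multiple of 2s, and is
 *                            serviced by the RT "psimon" rtpoll worker.
 *   "full 100000 2000000"  - notify on 100ms of FULL stall per 2s window; a
 *                            2s-multiple window is the only size an
 *                            unprivileged writer may use, because it is
 *                            serviced by the regular averages work instead.
 */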
1380 
1381 void psi_trigger_destroy(struct psi_trigger *t)
1382 {
1383         struct psi_group *group;
1384         struct task_struct *task_to_destroy = NULL;
1385 
1386         /*
1387          * We do not check psi_disabled since it might have been disabled after
1388          * the trigger got created.
1389          */
1390         if (!t)
1391                 return;
1392 
1393         group = t->group;
1394         /*
1395          * Wake up waiters to stop polling and clear the queue to prevent it from
1396          * being accessed later. This can happen if the cgroup is deleted from under
1397          * a polling process.
1398          */
1399         if (t->of)
1400                 kernfs_notify(t->of->kn);
1401         else
1402                 wake_up_interruptible(&t->event_wait);
1403 
1404         if (t->aggregator == PSI_AVGS) {
1405                 mutex_lock(&group->avgs_lock);
1406                 if (!list_empty(&t->node)) {
1407                         list_del(&t->node);
1408                         group->avg_nr_triggers[t->state]--;
1409                 }
1410                 mutex_unlock(&group->avgs_lock);
1411         } else {
1412                 mutex_lock(&group->rtpoll_trigger_lock);
1413                 if (!list_empty(&t->node)) {
1414                         struct psi_trigger *tmp;
1415                         u64 period = ULLONG_MAX;
1416 
1417                         list_del(&t->node);
1418                         group->rtpoll_nr_triggers[t->state]--;
1419                         if (!group->rtpoll_nr_triggers[t->state])
1420                                 group->rtpoll_states &= ~(1 << t->state);
1421                         /*
1422                          * Reset the min update period for the remaining triggers
1423                          * iff the trigger being destroyed had the min window size.
1424                          */
1425                         if (group->rtpoll_min_period == div_u64(t->win.size, UPDATES_PER_WINDOW)) {
1426                                 list_for_each_entry(tmp, &group->rtpoll_triggers, node)
1427                                         period = min(period, div_u64(tmp->win.size,
1428                                                         UPDATES_PER_WINDOW));
1429                                 group->rtpoll_min_period = period;
1430                         }
1431                         /* Destroy rtpoll_task when the last trigger is destroyed */
1432                         if (group->rtpoll_states == 0) {
1433                                 group->rtpoll_until = 0;
1434                                 task_to_destroy = rcu_dereference_protected(
1435                                                 group->rtpoll_task,
1436                                                 lockdep_is_held(&group->rtpoll_trigger_lock));
1437                                 rcu_assign_pointer(group->rtpoll_task, NULL);
1438                                 del_timer(&group->rtpoll_timer);
1439                         }
1440                 }
1441                 mutex_unlock(&group->rtpoll_trigger_lock);
1442         }
1443 
1444         /*
1445          * Wait for the RCU read-side critical section in
1446          * psi_schedule_rtpoll_work() to complete before destroying the
1447          * trigger and, optionally, the rtpoll_task.
1448          */
1449         synchronize_rcu();
1450         /*
1451          * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
1452          * a deadlock while waiting for psi_rtpoll_work to acquire
1453          * rtpoll_trigger_lock.
1454          */
1455         if (task_to_destroy) {
1456                 /*
1457                  * After the RCU grace period has expired, the worker
1458                  * can no longer be found through group->rtpoll_task.
1459                  */
1460                 kthread_stop(task_to_destroy);
1461                 atomic_set(&group->rtpoll_scheduled, 0);
1462         }
1463         kfree(t);
1464 }
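
/*
 * Editor's summary of the teardown ordering above: the trigger is unlinked
 * under the appropriate lock, synchronize_rcu() then lets any in-flight
 * psi_schedule_rtpoll_work() reader finish, and only afterwards is the psimon
 * kthread stopped (outside rtpoll_trigger_lock, so the worker can still take
 * that lock while exiting) before the trigger itself is freed.
 */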
1465 
1466 __poll_t psi_trigger_poll(void **trigger_ptr,
1467                                 struct file *file, poll_table *wait)
1468 {
1469         __poll_t ret = DEFAULT_POLLMASK;
1470         struct psi_trigger *t;
1471 
1472         if (static_branch_likely(&psi_disabled))
1473                 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1474 
1475         t = smp_load_acquire(trigger_ptr);
1476         if (!t)
1477                 return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
1478 
1479         if (t->of)
1480                 kernfs_generic_poll(t->of, wait);
1481         else
1482                 poll_wait(file, &t->event_wait, wait);
1483 
1484         if (cmpxchg(&t->event, 1, 0) == 1)
1485                 ret |= EPOLLPRI;
1486 
1487         return ret;
1488 }
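
/*
 * Userspace usage sketch (editor's addition, not kernel source), based on the
 * documented /proc/pressure interface and the poll handler above. Error
 * handling is omitted and the values are illustrative;
 * handle_pressure_event() is a hypothetical callback:
 *
 *   int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
 *   const char trig[] = "some 150000 1000000";
 *
 *   write(fd, trig, strlen(trig) + 1);        // arm the trigger
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *   while (poll(&pfd, 1, -1) > 0) {
 *           if (pfd.revents & POLLERR)
 *                   break;                    // e.g. cgroup removed
 *           if (pfd.revents & POLLPRI)
 *                   handle_pressure_event();  // hypothetical consumer
 *   }
 *   close(fd);
 */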
1489 
1490 #ifdef CONFIG_PROC_FS
1491 static int psi_io_show(struct seq_file *m, void *v)
1492 {
1493         return psi_show(m, &psi_system, PSI_IO);
1494 }
1495 
1496 static int psi_memory_show(struct seq_file *m, void *v)
1497 {
1498         return psi_show(m, &psi_system, PSI_MEM);
1499 }
1500 
1501 static int psi_cpu_show(struct seq_file *m, void *v)
1502 {
1503         return psi_show(m, &psi_system, PSI_CPU);
1504 }
1505 
1506 static int psi_io_open(struct inode *inode, struct file *file)
1507 {
1508         return single_open(file, psi_io_show, NULL);
1509 }
1510 
1511 static int psi_memory_open(struct inode *inode, struct file *file)
1512 {
1513         return single_open(file, psi_memory_show, NULL);
1514 }
1515 
1516 static int psi_cpu_open(struct inode *inode, struct file *file)
1517 {
1518         return single_open(file, psi_cpu_show, NULL);
1519 }
1520 
1521 static ssize_t psi_write(struct file *file, const char __user *user_buf,
1522                          size_t nbytes, enum psi_res res)
1523 {
1524         char buf[32];
1525         size_t buf_size;
1526         struct seq_file *seq;
1527         struct psi_trigger *new;
1528 
1529         if (static_branch_likely(&psi_disabled))
1530                 return -EOPNOTSUPP;
1531 
1532         if (!nbytes)
1533                 return -EINVAL;
1534 
1535         buf_size = min(nbytes, sizeof(buf));
1536         if (copy_from_user(buf, user_buf, buf_size))
1537                 return -EFAULT;
1538 
1539         buf[buf_size - 1] = '\0';
1540 
1541         seq = file->private_data;
1542 
1543         /* Take seq->lock to protect seq->private from concurrent writes */
1544         mutex_lock(&seq->lock);
1545 
1546         /* Allow only one trigger per file descriptor */
1547         if (seq->private) {
1548                 mutex_unlock(&seq->lock);
1549                 return -EBUSY;
1550         }
1551 
1552         new = psi_trigger_create(&psi_system, buf, res, file, NULL);
1553         if (IS_ERR(new)) {
1554                 mutex_unlock(&seq->lock);
1555                 return PTR_ERR(new);
1556         }
1557 
1558         smp_store_release(&seq->private, new);
1559         mutex_unlock(&seq->lock);
1560 
1561         return nbytes;
1562 }
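
/*
 * Editor's note: seq->private holds at most one trigger, so a second write to
 * a file descriptor that already armed one fails:
 *
 *   write(fd, "some 150000 1000000", 20);   // succeeds, returns nbytes
 *   write(fd, "full 100000 1000000", 20);   // fails with EBUSY
 */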
1563 
1564 static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
1565                             size_t nbytes, loff_t *ppos)
1566 {
1567         return psi_write(file, user_buf, nbytes, PSI_IO);
1568 }
1569 
1570 static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
1571                                 size_t nbytes, loff_t *ppos)
1572 {
1573         return psi_write(file, user_buf, nbytes, PSI_MEM);
1574 }
1575 
1576 static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
1577                              size_t nbytes, loff_t *ppos)
1578 {
1579         return psi_write(file, user_buf, nbytes, PSI_CPU);
1580 }
1581 
1582 static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
1583 {
1584         struct seq_file *seq = file->private_data;
1585 
1586         return psi_trigger_poll(&seq->private, file, wait);
1587 }
1588 
1589 static int psi_fop_release(struct inode *inode, struct file *file)
1590 {
1591         struct seq_file *seq = file->private_data;
1592 
1593         psi_trigger_destroy(seq->private);
1594         return single_release(inode, file);
1595 }
1596 
1597 static const struct proc_ops psi_io_proc_ops = {
1598         .proc_open      = psi_io_open,
1599         .proc_read      = seq_read,
1600         .proc_lseek     = seq_lseek,
1601         .proc_write     = psi_io_write,
1602         .proc_poll      = psi_fop_poll,
1603         .proc_release   = psi_fop_release,
1604 };
1605 
1606 static const struct proc_ops psi_memory_proc_ops = {
1607         .proc_open      = psi_memory_open,
1608         .proc_read      = seq_read,
1609         .proc_lseek     = seq_lseek,
1610         .proc_write     = psi_memory_write,
1611         .proc_poll      = psi_fop_poll,
1612         .proc_release   = psi_fop_release,
1613 };
1614 
1615 static const struct proc_ops psi_cpu_proc_ops = {
1616         .proc_open      = psi_cpu_open,
1617         .proc_read      = seq_read,
1618         .proc_lseek     = seq_lseek,
1619         .proc_write     = psi_cpu_write,
1620         .proc_poll      = psi_fop_poll,
1621         .proc_release   = psi_fop_release,
1622 };
1623 
1624 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1625 static int psi_irq_show(struct seq_file *m, void *v)
1626 {
1627         return psi_show(m, &psi_system, PSI_IRQ);
1628 }
1629 
1630 static int psi_irq_open(struct inode *inode, struct file *file)
1631 {
1632         return single_open(file, psi_irq_show, NULL);
1633 }
1634 
1635 static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
1636                              size_t nbytes, loff_t *ppos)
1637 {
1638         return psi_write(file, user_buf, nbytes, PSI_IRQ);
1639 }
1640 
1641 static const struct proc_ops psi_irq_proc_ops = {
1642         .proc_open      = psi_irq_open,
1643         .proc_read      = seq_read,
1644         .proc_lseek     = seq_lseek,
1645         .proc_write     = psi_irq_write,
1646         .proc_poll      = psi_fop_poll,
1647         .proc_release   = psi_fop_release,
1648 };
1649 #endif
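
/*
 * Editor's note: with CONFIG_IRQ_TIME_ACCOUNTING, psi_trigger_create()
 * rejects SOME triggers for the IRQ resource, so only strings such as
 * "full 100000 2000000" can be written to /proc/pressure/irq; a "some ..."
 * trigger returns EINVAL there.
 */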
1650 
1651 static int __init psi_proc_init(void)
1652 {
1653         if (psi_enable) {
1654                 proc_mkdir("pressure", NULL);
1655                 proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops);
1656                 proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops);
1657                 proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops);
1658 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
1659                 proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops);
1660 #endif
1661         }
1662         return 0;
1663 }
1664 module_init(psi_proc_init);
1665 
1666 #endif /* CONFIG_PROC_FS */
1667 
