
TOMOYO Linux Cross Reference
Linux/kernel/sched/deadline.c


Diff markup

Differences between /kernel/sched/deadline.c (Version linux-6.12-rc7) and /kernel/sched/deadline.c (Version linux-4.4.302)


  1 // SPDX-License-Identifier: GPL-2.0            << 
  2 /*                                                  1 /*
  3  * Deadline Scheduling Class (SCHED_DEADLINE)       2  * Deadline Scheduling Class (SCHED_DEADLINE)
  4  *                                                  3  *
  5  * Earliest Deadline First (EDF) + Constant Ba      4  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
  6  *                                                  5  *
  7  * Tasks that periodically executes their inst      6  * Tasks that periodically executes their instances for less than their
  8  * runtime won't miss any of their deadlines.       7  * runtime won't miss any of their deadlines.
  9  * Tasks that are not periodic or sporadic or       8  * Tasks that are not periodic or sporadic or that tries to execute more
 10  * than their reserved bandwidth will be slowe      9  * than their reserved bandwidth will be slowed down (and may potentially
 11  * miss some of their deadlines), and won't af     10  * miss some of their deadlines), and won't affect any other task.
 12  *                                                 11  *
 13  * Copyright (C) 2012 Dario Faggioli <raistlin     12  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 14  *                    Juri Lelli <juri.lelli@g     13  *                    Juri Lelli <juri.lelli@gmail.com>,
 15  *                    Michael Trimarchi <micha     14  *                    Michael Trimarchi <michael@amarulasolutions.com>,
 16  *                    Fabio Checconi <fcheccon     15  *                    Fabio Checconi <fchecconi@gmail.com>
 17  */                                                16  */
                                                   >>  17 #include "sched.h"
 18                                                    18 
 19 #include <linux/cpuset.h>                      !!  19 #include <linux/slab.h>
 20                                                    20 
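The header comment above describes SCHED_DEADLINE in terms of a per-task runtime, deadline and period. As a point of reference only (this is not part of deadline.c), the sketch below shows how a periodic userspace task would typically request such a reservation through sched_setattr(2). The struct layout mirrors the uapi definition and is declared locally so the example is self-contained; the raw syscall is used because glibc does not provide a wrapper.

/* Illustrative userspace sketch (not part of this file): ask the kernel
 * for a CBS reservation of 10ms of runtime every 100ms. Needs
 * CAP_SYS_NICE (or equivalent) to succeed. */
#define _GNU_SOURCE
#include <linux/sched.h>        /* SCHED_DEADLINE */
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Mirrors include/uapi/linux/sched/types.h; defined here so the example
 * builds even where that header is not installed. */
struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;         /* these three are in nanoseconds */
        uint64_t sched_deadline;
        uint64_t sched_period;
};

static int become_deadline_task(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;        /*  10 ms */
        attr.sched_deadline = 100 * 1000 * 1000;        /* 100 ms */
        attr.sched_period   = 100 * 1000 * 1000;        /* 100 ms */

        /* pid 0 means the calling thread; the last argument is flags. */
        return syscall(SYS_sched_setattr, 0, &attr, 0);
}
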
 21 /*                                             !!  21 struct dl_bandwidth def_dl_bandwidth;
 22  * Default limits for DL period; on the top en << 
 23  * tasks still getting ridiculously long effec << 
 24  * guard against timer DoS.                    << 
 25  */                                            << 
 26 static unsigned int sysctl_sched_dl_period_max << 
 27 static unsigned int sysctl_sched_dl_period_min << 
 28 #ifdef CONFIG_SYSCTL                           << 
 29 static struct ctl_table sched_dl_sysctls[] = { << 
 30         {                                      << 
 31                 .procname       = "sched_deadl << 
 32                 .data           = &sysctl_sche << 
 33                 .maxlen         = sizeof(unsig << 
 34                 .mode           = 0644,        << 
 35                 .proc_handler   = proc_douintv << 
 36                 .extra1         = (void *)&sys << 
 37         },                                     << 
 38         {                                      << 
 39                 .procname       = "sched_deadl << 
 40                 .data           = &sysctl_sche << 
 41                 .maxlen         = sizeof(unsig << 
 42                 .mode           = 0644,        << 
 43                 .proc_handler   = proc_douintv << 
 44                 .extra2         = (void *)&sys << 
 45         },                                     << 
 46 };                                             << 
 47                                                << 
 48 static int __init sched_dl_sysctl_init(void)   << 
 49 {                                              << 
 50         register_sysctl_init("kernel", sched_d << 
 51         return 0;                              << 
 52 }                                              << 
 53 late_initcall(sched_dl_sysctl_init);           << 
 54 #endif                                         << 
 55                                                << 
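The sysctl table above registers the minimum and maximum allowed deadline period mentioned in the comment preceding it. As a sketch of how such bounds are typically applied at admission time (the real validation happens in __checkparam_dl(), further down in this file and outside this excerpt; the helper name below is hypothetical):

/* Sketch only: the sysctl values are kept in microseconds, while
 * sched_attr carries nanoseconds, hence the NSEC_PER_USEC scaling. */
static bool dl_period_within_sysctl_bounds(u64 period_ns)
{
        u64 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
        u64 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;

        return period_ns >= min && period_ns <= max;
}
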
 56 static bool dl_server(struct sched_dl_entity * << 
 57 {                                              << 
 58         return dl_se->dl_server;               << 
 59 }                                              << 
 60                                                    22 
 61 static inline struct task_struct *dl_task_of(s     23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
 62 {                                                  24 {
 63         BUG_ON(dl_server(dl_se));              << 
 64         return container_of(dl_se, struct task     25         return container_of(dl_se, struct task_struct, dl);
 65 }                                                  26 }
 66                                                    27 
 67 static inline struct rq *rq_of_dl_rq(struct dl     28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
 68 {                                                  29 {
 69         return container_of(dl_rq, struct rq,      30         return container_of(dl_rq, struct rq, dl);
 70 }                                                  31 }
 71                                                    32 
 72 static inline struct rq *rq_of_dl_se(struct sc << 
 73 {                                              << 
 74         struct rq *rq = dl_se->rq;             << 
 75                                                << 
 76         if (!dl_server(dl_se))                 << 
 77                 rq = task_rq(dl_task_of(dl_se) << 
 78                                                << 
 79         return rq;                             << 
 80 }                                              << 
 81                                                << 
 82 static inline struct dl_rq *dl_rq_of_se(struct     33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
 83 {                                                  34 {
 84         return &rq_of_dl_se(dl_se)->dl;        !!  35         struct task_struct *p = dl_task_of(dl_se);
                                                   >>  36         struct rq *rq = task_rq(p);
                                                   >>  37 
                                                   >>  38         return &rq->dl;
 85 }                                                  39 }
 86                                                    40 
 87 static inline int on_dl_rq(struct sched_dl_ent     41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 88 {                                                  42 {
 89         return !RB_EMPTY_NODE(&dl_se->rb_node)     43         return !RB_EMPTY_NODE(&dl_se->rb_node);
 90 }                                                  44 }
 91                                                    45 
 92 #ifdef CONFIG_RT_MUTEXES                       !!  46 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 93 static inline struct sched_dl_entity *pi_of(st << 
 94 {                                              << 
 95         return dl_se->pi_se;                   << 
 96 }                                              << 
 97                                                << 
 98 static inline bool is_dl_boosted(struct sched_ << 
 99 {                                              << 
100         return pi_of(dl_se) != dl_se;          << 
101 }                                              << 
102 #else                                          << 
103 static inline struct sched_dl_entity *pi_of(st << 
104 {                                              << 
105         return dl_se;                          << 
106 }                                              << 
107                                                << 
108 static inline bool is_dl_boosted(struct sched_ << 
109 {                                              << 
110         return false;                          << 
111 }                                              << 
112 #endif                                         << 
113                                                << 
114 #ifdef CONFIG_SMP                              << 
115 static inline struct dl_bw *dl_bw_of(int i)    << 
116 {                                              << 
117         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_ << 
118                          "sched RCU must be he << 
119         return &cpu_rq(i)->rd->dl_bw;          << 
120 }                                              << 
121                                                << 
122 static inline int dl_bw_cpus(int i)            << 
123 {                                              << 
124         struct root_domain *rd = cpu_rq(i)->rd << 
125         int cpus;                              << 
126                                                << 
127         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_ << 
128                          "sched RCU must be he << 
129                                                << 
130         if (cpumask_subset(rd->span, cpu_activ << 
131                 return cpumask_weight(rd->span << 
132                                                << 
133         cpus = 0;                              << 
134                                                << 
135         for_each_cpu_and(i, rd->span, cpu_acti << 
136                 cpus++;                        << 
137                                                << 
138         return cpus;                           << 
139 }                                              << 
140                                                << 
141 static inline unsigned long __dl_bw_capacity(c << 
142 {                                                  47 {
143         unsigned long cap = 0;                 !!  48         struct sched_dl_entity *dl_se = &p->dl;
144         int i;                                 << 
145                                                << 
146         for_each_cpu_and(i, mask, cpu_active_m << 
147                 cap += arch_scale_cpu_capacity << 
148                                                << 
149         return cap;                            << 
150 }                                              << 
151                                                << 
152 /*                                             << 
153  * XXX Fix: If 'rq->rd == def_root_domain' per << 
154  * of the CPU the task is running on rather rd << 
155  */                                            << 
156 static inline unsigned long dl_bw_capacity(int << 
157 {                                              << 
158         if (!sched_asym_cpucap_active() &&     << 
159             arch_scale_cpu_capacity(i) == SCHE << 
160                 return dl_bw_cpus(i) << SCHED_ << 
161         } else {                               << 
162                 RCU_LOCKDEP_WARN(!rcu_read_loc << 
163                                  "sched RCU mu << 
164                                                << 
165                 return __dl_bw_capacity(cpu_rq << 
166         }                                      << 
167 }                                              << 
168                                                << 
169 static inline bool dl_bw_visited(int cpu, u64  << 
170 {                                              << 
171         struct root_domain *rd = cpu_rq(cpu)-> << 
172                                                << 
173         if (rd->visit_gen == gen)              << 
174                 return true;                   << 
175                                                << 
176         rd->visit_gen = gen;                   << 
177         return false;                          << 
178 }                                              << 
179                                                << 
180 static inline                                  << 
181 void __dl_update(struct dl_bw *dl_b, s64 bw)   << 
182 {                                              << 
183         struct root_domain *rd = container_of( << 
184         int i;                                 << 
185                                                << 
186         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_ << 
187                          "sched RCU must be he << 
188         for_each_cpu_and(i, rd->span, cpu_acti << 
189                 struct rq *rq = cpu_rq(i);     << 
190                                                << 
191                 rq->dl.extra_bw += bw;         << 
192         }                                      << 
193 }                                              << 
194 #else                                          << 
195 static inline struct dl_bw *dl_bw_of(int i)    << 
196 {                                              << 
197         return &cpu_rq(i)->dl.dl_bw;           << 
198 }                                              << 
199                                                << 
200 static inline int dl_bw_cpus(int i)            << 
201 {                                              << 
202         return 1;                              << 
203 }                                              << 
204                                                << 
205 static inline unsigned long dl_bw_capacity(int << 
206 {                                              << 
207         return SCHED_CAPACITY_SCALE;           << 
208 }                                              << 
209                                                << 
210 static inline bool dl_bw_visited(int cpu, u64  << 
211 {                                              << 
212         return false;                          << 
213 }                                              << 
214                                                << 
215 static inline                                  << 
216 void __dl_update(struct dl_bw *dl_b, s64 bw)   << 
217 {                                              << 
218         struct dl_rq *dl = container_of(dl_b,  << 
219                                                << 
220         dl->extra_bw += bw;                    << 
221 }                                              << 
222 #endif                                         << 
223                                                << 
224 static inline                                  << 
225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw,  << 
226 {                                              << 
227         dl_b->total_bw -= tsk_bw;              << 
228         __dl_update(dl_b, (s32)tsk_bw / cpus); << 
229 }                                              << 
230                                                << 
231 static inline                                  << 
232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw,  << 
233 {                                              << 
234         dl_b->total_bw += tsk_bw;              << 
235         __dl_update(dl_b, -((s32)tsk_bw / cpus << 
236 }                                              << 
237                                                << 
238 static inline bool                             << 
239 __dl_overflow(struct dl_bw *dl_b, unsigned lon << 
240 {                                              << 
241         return dl_b->bw != -1 &&               << 
242                cap_scale(dl_b->bw, cap) < dl_b << 
243 }                                              << 
244                                                << 
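The admission test in __dl_overflow() above (truncated on the left) is easier to follow with concrete numbers. Assuming the usual scale factors, BW_SHIFT == 20 for bandwidths and SCHED_CAPACITY_SCALE == 1024 for CPU capacities, a worked example:

/*
 * Worked example (illustrative):
 *
 *   - root domain of 4 symmetric CPUs  ->  cap = 4 * 1024 = 4096
 *   - dl_b->bw = 95% of one CPU        ->  0.95 * 2^20 = 996147
 *   - available bandwidth              ->  cap_scale(996147, 4096)
 *                                          = 996147 * 4096 >> 10
 *                                          = 3984588  (~3.8 CPUs' worth)
 *
 * A task asking for 25ms every 100ms carries new_bw = 0.25 * 2^20 =
 * 262144 and is admitted as long as total_bw - old_bw + new_bw stays
 * below 3984588; dl_b->bw == -1 disables the check entirely.
 */
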
245 static inline                                  << 
246 void __add_running_bw(u64 dl_bw, struct dl_rq  << 
247 {                                              << 
248         u64 old = dl_rq->running_bw;           << 
249                                                << 
250         lockdep_assert_rq_held(rq_of_dl_rq(dl_ << 
251         dl_rq->running_bw += dl_bw;            << 
252         SCHED_WARN_ON(dl_rq->running_bw < old) << 
253         SCHED_WARN_ON(dl_rq->running_bw > dl_r << 
254         /* kick cpufreq (see the comment in ke << 
255         cpufreq_update_util(rq_of_dl_rq(dl_rq) << 
256 }                                              << 
257                                                << 
258 static inline                                  << 
259 void __sub_running_bw(u64 dl_bw, struct dl_rq  << 
260 {                                              << 
261         u64 old = dl_rq->running_bw;           << 
262                                                << 
263         lockdep_assert_rq_held(rq_of_dl_rq(dl_ << 
264         dl_rq->running_bw -= dl_bw;            << 
265         SCHED_WARN_ON(dl_rq->running_bw > old) << 
266         if (dl_rq->running_bw > old)           << 
267                 dl_rq->running_bw = 0;         << 
268         /* kick cpufreq (see the comment in ke << 
269         cpufreq_update_util(rq_of_dl_rq(dl_rq) << 
270 }                                              << 
271                                                << 
272 static inline                                  << 
273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_r << 
274 {                                              << 
275         u64 old = dl_rq->this_bw;              << 
276                                                << 
277         lockdep_assert_rq_held(rq_of_dl_rq(dl_ << 
278         dl_rq->this_bw += dl_bw;               << 
279         SCHED_WARN_ON(dl_rq->this_bw < old); / << 
280 }                                              << 
281                                                << 
282 static inline                                  << 
283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_r << 
284 {                                              << 
285         u64 old = dl_rq->this_bw;              << 
286                                                << 
287         lockdep_assert_rq_held(rq_of_dl_rq(dl_ << 
288         dl_rq->this_bw -= dl_bw;               << 
289         SCHED_WARN_ON(dl_rq->this_bw > old); / << 
290         if (dl_rq->this_bw > old)              << 
291                 dl_rq->this_bw = 0;            << 
292         SCHED_WARN_ON(dl_rq->running_bw > dl_r << 
293 }                                              << 
294                                                << 
295 static inline                                  << 
296 void add_rq_bw(struct sched_dl_entity *dl_se,  << 
297 {                                              << 
298         if (!dl_entity_is_special(dl_se))      << 
299                 __add_rq_bw(dl_se->dl_bw, dl_r << 
300 }                                              << 
301                                                << 
302 static inline                                  << 
303 void sub_rq_bw(struct sched_dl_entity *dl_se,  << 
304 {                                              << 
305         if (!dl_entity_is_special(dl_se))      << 
306                 __sub_rq_bw(dl_se->dl_bw, dl_r << 
307 }                                              << 
308                                                << 
309 static inline                                  << 
310 void add_running_bw(struct sched_dl_entity *dl << 
311 {                                              << 
312         if (!dl_entity_is_special(dl_se))      << 
313                 __add_running_bw(dl_se->dl_bw, << 
314 }                                              << 
315                                                << 
316 static inline                                  << 
317 void sub_running_bw(struct sched_dl_entity *dl << 
318 {                                              << 
319         if (!dl_entity_is_special(dl_se))      << 
320                 __sub_running_bw(dl_se->dl_bw, << 
321 }                                              << 
322                                                << 
323 static void dl_rq_change_utilization(struct rq << 
324 {                                              << 
325         if (dl_se->dl_non_contending) {        << 
326                 sub_running_bw(dl_se, &rq->dl) << 
327                 dl_se->dl_non_contending = 0;  << 
328                                                << 
329                 /*                             << 
330                  * If the timer handler is cur << 
331                  * timer cannot be canceled, i << 
332                  * will see that dl_not_conten << 
333                  * will not touch the rq's act << 
334                  * so we are still safe.       << 
335                  */                            << 
336                 if (hrtimer_try_to_cancel(&dl_ << 
337                         if (!dl_server(dl_se)) << 
338                                 put_task_struc << 
339                 }                              << 
340         }                                      << 
341         __sub_rq_bw(dl_se->dl_bw, &rq->dl);    << 
342         __add_rq_bw(new_bw, &rq->dl);          << 
343 }                                              << 
344                                                << 
345 static void dl_change_utilization(struct task_ << 
346 {                                              << 
347         WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_ << 
348                                                << 
349         if (task_on_rq_queued(p))              << 
350                 return;                        << 
351                                                << 
352         dl_rq_change_utilization(task_rq(p), & << 
353 }                                              << 
354                                                << 
355 static void __dl_clear_params(struct sched_dl_ << 
356                                                << 
357 /*                                             << 
358  * The utilization of a task cannot be immedia << 
359  * the rq active utilization (running_bw) when << 
360  * Instead, we have to wait for the so called  << 
361  *                                             << 
362  * If a task blocks before the "0-lag time", a << 
363  * timer) is armed, and running_bw is decrease << 
364  * fires.                                      << 
365  *                                             << 
366  * If the task wakes up again before the inact << 
367  * the timer is canceled, whereas if the task  << 
368  * inactive timer fired (and running_bw has be << 
369  * task's utilization has to be added to runni << 
370  * A flag in the deadline scheduling entity (d << 
371  * is used to avoid race conditions between th << 
372  * and task wakeups.                           << 
373  *                                             << 
374  * The following diagram shows how running_bw  << 
375  * "ACTIVE" when its utilization contributes t << 
376  * "ACTIVE contending" task is in the TASK_RUN << 
377  * "ACTIVE non contending" task is a blocked t << 
378  * has not passed yet. An "INACTIVE" task is a << 
379  * time already passed, which does not contrib << 
380  *                              +------------- << 
381  *             wakeup           |    ACTIVE    << 
382  *          +------------------>+   contending << 
383  *          | add_running_bw    |              << 
384  *          |                   +----+------+- << 
385  *          |                        |      ^  << 
386  *          |                dequeue |      |  << 
387  * +--------+-------+                |      |  << 
388  * |                |   t >= 0-lag   |      |  << 
389  * |    INACTIVE    |<---------------+      |  << 
390  * |                | sub_running_bw |      |  << 
391  * +--------+-------+                |      |  << 
392  *          ^                        |      |  << 
393  *          |              t < 0-lag |      |  << 
394  *          |                        |      |  << 
395  *          |                        V      |  << 
396  *          |                   +----+------+- << 
397  *          | sub_running_bw    |    ACTIVE    << 
398  *          +-------------------+              << 
399  *            inactive timer    |  non contend << 
400  *            fired             +------------- << 
401  *                                             << 
402  * The task_non_contending() function is invok << 
403  * blocks, and checks if the 0-lag time alread << 
404  * not (in the first case, it directly updates << 
405  * in the second case, it arms the inactive ti << 
406  *                                             << 
407  * The task_contending() function is invoked w << 
408  * up, and checks if the task is still in the  << 
409  * state or not (in the second case, it update << 
410  */                                            << 
411 static void task_non_contending(struct sched_d << 
412 {                                              << 
413         struct hrtimer *timer = &dl_se->inacti << 
414         struct rq *rq = rq_of_dl_se(dl_se);    << 
415         struct dl_rq *dl_rq = &rq->dl;         << 
416         s64 zerolag_time;                      << 
417                                                << 
418         /*                                     << 
419          * If this is a non-deadline task that << 
420          * do nothing                          << 
421          */                                    << 
422         if (dl_se->dl_runtime == 0)            << 
423                 return;                        << 
424                                                << 
425         if (dl_entity_is_special(dl_se))       << 
426                 return;                        << 
427                                                << 
428         WARN_ON(dl_se->dl_non_contending);     << 
429                                                << 
430         zerolag_time = dl_se->deadline -       << 
431                  div64_long((dl_se->runtime *  << 
432                         dl_se->dl_runtime);    << 
433                                                << 
434         /*                                     << 
435          * Using relative times instead of the << 
436          * allows to simplify the code         << 
437          */                                    << 
438         zerolag_time -= rq_clock(rq);          << 
439                                                << 
440         /*                                     << 
441          * If the "0-lag time" already passed, << 
442          * utilization now, instead of startin << 
443          */                                    << 
444         if ((zerolag_time < 0) || hrtimer_acti << 
445                 if (dl_server(dl_se)) {        << 
446                         sub_running_bw(dl_se,  << 
447                 } else {                       << 
448                         struct task_struct *p  << 
449                                                << 
450                         if (dl_task(p))        << 
451                                 sub_running_bw << 
452                                                << 
453                         if (!dl_task(p) || REA << 
454                                 struct dl_bw * << 
455                                                << 
456                                 if (READ_ONCE( << 
457                                         sub_rq << 
458                                 raw_spin_lock( << 
459                                 __dl_sub(dl_b, << 
460                                 raw_spin_unloc << 
461                                 __dl_clear_par << 
462                         }                      << 
463                 }                              << 
464                                                << 
465                 return;                        << 
466         }                                      << 
467                                                << 
468         dl_se->dl_non_contending = 1;          << 
469         if (!dl_server(dl_se))                 << 
470                 get_task_struct(dl_task_of(dl_ << 
471                                                << 
472         hrtimer_start(timer, ns_to_ktime(zerol << 
473 }                                              << 
474                                                << 
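A worked example may help with the 0-lag time computed in task_non_contending() above (numbers are made up):

/*
 * Worked example: a task with dl_runtime = 10ms and dl_period = 100ms
 * blocks while it still has runtime = 4ms left and its absolute
 * deadline is D. Its 0-lag time, as computed above, is
 *
 *     D - runtime * dl_period / dl_runtime  =  D - 4ms * 100 / 10
 *                                            =  D - 40ms
 *
 * If rq_clock() has not reached D - 40ms yet, the inactive timer is
 * armed for the remaining interval and running_bw is only decreased
 * when it fires (a wakeup before that cancels the timer instead).
 * If the 0-lag time has already passed, running_bw is decreased
 * immediately, as the early-return path above does.
 */
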
475 static void task_contending(struct sched_dl_en << 
476 {                                              << 
477         struct dl_rq *dl_rq = dl_rq_of_se(dl_s << 
478                                                << 
479         /*                                     << 
480          * If this is a non-deadline task that << 
481          * do nothing                          << 
482          */                                    << 
483         if (dl_se->dl_runtime == 0)            << 
484                 return;                        << 
485                                                << 
486         if (flags & ENQUEUE_MIGRATED)          << 
487                 add_rq_bw(dl_se, dl_rq);       << 
488                                                    49 
489         if (dl_se->dl_non_contending) {        !!  50         return dl_rq->rb_leftmost == &dl_se->rb_node;
490                 dl_se->dl_non_contending = 0;  << 
491                 /*                             << 
492                  * If the timer handler is cur << 
493                  * timer cannot be canceled, i << 
494                  * will see that dl_not_conten << 
495                  * will not touch the rq's act << 
496                  * so we are still safe.       << 
497                  */                            << 
498                 if (hrtimer_try_to_cancel(&dl_ << 
499                         if (!dl_server(dl_se)) << 
500                                 put_task_struc << 
501                 }                              << 
502         } else {                               << 
503                 /*                             << 
504                  * Since "dl_non_contending" i << 
505                  * task's utilization has alre << 
506                  * active utilization (either  << 
507                  * when the "inactive timer" f << 
508                  * So, add it back.            << 
509                  */                            << 
510                 add_running_bw(dl_se, dl_rq);  << 
511         }                                      << 
512 }                                                  51 }
513                                                    52 
514 static inline int is_leftmost(struct sched_dl_ !!  53 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
515 {                                                  54 {
516         return rb_first_cached(&dl_rq->root) = !!  55         raw_spin_lock_init(&dl_b->dl_runtime_lock);
                                                   >>  56         dl_b->dl_period = period;
                                                   >>  57         dl_b->dl_runtime = runtime;
517 }                                                  58 }
518                                                    59 
519 static void init_dl_rq_bw_ratio(struct dl_rq * << 
520                                                << 
521 void init_dl_bw(struct dl_bw *dl_b)                60 void init_dl_bw(struct dl_bw *dl_b)
522 {                                                  61 {
523         raw_spin_lock_init(&dl_b->lock);           62         raw_spin_lock_init(&dl_b->lock);
                                                   >>  63         raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
524         if (global_rt_runtime() == RUNTIME_INF     64         if (global_rt_runtime() == RUNTIME_INF)
525                 dl_b->bw = -1;                     65                 dl_b->bw = -1;
526         else                                       66         else
527                 dl_b->bw = to_ratio(global_rt_     67                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
                                                   >>  68         raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
528         dl_b->total_bw = 0;                        69         dl_b->total_bw = 0;
529 }                                                  70 }
530                                                    71 
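For orientation, the value stored in dl_b->bw by init_dl_bw() is a fixed-point ratio. Assuming the usual defaults for the RT throttling knobs, the arithmetic works out as follows:

/*
 * Worked example (default values assumed): with sched_rt_runtime_us =
 * 950000 and sched_rt_period_us = 1000000,
 *
 *     dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime())
 *              = (950000 << BW_SHIFT) / 1000000        (BW_SHIFT == 20)
 *              = 996147  ~=  0.95 * 2^20
 *
 * i.e. -deadline tasks may reserve at most ~95% of each CPU by
 * default, while dl_b->bw == -1 (RUNTIME_INF) means no limit at all,
 * as the branch above shows.
 */
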
531 void init_dl_rq(struct dl_rq *dl_rq)               72 void init_dl_rq(struct dl_rq *dl_rq)
532 {                                                  73 {
533         dl_rq->root = RB_ROOT_CACHED;          !!  74         dl_rq->rb_root = RB_ROOT;
534                                                    75 
535 #ifdef CONFIG_SMP                                  76 #ifdef CONFIG_SMP
536         /* zero means no -deadline tasks */        77         /* zero means no -deadline tasks */
537         dl_rq->earliest_dl.curr = dl_rq->earli     78         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
538                                                    79 
                                                   >>  80         dl_rq->dl_nr_migratory = 0;
539         dl_rq->overloaded = 0;                     81         dl_rq->overloaded = 0;
540         dl_rq->pushable_dl_tasks_root = RB_ROO !!  82         dl_rq->pushable_dl_tasks_root = RB_ROOT;
541 #else                                              83 #else
542         init_dl_bw(&dl_rq->dl_bw);                 84         init_dl_bw(&dl_rq->dl_bw);
543 #endif                                             85 #endif
544                                                << 
545         dl_rq->running_bw = 0;                 << 
546         dl_rq->this_bw = 0;                    << 
547         init_dl_rq_bw_ratio(dl_rq);            << 
548 }                                                  86 }
549                                                    87 
550 #ifdef CONFIG_SMP                                  88 #ifdef CONFIG_SMP
551                                                    89 
552 static inline int dl_overloaded(struct rq *rq)     90 static inline int dl_overloaded(struct rq *rq)
553 {                                                  91 {
554         return atomic_read(&rq->rd->dlo_count)     92         return atomic_read(&rq->rd->dlo_count);
555 }                                                  93 }
556                                                    94 
557 static inline void dl_set_overload(struct rq *     95 static inline void dl_set_overload(struct rq *rq)
558 {                                                  96 {
559         if (!rq->online)                           97         if (!rq->online)
560                 return;                            98                 return;
561                                                    99 
562         cpumask_set_cpu(rq->cpu, rq->rd->dlo_m    100         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
563         /*                                        101         /*
564          * Must be visible before the overload    102          * Must be visible before the overload count is
565          * set (as in sched_rt.c).                103          * set (as in sched_rt.c).
566          *                                        104          *
567          * Matched by the barrier in pull_dl_t    105          * Matched by the barrier in pull_dl_task().
568          */                                       106          */
569         smp_wmb();                                107         smp_wmb();
570         atomic_inc(&rq->rd->dlo_count);           108         atomic_inc(&rq->rd->dlo_count);
571 }                                                 109 }
572                                                   110 
573 static inline void dl_clear_overload(struct rq    111 static inline void dl_clear_overload(struct rq *rq)
574 {                                                 112 {
575         if (!rq->online)                          113         if (!rq->online)
576                 return;                           114                 return;
577                                                   115 
578         atomic_dec(&rq->rd->dlo_count);           116         atomic_dec(&rq->rd->dlo_count);
579         cpumask_clear_cpu(rq->cpu, rq->rd->dlo    117         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
580 }                                                 118 }
581                                                   119 
582 #define __node_2_pdl(node) \                   !! 120 static void update_dl_migration(struct dl_rq *dl_rq)
583         rb_entry((node), struct task_struct, p !! 121 {
                                                   >> 122         if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                                                   >> 123                 if (!dl_rq->overloaded) {
                                                   >> 124                         dl_set_overload(rq_of_dl_rq(dl_rq));
                                                   >> 125                         dl_rq->overloaded = 1;
                                                   >> 126                 }
                                                   >> 127         } else if (dl_rq->overloaded) {
                                                   >> 128                 dl_clear_overload(rq_of_dl_rq(dl_rq));
                                                   >> 129                 dl_rq->overloaded = 0;
                                                   >> 130         }
                                                   >> 131 }
584                                                   132 
585 static inline bool __pushable_less(struct rb_n !! 133 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
586 {                                                 134 {
587         return dl_entity_preempt(&__node_2_pdl !! 135         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 136 
                                                   >> 137         if (p->nr_cpus_allowed > 1)
                                                   >> 138                 dl_rq->dl_nr_migratory++;
                                                   >> 139 
                                                   >> 140         update_dl_migration(dl_rq);
588 }                                                 141 }
589                                                   142 
590 static inline int has_pushable_dl_tasks(struct !! 143 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
591 {                                                 144 {
592         return !RB_EMPTY_ROOT(&rq->dl.pushable !! 145         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 146 
                                                   >> 147         if (p->nr_cpus_allowed > 1)
                                                   >> 148                 dl_rq->dl_nr_migratory--;
                                                   >> 149 
                                                   >> 150         update_dl_migration(dl_rq);
593 }                                                 151 }
594                                                   152 
595 /*                                                153 /*
596  * The list of pushable -deadline task is not     154  * The list of pushable -deadline task is not a plist, like in
597  * sched_rt.c, it is an rb-tree with tasks ord    155  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
598  */                                               156  */
599 static void enqueue_pushable_dl_task(struct rq    157 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
600 {                                                 158 {
601         struct rb_node *leftmost;              !! 159         struct dl_rq *dl_rq = &rq->dl;
602                                                !! 160         struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
603         WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushab !! 161         struct rb_node *parent = NULL;
                                                   >> 162         struct task_struct *entry;
                                                   >> 163         int leftmost = 1;
                                                   >> 164 
                                                   >> 165         BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
                                                   >> 166 
                                                   >> 167         while (*link) {
                                                   >> 168                 parent = *link;
                                                   >> 169                 entry = rb_entry(parent, struct task_struct,
                                                   >> 170                                  pushable_dl_tasks);
                                                   >> 171                 if (dl_entity_preempt(&p->dl, &entry->dl))
                                                   >> 172                         link = &parent->rb_left;
                                                   >> 173                 else {
                                                   >> 174                         link = &parent->rb_right;
                                                   >> 175                         leftmost = 0;
                                                   >> 176                 }
                                                   >> 177         }
604                                                   178 
605         leftmost = rb_add_cached(&p->pushable_ << 
606                                  &rq->dl.pusha << 
607                                  __pushable_le << 
608         if (leftmost)                             179         if (leftmost)
609                 rq->dl.earliest_dl.next = p->d !! 180                 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
610                                                   181 
611         if (!rq->dl.overloaded) {              !! 182         rb_link_node(&p->pushable_dl_tasks, parent, link);
612                 dl_set_overload(rq);           !! 183         rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
613                 rq->dl.overloaded = 1;         << 
614         }                                      << 
615 }                                                 184 }
616                                                   185 
617 static void dequeue_pushable_dl_task(struct rq    186 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
618 {                                                 187 {
619         struct dl_rq *dl_rq = &rq->dl;            188         struct dl_rq *dl_rq = &rq->dl;
620         struct rb_root_cached *root = &dl_rq-> << 
621         struct rb_node *leftmost;              << 
622                                                   189 
623         if (RB_EMPTY_NODE(&p->pushable_dl_task    190         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
624                 return;                           191                 return;
625                                                   192 
626         leftmost = rb_erase_cached(&p->pushabl !! 193         if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
627         if (leftmost)                          !! 194                 struct rb_node *next_node;
628                 dl_rq->earliest_dl.next = __no !! 195 
                                                   >> 196                 next_node = rb_next(&p->pushable_dl_tasks);
                                                   >> 197                 dl_rq->pushable_dl_tasks_leftmost = next_node;
                                                   >> 198         }
629                                                   199 
                                                   >> 200         rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
630         RB_CLEAR_NODE(&p->pushable_dl_tasks);     201         RB_CLEAR_NODE(&p->pushable_dl_tasks);
                                                   >> 202 }
631                                                   203 
632         if (!has_pushable_dl_tasks(rq) && rq-> !! 204 static inline int has_pushable_dl_tasks(struct rq *rq)
633                 dl_clear_overload(rq);         !! 205 {
634                 rq->dl.overloaded = 0;         !! 206         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
635         }                                      << 
636 }                                                 207 }
637                                                   208 
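A short illustration of the "ordered by deadline" rb-tree maintained by enqueue_pushable_dl_task() and dequeue_pushable_dl_task() above (deadlines made up):

/*
 * Illustration: three queued -deadline tasks that are allowed to run
 * on more than one CPU, with absolute deadlines of 50ms, 70ms and
 * 90ms, all sit in pushable_dl_tasks_root ordered by deadline, so the
 * cached leftmost node is the 50ms task. That is the task
 * push_dl_task() offers first to a later-deadline CPU, and (in the
 * newer code on the left) the deadline that earliest_dl.next is set
 * to.
 */
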
638 static int push_dl_task(struct rq *rq);           209 static int push_dl_task(struct rq *rq);
639                                                   210 
640 static inline bool need_pull_dl_task(struct rq    211 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
641 {                                                 212 {
642         return rq->online && dl_task(prev);    !! 213         return dl_task(prev);
643 }                                                 214 }
644                                                   215 
645 static DEFINE_PER_CPU(struct balance_callback, !! 216 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
646 static DEFINE_PER_CPU(struct balance_callback, !! 217 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
647                                                   218 
648 static void push_dl_tasks(struct rq *);           219 static void push_dl_tasks(struct rq *);
649 static void pull_dl_task(struct rq *);            220 static void pull_dl_task(struct rq *);
650                                                   221 
651 static inline void deadline_queue_push_tasks(s !! 222 static inline void queue_push_tasks(struct rq *rq)
652 {                                                 223 {
653         if (!has_pushable_dl_tasks(rq))           224         if (!has_pushable_dl_tasks(rq))
654                 return;                           225                 return;
655                                                   226 
656         queue_balance_callback(rq, &per_cpu(dl    227         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
657 }                                                 228 }
658                                                   229 
659 static inline void deadline_queue_pull_task(st !! 230 static inline void queue_pull_task(struct rq *rq)
660 {                                                 231 {
661         queue_balance_callback(rq, &per_cpu(dl    232         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
662 }                                                 233 }
663                                                   234 
664 static struct rq *find_lock_later_rq(struct ta    235 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
665                                                   236 
666 static struct rq *dl_task_offline_migration(st    237 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
667 {                                                 238 {
668         struct rq *later_rq = NULL;               239         struct rq *later_rq = NULL;
669         struct dl_bw *dl_b;                    !! 240         bool fallback = false;
670                                                   241 
671         later_rq = find_lock_later_rq(p, rq);     242         later_rq = find_lock_later_rq(p, rq);
                                                   >> 243 
672         if (!later_rq) {                          244         if (!later_rq) {
673                 int cpu;                          245                 int cpu;
674                                                   246 
675                 /*                                247                 /*
676                  * If we cannot preempt any rq    248                  * If we cannot preempt any rq, fall back to pick any
677                  * online CPU:                 !! 249                  * online cpu.
678                  */                               250                  */
679                 cpu = cpumask_any_and(cpu_acti !! 251                 fallback = true;
                                                   >> 252                 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
680                 if (cpu >= nr_cpu_ids) {          253                 if (cpu >= nr_cpu_ids) {
681                         /*                        254                         /*
682                          * Failed to find any  !! 255                          * Fail to find any suitable cpu.
683                          * The task will never    256                          * The task will never come back!
684                          */                       257                          */
685                         WARN_ON_ONCE(dl_bandwi !! 258                         BUG_ON(dl_bandwidth_enabled());
686                                                   259 
687                         /*                        260                         /*
688                          * If admission contro    261                          * If admission control is disabled we
689                          * try a little harder    262                          * try a little harder to let the task
690                          * run.                   263                          * run.
691                          */                       264                          */
692                         cpu = cpumask_any(cpu_    265                         cpu = cpumask_any(cpu_active_mask);
693                 }                                 266                 }
694                 later_rq = cpu_rq(cpu);           267                 later_rq = cpu_rq(cpu);
695                 double_lock_balance(rq, later_    268                 double_lock_balance(rq, later_rq);
696         }                                         269         }
697                                                   270 
698         if (p->dl.dl_non_contending || p->dl.d << 
699                 /*                             << 
700                  * Inactive timer is armed (or << 
701                  * waiting for us to release r << 
702                  * will fire (or continue), it << 
703                  * task migrated to later_rq ( << 
704                  */                            << 
705                 sub_running_bw(&p->dl, &rq->dl << 
706                 sub_rq_bw(&p->dl, &rq->dl);    << 
707                                                << 
708                 add_rq_bw(&p->dl, &later_rq->d << 
709                 add_running_bw(&p->dl, &later_ << 
710         } else {                               << 
711                 sub_rq_bw(&p->dl, &rq->dl);    << 
712                 add_rq_bw(&p->dl, &later_rq->d << 
713         }                                      << 
714                                                << 
715         /*                                        271         /*
716          * And we finally need to fix up root_ !! 272          * By now the task is replenished and enqueued; migrate it.
717          * since p is still hanging out in the << 
718          * domain.                             << 
719          */                                       273          */
720         dl_b = &rq->rd->dl_bw;                 !! 274         deactivate_task(rq, p, 0);
721         raw_spin_lock(&dl_b->lock);            !! 275         set_task_cpu(p, later_rq->cpu);
722         __dl_sub(dl_b, p->dl.dl_bw, cpumask_we !! 276         activate_task(later_rq, p, 0);
723         raw_spin_unlock(&dl_b->lock);          << 
724                                                   277 
725         dl_b = &later_rq->rd->dl_bw;           !! 278         if (!fallback)
726         raw_spin_lock(&dl_b->lock);            !! 279                 resched_curr(later_rq);
727         __dl_add(dl_b, p->dl.dl_bw, cpumask_we << 
728         raw_spin_unlock(&dl_b->lock);          << 
729                                                   280 
730         set_task_cpu(p, later_rq->cpu);        << 
731         double_unlock_balance(later_rq, rq);      281         double_unlock_balance(later_rq, rq);
732                                                   282 
733         return later_rq;                          283         return later_rq;
734 }                                                 284 }
735                                                   285 
736 #else                                             286 #else
737                                                   287 
738 static inline                                     288 static inline
739 void enqueue_pushable_dl_task(struct rq *rq, s    289 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740 {                                                 290 {
741 }                                                 291 }
742                                                   292 
743 static inline                                     293 static inline
744 void dequeue_pushable_dl_task(struct rq *rq, s    294 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
745 {                                                 295 {
746 }                                                 296 }
747                                                   297 
748 static inline                                     298 static inline
749 void inc_dl_migration(struct sched_dl_entity *    299 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750 {                                                 300 {
751 }                                                 301 }
752                                                   302 
753 static inline                                     303 static inline
754 void dec_dl_migration(struct sched_dl_entity *    304 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
755 {                                                 305 {
756 }                                                 306 }
757                                                   307 
758 static inline void deadline_queue_push_tasks(s !! 308 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
759 {                                                 309 {
                                                   >> 310         return false;
760 }                                                 311 }
761                                                   312 
762 static inline void deadline_queue_pull_task(st !! 313 static inline void pull_dl_task(struct rq *rq)
763 {                                                 314 {
764 }                                                 315 }
765 #endif /* CONFIG_SMP */                        << 
766                                                << 
767 static void                                    << 
768 enqueue_dl_entity(struct sched_dl_entity *dl_s << 
769 static void enqueue_task_dl(struct rq *rq, str << 
770 static void dequeue_dl_entity(struct sched_dl_ << 
771 static void wakeup_preempt_dl(struct rq *rq, s << 
772                                                   316 
773 static inline void replenish_dl_new_period(str !! 317 static inline void queue_push_tasks(struct rq *rq)
774                                             st << 
775 {                                                 318 {
776         /* for non-boosted task, pi_of(dl_se)  !! 319 }
777         dl_se->deadline = rq_clock(rq) + pi_of << 
778         dl_se->runtime = pi_of(dl_se)->dl_runt << 
779                                                   320 
780         /*                                     !! 321 static inline void queue_pull_task(struct rq *rq)
781          * If it is a deferred reservation, an !! 322 {
782          * is not handling a starvation case,  << 
783          */                                    << 
784         if (dl_se->dl_defer & !dl_se->dl_defer << 
785                 dl_se->dl_throttled = 1;       << 
786                 dl_se->dl_defer_armed = 1;     << 
787         }                                      << 
788 }                                                 323 }
                                                   >> 324 #endif /* CONFIG_SMP */
                                                   >> 325 
                                                   >> 326 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
                                                   >> 327 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
                                                   >> 328 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                                   >> 329                                   int flags);
789                                                   330 
790 /*                                                331 /*
791  * We are being explicitly informed that a new    332  * We are being explicitly informed that a new instance is starting,
792  * and this means that:                           333  * and this means that:
793  *  - the absolute deadline of the entity has     334  *  - the absolute deadline of the entity has to be placed at
794  *    current time + relative deadline;           335  *    current time + relative deadline;
795  *  - the runtime of the entity has to be set     336  *  - the runtime of the entity has to be set to the maximum value.
796  *                                                337  *
797  * The capability of specifying such an event     338  * The capability of specifying such an event is useful whenever a -deadline
798  * entity wants to (try to!) synchronize its b    339  * entity wants to (try to!) synchronize its behaviour with the scheduler's
799  * one, and to (try to!) reconcile itself with    340  * one, and to (try to!) reconcile itself with its own scheduling
800  * parameters.                                    341  * parameters.
801  */                                               342  */
802 static inline void setup_new_dl_entity(struct  !! 343 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 344                                        struct sched_dl_entity *pi_se)
803 {                                                 345 {
804         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    346         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
805         struct rq *rq = rq_of_dl_rq(dl_rq);       347         struct rq *rq = rq_of_dl_rq(dl_rq);
806                                                   348 
807         WARN_ON(is_dl_boosted(dl_se));         !! 349         WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
808         WARN_ON(dl_time_before(rq_clock(rq), d << 
809                                                << 
810         /*                                     << 
811          * We are racing with the deadline tim << 
812          * the deadline timer handler will tak << 
813          * the runtime and postponing the dead << 
814          */                                    << 
815         if (dl_se->dl_throttled)               << 
816                 return;                        << 
817                                                   350 
818         /*                                        351         /*
819          * We use the regular wall clock time     352          * We use the regular wall clock time to set deadlines in the
820          * future; in fact, we must consider e    353          * future; in fact, we must consider execution overheads (time
821          * spent on hardirq context, etc.).       354          * spent on hardirq context, etc.).
822          */                                       355          */
823         replenish_dl_new_period(dl_se, rq);    !! 356         dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                                                   >> 357         dl_se->runtime = pi_se->dl_runtime;
                                                   >> 358         dl_se->dl_new = 0;
824 }                                                 359 }
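For context on where these scheduling parameters come from, the sketch below shows a periodic userspace task declaring them through sched_setattr(). It is only a hedged illustration following the pattern in Documentation/scheduler/sched-deadline.rst: struct sched_attr is declared by hand, and SYS_sched_setattr is assumed to be exposed by the libc headers. Calling sched_yield() at the end of each instance gives back the unused budget until the next period.

#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Declared by hand; base layout of the uapi struct sched_attr. */
struct sched_attr {
        __u32 size;
        __u32 sched_policy;
        __u64 sched_flags;
        __s32 sched_nice;
        __u32 sched_priority;
        /* SCHED_DEADLINE parameters, in nanoseconds */
        __u64 sched_runtime;
        __u64 sched_deadline;
        __u64 sched_period;
};

static int sched_setattr(pid_t pid, const struct sched_attr *attr, unsigned int flags)
{
        /* no glibc wrapper, go through the raw syscall */
        return syscall(SYS_sched_setattr, pid, attr, flags);
}

int main(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  =  10 * 1000 * 1000,    /*  10 ms budget       */
                .sched_deadline = 100 * 1000 * 1000,    /* 100 ms deadline     */
                .sched_period   = 100 * 1000 * 1000,    /* implicit deadline   */
        };

        if (sched_setattr(0, &attr, 0)) {
                perror("sched_setattr");
                return 1;
        }

        for (;;) {
                /* ... one instance worth of work ... */
                sched_yield();  /* done early: wait for the next period */
        }
}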
825                                                   360 
826 static int start_dl_timer(struct sched_dl_enti << 
827 static bool dl_entity_overflow(struct sched_dl << 
828                                                << 
829 /*                                                361 /*
830  * Pure Earliest Deadline First (EDF) scheduli    362  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
831  * possibility of an entity lasting more than     363  * possibility of an entity lasting more than what it declared, and thus
832  * exhausting its runtime.                        364  * exhausting its runtime.
833  *                                                365  *
834  * Here we are interested in making runtime ov    366  * Here we are interested in making runtime overrun possible, but we do
835  * not want an entity which is misbehaving to     367  * not want an entity which is misbehaving to affect the scheduling of all
836  * other entities.                                368  * other entities.
837  * Therefore, a budgeting strategy called Cons    369  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
838  * is used, in order to confine each entity wi    370  * is used, in order to confine each entity within its own bandwidth.
839  *                                                371  *
840  * This function deals exactly with that, and     372  * This function deals exactly with that, and ensures that when the runtime
841  * of an entity is replenished, its deadline i    373  * of an entity is replenished, its deadline is also postponed. That ensures
842  * the overrunning entity can't interfere with    374  * the overrunning entity can't interfere with other entities in the system and
843  * can't make them miss their deadlines. Reaso    375  * can't make them miss their deadlines. Reasons why this kind of overrun
844  * could happen are, typically, an entity volu    376  * could happen are, typically, an entity voluntarily trying to overcome its
845  * runtime, or it just underestimated it durin    377  * runtime, or it just underestimated it during sched_setattr().
846  */                                               378  */
847 static void replenish_dl_entity(struct sched_d !! 379 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 380                                 struct sched_dl_entity *pi_se)
848 {                                                 381 {
849         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    382         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
850         struct rq *rq = rq_of_dl_rq(dl_rq);       383         struct rq *rq = rq_of_dl_rq(dl_rq);
851                                                   384 
852         WARN_ON_ONCE(pi_of(dl_se)->dl_runtime  !! 385         BUG_ON(pi_se->dl_runtime <= 0);
853                                                   386 
854         /*                                        387         /*
855          * This could be the case for a !-dl t    388          * This could be the case for a !-dl task that is boosted.
856          * Just go with full inherited paramet    389          * Just go with full inherited parameters.
857          *                                     << 
858          * Or, it could be the case of a defer << 
859          * was not able to consume its runtime << 
860          * reached this point with current u > << 
861          *                                     << 
862          * In both cases, set a new period.    << 
863          */                                       390          */
864         if (dl_se->dl_deadline == 0 ||         !! 391         if (dl_se->dl_deadline == 0) {
865             (dl_se->dl_defer_armed && dl_entit !! 392                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
866                 dl_se->deadline = rq_clock(rq) !! 393                 dl_se->runtime = pi_se->dl_runtime;
867                 dl_se->runtime = pi_of(dl_se)- << 
868         }                                         394         }
869                                                   395 
870         if (dl_se->dl_yielded && dl_se->runtim << 
871                 dl_se->runtime = 0;            << 
872                                                << 
873         /*                                        396         /*
874          * We keep moving the deadline away un    397          * We keep moving the deadline away until we get some
875          * available runtime for the entity. T    398          * available runtime for the entity. This ensures correct
876          * handling of situations where the ru    399          * handling of situations where the runtime overrun is
877          * arbitrary large.                       400          * arbitrary large.
878          */                                       401          */
879         while (dl_se->runtime <= 0) {             402         while (dl_se->runtime <= 0) {
880                 dl_se->deadline += pi_of(dl_se !! 403                 dl_se->deadline += pi_se->dl_period;
881                 dl_se->runtime += pi_of(dl_se) !! 404                 dl_se->runtime += pi_se->dl_runtime;
882         }                                         405         }
883                                                   406 
884         /*                                        407         /*
885          * At this point, the deadline really     408          * At this point, the deadline really should be "in
886          * the future" with respect to rq->clo    409          * the future" with respect to rq->clock. If it's
887          * not, we are, for some reason, laggi    410          * not, we are, for some reason, lagging too much!
888          * Anyway, after having warned userspa    411          * Anyway, after having warned userspace about that,
889          * we still try to keep things running     412          * we still try to keep things running by
890          * resetting the deadline and the budg    413          * resetting the deadline and the budget of the
891          * entity.                                414          * entity.
892          */                                       415          */
893         if (dl_time_before(dl_se->deadline, rq    416         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
894                 printk_deferred_once("sched: D !! 417                 printk_deferred_once("sched: DL replenish lagged to much\n");
895                 replenish_dl_new_period(dl_se, !! 418                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                                                   >> 419                 dl_se->runtime = pi_se->dl_runtime;
896         }                                         420         }
897                                                   421 
898         if (dl_se->dl_yielded)                    422         if (dl_se->dl_yielded)
899                 dl_se->dl_yielded = 0;            423                 dl_se->dl_yielded = 0;
900         if (dl_se->dl_throttled)                  424         if (dl_se->dl_throttled)
901                 dl_se->dl_throttled = 0;          425                 dl_se->dl_throttled = 0;
902                                                << 
903         /*                                     << 
904          * If this is the replenishment of a d << 
905          * clear the flag and return.          << 
906          */                                    << 
907         if (dl_se->dl_defer_armed) {           << 
908                 dl_se->dl_defer_armed = 0;     << 
909                 return;                        << 
910         }                                      << 
911                                                << 
912         /*                                     << 
913          * At this point, if the deferred serv << 
914          * is in the future, if it is not runn << 
915          * and arm the defer timer.            << 
916          */                                    << 
917         if (dl_se->dl_defer && !dl_se->dl_defe << 
918             dl_time_before(rq_clock(dl_se->rq) << 
919                 if (!is_dl_boosted(dl_se) && d << 
920                                                << 
921                         /*                     << 
922                          * Set dl_se->dl_defer << 
923                          * inform the start_dl << 
924                          * activation.         << 
925                          */                    << 
926                         dl_se->dl_defer_armed  << 
927                         dl_se->dl_throttled =  << 
928                         if (!start_dl_timer(dl << 
929                                 /*             << 
930                                  * If for what << 
931                                  * queued but  << 
932                                  * deferrable  << 
933                                  */            << 
934                                 hrtimer_try_to << 
935                                 dl_se->dl_defe << 
936                                 dl_se->dl_thro << 
937                         }                      << 
938                 }                              << 
939         }                                      << 
940 }                                                 426 }
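A standalone, made-up-numbers illustration of the replenishment loop above (not kernel code): with a 3 ms budget per 10 ms period and 7 ms of overrun to pay back, three replenishments are needed before the budget turns positive, and the deadline moves 30 ms further into the future.

#include <stdio.h>

int main(void)
{
        long long dl_runtime = 3000000;    /* 3 ms budget per period          */
        long long dl_period  = 10000000;   /* 10 ms period                    */
        long long runtime    = -7000000;   /* overrun: 7 ms of debt           */
        long long deadline   = 50000000;   /* current absolute deadline, ns   */

        /* same shape as the while loop in replenish_dl_entity() */
        while (runtime <= 0) {
                deadline += dl_period;
                runtime  += dl_runtime;
        }

        printf("runtime=%lld ns, deadline=%lld ns\n", runtime, deadline);
        /* prints runtime=2000000 ns, deadline=80000000 ns */
        return 0;
}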
941                                                   427 
942 /*                                                428 /*
943  * Here we check if --at time t-- an entity (w    429  * Here we check if --at time t-- an entity (which is probably being
944  * [re]activated or, in general, enqueued) can    430  * [re]activated or, in general, enqueued) can use its remaining runtime
945  * and its current deadline _without_ exceedin    431  * and its current deadline _without_ exceeding the bandwidth it is
946  * assigned (function returns true if it can't    432  * assigned (function returns true if it can't). We are in fact applying
947  * one of the CBS rules: when a task wakes up,    433  * one of the CBS rules: when a task wakes up, if the residual runtime
948  * over residual deadline fits within the allo    434  * over residual deadline fits within the allocated bandwidth, then we
949  * can keep the current (absolute) deadline an    435  * can keep the current (absolute) deadline and residual budget without
950  * disrupting the schedulability of the system    436  * disrupting the schedulability of the system. Otherwise, we should
951  * refill the runtime and set the deadline a p    437  * refill the runtime and set the deadline a period in the future,
952  * because keeping the current (absolute) dead    438  * because keeping the current (absolute) deadline of the task would
953  * result in breaking guarantees promised to o    439  * result in breaking guarantees promised to other tasks (refer to
954  * Documentation/scheduler/sched-deadline.rst  !! 440  * Documentation/scheduler/sched-deadline.txt for more informations).
955  *                                                441  *
956  * This function returns true if:                 442  * This function returns true if:
957  *                                                443  *
958  *   runtime / (deadline - t) > dl_runtime / d    444  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
959  *                                                445  *
960  * IOW we can't recycle current parameters.       446  * IOW we can't recycle current parameters.
961  *                                                447  *
962  * Notice that the bandwidth check is done aga    448  * Notice that the bandwidth check is done against the deadline. For
963  * a task with deadline equal to period this i    449  * a task with deadline equal to period this is the same as using
964  * dl_period instead of dl_deadline in the equ    450  * dl_period instead of dl_deadline in the equation above.
965  */                                               451  */
966 static bool dl_entity_overflow(struct sched_dl !! 452 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                                                   >> 453                                struct sched_dl_entity *pi_se, u64 t)
967 {                                                 454 {
968         u64 left, right;                          455         u64 left, right;
969                                                   456 
970         /*                                        457         /*
971          * left and right are the two sides of    458          * left and right are the two sides of the equation above,
972          * after a bit of shuffling to use mul    459          * after a bit of shuffling to use multiplications instead
973          * of divisions.                          460          * of divisions.
974          *                                        461          *
975          * Note that none of the time values i    462          * Note that none of the time values involved in the two
976          * multiplications are absolute: dl_de    463          * multiplications are absolute: dl_deadline and dl_runtime
977          * are the relative deadline and the m    464          * are the relative deadline and the maximum runtime of each
978          * instance, runtime is the runtime le    465          * instance, runtime is the runtime left for the last instance
979          * and (deadline - t), since t is rq->    466          * and (deadline - t), since t is rq->clock, is the time left
980          * to the (absolute) deadline. Even if    467          * to the (absolute) deadline. Even if overflowing the u64 type
981          * is very unlikely to occur in both c    468          * is very unlikely to occur in both cases, here we scale down
982          * as we want to avoid that risk at al    469          * as we want to avoid that risk at all. Scaling down by 10
983          * means that we reduce granularity to    470          * means that we reduce granularity to 1us. We are fine with it,
984          * since this is only a true/false che    471          * since this is only a true/false check and, anyway, thinking
985          * of anything below microseconds reso    472          * of anything below microseconds resolution is actually fiction
986          * (but still we want to give the user    473          * (but still we want to give the user that illusion >;).
987          */                                       474          */
988         left = (pi_of(dl_se)->dl_deadline >> D !! 475         left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
989         right = ((dl_se->deadline - t) >> DL_S    476         right = ((dl_se->deadline - t) >> DL_SCALE) *
990                 (pi_of(dl_se)->dl_runtime >> D !! 477                 (pi_se->dl_runtime >> DL_SCALE);
991                                                   478 
992         return dl_time_before(right, left);       479         return dl_time_before(right, left);
993 }                                                 480 }
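The same check restated as a small self-contained program with concrete numbers, assuming DL_SCALE is the 10-bit shift behind the ~1 us granularity mentioned above and ignoring the wraparound handling that dl_time_before() provides: a task with a 10 ms / 100 ms reservation that wakes up with 8 ms of residual runtime and only 20 ms left to its deadline would need 40% bandwidth, so its current parameters cannot be recycled.

#include <stdbool.h>
#include <stdio.h>

#define DL_SCALE 10     /* assumed scale: drop ~1 us of granularity */

static bool dl_overflow(unsigned long long dl_runtime, unsigned long long dl_deadline,
                        unsigned long long runtime, unsigned long long deadline,
                        unsigned long long t)
{
        /* runtime / (deadline - t) > dl_runtime / dl_deadline, cross-multiplied */
        unsigned long long left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
        unsigned long long right = ((deadline - t) >> DL_SCALE) * (dl_runtime >> DL_SCALE);

        return right < left;
}

int main(void)
{
        /* 10 ms / 100 ms reservation; 8 ms left with 20 ms to the deadline */
        printf("%d\n", dl_overflow(10000000ULL, 100000000ULL,
                                   8000000ULL, 100000000ULL, 80000000ULL));
        /* prints 1: 8/20 = 40% exceeds the 10% reservation */
        return 0;
}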
994                                                   481 
995 /*                                                482 /*
996  * Revised wakeup rule [1]: For self-suspendin    483  * Revised wakeup rule [1]: For self-suspending tasks, rather than
997  * re-initializing the task's runtime and dead    484  * re-initializing the task's runtime and deadline, the revised wakeup
998  * rule adjusts the task's runtime to keep the    485  * rule adjusts the task's runtime to keep the task from overrunning its
999  * density.                                       486  * density.
1000  *                                               487  *
1001  * Reasoning: a task may overrun the density     488  * Reasoning: a task may overrun the density if:
1002  *    runtime / (deadline - t) > dl_runtime /    489  *    runtime / (deadline - t) > dl_runtime / dl_deadline
1003  *                                               490  *
1004  * Therefore, runtime can be adjusted to:        491  * Therefore, runtime can be adjusted to:
1005  *     runtime = (dl_runtime / dl_deadline) *    492  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
1006  *                                               493  *
1007  * This way, runtime will be equal to the max    494  * This way, runtime will be equal to the maximum density
1008  * the task can use without breaking any rule    495  * the task can use without breaking any rule.
1009  *                                               496  *
1010  * [1] Luca Abeni, Giuseppe Lipari, and Juri     497  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
1011  * bandwidth server revisited. SIGBED Rev. 11    498  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
1012  */                                              499  */
1013 static void                                      500 static void
1014 update_dl_revised_wakeup(struct sched_dl_enti    501 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
1015 {                                                502 {
1016         u64 laxity = dl_se->deadline - rq_clo    503         u64 laxity = dl_se->deadline - rq_clock(rq);
1017                                                  504 
1018         /*                                       505         /*
1019          * If the task has deadline < period,    506          * If the task has deadline < period, and the deadline is in the past,
1020          * it should already be throttled bef    507          * it should already be throttled before this check.
1021          *                                       508          *
1022          * See update_dl_entity() comments fo    509          * See update_dl_entity() comments for further details.
1023          */                                      510          */
1024         WARN_ON(dl_time_before(dl_se->deadlin    511         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
1025                                                  512 
1026         dl_se->runtime = (dl_se->dl_density * !! 513         dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
1027 }                                                514 }
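A worked instance of the revised wakeup rule. The kernel precomputes dl_density at admission time; this sketch recomputes it, assuming the 20-bit fixed point implied by the shift above: a 10 ms / 20 ms task waking up 6 ms before its deadline gets its runtime clipped to 3 ms.

#include <stdio.h>

#define BW_SHIFT 20     /* fixed point matching the ">> 20" above */

int main(void)
{
        unsigned long long dl_runtime  = 10000000;   /* 10 ms                      */
        unsigned long long dl_deadline = 20000000;   /* 20 ms => density 0.5       */
        unsigned long long laxity      =  6000000;   /*  6 ms left to the deadline */

        /* density = runtime / deadline, kept in 20-bit fixed point */
        unsigned long long dl_density = (dl_runtime << BW_SHIFT) / dl_deadline;

        /* revised rule: runtime = density * (deadline - t) */
        unsigned long long runtime = (dl_density * laxity) >> BW_SHIFT;

        printf("clipped runtime = %llu ns\n", runtime);   /* 3000000 ns */
        return 0;
}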
1028                                                  515 
1029 /*                                               516 /*
1030  * Regarding the deadline, a task with implic    517  * Regarding the deadline, a task with implicit deadline has a relative
1031  * deadline == relative period. A task with c    518  * deadline == relative period. A task with constrained deadline has a
1032  * relative deadline <= relative period.         519  * relative deadline <= relative period.
1033  *                                               520  *
1034  * We support constrained deadline tasks. How    521  * We support constrained deadline tasks. However, there are some restrictions
1035  * applied only for tasks which do not have a    522  * applied only for tasks which do not have an implicit deadline. See
1036  * update_dl_entity() to know more about such    523  * update_dl_entity() to know more about such restrictions.
1037  *                                               524  *
1038  * dl_is_implicit() returns true if the task     525  * dl_is_implicit() returns true if the task has an implicit deadline.
1039  */                                              526  */
1040 static inline bool dl_is_implicit(struct sche    527 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1041 {                                                528 {
1042         return dl_se->dl_deadline == dl_se->d    529         return dl_se->dl_deadline == dl_se->dl_period;
1043 }                                                530 }
1044                                                  531 
1045 /*                                               532 /*
1046  * When a deadline entity is placed in the ru    533  * When a deadline entity is placed in the runqueue, its runtime and deadline
1047  * might need to be updated. This is done by     534  * might need to be updated. This is done by a CBS wake up rule. There are two
1048  * different rules: 1) the original CBS; and     535  * different rules: 1) the original CBS; and 2) the Revisited CBS.
1049  *                                               536  *
1050  * When the task is starting a new period, th    537  * When the task is starting a new period, the Original CBS is used. In this
1051  * case, the runtime is replenished and a new    538  * case, the runtime is replenished and a new absolute deadline is set.
1052  *                                               539  *
1053  * When a task is queued before the beginning    540  * When a task is queued before the beginning of the next period, using the
1054  * remaining runtime and deadline could make     541  * remaining runtime and deadline could make the entity overflow, see
1055  * dl_entity_overflow() to find more about ru    542  * dl_entity_overflow() to find more about runtime overflow. When such case
1056  * is detected, the runtime and deadline need    543  * is detected, the runtime and deadline need to be updated.
1057  *                                               544  *
1058  * If the task has an implicit deadline, i.e.    545  * If the task has an implicit deadline, i.e., deadline == period, the Original
1059  * CBS is applied. The runtime is replenished !! 546  * CBS is applied. the runtime is replenished and a new absolute deadline is
1060  * set, as in the previous cases.                547  * set, as in the previous cases.
1061  *                                               548  *
1062  * However, the Original CBS does not work pr    549  * However, the Original CBS does not work properly for tasks with
1063  * deadline < period, which are said to have     550  * deadline < period, which are said to have a constrained deadline. By
1064  * applying the Original CBS, a constrained d    551  * applying the Original CBS, a constrained deadline task would be able to run
1065  * runtime/deadline in a period. With deadlin    552  * runtime/deadline in a period. With deadline < period, the task would
1066  * overrun the runtime/period allowed bandwid    553  * overrun the runtime/period allowed bandwidth, breaking the admission test.
1067  *                                               554  *
1068  * In order to prevent this misbehavior, the     555  * In order to prevent this misbehavior, the Revisited CBS is used for
1069  * constrained deadline tasks when a runtime     556  * constrained deadline tasks when a runtime overflow is detected. In the
1070  * Revisited CBS, rather than replenishing &     557  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1071  * the remaining runtime of the task is reduc    558  * the remaining runtime of the task is reduced to avoid runtime overflow.
1072  * Please refer to the comments in the update    559  * Please refer to the comments in the update_dl_revised_wakeup() function to find
1073  * more about the Revised CBS rule.              560  * more about the Revised CBS rule.
1074  */                                              561  */
1075 static void update_dl_entity(struct sched_dl_ !! 562 static void update_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 563                              struct sched_dl_entity *pi_se)
1076 {                                                564 {
1077         struct rq *rq = rq_of_dl_se(dl_se);   !! 565         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
                                                   >> 566         struct rq *rq = rq_of_dl_rq(dl_rq);
                                                   >> 567 
                                                   >> 568         /*
                                                   >> 569          * The arrival of a new instance needs special treatment, i.e.,
                                                   >> 570          * the actual scheduling parameters have to be "renewed".
                                                   >> 571          */
                                                   >> 572         if (dl_se->dl_new) {
                                                   >> 573                 setup_new_dl_entity(dl_se, pi_se);
                                                   >> 574                 return;
                                                   >> 575         }
1078                                                  576 
1079         if (dl_time_before(dl_se->deadline, r    577         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1080             dl_entity_overflow(dl_se, rq_cloc !! 578             dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
1081                                                  579 
1082                 if (unlikely(!dl_is_implicit(    580                 if (unlikely(!dl_is_implicit(dl_se) &&
1083                              !dl_time_before(    581                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1084                              !is_dl_boosted(d !! 582                              !dl_se->dl_boosted)){
1085                         update_dl_revised_wak    583                         update_dl_revised_wakeup(dl_se, rq);
1086                         return;                  584                         return;
1087                 }                                585                 }
1088                                                  586 
1089                 replenish_dl_new_period(dl_se !! 587                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1090         } else if (dl_server(dl_se) && dl_se- !! 588                 dl_se->runtime = pi_se->dl_runtime;
1091                 /*                            << 
1092                  * The server can still use i << 
1093                  * it left the dl_defer_runni << 
1094                  */                           << 
1095                 if (!dl_se->dl_defer_running) << 
1096                         dl_se->dl_defer_armed << 
1097                         dl_se->dl_throttled = << 
1098                 }                             << 
1099         }                                        589         }
1100 }                                                590 }
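The choice the function above makes can be condensed into a standalone helper with illustrative names only (the deferred-server handling is left out of this sketch): parameters are kept when they still fit, the revised rule is used for a non-boosted constrained task whose deadline has not yet passed, and everything else gets a full replenishment with a new deadline.

#include <stdbool.h>

enum wakeup_rule { KEEP_PARAMS, ORIGINAL_CBS, REVISED_CBS };

/* Which CBS placement rule applies when a deadline entity is enqueued. */
enum wakeup_rule pick_wakeup_rule(bool deadline_in_past, bool bw_overflow,
                                  bool implicit_deadline, bool boosted)
{
        if (!deadline_in_past && !bw_overflow)
                return KEEP_PARAMS;             /* recycle runtime and deadline    */

        if (!implicit_deadline && !deadline_in_past && !boosted)
                return REVISED_CBS;             /* clip runtime, keep the deadline */

        return ORIGINAL_CBS;                    /* refill runtime, new deadline    */
}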
1101                                                  591 
1102 static inline u64 dl_next_period(struct sched    592 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1103 {                                                593 {
1104         return dl_se->deadline - dl_se->dl_de    594         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1105 }                                                595 }
1106                                                  596 
1107 /*                                               597 /*
1108  * If the entity depleted all its runtime, an    598  * If the entity depleted all its runtime, and if we want it to sleep
1109  * while waiting for some new execution time     599  * while waiting for some new execution time to become available, we
1110  * set the bandwidth replenishment timer to t    600  * set the bandwidth replenishment timer to the replenishment instant
1111  * and try to activate it.                       601  * and try to activate it.
1112  *                                               602  *
1113  * Notice that it is important for the caller    603  * Notice that it is important for the caller to know if the timer
1114  * actually started or not (i.e., the repleni    604  * actually started or not (i.e., the replenishment instant is in
1115  * the future or in the past).                   605  * the future or in the past).
1116  */                                              606  */
1117 static int start_dl_timer(struct sched_dl_ent !! 607 static int start_dl_timer(struct task_struct *p)
1118 {                                                608 {
                                                   >> 609         struct sched_dl_entity *dl_se = &p->dl;
1119         struct hrtimer *timer = &dl_se->dl_ti    610         struct hrtimer *timer = &dl_se->dl_timer;
1120         struct dl_rq *dl_rq = dl_rq_of_se(dl_ !! 611         struct rq *rq = task_rq(p);
1121         struct rq *rq = rq_of_dl_rq(dl_rq);   << 
1122         ktime_t now, act;                        612         ktime_t now, act;
1123         s64 delta;                               613         s64 delta;
1124                                                  614 
1125         lockdep_assert_rq_held(rq);           !! 615         lockdep_assert_held(&rq->lock);
1126                                                  616 
1127         /*                                       617         /*
1128          * We want the timer to fire at the d    618          * We want the timer to fire at the deadline, but considering
1129          * that it is actually coming from rq    619          * that it is actually coming from rq->clock and not from
1130          * hrtimer's time base reading.          620          * hrtimer's time base reading.
1131          *                                    !! 621          */
1132          * The deferred reservation will have !! 622         act = ns_to_ktime(dl_next_period(dl_se));
1133          * (deadline - runtime). At that poin << 
1134          * if the current deadline can be use << 
1135          * required to avoid add too much pre << 
1136          * (current u > U).                   << 
1137          */                                   << 
1138         if (dl_se->dl_defer_armed) {          << 
1139                 WARN_ON_ONCE(!dl_se->dl_throt << 
1140                 act = ns_to_ktime(dl_se->dead << 
1141         } else {                              << 
1142                 /* act = deadline - rel-deadl << 
1143                 act = ns_to_ktime(dl_next_per << 
1144         }                                     << 
1145                                               << 
1146         now = hrtimer_cb_get_time(timer);        623         now = hrtimer_cb_get_time(timer);
1147         delta = ktime_to_ns(now) - rq_clock(r    624         delta = ktime_to_ns(now) - rq_clock(rq);
1148         act = ktime_add_ns(act, delta);          625         act = ktime_add_ns(act, delta);
1149                                                  626 
1150         /*                                       627         /*
1151          * If the expiry time already passed,    628          * If the expiry time already passed, e.g., because the value
1152          * chosen as the deadline is too smal    629          * chosen as the deadline is too small, don't even try to
1153          * start the timer in the past!          630          * start the timer in the past!
1154          */                                      631          */
1155         if (ktime_us_delta(act, now) < 0)        632         if (ktime_us_delta(act, now) < 0)
1156                 return 0;                        633                 return 0;
1157                                                  634 
1158         /*                                       635         /*
1159          * !enqueued will guarantee another c    636          * !enqueued will guarantee another callback; even if one is already in
1160          * progress. This ensures a balanced     637          * progress. This ensures a balanced {get,put}_task_struct().
1161          *                                       638          *
1162          * The race against __run_timer() cle    639          * The race against __run_timer() clearing the enqueued state is
1163          * harmless because we're holding tas    640          * harmless because we're holding task_rq()->lock, therefore the timer
1164          * expiring after we've done the chec    641          * expiring after we've done the check will wait on its task_rq_lock()
1165          * and observe our state.                642          * and observe our state.
1166          */                                      643          */
1167         if (!hrtimer_is_queued(timer)) {         644         if (!hrtimer_is_queued(timer)) {
1168                 if (!dl_server(dl_se))        !! 645                 get_task_struct(p);
1169                         get_task_struct(dl_ta !! 646                 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
1170                 hrtimer_start(timer, act, HRT << 
1171         }                                        647         }
1172                                                  648 
1173         return 1;                                649         return 1;
1174 }                                                650 }
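The clock-domain fixup above (the expiry is computed against rq_clock() but armed on the hrtimer base) reduces to the following arithmetic, shown here as a standalone sketch with made-up values in nanoseconds:

#include <stdint.h>
#include <stdio.h>

/* Translate a replenishment instant from rq_clock() time to hrtimer time. */
static uint64_t timer_expiry(uint64_t act_rq,      /* replenishment instant, rq_clock domain */
                             uint64_t now_rq,      /* rq_clock() "now"                       */
                             uint64_t now_hrtimer) /* hrtimer base "now"                     */
{
        int64_t delta = (int64_t)(now_hrtimer - now_rq);

        return act_rq + delta;     /* same instant, expressed in the hrtimer domain */
}

int main(void)
{
        /* rq_clock says 100 ms while the hrtimer base says 100.2 ms, so an
         * expiry meant for 150 ms (rq_clock) is armed at 150.2 ms (hrtimer). */
        printf("%llu\n", (unsigned long long)timer_expiry(150000000ULL,
                                                          100000000ULL,
                                                          100200000ULL));
        return 0;
}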
1175                                                  651 
1176 static void __push_dl_task(struct rq *rq, str << 
1177 {                                             << 
1178 #ifdef CONFIG_SMP                             << 
1179         /*                                    << 
1180          * Queueing this task back might have << 
1181          * to kick someone away.              << 
1182          */                                   << 
1183         if (has_pushable_dl_tasks(rq)) {      << 
1184                 /*                            << 
1185                  * Nothing relies on rq->lock << 
1186                  * rq->lock.                  << 
1187                  */                           << 
1188                 rq_unpin_lock(rq, rf);        << 
1189                 push_dl_task(rq);             << 
1190                 rq_repin_lock(rq, rf);        << 
1191         }                                     << 
1192 #endif                                        << 
1193 }                                             << 
1194                                               << 
1195 /* a defer timer will not be reset if the run << 
1196 static const u64 dl_server_min_res = 1 * NSEC << 
1197                                               << 
1198 static enum hrtimer_restart dl_server_timer(s << 
1199 {                                             << 
1200         struct rq *rq = rq_of_dl_se(dl_se);   << 
1201         u64 fw;                               << 
1202                                               << 
1203         scoped_guard (rq_lock, rq) {          << 
1204                 struct rq_flags *rf = &scope. << 
1205                                               << 
1206                 if (!dl_se->dl_throttled || ! << 
1207                         return HRTIMER_NOREST << 
1208                                               << 
1209                 sched_clock_tick();           << 
1210                 update_rq_clock(rq);          << 
1211                                               << 
1212                 if (!dl_se->dl_runtime)       << 
1213                         return HRTIMER_NOREST << 
1214                                               << 
1215                 if (!dl_se->server_has_tasks( << 
1216                         replenish_dl_entity(d << 
1217                         return HRTIMER_NOREST << 
1218                 }                             << 
1219                                               << 
1220                 if (dl_se->dl_defer_armed) {  << 
1221                         /*                    << 
1222                          * First check if the << 
1223                          * If so, it is possi << 
1224                          * of time. The dl_se << 
1225                          * forwarding the tim << 
1226                          */                   << 
1227                         if (dl_time_before(rq << 
1228                                            (d << 
1229                                               << 
1230                                 /* reset the  << 
1231                                 fw = dl_se->d << 
1232                                               << 
1233                                 hrtimer_forwa << 
1234                                 return HRTIME << 
1235                         }                     << 
1236                                               << 
1237                         dl_se->dl_defer_runni << 
1238                 }                             << 
1239                                               << 
1240                 enqueue_dl_entity(dl_se, ENQU << 
1241                                               << 
1242                 if (!dl_task(dl_se->rq->curr) << 
1243                         resched_curr(rq);     << 
1244                                               << 
1245                 __push_dl_task(rq, rf);       << 
1246         }                                     << 
1247                                               << 
1248         return HRTIMER_NORESTART;             << 
1249 }                                             << 
1250                                               << 
1251 /*                                               652 /*
1252  * This is the bandwidth enforcement timer ca    653  * This is the bandwidth enforcement timer callback. If here, we know
1253  * a task is not on its dl_rq, since the fact    654  * a task is not on its dl_rq, since the fact that the timer was running
1254  * means the task is throttled and needs a ru    655  * means the task is throttled and needs a runtime replenishment.
1255  *                                               656  *
1256  * However, what we actually do depends on wh    657  * However, what we actually do depends on whether the task is active
1257  * (it is on its rq) or has been removed from    658  * (it is on its rq) or has been removed from there by a call to
1258  * dequeue_task_dl(). In the former case we m    659  * dequeue_task_dl(). In the former case we must issue the runtime
1259  * replenishment and add the task back to the    660  * replenishment and add the task back to the dl_rq; in the latter, we just
1260  * do nothing but clearing dl_throttled, so t    661  * do nothing but clearing dl_throttled, so that runtime and deadline
1261  * updating (and the queueing back to dl_rq)     662  * updating (and the queueing back to dl_rq) will be done by the
1262  * next call to enqueue_task_dl().               663  * next call to enqueue_task_dl().
1263  */                                              664  */
1264 static enum hrtimer_restart dl_task_timer(str    665 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1265 {                                                666 {
1266         struct sched_dl_entity *dl_se = conta    667         struct sched_dl_entity *dl_se = container_of(timer,
1267                                                  668                                                      struct sched_dl_entity,
1268                                                  669                                                      dl_timer);
1269         struct task_struct *p;                !! 670         struct task_struct *p = dl_task_of(dl_se);
1270         struct rq_flags rf;                   !! 671         unsigned long flags;
1271         struct rq *rq;                           672         struct rq *rq;
1272                                                  673 
1273         if (dl_server(dl_se))                 !! 674         rq = task_rq_lock(p, &flags);
1274                 return dl_server_timer(timer, << 
1275                                               << 
1276         p = dl_task_of(dl_se);                << 
1277         rq = task_rq_lock(p, &rf);            << 
1278                                                  675 
1279         /*                                       676         /*
1280          * The task might have changed its sc    677          * The task might have changed its scheduling policy to something
1281          * different than SCHED_DEADLINE (thr !! 678          * different than SCHED_DEADLINE (through switched_fromd_dl()).
1282          */                                      679          */
1283         if (!dl_task(p))                      !! 680         if (!dl_task(p)) {
                                                   >> 681                 __dl_clear_params(p);
                                                   >> 682                 goto unlock;
                                                   >> 683         }
                                                   >> 684 
                                                   >> 685         /*
                                                   >> 686          * This is possible if switched_from_dl() raced against a running
                                                   >> 687          * callback that took the above !dl_task() path and we've since then
                                                   >> 688          * switched back into SCHED_DEADLINE.
                                                   >> 689          *
                                                   >> 690          * There's nothing to do except drop our task reference.
                                                   >> 691          */
                                                   >> 692         if (dl_se->dl_new)
1284                 goto unlock;                     693                 goto unlock;
1285                                                  694 
1286         /*                                       695         /*
1287          * The task might have been boosted b    696          * The task might have been boosted by someone else and might be in the
1288          * boosting/deboosting path, its not     697          * boosting/deboosting path, its not throttled.
1289          */                                      698          */
1290         if (is_dl_boosted(dl_se))             !! 699         if (dl_se->dl_boosted)
1291                 goto unlock;                     700                 goto unlock;
1292                                                  701 
1293         /*                                       702         /*
1294          * Spurious timer due to start_dl_tim    703          * Spurious timer due to start_dl_timer() race; or we already received
1295          * a replenishment from rt_mutex_setp    704          * a replenishment from rt_mutex_setprio().
1296          */                                      705          */
1297         if (!dl_se->dl_throttled)                706         if (!dl_se->dl_throttled)
1298                 goto unlock;                     707                 goto unlock;
1299                                                  708 
1300         sched_clock_tick();                      709         sched_clock_tick();
1301         update_rq_clock(rq);                     710         update_rq_clock(rq);
1302                                                  711 
1303         /*                                       712         /*
1304          * If the throttle happened during sc    713          * If the throttle happened during sched-out; like:
1305          *                                       714          *
1306          *   schedule()                          715          *   schedule()
1307          *     deactivate_task()                 716          *     deactivate_task()
1308          *       dequeue_task_dl()               717          *       dequeue_task_dl()
1309          *         update_curr_dl()              718          *         update_curr_dl()
1310          *           start_dl_timer()            719          *           start_dl_timer()
1311          *         __dequeue_task_dl()           720          *         __dequeue_task_dl()
1312          *     prev->on_rq = 0;                  721          *     prev->on_rq = 0;
1313          *                                       722          *
1314          * We can be both throttled and !queu    723          * We can be both throttled and !queued. Replenish the counter
1315          * but do not enqueue -- wait for our    724          * but do not enqueue -- wait for our wakeup to do that.
1316          */                                      725          */
1317         if (!task_on_rq_queued(p)) {             726         if (!task_on_rq_queued(p)) {
1318                 replenish_dl_entity(dl_se);   !! 727                 replenish_dl_entity(dl_se, dl_se);
1319                 goto unlock;                     728                 goto unlock;
1320         }                                        729         }
1321                                                  730 
                                                   >> 731         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                                                   >> 732         if (dl_task(rq->curr))
                                                   >> 733                 check_preempt_curr_dl(rq, p, 0);
                                                   >> 734         else
                                                   >> 735                 resched_curr(rq);
                                                   >> 736 
1322 #ifdef CONFIG_SMP                                737 #ifdef CONFIG_SMP
1323         if (unlikely(!rq->online)) {          !! 738         /*
1324                 /*                            !! 739          * Perform balancing operations here; after the replenishments.  We
1325                  * If the runqueue is no long !! 740          * cannot drop rq->lock before this, otherwise the assertion in
1326                  * task elsewhere. This neces !! 741          * start_dl_timer() about not missing updates is not true.
1327                  */                           !! 742          *
1328                 lockdep_unpin_lock(__rq_lockp !! 743          * If we find that the rq the task was on is no longer available, we
                                                   >> 744          * need to select a new rq.
                                                   >> 745          *
                                                   >> 746          * XXX figure out if select_task_rq_dl() deals with offline cpus.
                                                   >> 747          */
                                                   >> 748         if (unlikely(!rq->online))
1329                 rq = dl_task_offline_migratio    749                 rq = dl_task_offline_migration(rq, p);
1330                 rf.cookie = lockdep_pin_lock( << 
1331                 update_rq_clock(rq);          << 
1332                                                  750 
                                                   >> 751         /*
                                                   >> 752          * Queueing this task back might have overloaded rq, check if we need
                                                   >> 753          * to kick someone away.
                                                   >> 754          */
                                                   >> 755         if (has_pushable_dl_tasks(rq)) {
1333                 /*                               756                 /*
1334                  * Now that the task has been !! 757                  * Nothing relies on rq->lock after this, so its safe to drop
1335                  * have that locked, proceed  !! 758                  * rq->lock.
1336                  * there.                     << 
1337                  */                              759                  */
                                                   >> 760                 lockdep_unpin_lock(&rq->lock);
                                                   >> 761                 push_dl_task(rq);
                                                   >> 762                 lockdep_pin_lock(&rq->lock);
1338         }                                        763         }
1339 #endif                                           764 #endif
1340                                                  765 
1341         enqueue_task_dl(rq, p, ENQUEUE_REPLEN << 
1342         if (dl_task(rq->curr))                << 
1343                 wakeup_preempt_dl(rq, p, 0);  << 
1344         else                                  << 
1345                 resched_curr(rq);             << 
1346                                               << 
1347         __push_dl_task(rq, &rf);              << 
1348                                               << 
1349 unlock:                                          766 unlock:
1350         task_rq_unlock(rq, p, &rf);           !! 767         task_rq_unlock(rq, p, &flags);
1351                                                  768 
1352         /*                                       769         /*
1353          * This can free the task_struct, inc    770          * This can free the task_struct, including this hrtimer, do not touch
1354          * anything related to that after thi    771          * anything related to that after this.
1355          */                                      772          */
1356         put_task_struct(p);                      773         put_task_struct(p);
1357                                                  774 
1358         return HRTIMER_NORESTART;                775         return HRTIMER_NORESTART;
1359 }                                                776 }
1360                                                  777 
1361 static void init_dl_task_timer(struct sched_d !! 778 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1362 {                                                779 {
1363         struct hrtimer *timer = &dl_se->dl_ti    780         struct hrtimer *timer = &dl_se->dl_timer;
1364                                                  781 
1365         hrtimer_init(timer, CLOCK_MONOTONIC,  !! 782         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1366         timer->function = dl_task_timer;         783         timer->function = dl_task_timer;
1367 }                                                784 }
1368                                                  785 
1369 /*                                               786 /*
1370  * During the activation, CBS checks if it ca    787  * During the activation, CBS checks if it can reuse the current task's
1371  * runtime and period. If the deadline of the    788  * runtime and period. If the deadline of the task is in the past, CBS
1372  * cannot use the runtime, and so it replenis    789  * cannot use the runtime, and so it replenishes the task. This rule
1373  * works fine for implicit deadline tasks (de    790  * works fine for implicit deadline tasks (deadline == period), and the
1374  * CBS was designed for implicit deadline tas    791  * CBS was designed for implicit deadline tasks. However, a task with
1375  * constrained deadline (deadline < period) m !! 792  * constrained deadline (deadine < period) might be awakened after the
1376  * deadline, but before the next period. In t    793  * deadline, but before the next period. In this case, replenishing the
1377  * task would allow it to run for runtime / d    794  * task would allow it to run for runtime / deadline. As in this case
1378  * deadline < period, CBS enables a task to r    795  * deadline < period, CBS enables a task to run for more than the
1379  * runtime / period. In a very loaded system,    796  * runtime / period. In a very loaded system, this can cause a domino
1380  * effect, making other tasks miss their dead    797  * effect, making other tasks miss their deadlines.
1381  *                                               798  *
1382  * To avoid this problem, in the activation o    799  * To avoid this problem, in the activation of a constrained deadline
1383  * task after the deadline but before the nex    800  * task after the deadline but before the next period, throttle the
1384  * task and set the replenishing timer to the    801  * task and set the replenishing timer to the beginning of the next period,
1385  * unless it is boosted.                         802  * unless it is boosted.
1386  */                                              803  */
1387 static inline void dl_check_constrained_dl(st    804 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1388 {                                                805 {
1389         struct rq *rq = rq_of_dl_se(dl_se);   !! 806         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 807         struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1390                                                  808 
1391         if (dl_time_before(dl_se->deadline, r    809         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1392             dl_time_before(rq_clock(rq), dl_n    810             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1393                 if (unlikely(is_dl_boosted(dl !! 811                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1394                         return;                  812                         return;
1395                 dl_se->dl_throttled = 1;         813                 dl_se->dl_throttled = 1;
1396                 if (dl_se->runtime > 0)          814                 if (dl_se->runtime > 0)
1397                         dl_se->runtime = 0;      815                         dl_se->runtime = 0;
1398         }                                        816         }
1399 }                                                817 }
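
The comment above describes why a constrained-deadline task (deadline < period) that wakes up after its deadline but before its next period must be throttled rather than replenished. As a rough illustration, here is a userspace-style sketch of that rule, with plain integers standing in for the kernel's sched_dl_entity fields and clock; all names below are illustrative, not the kernel API:

        /* Toy model of the constrained-deadline activation rule described
         * above; all types and helpers are illustrative stand-ins. */
        #include <stdint.h>

        struct toy_dl_se {
                uint64_t deadline;      /* absolute deadline of current instance */
                uint64_t dl_deadline;   /* relative deadline */
                uint64_t dl_period;     /* period (>= dl_deadline) */
                int64_t  runtime;       /* remaining budget */
                int      dl_throttled;
        };

        /* The next period begins dl_period after the current instance
         * started, i.e. at (deadline - dl_deadline) + dl_period. */
        static uint64_t toy_next_period(const struct toy_dl_se *se)
        {
                return se->deadline - se->dl_deadline + se->dl_period;
        }

        /* Wakeup after the deadline but before the next period: throttle
         * and drop any leftover budget, so the task cannot effectively run
         * for runtime/deadline instead of runtime/period. */
        static void toy_check_constrained(struct toy_dl_se *se, uint64_t now)
        {
                if (now > se->deadline && now < toy_next_period(se)) {
                        se->dl_throttled = 1;
                        if (se->runtime > 0)
                                se->runtime = 0;
                }
        }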
1400                                                  818 
1401 static                                           819 static
1402 int dl_runtime_exceeded(struct sched_dl_entit    820 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1403 {                                                821 {
1404         return (dl_se->runtime <= 0);            822         return (dl_se->runtime <= 0);
1405 }                                                823 }
1406                                                  824 
                                                   >> 825 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
                                                   >> 826 
1407 /*                                               827 /*
1408  * This function implements the GRUB accounti !! 828  * Update the current task's runtime statistics (provided it is still
1409  * GRUB reclaiming algorithm, the runtime is  !! 829  * a -deadline task and has not been removed from the dl_rq).
1410  * but as "dq = -(max{u, (Umax - Uinact - Uex << 
1411  * where u is the utilization of the task, Um << 
1412  * utilization, Uinact is the (per-runqueue)  << 
1413  * as the difference between the "total runqu << 
1414  * "runqueue active utilization", and Uextra  << 
1415  * reclaimable utilization.                   << 
1416  * Since rq->dl.running_bw and rq->dl.this_bw << 
1417  * by 2^BW_SHIFT, the result has to be shifte << 
1418  * Since rq->dl.bw_ratio contains 1 / Umax mu << 
1419  * is multiplied by rq->dl.bw_ratio and shift << 
1420  * Since delta is a 64 bit variable, to have  << 
1421  * larger than 2^(64 - 20 - 8), which is more << 
1422  * not an issue here.                         << 
1423  */                                              830  */
1424 static u64 grub_reclaim(u64 delta, struct rq  !! 831 static void update_curr_dl(struct rq *rq)
1425 {                                             << 
1426         u64 u_act;                            << 
1427         u64 u_inact = rq->dl.this_bw - rq->dl << 
1428                                               << 
1429         /*                                    << 
1430          * Instead of computing max{u, (u_max << 
1431          * compare u_inact + u_extra with u_m << 
1432          * can be larger than u_max. So, u_ma << 
1433          * negative leading to wrong results. << 
1434          */                                   << 
1435         if (u_inact + rq->dl.extra_bw > rq->d << 
1436                 u_act = dl_se->dl_bw;         << 
1437         else                                  << 
1438                 u_act = rq->dl.max_bw - u_ina << 
1439                                               << 
1440         u_act = (u_act * rq->dl.bw_ratio) >>  << 
1441         return (delta * u_act) >> BW_SHIFT;   << 
1442 }                                             << 
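
The block comment above gives the GRUB charging rule, dq = -max{u, (Umax - Uinact - Uextra)} dt, with utilizations kept as fixed-point values scaled by 2^BW_SHIFT. The following standalone sketch reproduces that arithmetic with toy inputs; for simplicity the 1/Umax factor is kept at BW_SHIFT precision, whereas the kernel stores it with a separate RATIO_SHIFT:

        /* Hedged sketch of the GRUB fixed-point reclaiming math; the shift
         * mirrors the kernel's BW_SHIFT convention, everything else is toy. */
        #include <stdint.h>
        #include <stdio.h>

        #define TOY_BW_SHIFT    20
        #define TOY_BW_UNIT     (1ULL << TOY_BW_SHIFT)

        static uint64_t toy_grub_reclaim(uint64_t delta, uint64_t u_task,
                                         uint64_t u_max, uint64_t u_inact,
                                         uint64_t u_extra, uint64_t inv_umax)
        {
                uint64_t u_act;

                /*
                 * Compare u_inact + u_extra against u_max instead of
                 * subtracting, so the unsigned difference can never wrap.
                 */
                if (u_inact + u_extra > u_max)
                        u_act = u_task;
                else
                        u_act = u_max - u_inact - u_extra;

                u_act = (u_act * inv_umax) >> TOY_BW_SHIFT; /* times 1/Umax */
                return (delta * u_act) >> TOY_BW_SHIFT;     /* charge at u_act */
        }

        int main(void)
        {
                /* Umax = 1.0, 30% of the CPU is reclaimable, task u = 0.25:
                 * one 1 ms tick of execution costs roughly 0.7 ms of budget. */
                uint64_t charged = toy_grub_reclaim(1000000ULL,        /* 1 ms */
                                                    TOY_BW_UNIT / 4,   /* u */
                                                    TOY_BW_UNIT,       /* Umax */
                                                    0,                 /* Uinact */
                                                    (TOY_BW_UNIT * 3) / 10,
                                                    TOY_BW_UNIT);      /* 1/Umax */

                printf("charged %llu ns per 1 ms of execution\n",
                       (unsigned long long)charged);
                return 0;
        }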
1443                                               << 
1444 s64 dl_scaled_delta_exec(struct rq *rq, struc << 
1445 {                                             << 
1446         s64 scaled_delta_exec;                << 
1447                                               << 
1448         /*                                    << 
1449          * For tasks that participate in GRUB << 
1450          * spare reclaimed bandwidth is used  << 
1451          *                                    << 
1452          * For the others, we still need to s << 
1453          * according to current frequency and << 
1454          */                                   << 
1455         if (unlikely(dl_se->flags & SCHED_FLA << 
1456                 scaled_delta_exec = grub_recl << 
1457         } else {                              << 
1458                 int cpu = cpu_of(rq);         << 
1459                 unsigned long scale_freq = ar << 
1460                 unsigned long scale_cpu = arc << 
1461                                               << 
1462                 scaled_delta_exec = cap_scale << 
1463                 scaled_delta_exec = cap_scale << 
1464         }                                     << 
1465                                               << 
1466         return scaled_delta_exec;             << 
1467 }                                             << 
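
For entities that do not use GRUB reclaiming, the consumed budget is instead scaled by the current CPU frequency and compute capacity via cap_scale(). A small sketch of that fixed-point scaling, assuming the usual 1024-based capacity convention (the helper names are illustrative):

        /* Hedged sketch of frequency/capacity scaling of consumed runtime;
         * 1024 is the usual "full speed, full capacity" reference value. */
        #include <stdint.h>

        #define TOY_CAPACITY_SHIFT      10

        /* Multiply by a <= 1.0 factor expressed in 1024ths. */
        static inline uint64_t toy_cap_scale(uint64_t delta, unsigned long scale)
        {
                return (delta * scale) >> TOY_CAPACITY_SHIFT;
        }

        /*
         * E.g. scale_freq = 512 (half frequency) on a scale_cpu = 512
         * (half-capacity core) charges only a quarter of the wall-clock
         * delta against the deadline budget.
         */
        static uint64_t toy_scaled_delta(uint64_t delta_exec,
                                         unsigned long scale_freq,
                                         unsigned long scale_cpu)
        {
                delta_exec = toy_cap_scale(delta_exec, scale_freq);
                delta_exec = toy_cap_scale(delta_exec, scale_cpu);
                return delta_exec;
        }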
1468                                               << 
1469 static inline void                            << 
1470 update_stats_dequeue_dl(struct dl_rq *dl_rq,  << 
1471                         int flags);           << 
1472 static void update_curr_dl_se(struct rq *rq,  << 
1473 {                                                832 {
1474         s64 scaled_delta_exec;                !! 833         struct task_struct *curr = rq->curr;
1475                                               !! 834         struct sched_dl_entity *dl_se = &curr->dl;
1476         if (unlikely(delta_exec <= 0)) {      !! 835         u64 delta_exec;
1477                 if (unlikely(dl_se->dl_yielde << 
1478                         goto throttle;        << 
1479                 return;                       << 
1480         }                                     << 
1481                                               << 
1482         if (dl_server(dl_se) && dl_se->dl_thr << 
1483                 return;                       << 
1484                                                  836 
1485         if (dl_entity_is_special(dl_se))      !! 837         if (!dl_task(curr) || !on_dl_rq(dl_se))
1486                 return;                          838                 return;
1487                                                  839 
1488         scaled_delta_exec = dl_scaled_delta_e << 
1489                                               << 
1490         dl_se->runtime -= scaled_delta_exec;  << 
1491                                               << 
1492         /*                                       840         /*
1493          * The fair server can consume its ru !! 841          * Consumed budget is computed considering the time as
1494          * running as regular CFS).           !! 842          * observed by schedulable tasks (excluding time spent
1495          *                                    !! 843          * in hardirq context, etc.). Deadlines are instead
1496          * If the server consumes its entire  !! 844          * computed using hard walltime. This seems to be the more
1497          * is not required for the current pe !! 845          * natural solution, but the full ramifications of this
1498          * starting a new period, pushing the !! 846          * approach need further study.
1499          */                                      847          */
1500         if (dl_se->dl_defer && dl_se->dl_thro !! 848         delta_exec = rq_clock_task(rq) - curr->se.exec_start;
1501                 /*                            !! 849         if (unlikely((s64)delta_exec <= 0))
1502                  * If the server was previous !! 850                 return;
1503                  * took place, it this point  << 
1504                  * was able to get runtime in << 
1505                  * state.                     << 
1506                  */                           << 
1507                 dl_se->dl_defer_running = 0;  << 
1508                                                  851 
1509                 hrtimer_try_to_cancel(&dl_se- !! 852         schedstat_set(curr->se.statistics.exec_max,
                                                   >> 853                       max(curr->se.statistics.exec_max, delta_exec));
1510                                                  854 
1511                 replenish_dl_new_period(dl_se !! 855         curr->se.sum_exec_runtime += delta_exec;
                                                   >> 856         account_group_exec_runtime(curr, delta_exec);
1512                                                  857 
1513                 /*                            !! 858         curr->se.exec_start = rq_clock_task(rq);
1514                  * Not being able to start th !! 859         cpuacct_charge(curr, delta_exec);
1515                  * be started for whatever re << 
1516                  * and queue right away. Othe << 
1517                  * to what enqueue_dl_entity( << 
1518                  */                           << 
1519                 WARN_ON_ONCE(!start_dl_timer( << 
1520                                                  860 
1521                 return;                       !! 861         sched_rt_avg_update(rq, delta_exec);
1522         }                                     << 
1523                                                  862 
1524 throttle:                                     !! 863         dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
1525         if (dl_runtime_exceeded(dl_se) || dl_ !! 864         if (dl_runtime_exceeded(dl_se)) {
1526                 dl_se->dl_throttled = 1;         865                 dl_se->dl_throttled = 1;
                                                   >> 866                 __dequeue_task_dl(rq, curr, 0);
                                                   >> 867                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
                                                   >> 868                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1527                                                  869 
1528                 /* If requested, inform the u !! 870                 if (!is_leftmost(curr, &rq->dl))
1529                 if (dl_runtime_exceeded(dl_se << 
1530                     (dl_se->flags & SCHED_FLA << 
1531                         dl_se->dl_overrun = 1 << 
1532                                               << 
1533                 dequeue_dl_entity(dl_se, 0);  << 
1534                 if (!dl_server(dl_se)) {      << 
1535                         update_stats_dequeue_ << 
1536                         dequeue_pushable_dl_t << 
1537                 }                             << 
1538                                               << 
1539                 if (unlikely(is_dl_boosted(dl << 
1540                         if (dl_server(dl_se)) << 
1541                                 enqueue_dl_en << 
1542                         else                  << 
1543                                 enqueue_task_ << 
1544                 }                             << 
1545                                               << 
1546                 if (!is_leftmost(dl_se, &rq-> << 
1547                         resched_curr(rq);        871                         resched_curr(rq);
1548         }                                        872         }
1549                                                  873 
1550         /*                                       874         /*
1551          * The fair server (sole dl_server) d << 
1552          * workload because it is running fai << 
1553          */                                   << 
1554         if (dl_se == &rq->fair_server)        << 
1555                 return;                       << 
1556                                               << 
1557 #ifdef CONFIG_RT_GROUP_SCHED                  << 
1558         /*                                    << 
1559          * Because -- for now -- we share the    875          * Because -- for now -- we share the rt bandwidth, we need to
1560          * account our runtime there too, oth    876          * account our runtime there too, otherwise actual rt tasks
1561          * would be able to exceed the shared    877          * would be able to exceed the shared quota.
1562          *                                       878          *
1563          * Account to the root rt group for n    879          * Account to the root rt group for now.
1564          *                                       880          *
1565          * The solution we're working towards    881          * The solution we're working towards is having the RT groups scheduled
1566          * using deadline servers -- however     882          * using deadline servers -- however there's a few nasties to figure
1567          * out before that can happen.           883          * out before that can happen.
1568          */                                      884          */
1569         if (rt_bandwidth_enabled()) {            885         if (rt_bandwidth_enabled()) {
1570                 struct rt_rq *rt_rq = &rq->rt    886                 struct rt_rq *rt_rq = &rq->rt;
1571                                                  887 
1572                 raw_spin_lock(&rt_rq->rt_runt    888                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1573                 /*                               889                 /*
1574                  * We'll let actual RT tasks     890                  * We'll let actual RT tasks worry about the overflow here, we
1575                  * have our own CBS to keep u    891                  * have our own CBS to keep us inline; only account when RT
1576                  * bandwidth is relevant.        892                  * bandwidth is relevant.
1577                  */                              893                  */
1578                 if (sched_rt_bandwidth_accoun    894                 if (sched_rt_bandwidth_account(rt_rq))
1579                         rt_rq->rt_time += del    895                         rt_rq->rt_time += delta_exec;
1580                 raw_spin_unlock(&rt_rq->rt_ru    896                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1581         }                                        897         }
1582 #endif                                        << 
1583 }                                             << 
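
Stripped of the dl_server, boosting and RT-group details, the enforcement above reduces to: charge the scaled execution time against the budget and throttle once it is exhausted (or the task yielded), leaving the rest to the per-entity replenishment timer. A reduced sketch under those simplifications, with toy helpers in place of the kernel's:

        /* Hedged sketch of the CBS enforcement step performed above; timer,
         * boosting and dl_server handling are deliberately left out. */
        #include <stdbool.h>
        #include <stdint.h>

        struct toy_cbs {
                int64_t runtime;        /* remaining budget, may go negative */
                int     throttled;
                int     yielded;
        };

        /* Placeholder for arming the per-entity replenishment hrtimer. */
        static bool toy_start_replenish_timer(struct toy_cbs *se)
        {
                (void)se;
                return true;
        }

        /* Returns true when the caller should trigger a reschedule. */
        static bool toy_update_curr(struct toy_cbs *se, int64_t scaled_delta)
        {
                if (scaled_delta > 0)
                        se->runtime -= scaled_delta;
                else if (!se->yielded)
                        return false;

                /* Out of budget, or the task gave up the rest of its instance. */
                if (se->runtime <= 0 || se->yielded) {
                        se->throttled = 1;
                        if (!toy_start_replenish_timer(se))
                                se->throttled = 0;   /* e.g. replenish right away */
                        return true;
                }
                return false;
        }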
1584                                               << 
1585 /*                                            << 
1586  * In the non-defer mode, the idle time is no << 
1587  * server provides a guarantee.               << 
1588  *                                            << 
1589  * If the dl_server is in defer mode, the idl << 
1590  * as time available for the fair server, avo << 
1591  * rt scheduler that did not consumed that ti << 
1592  */                                           << 
1593 void dl_server_update_idle_time(struct rq *rq << 
1594 {                                             << 
1595         s64 delta_exec, scaled_delta_exec;    << 
1596                                               << 
1597         if (!rq->fair_server.dl_defer)        << 
1598                 return;                       << 
1599                                               << 
1600         /* no need to discount more */        << 
1601         if (rq->fair_server.runtime < 0)      << 
1602                 return;                       << 
1603                                               << 
1604         delta_exec = rq_clock_task(rq) - p->s << 
1605         if (delta_exec < 0)                   << 
1606                 return;                       << 
1607                                               << 
1608         scaled_delta_exec = dl_scaled_delta_e << 
1609                                               << 
1610         rq->fair_server.runtime -= scaled_del << 
1611                                               << 
1612         if (rq->fair_server.runtime < 0) {    << 
1613                 rq->fair_server.dl_defer_runn << 
1614                 rq->fair_server.runtime = 0;  << 
1615         }                                     << 
1616                                               << 
1617         p->se.exec_start = rq_clock_task(rq); << 
1618 }                                                898 }
1619                                                  899 
1620 void dl_server_update(struct sched_dl_entity  !! 900 #ifdef CONFIG_SMP
1621 {                                             << 
1622         /* 0 runtime = fair server disabled * << 
1623         if (dl_se->dl_runtime)                << 
1624                 update_curr_dl_se(dl_se->rq,  << 
1625 }                                             << 
1626                                               << 
1627 void dl_server_start(struct sched_dl_entity * << 
1628 {                                             << 
1629         struct rq *rq = dl_se->rq;            << 
1630                                               << 
1631         /*                                    << 
1632          * XXX: the apply do not work fine at << 
1633          * fair server because things are not << 
1634          * this before getting generic.       << 
1635          */                                   << 
1636         if (!dl_server(dl_se)) {              << 
1637                 u64 runtime =  50 * NSEC_PER_ << 
1638                 u64 period = 1000 * NSEC_PER_ << 
1639                                               << 
1640                 dl_server_apply_params(dl_se, << 
1641                                               << 
1642                 dl_se->dl_server = 1;         << 
1643                 dl_se->dl_defer = 1;          << 
1644                 setup_new_dl_entity(dl_se);   << 
1645         }                                     << 
1646                                               << 
1647         if (!dl_se->dl_runtime)               << 
1648                 return;                       << 
1649                                               << 
1650         enqueue_dl_entity(dl_se, ENQUEUE_WAKE << 
1651         if (!dl_task(dl_se->rq->curr) || dl_e << 
1652                 resched_curr(dl_se->rq);      << 
1653 }                                             << 
1654                                               << 
1655 void dl_server_stop(struct sched_dl_entity *d << 
1656 {                                             << 
1657         if (!dl_se->dl_runtime)               << 
1658                 return;                       << 
1659                                               << 
1660         dequeue_dl_entity(dl_se, DEQUEUE_SLEE << 
1661         hrtimer_try_to_cancel(&dl_se->dl_time << 
1662         dl_se->dl_defer_armed = 0;            << 
1663         dl_se->dl_throttled = 0;              << 
1664 }                                             << 
1665                                               << 
1666 void dl_server_init(struct sched_dl_entity *d << 
1667                     dl_server_has_tasks_f has << 
1668                     dl_server_pick_f pick_tas << 
1669 {                                             << 
1670         dl_se->rq = rq;                       << 
1671         dl_se->server_has_tasks = has_tasks;  << 
1672         dl_se->server_pick_task = pick_task;  << 
1673 }                                             << 
1674                                               << 
1675 void __dl_server_attach_root(struct sched_dl_ << 
1676 {                                             << 
1677         u64 new_bw = dl_se->dl_bw;            << 
1678         int cpu = cpu_of(rq);                 << 
1679         struct dl_bw *dl_b;                   << 
1680                                               << 
1681         dl_b = dl_bw_of(cpu_of(rq));          << 
1682         guard(raw_spinlock)(&dl_b->lock);     << 
1683                                               << 
1684         if (!dl_bw_cpus(cpu))                 << 
1685                 return;                       << 
1686                                               << 
1687         __dl_add(dl_b, new_bw, dl_bw_cpus(cpu << 
1688 }                                             << 
1689                                               << 
1690 int dl_server_apply_params(struct sched_dl_en << 
1691 {                                             << 
1692         u64 old_bw = init ? 0 : to_ratio(dl_s << 
1693         u64 new_bw = to_ratio(period, runtime << 
1694         struct rq *rq = dl_se->rq;            << 
1695         int cpu = cpu_of(rq);                 << 
1696         struct dl_bw *dl_b;                   << 
1697         unsigned long cap;                    << 
1698         int retval = 0;                       << 
1699         int cpus;                             << 
1700                                               << 
1701         dl_b = dl_bw_of(cpu);                 << 
1702         guard(raw_spinlock)(&dl_b->lock);     << 
1703                                               << 
1704         cpus = dl_bw_cpus(cpu);               << 
1705         cap = dl_bw_capacity(cpu);            << 
1706                                               << 
1707         if (__dl_overflow(dl_b, cap, old_bw,  << 
1708                 return -EBUSY;                << 
1709                                               << 
1710         if (init) {                           << 
1711                 __add_rq_bw(new_bw, &rq->dl); << 
1712                 __dl_add(dl_b, new_bw, cpus); << 
1713         } else {                              << 
1714                 __dl_sub(dl_b, dl_se->dl_bw,  << 
1715                 __dl_add(dl_b, new_bw, cpus); << 
1716                                               << 
1717                 dl_rq_change_utilization(rq,  << 
1718         }                                     << 
1719                                               << 
1720         dl_se->dl_runtime = runtime;          << 
1721         dl_se->dl_deadline = period;          << 
1722         dl_se->dl_period = period;            << 
1723                                               << 
1724         dl_se->runtime = 0;                   << 
1725         dl_se->deadline = 0;                  << 
1726                                               << 
1727         dl_se->dl_bw = to_ratio(dl_se->dl_per << 
1728         dl_se->dl_density = to_ratio(dl_se->d << 
1729                                               << 
1730         return retval;                        << 
1731 }                                             << 
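
dl_server_apply_params() admits or rejects a (runtime, period) pair by converting it to a fixed-point utilization and checking the new total against the available bandwidth. Below is a toy version of that admission arithmetic; the single max_bw value and the simplified overflow test stand in for the dl_bw/capacity handling:

        /* Hedged sketch of the bandwidth admission test behind the server
         * parameter update; names and the flat max_bw model are illustrative. */
        #include <stdbool.h>
        #include <stdint.h>

        #define TOY_BW_SHIFT    20

        /* utilization = runtime / period, as a fixed-point fraction */
        static uint64_t toy_to_ratio(uint64_t period, uint64_t runtime)
        {
                return (runtime << TOY_BW_SHIFT) / period;
        }

        /* Replacing old_bw with new_bw must keep the committed total <= max_bw. */
        static bool toy_bw_overflow(uint64_t total_bw, uint64_t max_bw,
                                    uint64_t old_bw, uint64_t new_bw)
        {
                return total_bw - old_bw + new_bw > max_bw;
        }

        /*
         * Example: growing the fair server from 50 ms / 1 s (~5%) to
         * 100 ms / 1 s (~10%) is admitted only while another 5% is free:
         *
         *      old = toy_to_ratio(1000000000ULL,  50000000ULL);
         *      new = toy_to_ratio(1000000000ULL, 100000000ULL);
         *      ok  = !toy_bw_overflow(total, max, old, new);
         */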
1732                                               << 
1733 /*                                            << 
1734  * Update the current task's runtime statisti << 
1735  * a -deadline task and has not been removed  << 
1736  */                                           << 
1737 static void update_curr_dl(struct rq *rq)     << 
1738 {                                             << 
1739         struct task_struct *curr = rq->curr;  << 
1740         struct sched_dl_entity *dl_se = &curr << 
1741         s64 delta_exec;                       << 
1742                                               << 
1743         if (!dl_task(curr) || !on_dl_rq(dl_se << 
1744                 return;                       << 
1745                                               << 
1746         /*                                    << 
1747          * Consumed budget is computed consid << 
1748          * observed by schedulable tasks (exc << 
1749          * in hardirq context, etc.). Deadlin << 
1750          * computed using hard walltime. This << 
1751          * natural solution, but the full ram << 
1752          * approach need further study.       << 
1753          */                                   << 
1754         delta_exec = update_curr_common(rq);  << 
1755         update_curr_dl_se(rq, dl_se, delta_ex << 
1756 }                                             << 
1757                                               << 
1758 static enum hrtimer_restart inactive_task_tim << 
1759 {                                             << 
1760         struct sched_dl_entity *dl_se = conta << 
1761                                               << 
1762                                               << 
1763         struct task_struct *p = NULL;         << 
1764         struct rq_flags rf;                   << 
1765         struct rq *rq;                        << 
1766                                               << 
1767         if (!dl_server(dl_se)) {              << 
1768                 p = dl_task_of(dl_se);        << 
1769                 rq = task_rq_lock(p, &rf);    << 
1770         } else {                              << 
1771                 rq = dl_se->rq;               << 
1772                 rq_lock(rq, &rf);             << 
1773         }                                     << 
1774                                               << 
1775         sched_clock_tick();                   << 
1776         update_rq_clock(rq);                  << 
1777                                               << 
1778         if (dl_server(dl_se))                 << 
1779                 goto no_task;                 << 
1780                                               << 
1781         if (!dl_task(p) || READ_ONCE(p->__sta << 
1782                 struct dl_bw *dl_b = dl_bw_of << 
1783                                               << 
1784                 if (READ_ONCE(p->__state) ==  << 
1785                         sub_running_bw(&p->dl << 
1786                         sub_rq_bw(&p->dl, dl_ << 
1787                         dl_se->dl_non_contend << 
1788                 }                             << 
1789                                               << 
1790                 raw_spin_lock(&dl_b->lock);   << 
1791                 __dl_sub(dl_b, p->dl.dl_bw, d << 
1792                 raw_spin_unlock(&dl_b->lock); << 
1793                 __dl_clear_params(dl_se);     << 
1794                                               << 
1795                 goto unlock;                  << 
1796         }                                     << 
1797                                               << 
1798 no_task:                                      << 
1799         if (dl_se->dl_non_contending == 0)    << 
1800                 goto unlock;                  << 
1801                                               << 
1802         sub_running_bw(dl_se, &rq->dl);       << 
1803         dl_se->dl_non_contending = 0;         << 
1804 unlock:                                       << 
1805                                               << 
1806         if (!dl_server(dl_se)) {              << 
1807                 task_rq_unlock(rq, p, &rf);   << 
1808                 put_task_struct(p);           << 
1809         } else {                              << 
1810                 rq_unlock(rq, &rf);           << 
1811         }                                     << 
1812                                                  901 
1813         return HRTIMER_NORESTART;             !! 902 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);
1814 }                                             << 
1815                                                  903 
1816 static void init_dl_inactive_task_timer(struc !! 904 static inline u64 next_deadline(struct rq *rq)
1817 {                                                905 {
1818         struct hrtimer *timer = &dl_se->inact !! 906         struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);
1819                                                  907 
1820         hrtimer_init(timer, CLOCK_MONOTONIC,  !! 908         if (next && dl_prio(next->prio))
1821         timer->function = inactive_task_timer !! 909                 return next->dl.deadline;
                                                   >> 910         else
                                                   >> 911                 return 0;
1822 }                                                912 }
1823                                                  913 
1824 #define __node_2_dle(node) \                  << 
1825         rb_entry((node), struct sched_dl_enti << 
1826                                               << 
1827 #ifdef CONFIG_SMP                             << 
1828                                               << 
1829 static void inc_dl_deadline(struct dl_rq *dl_    914 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1830 {                                                915 {
1831         struct rq *rq = rq_of_dl_rq(dl_rq);      916         struct rq *rq = rq_of_dl_rq(dl_rq);
1832                                                  917 
1833         if (dl_rq->earliest_dl.curr == 0 ||      918         if (dl_rq->earliest_dl.curr == 0 ||
1834             dl_time_before(deadline, dl_rq->e    919             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1835                 if (dl_rq->earliest_dl.curr = !! 920                 /*
1836                         cpupri_set(&rq->rd->c !! 921                  * If the dl_rq had no -deadline tasks, or if the new task
                                                   >> 922                  * has shorter deadline than the current one on dl_rq, we
                                                   >> 923                  * know that the previous earliest becomes our next earliest,
                                                   >> 924                  * as the new task becomes the earliest itself.
                                                   >> 925                  */
                                                   >> 926                 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
1837                 dl_rq->earliest_dl.curr = dea    927                 dl_rq->earliest_dl.curr = deadline;
1838                 cpudl_set(&rq->rd->cpudl, rq- !! 928                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
                                                   >> 929         } else if (dl_rq->earliest_dl.next == 0 ||
                                                   >> 930                    dl_time_before(deadline, dl_rq->earliest_dl.next)) {
                                                   >> 931                 /*
                                                   >> 932                  * On the other hand, if the new -deadline task has a
                                                   >> 933                  * a later deadline than the earliest one on dl_rq, but
                                                   >> 934                  * it is earlier than the next (if any), we must
                                                   >> 935                  * recompute the next-earliest.
                                                   >> 936                  */
                                                   >> 937                 dl_rq->earliest_dl.next = next_deadline(rq);
1839         }                                        938         }
1840 }                                                939 }
1841                                                  940 
1842 static void dec_dl_deadline(struct dl_rq *dl_    941 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1843 {                                                942 {
1844         struct rq *rq = rq_of_dl_rq(dl_rq);      943         struct rq *rq = rq_of_dl_rq(dl_rq);
1845                                                  944 
1846         /*                                       945         /*
1847          * Since we may have removed our earl    946          * Since we may have removed our earliest (and/or next earliest)
1848          * task we must recompute them.          947          * task we must recompute them.
1849          */                                      948          */
1850         if (!dl_rq->dl_nr_running) {             949         if (!dl_rq->dl_nr_running) {
1851                 dl_rq->earliest_dl.curr = 0;     950                 dl_rq->earliest_dl.curr = 0;
1852                 dl_rq->earliest_dl.next = 0;     951                 dl_rq->earliest_dl.next = 0;
1853                 cpudl_clear(&rq->rd->cpudl, r !! 952                 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1854                 cpupri_set(&rq->rd->cpupri, r << 
1855         } else {                                 953         } else {
1856                 struct rb_node *leftmost = rb !! 954                 struct rb_node *leftmost = dl_rq->rb_leftmost;
1857                 struct sched_dl_entity *entry !! 955                 struct sched_dl_entity *entry;
1858                                                  956 
                                                   >> 957                 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1859                 dl_rq->earliest_dl.curr = ent    958                 dl_rq->earliest_dl.curr = entry->deadline;
1860                 cpudl_set(&rq->rd->cpudl, rq- !! 959                 dl_rq->earliest_dl.next = next_deadline(rq);
                                                   >> 960                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
1861         }                                        961         }
1862 }                                                962 }
1863                                                  963 
1864 #else                                            964 #else
1865                                                  965 
1866 static inline void inc_dl_deadline(struct dl_    966 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1867 static inline void dec_dl_deadline(struct dl_    967 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1868                                                  968 
1869 #endif /* CONFIG_SMP */                          969 #endif /* CONFIG_SMP */
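
inc_dl_deadline()/dec_dl_deadline() above keep a cached copy of the runqueue's earliest deadline so it can be published to cpudl without walking the tree on every query. A toy sketch of that cache maintenance, leaving out the cpudl/cpupri publication:

        /* Hedged sketch of keeping the per-rq "earliest deadline" cache in
         * sync on enqueue/dequeue; all names are illustrative stand-ins. */
        #include <stdint.h>

        struct toy_dl_rq {
                unsigned int    nr_running;
                uint64_t        earliest;       /* 0 means no deadline tasks */
        };

        /* Wraparound-safe "a before b", in the spirit of dl_time_before(). */
        static inline int toy_before(uint64_t a, uint64_t b)
        {
                return (int64_t)(a - b) < 0;
        }

        /* A newly enqueued deadline that beats the cached one becomes the
         * value to publish to the push/pull machinery. */
        static void toy_inc_deadline(struct toy_dl_rq *rq, uint64_t deadline)
        {
                if (rq->earliest == 0 || toy_before(deadline, rq->earliest))
                        rq->earliest = deadline;
        }

        /* On dequeue the caller re-reads the tree's leftmost deadline (or
         * clears the cache when the last deadline task left). */
        static void toy_dec_deadline(struct toy_dl_rq *rq, uint64_t leftmost)
        {
                rq->earliest = rq->nr_running ? leftmost : 0;
        }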
1870                                                  970 
1871 static inline                                    971 static inline
1872 void inc_dl_tasks(struct sched_dl_entity *dl_    972 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1873 {                                                973 {
                                                   >> 974         int prio = dl_task_of(dl_se)->prio;
1874         u64 deadline = dl_se->deadline;          975         u64 deadline = dl_se->deadline;
1875                                                  976 
                                                   >> 977         WARN_ON(!dl_prio(prio));
1876         dl_rq->dl_nr_running++;                  978         dl_rq->dl_nr_running++;
1877         add_nr_running(rq_of_dl_rq(dl_rq), 1)    979         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1878                                                  980 
1879         inc_dl_deadline(dl_rq, deadline);        981         inc_dl_deadline(dl_rq, deadline);
                                                   >> 982         inc_dl_migration(dl_se, dl_rq);
1880 }                                                983 }
1881                                                  984 
1882 static inline                                    985 static inline
1883 void dec_dl_tasks(struct sched_dl_entity *dl_    986 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1884 {                                                987 {
                                                   >> 988         int prio = dl_task_of(dl_se)->prio;
                                                   >> 989 
                                                   >> 990         WARN_ON(!dl_prio(prio));
1885         WARN_ON(!dl_rq->dl_nr_running);          991         WARN_ON(!dl_rq->dl_nr_running);
1886         dl_rq->dl_nr_running--;                  992         dl_rq->dl_nr_running--;
1887         sub_nr_running(rq_of_dl_rq(dl_rq), 1)    993         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1888                                                  994 
1889         dec_dl_deadline(dl_rq, dl_se->deadlin    995         dec_dl_deadline(dl_rq, dl_se->deadline);
1890 }                                             !! 996         dec_dl_migration(dl_se, dl_rq);
1891                                               << 
1892 static inline bool __dl_less(struct rb_node * << 
1893 {                                             << 
1894         return dl_time_before(__node_2_dle(a) << 
1895 }                                             << 
1896                                               << 
1897 static __always_inline struct sched_statistic << 
1898 __schedstats_from_dl_se(struct sched_dl_entit << 
1899 {                                             << 
1900         if (!schedstat_enabled())             << 
1901                 return NULL;                  << 
1902                                               << 
1903         if (dl_server(dl_se))                 << 
1904                 return NULL;                  << 
1905                                               << 
1906         return &dl_task_of(dl_se)->stats;     << 
1907 }                                             << 
1908                                               << 
1909 static inline void                            << 
1910 update_stats_wait_start_dl(struct dl_rq *dl_r << 
1911 {                                             << 
1912         struct sched_statistics *stats = __sc << 
1913         if (stats)                            << 
1914                 __update_stats_wait_start(rq_ << 
1915 }                                             << 
1916                                               << 
1917 static inline void                            << 
1918 update_stats_wait_end_dl(struct dl_rq *dl_rq, << 
1919 {                                             << 
1920         struct sched_statistics *stats = __sc << 
1921         if (stats)                            << 
1922                 __update_stats_wait_end(rq_of << 
1923 }                                             << 
1924                                               << 
1925 static inline void                            << 
1926 update_stats_enqueue_sleeper_dl(struct dl_rq  << 
1927 {                                             << 
1928         struct sched_statistics *stats = __sc << 
1929         if (stats)                            << 
1930                 __update_stats_enqueue_sleepe << 
1931 }                                             << 
1932                                               << 
1933 static inline void                            << 
1934 update_stats_enqueue_dl(struct dl_rq *dl_rq,  << 
1935                         int flags)            << 
1936 {                                             << 
1937         if (!schedstat_enabled())             << 
1938                 return;                       << 
1939                                               << 
1940         if (flags & ENQUEUE_WAKEUP)           << 
1941                 update_stats_enqueue_sleeper_ << 
1942 }                                             << 
1943                                               << 
1944 static inline void                            << 
1945 update_stats_dequeue_dl(struct dl_rq *dl_rq,  << 
1946                         int flags)            << 
1947 {                                             << 
1948         struct task_struct *p = dl_task_of(dl << 
1949                                               << 
1950         if (!schedstat_enabled())             << 
1951                 return;                       << 
1952                                               << 
1953         if ((flags & DEQUEUE_SLEEP)) {        << 
1954                 unsigned int state;           << 
1955                                               << 
1956                 state = READ_ONCE(p->__state) << 
1957                 if (state & TASK_INTERRUPTIBL << 
1958                         __schedstat_set(p->st << 
1959                                         rq_cl << 
1960                                               << 
1961                 if (state & TASK_UNINTERRUPTI << 
1962                         __schedstat_set(p->st << 
1963                                         rq_cl << 
1964         }                                     << 
1965 }                                                997 }
1966                                                  998 
1967 static void __enqueue_dl_entity(struct sched_    999 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1968 {                                                1000 {
1969         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1001         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
                                                   >> 1002         struct rb_node **link = &dl_rq->rb_root.rb_node;
                                                   >> 1003         struct rb_node *parent = NULL;
                                                   >> 1004         struct sched_dl_entity *entry;
                                                   >> 1005         int leftmost = 1;
                                                   >> 1006 
                                                   >> 1007         BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
                                                   >> 1008 
                                                   >> 1009         while (*link) {
                                                   >> 1010                 parent = *link;
                                                   >> 1011                 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                                                   >> 1012                 if (dl_time_before(dl_se->deadline, entry->deadline))
                                                   >> 1013                         link = &parent->rb_left;
                                                   >> 1014                 else {
                                                   >> 1015                         link = &parent->rb_right;
                                                   >> 1016                         leftmost = 0;
                                                   >> 1017                 }
                                                   >> 1018         }
1970                                                  1019 
1971         WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->r !! 1020         if (leftmost)
                                                   >> 1021                 dl_rq->rb_leftmost = &dl_se->rb_node;
1972                                                  1022 
1973         rb_add_cached(&dl_se->rb_node, &dl_rq !! 1023         rb_link_node(&dl_se->rb_node, parent, link);
                                                   >> 1024         rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
1974                                                  1025 
1975         inc_dl_tasks(dl_se, dl_rq);              1026         inc_dl_tasks(dl_se, dl_rq);
1976 }                                                1027 }
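
__enqueue_dl_entity() keeps entities ordered by absolute deadline (EDF) using a wraparound-safe comparison. In the sketch below a plain sorted list stands in for the cached rbtree; the comparison mirrors the dl_time_before() idea of looking at the signed difference:

        /* Hedged sketch of the EDF ordering used by the enqueue above; a
         * sorted singly-linked list replaces the kernel's cached rbtree. */
        #include <stdint.h>

        struct toy_dl_entity {
                uint64_t deadline;              /* absolute deadline */
                struct toy_dl_entity *next;     /* stand-in for rb_node */
        };

        /* Signed difference is robust against u64 clock wraparound. */
        static inline int toy_dl_time_before(uint64_t a, uint64_t b)
        {
                return (int64_t)(a - b) < 0;
        }

        /* Insert keeping the earliest deadline first; the head is what an
         * EDF pick would return. Equal deadlines go to the right, like the
         * rbtree comparator. */
        static void toy_enqueue(struct toy_dl_entity **head,
                                struct toy_dl_entity *se)
        {
                while (*head && !toy_dl_time_before(se->deadline, (*head)->deadline))
                        head = &(*head)->next;
                se->next = *head;
                *head = se;
        }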
1977                                                  1028 
1978 static void __dequeue_dl_entity(struct sched_    1029 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1979 {                                                1030 {
1980         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1031         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1981                                                  1032 
1982         if (RB_EMPTY_NODE(&dl_se->rb_node))      1033         if (RB_EMPTY_NODE(&dl_se->rb_node))
1983                 return;                          1034                 return;
1984                                                  1035 
1985         rb_erase_cached(&dl_se->rb_node, &dl_ !! 1036         if (dl_rq->rb_leftmost == &dl_se->rb_node) {
                                                   >> 1037                 struct rb_node *next_node;
1986                                                  1038 
                                                   >> 1039                 next_node = rb_next(&dl_se->rb_node);
                                                   >> 1040                 dl_rq->rb_leftmost = next_node;
                                                   >> 1041         }
                                                   >> 1042 
                                                   >> 1043         rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
1987         RB_CLEAR_NODE(&dl_se->rb_node);          1044         RB_CLEAR_NODE(&dl_se->rb_node);
1988                                                  1045 
1989         dec_dl_tasks(dl_se, dl_rq);              1046         dec_dl_tasks(dl_se, dl_rq);
1990 }                                                1047 }
1991                                                  1048 
1992 static void                                      1049 static void
1993 enqueue_dl_entity(struct sched_dl_entity *dl_ !! 1050 enqueue_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 1051                   struct sched_dl_entity *pi_se, int flags)
1994 {                                                1052 {
1995         WARN_ON_ONCE(on_dl_rq(dl_se));        !! 1053         BUG_ON(on_dl_rq(dl_se));
1996                                               << 
1997         update_stats_enqueue_dl(dl_rq_of_se(d << 
1998                                               << 
1999         /*                                    << 
2000          * Check if a constrained deadline ta << 
2001          * after the deadline but before the  << 
2002          * If that is the case, the task will << 
2003          * the replenishment timer will be se << 
2004          */                                   << 
2005         if (!dl_se->dl_throttled && !dl_is_im << 
2006                 dl_check_constrained_dl(dl_se << 
2007                                               << 
2008         if (flags & (ENQUEUE_RESTORE|ENQUEUE_ << 
2009                 struct dl_rq *dl_rq = dl_rq_o << 
2010                                               << 
2011                 add_rq_bw(dl_se, dl_rq);      << 
2012                 add_running_bw(dl_se, dl_rq); << 
2013         }                                     << 
2014                                               << 
2015         /*                                    << 
2016          * If p is throttled, we do not enque << 
2017          * its budget it needs a replenishmen << 
2018          * its rq, the bandwidth timer callba << 
2019          * run yet) will take care of this.   << 
2020          * However, the active utilization do << 
2021          * that the task is on the runqueue o << 
2022          * task's state - in GRUB parlance, " << 
2023          * In other words, even if a task is  << 
2024          * be counted in the active utilizati << 
2025          * add_running_bw().                  << 
2026          */                                   << 
2027         if (!dl_se->dl_defer && dl_se->dl_thr << 
2028                 if (flags & ENQUEUE_WAKEUP)   << 
2029                         task_contending(dl_se << 
2030                                               << 
2031                 return;                       << 
2032         }                                     << 
2033                                                  1054 
2034         /*                                       1055         /*
2035          * If this is a wakeup or a new insta    1056          * If this is a wakeup or a new instance, the scheduling
2036          * parameters of the task might need     1057          * parameters of the task might need updating. Otherwise,
2037          * we want a replenishment of its run    1058          * we want a replenishment of its runtime.
2038          */                                      1059          */
2039         if (flags & ENQUEUE_WAKEUP) {         !! 1060         if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
2040                 task_contending(dl_se, flags) !! 1061                 update_dl_entity(dl_se, pi_se);
2041                 update_dl_entity(dl_se);      !! 1062         else if (flags & ENQUEUE_REPLENISH)
2042         } else if (flags & ENQUEUE_REPLENISH) !! 1063                 replenish_dl_entity(dl_se, pi_se);
2043                 replenish_dl_entity(dl_se);   << 
2044         } else if ((flags & ENQUEUE_RESTORE)  << 
2045                    dl_time_before(dl_se->dead << 
2046                 setup_new_dl_entity(dl_se);   << 
2047         }                                     << 
2048                                               << 
2049         /*                                    << 
2050          * If the reservation is still thrott << 
2051          * deferred task and still got to wai << 
2052          */                                   << 
2053         if (dl_se->dl_throttled && start_dl_t << 
2054                 return;                       << 
2055                                               << 
2056         /*                                    << 
2057          * We're about to enqueue, make sure  << 
2058          * In case the timer was not started, << 
2059          * has passed, mark as not throttled  << 
2060          * Also cancel earlier timers, since  << 
2061          */                                   << 
2062         if (dl_se->dl_throttled) {            << 
2063                 hrtimer_try_to_cancel(&dl_se- << 
2064                 dl_se->dl_defer_armed = 0;    << 
2065                 dl_se->dl_throttled = 0;      << 
2066         }                                     << 
2067                                                  1064 
2068         __enqueue_dl_entity(dl_se);              1065         __enqueue_dl_entity(dl_se);
2069 }                                                1066 }
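
The enqueue path above mostly dispatches on the wakeup/replenish/restore flags and, as the GRUB comment explains, keeps a throttled task accounted as active bandwidth even though it is not inserted into the tree. A heavily simplified, illustrative sketch of those decisions (defer mode, timers and statistics omitted; all names are toy stand-ins):

        /* Hedged sketch of the enqueue decisions above; not the kernel's
         * control flow, just the flag dispatch and the GRUB accounting idea. */
        #include <stdbool.h>

        #define TOY_ENQUEUE_WAKEUP      0x01
        #define TOY_ENQUEUE_REPLENISH   0x02
        #define TOY_ENQUEUE_RESTORE     0x04

        struct toy_se {
                bool throttled;
                bool contending;        /* GRUB: counted in running_bw */
                bool queued;            /* inserted in the EDF tree */
        };

        static void toy_enqueue(struct toy_se *se, int flags)
        {
                /*
                 * Bandwidth is accounted even if the task stays off the tree:
                 * a throttled task is still "active contending" for GRUB.
                 */
                if (flags & TOY_ENQUEUE_RESTORE)
                        se->contending = true;

                /* A throttled task waits for its replenishment timer instead. */
                if (se->throttled && !(flags & TOY_ENQUEUE_REPLENISH))
                        return;

                /*
                 * Wakeup would refresh stale deadline/runtime, replenish would
                 * post a fresh budget (both modelled elsewhere in this file);
                 * here we only clear the throttle and insert the entity.
                 */
                se->throttled = false;
                se->queued = true;
        }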
2070                                                  1067 
2071 static void dequeue_dl_entity(struct sched_dl !! 1068 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
2072 {                                                1069 {
2073         __dequeue_dl_entity(dl_se);              1070         __dequeue_dl_entity(dl_se);
2074                                               << 
2075         if (flags & (DEQUEUE_SAVE|DEQUEUE_MIG << 
2076                 struct dl_rq *dl_rq = dl_rq_o << 
2077                                               << 
2078                 sub_running_bw(dl_se, dl_rq); << 
2079                 sub_rq_bw(dl_se, dl_rq);      << 
2080         }                                     << 
2081                                               << 
2082         /*                                    << 
2083          * This check allows to start the ina << 
2084          * decrease the active utilization, i << 
2085          * when the task blocks and when it i << 
2086          * (p->state == TASK_DEAD). We can ha << 
2087          * way, because from GRUB's point of  << 
2088          * (the task moves from "active conte << 
2089          * or "inactive")                     << 
2090          */                                   << 
2091         if (flags & DEQUEUE_SLEEP)            << 
2092                 task_non_contending(dl_se);   << 
2093 }                                                1071 }
2094                                                  1072 
2095 static void enqueue_task_dl(struct rq *rq, st    1073 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2096 {                                                1074 {
2097         if (is_dl_boosted(&p->dl)) {          !! 1075         struct task_struct *pi_task = rt_mutex_get_top_task(p);
2098                 /*                            !! 1076         struct sched_dl_entity *pi_se = &p->dl;
2099                  * Because of delays in the d !! 1077 
2100                  * thread's runtime, it might !! 1078         /*
2101                  * goes to sleep in a rt mute !! 1079          * Use the scheduling parameters of the top pi-waiter
2102                  * a consequence, the thread  !! 1080          * task if we have one and its (absolute) deadline is
2103                  *                            !! 1081          * smaller than our one... OTW we keep our runtime and
2104                  * While waiting for the mute !! 1082          * deadline.
2105                  * boosted via PI, resulting  !! 1083          */
2106                  * and boosted at the same ti !! 1084         if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
2107                  *                            !! 1085                 pi_se = &pi_task->dl;
2108                  * In this case, the boost ov << 
2109                  */                           << 
2110                 if (p->dl.dl_throttled) {     << 
2111                         /*                    << 
2112                          * The replenish time << 
2113                          * problem if it fire << 
2114                          * are ignored in dl_ << 
2115                          *                    << 
2116                          * If the timer callb << 
2117                          * it will eventually << 
2118                          */                   << 
2119                         if (hrtimer_try_to_ca << 
2120                             !dl_server(&p->dl << 
2121                                 put_task_stru << 
2122                         p->dl.dl_throttled =  << 
2123                 }                             << 
2124         } else if (!dl_prio(p->normal_prio))     1086         } else if (!dl_prio(p->normal_prio)) {
2125                 /*                               1087                 /*
2126                  * Special case in which we h !! 1088                  * Special case in which we have a !SCHED_DEADLINE task
2127                  * to be deboosted, but excee !! 1089                  * that is going to be deboosted, but exceeds its
2128                  * replenishing it, as it's g !! 1090                  * runtime while doing so. No point in replenishing
2129                  * scheduling class after thi !! 1091                  * it, as it's going to return back to its original
2130                  * clear the flag, otherwise  !! 1092                  * scheduling class after this.
2131                  * being boosted again with n << 
2132                  * the throttle.              << 
2133                  */                              1093                  */
2134                 p->dl.dl_throttled = 0;       !! 1094                 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
2135                 if (!(flags & ENQUEUE_REPLENI << 
2136                         printk_deferred_once( << 
2137                                               << 
2138                                               << 
2139                 return;                          1095                 return;
2140         }                                        1096         }
2141                                                  1097 
2142         check_schedstat_required();           !! 1098         /*
2143         update_stats_wait_start_dl(dl_rq_of_s !! 1099          * Check if a constrained deadline task was activated
2144                                               !! 1100          * after the deadline but before the next period.
2145         if (p->on_rq == TASK_ON_RQ_MIGRATING) !! 1101          * If that is the case, the task will be throttled and
2146                 flags |= ENQUEUE_MIGRATING;   !! 1102          * the replenishment timer will be set to the next period.
2147                                               !! 1103          */
2148         enqueue_dl_entity(&p->dl, flags);     !! 1104         if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
                                                   >> 1105                 dl_check_constrained_dl(&p->dl);
2149                                                  1106 
2150         if (dl_server(&p->dl))                !! 1107         /*
                                                   >> 1108          * If p is throttled, we do nothing. In fact, if it exhausted
                                                   >> 1109          * its budget it needs a replenishment and, since it now is on
                                                   >> 1110          * its rq, the bandwidth timer callback (which clearly has not
                                                   >> 1111          * run yet) will take care of this.
                                                   >> 1112          */
                                                   >> 1113         if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
2151                 return;                          1114                 return;
2152                                                  1115 
2153         if (!task_current(rq, p) && !p->dl.dl !! 1116         enqueue_dl_entity(&p->dl, pi_se, flags);
                                                   >> 1117 
                                                   >> 1118         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
2154                 enqueue_pushable_dl_task(rq,     1119                 enqueue_pushable_dl_task(rq, p);
2155 }                                                1120 }
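The enqueue path above leaves a throttled entity alone unless ENQUEUE_REPLENISH is set, because the bandwidth timer will put it back on the runqueue once its budget is refilled. The following is a minimal userspace sketch of just that decision; toy_dl_entity, ENQ_REPLENISH and the refill value are illustrative assumptions, not kernel API.

    /* Toy model of the "enqueue while throttled" decision described above.
     * All names here (toy_dl_entity, ENQ_REPLENISH, ...) are illustrative
     * assumptions, not the kernel's API. */
    #include <stdbool.h>
    #include <stdio.h>

    #define ENQ_REPLENISH 0x1

    struct toy_dl_entity {
            bool throttled;     /* ran out of budget, waiting for the timer */
            long long runtime;  /* remaining budget, in ns */
    };

    /* Return true if the entity should actually be queued now; false if we
     * bail out and let the replenishment timer handle it later. */
    static bool toy_enqueue(struct toy_dl_entity *dl, int flags)
    {
            if (dl->throttled && !(flags & ENQ_REPLENISH))
                    return false;           /* timer callback will queue it */
            if (flags & ENQ_REPLENISH) {
                    dl->runtime = 10000000; /* pretend the budget was refilled */
                    dl->throttled = false;
            }
            return true;
    }

    int main(void)
    {
            struct toy_dl_entity dl = { .throttled = true, .runtime = 0 };

            printf("wakeup while throttled -> queued? %d\n", toy_enqueue(&dl, 0));
            printf("replenishment          -> queued? %d\n",
                   toy_enqueue(&dl, ENQ_REPLENISH));
            return 0;
    }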
2156                                                  1121 
2157 static bool dequeue_task_dl(struct rq *rq, st !! 1122 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2158 {                                                1123 {
2159         update_curr_dl(rq);                   !! 1124         dequeue_dl_entity(&p->dl);
2160                                               !! 1125         dequeue_pushable_dl_task(rq, p);
2161         if (p->on_rq == TASK_ON_RQ_MIGRATING) !! 1126 }
2162                 flags |= DEQUEUE_MIGRATING;   << 
2163                                               << 
2164         dequeue_dl_entity(&p->dl, flags);     << 
2165         if (!p->dl.dl_throttled && !dl_server << 
2166                 dequeue_pushable_dl_task(rq,  << 
2167                                                  1127 
2168         return true;                          !! 1128 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
                                                   >> 1129 {
                                                   >> 1130         update_curr_dl(rq);
                                                   >> 1131         __dequeue_task_dl(rq, p, flags);
2169 }                                                1132 }
2170                                                  1133 
2171 /*                                               1134 /*
2172  * Yield task semantic for -deadline tasks is    1135  * Yield task semantic for -deadline tasks is:
2173  *                                               1136  *
2174  *   get off from the CPU until our next inst    1137  *   get off from the CPU until our next instance, with
2175  *   a new runtime. This is of little use now    1138  *   a new runtime. This is of little use now, since we
2176  *   don't have a bandwidth reclaiming mechan    1139  *   don't have a bandwidth reclaiming mechanism. Anyway,
2177  *   bandwidth reclaiming is planned for the     1140  *   bandwidth reclaiming is planned for the future, and
2178  *   yield_task_dl will indicate that some sp    1141  *   yield_task_dl will indicate that some spare budget
2179  *   is available for other task instances to    1142  *   is available for other task instances to use it.
2180  */                                              1143  */
2181 static void yield_task_dl(struct rq *rq)         1144 static void yield_task_dl(struct rq *rq)
2182 {                                                1145 {
                                                   >> 1146         struct task_struct *p = rq->curr;
                                                   >> 1147 
2183         /*                                       1148         /*
2184          * We make the task go to sleep until    1149          * We make the task go to sleep until its current deadline by
2185          * forcing its runtime to zero. This     1150          * forcing its runtime to zero. This way, update_curr_dl() stops
2186          * it and the bandwidth timer will wa    1151          * it and the bandwidth timer will wake it up and will give it
2187          * new scheduling parameters (thanks     1152          * new scheduling parameters (thanks to dl_yielded=1).
2188          */                                      1153          */
2189         rq->curr->dl.dl_yielded = 1;          !! 1154         if (p->dl.runtime > 0) {
2190                                               !! 1155                 rq->curr->dl.dl_yielded = 1;
                                                   >> 1156                 p->dl.runtime = 0;
                                                   >> 1157         }
2191         update_rq_clock(rq);                     1158         update_rq_clock(rq);
2192         update_curr_dl(rq);                      1159         update_curr_dl(rq);
2193         /*                                       1160         /*
2194          * Tell update_rq_clock() that we've     1161          * Tell update_rq_clock() that we've just updated,
2195          * so we don't do microscopic update     1162          * so we don't do microscopic update in schedule()
2196          * and double the fastpath cost.         1163          * and double the fastpath cost.
2197          */                                      1164          */
2198         rq_clock_skip_update(rq);             !! 1165         rq_clock_skip_update(rq, true);
2199 }                                                1166 }
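From userspace, the yield semantics above are what a periodic SCHED_DEADLINE job sees when it calls sched_yield() after finishing an instance early: the rest of the budget is dropped and the task sleeps until its next replenishment. A hedged sketch follows; the struct layout is written out by hand (mirroring the uapi struct sched_attr), SYS_sched_setattr availability is assumed, and the runtime/deadline/period values are examples only.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    struct my_sched_attr {              /* hand-rolled copy of the uapi layout */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;     /* ns */
            uint64_t sched_deadline;    /* ns */
            uint64_t sched_period;      /* ns */
    };

    #ifndef SCHED_DEADLINE
    #define SCHED_DEADLINE 6
    #endif

    int main(void)
    {
            struct my_sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.sched_policy   = SCHED_DEADLINE;
            attr.sched_runtime  =  2 * 1000 * 1000;   /*  2 ms budget   */
            attr.sched_deadline = 10 * 1000 * 1000;   /* 10 ms deadline */
            attr.sched_period   = 10 * 1000 * 1000;   /* 10 ms period   */

            if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
                    perror("sched_setattr (needs privilege / admission control)");
                    return 1;
            }

            for (int i = 0; i < 100; i++) {
                    /* ... do this instance's work, hopefully well under 2 ms ... */
                    sched_yield();  /* done early: sleep until the next period */
            }
            return 0;
    }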
2200                                                  1167 
2201 #ifdef CONFIG_SMP                                1168 #ifdef CONFIG_SMP
2202                                                  1169 
2203 static inline bool dl_task_is_earliest_deadli << 
2204                                               << 
2205 {                                             << 
2206         return (!rq->dl.dl_nr_running ||      << 
2207                 dl_time_before(p->dl.deadline << 
2208                                rq->dl.earlies << 
2209 }                                             << 
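dl_task_is_earliest_deadline() keys on dl_time_before(), which, like the other scheduler clock comparisons, is a wrap-safe signed comparison of 64-bit nanosecond values. A standalone sketch of that comparison follows; toy_dl_time_before() is our own reimplementation in the style of the kernel helper, not the helper itself.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool toy_dl_time_before(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;    /* correct even if the clock wraps */
    }

    int main(void)
    {
            uint64_t near_wrap = UINT64_MAX - 100;

            /* A plain '<' would get the wrap-around cases wrong. */
            printf("%d\n", toy_dl_time_before(1000, 2000));                 /* 1 */
            printf("%d\n", toy_dl_time_before(near_wrap, near_wrap + 200)); /* 1 */
            printf("%d\n", toy_dl_time_before(near_wrap + 200, near_wrap)); /* 0 */
            return 0;
    }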
2210                                               << 
2211 static int find_later_rq(struct task_struct *    1170 static int find_later_rq(struct task_struct *task);
2212                                                  1171 
2213 static int                                       1172 static int
2214 select_task_rq_dl(struct task_struct *p, int  !! 1173 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
2215 {                                                1174 {
2216         struct task_struct *curr;                1175         struct task_struct *curr;
2217         bool select_rq;                       << 
2218         struct rq *rq;                           1176         struct rq *rq;
2219                                                  1177 
2220         if (!(flags & WF_TTWU))               !! 1178         if (sd_flag != SD_BALANCE_WAKE)
2221                 goto out;                        1179                 goto out;
2222                                                  1180 
2223         rq = cpu_rq(cpu);                        1181         rq = cpu_rq(cpu);
2224                                                  1182 
2225         rcu_read_lock();                         1183         rcu_read_lock();
2226         curr = READ_ONCE(rq->curr); /* unlock    1184         curr = READ_ONCE(rq->curr); /* unlocked access */
2227                                                  1185 
2228         /*                                       1186         /*
2229          * If we are dealing with a -deadline    1187          * If we are dealing with a -deadline task, we must
2230          * decide where to wake it up.           1188          * decide where to wake it up.
2231          * If it has a later deadline and the    1189          * If it has a later deadline and the current task
2232          * on this rq can't move (provided th    1190          * on this rq can't move (provided the waking task
2233          * can!) we prefer to send it somewhe    1191          * can!) we prefer to send it somewhere else. On the
2234          * other hand, if it has a shorter de    1192          * other hand, if it has a shorter deadline, we
2235          * try to make it stay here, it might    1193          * try to make it stay here, it might be important.
2236          */                                      1194          */
2237         select_rq = unlikely(dl_task(curr)) & !! 1195         if (unlikely(dl_task(curr)) &&
2238                     (curr->nr_cpus_allowed <  !! 1196             (curr->nr_cpus_allowed < 2 ||
2239                      !dl_entity_preempt(&p->d !! 1197              !dl_entity_preempt(&p->dl, &curr->dl)) &&
2240                     p->nr_cpus_allowed > 1;   !! 1198             (p->nr_cpus_allowed > 1)) {
2241                                               << 
2242         /*                                    << 
2243          * Take the capacity of the CPU into  << 
2244          * ensure it fits the requirement of  << 
2245          */                                   << 
2246         if (sched_asym_cpucap_active())       << 
2247                 select_rq |= !dl_task_fits_ca << 
2248                                               << 
2249         if (select_rq) {                      << 
2250                 int target = find_later_rq(p)    1199                 int target = find_later_rq(p);
2251                                                  1200 
2252                 if (target != -1 &&              1201                 if (target != -1 &&
2253                     dl_task_is_earliest_deadl !! 1202                                 (dl_time_before(p->dl.deadline,
                                                   >> 1203                                         cpu_rq(target)->dl.earliest_dl.curr) ||
                                                   >> 1204                                 (cpu_rq(target)->dl.dl_nr_running == 0)))
2254                         cpu = target;            1205                         cpu = target;
2255         }                                        1206         }
2256         rcu_read_unlock();                       1207         rcu_read_unlock();
2257                                                  1208 
2258 out:                                             1209 out:
2259         return cpu;                              1210         return cpu;
2260 }                                                1211 }
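The comment in select_task_rq_dl() spells out when a waking -deadline task should look for another runqueue: it can migrate, and the current task on the candidate CPU either cannot move or would not be preempted; on asymmetric-capacity systems a capacity misfit also forces a search. A toy model of that predicate follows; the struct and the capacity stand-in are simplified assumptions, not the kernel's data types.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_task {
            bool     is_dl;
            uint64_t deadline;      /* absolute, ns */
            int      nr_cpus_allowed;
            uint64_t needed_cap;    /* toy stand-in for the DL fitness check */
    };

    static bool before(uint64_t a, uint64_t b) { return (int64_t)(a - b) < 0; }

    /* Should waking task 'p' look for a later rq instead of staying on the
     * CPU whose current task is 'curr'? */
    static bool toy_want_later_rq(const struct toy_task *p,
                                  const struct toy_task *curr,
                                  uint64_t cpu_capacity)
    {
            bool select = curr->is_dl &&
                          (curr->nr_cpus_allowed < 2 ||
                           !before(p->deadline, curr->deadline)) &&
                          p->nr_cpus_allowed > 1;

            /* Asymmetric capacity: a too-small CPU also forces a search. */
            select |= p->needed_cap > cpu_capacity;

            return select;
    }

    int main(void)
    {
            struct toy_task curr = { true, 5000, 1, 0 };
            struct toy_task p    = { true, 9000, 4, 100 };

            /* curr is pinned and has the earlier deadline: look elsewhere. */
            printf("search for a later rq? %d\n",
                   toy_want_later_rq(&p, &curr, 1024));
            return 0;
    }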
2261                                                  1212 
2262 static void migrate_task_rq_dl(struct task_st << 
2263 {                                             << 
2264         struct rq_flags rf;                   << 
2265         struct rq *rq;                        << 
2266                                               << 
2267         if (READ_ONCE(p->__state) != TASK_WAK << 
2268                 return;                       << 
2269                                               << 
2270         rq = task_rq(p);                      << 
2271         /*                                    << 
2272          * Since p->state == TASK_WAKING, set << 
2273          * from try_to_wake_up(). Hence, p->p << 
2274          * rq->lock is not... So, lock it     << 
2275          */                                   << 
2276         rq_lock(rq, &rf);                     << 
2277         if (p->dl.dl_non_contending) {        << 
2278                 update_rq_clock(rq);          << 
2279                 sub_running_bw(&p->dl, &rq->d << 
2280                 p->dl.dl_non_contending = 0;  << 
2281                 /*                            << 
2282                  * If the timer handler is cu << 
2283                  * timer cannot be canceled,  << 
2284                  * will see that dl_not_conte << 
2285                  * will not touch the rq's ac << 
2286                  * so we are still safe.      << 
2287                  */                           << 
2288                 if (hrtimer_try_to_cancel(&p- << 
2289                         put_task_struct(p);   << 
2290         }                                     << 
2291         sub_rq_bw(&p->dl, &rq->dl);           << 
2292         rq_unlock(rq, &rf);                   << 
2293 }                                             << 
2294                                               << 
2295 static void check_preempt_equal_dl(struct rq     1213 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2296 {                                                1214 {
2297         /*                                       1215         /*
2298          * Current can't be migrated, useless    1216          * Current can't be migrated, useless to reschedule,
2299          * let's hope p can move out.            1217          * let's hope p can move out.
2300          */                                      1218          */
2301         if (rq->curr->nr_cpus_allowed == 1 ||    1219         if (rq->curr->nr_cpus_allowed == 1 ||
2302             !cpudl_find(&rq->rd->cpudl, rq->c !! 1220             cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
2303                 return;                          1221                 return;
2304                                                  1222 
2305         /*                                       1223         /*
2306          * p is migratable, so let's not sche    1224          * p is migratable, so let's not schedule it and
2307          * see if it is pushed or pulled some    1225          * see if it is pushed or pulled somewhere else.
2308          */                                      1226          */
2309         if (p->nr_cpus_allowed != 1 &&           1227         if (p->nr_cpus_allowed != 1 &&
2310             cpudl_find(&rq->rd->cpudl, p, NUL !! 1228             cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
2311                 return;                          1229                 return;
2312                                                  1230 
2313         resched_curr(rq);                        1231         resched_curr(rq);
2314 }                                                1232 }
2315                                                  1233 
2316 static int balance_dl(struct rq *rq, struct t << 
2317 {                                             << 
2318         if (!on_dl_rq(&p->dl) && need_pull_dl << 
2319                 /*                            << 
2320                  * This is OK, because curren << 
2321                  * picked for load-balance an << 
2322                  * disabled avoiding further  << 
2323                  * not yet started the pickin << 
2324                  */                           << 
2325                 rq_unpin_lock(rq, rf);        << 
2326                 pull_dl_task(rq);             << 
2327                 rq_repin_lock(rq, rf);        << 
2328         }                                     << 
2329                                               << 
2330         return sched_stop_runnable(rq) || sch << 
2331 }                                             << 
2332 #endif /* CONFIG_SMP */                          1234 #endif /* CONFIG_SMP */
2333                                                  1235 
2334 /*                                               1236 /*
2335  * Only called when both the current and waki    1237  * Only called when both the current and waking task are -deadline
2336  * tasks.                                        1238  * tasks.
2337  */                                              1239  */
2338 static void wakeup_preempt_dl(struct rq *rq,  !! 1240 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
2339                                   int flags)     1241                                   int flags)
2340 {                                                1242 {
2341         if (dl_entity_preempt(&p->dl, &rq->cu    1243         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2342                 resched_curr(rq);                1244                 resched_curr(rq);
2343                 return;                          1245                 return;
2344         }                                        1246         }
2345                                                  1247 
2346 #ifdef CONFIG_SMP                                1248 #ifdef CONFIG_SMP
2347         /*                                       1249         /*
2348          * In the unlikely case current and p    1250          * In the unlikely case current and p have the same deadline
2349          * let us try to decide what's the be    1251          * let us try to decide what's the best thing to do...
2350          */                                      1252          */
2351         if ((p->dl.deadline == rq->curr->dl.d    1253         if ((p->dl.deadline == rq->curr->dl.deadline) &&
2352             !test_tsk_need_resched(rq->curr))    1254             !test_tsk_need_resched(rq->curr))
2353                 check_preempt_equal_dl(rq, p)    1255                 check_preempt_equal_dl(rq, p);
2354 #endif /* CONFIG_SMP */                          1256 #endif /* CONFIG_SMP */
2355 }                                                1257 }
2356                                                  1258 
2357 #ifdef CONFIG_SCHED_HRTICK                       1259 #ifdef CONFIG_SCHED_HRTICK
2358 static void start_hrtick_dl(struct rq *rq, st !! 1260 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2359 {                                                1261 {
2360         hrtick_start(rq, dl_se->runtime);     !! 1262         hrtick_start(rq, p->dl.runtime);
2361 }                                                1263 }
2362 #else /* !CONFIG_SCHED_HRTICK */                 1264 #else /* !CONFIG_SCHED_HRTICK */
2363 static void start_hrtick_dl(struct rq *rq, st !! 1265 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2364 {                                                1266 {
2365 }                                                1267 }
2366 #endif                                           1268 #endif
2367                                                  1269 
2368 static void set_next_task_dl(struct rq *rq, s !! 1270 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
                                                   >> 1271                                                    struct dl_rq *dl_rq)
2369 {                                                1272 {
2370         struct sched_dl_entity *dl_se = &p->d !! 1273         struct rb_node *left = dl_rq->rb_leftmost;
2371         struct dl_rq *dl_rq = &rq->dl;        << 
2372                                               << 
2373         p->se.exec_start = rq_clock_task(rq); << 
2374         if (on_dl_rq(&p->dl))                 << 
2375                 update_stats_wait_end_dl(dl_r << 
2376                                               << 
2377         /* You can't push away the running ta << 
2378         dequeue_pushable_dl_task(rq, p);      << 
2379                                               << 
2380         if (!first)                           << 
2381                 return;                       << 
2382                                               << 
2383         if (rq->curr->sched_class != &dl_sche << 
2384                 update_dl_rq_load_avg(rq_cloc << 
2385                                               << 
2386         deadline_queue_push_tasks(rq);        << 
2387                                               << 
2388         if (hrtick_enabled_dl(rq))            << 
2389                 start_hrtick_dl(rq, &p->dl);  << 
2390 }                                             << 
2391                                               << 
2392 static struct sched_dl_entity *pick_next_dl_e << 
2393 {                                             << 
2394         struct rb_node *left = rb_first_cache << 
2395                                                  1274 
2396         if (!left)                               1275         if (!left)
2397                 return NULL;                     1276                 return NULL;
2398                                                  1277 
2399         return __node_2_dle(left);            !! 1278         return rb_entry(left, struct sched_dl_entity, rb_node);
2400 }                                                1279 }
2401                                                  1280 
2402 /*                                            !! 1281 struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
2403  * __pick_next_task_dl - Helper to pick the n << 
2404  * @rq: The runqueue to pick the next task fr << 
2405  */                                           << 
2406 static struct task_struct *__pick_task_dl(str << 
2407 {                                                1282 {
2408         struct sched_dl_entity *dl_se;           1283         struct sched_dl_entity *dl_se;
2409         struct dl_rq *dl_rq = &rq->dl;        << 
2410         struct task_struct *p;                   1284         struct task_struct *p;
                                                   >> 1285         struct dl_rq *dl_rq;
2411                                                  1286 
2412 again:                                        !! 1287         dl_rq = &rq->dl;
2413         if (!sched_dl_runnable(rq))           !! 1288 
                                                   >> 1289         if (need_pull_dl_task(rq, prev)) {
                                                   >> 1290                 /*
                                                   >> 1291                  * This is OK, because current is on_cpu, which avoids it being
                                                   >> 1292                  * picked for load-balance and preemption/IRQs are still
                                                   >> 1293                  * disabled avoiding further scheduler activity on it and we're
                                                   >> 1294                  * being very careful to re-start the picking loop.
                                                   >> 1295                  */
                                                   >> 1296                 lockdep_unpin_lock(&rq->lock);
                                                   >> 1297                 pull_dl_task(rq);
                                                   >> 1298                 lockdep_pin_lock(&rq->lock);
                                                   >> 1299                 /*
                                                   >> 1300                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
                                                   >> 1301                  * means a stop task can slip in, in which case we need to
                                                   >> 1302                  * re-start task selection.
                                                   >> 1303                  */
                                                   >> 1304                 if (rq->stop && task_on_rq_queued(rq->stop))
                                                   >> 1305                         return RETRY_TASK;
                                                   >> 1306         }
                                                   >> 1307 
                                                   >> 1308         /*
                                                   >> 1309          * When prev is DL, we may throttle it in put_prev_task().
                                                   >> 1310          * So, we update time before we check for dl_nr_running.
                                                   >> 1311          */
                                                   >> 1312         if (prev->sched_class == &dl_sched_class)
                                                   >> 1313                 update_curr_dl(rq);
                                                   >> 1314 
                                                   >> 1315         if (unlikely(!dl_rq->dl_nr_running))
2414                 return NULL;                     1316                 return NULL;
2415                                                  1317 
2416         dl_se = pick_next_dl_entity(dl_rq);   !! 1318         put_prev_task(rq, prev);
2417         WARN_ON_ONCE(!dl_se);                 << 
2418                                                  1319 
2419         if (dl_server(dl_se)) {               !! 1320         dl_se = pick_next_dl_entity(rq, dl_rq);
2420                 p = dl_se->server_pick_task(d !! 1321         BUG_ON(!dl_se);
2421                 if (!p) {                     << 
2422                         dl_se->dl_yielded = 1 << 
2423                         update_curr_dl_se(rq, << 
2424                         goto again;           << 
2425                 }                             << 
2426                 rq->dl_server = dl_se;        << 
2427         } else {                              << 
2428                 p = dl_task_of(dl_se);        << 
2429         }                                     << 
2430                                                  1322 
2431         return p;                             !! 1323         p = dl_task_of(dl_se);
2432 }                                             !! 1324         p->se.exec_start = rq_clock_task(rq);
2433                                                  1325 
2434 static struct task_struct *pick_task_dl(struc !! 1326         /* Running task will never be pushed. */
2435 {                                             !! 1327        dequeue_pushable_dl_task(rq, p);
2436         return __pick_task_dl(rq);            << 
2437 }                                             << 
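pick_next_dl_entity() simply takes the leftmost node of the deadline-ordered rbtree, i.e. the runnable entity with the earliest deadline. The sketch below expresses the same EDF pick rule over a flat array, just to make the rule concrete; it is not the kernel's rbtree code and ignores dl_server entities.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_entity {
            const char *name;
            uint64_t    deadline;   /* absolute, ns */
            int         runnable;
    };

    static const struct toy_entity *toy_pick(const struct toy_entity *v, size_t n)
    {
            const struct toy_entity *best = NULL;

            for (size_t i = 0; i < n; i++) {
                    if (!v[i].runnable)
                            continue;
                    if (!best || (int64_t)(v[i].deadline - best->deadline) < 0)
                            best = &v[i];
            }
            return best;    /* NULL when nothing is runnable */
    }

    int main(void)
    {
            struct toy_entity rq[] = {
                    { "audio", 12000000, 1 },
                    { "video", 33000000, 1 },
                    { "ctl",    8000000, 0 },   /* throttled: not runnable */
            };
            const struct toy_entity *p = toy_pick(rq, 3);

            printf("next: %s\n", p ? p->name : "(idle)");    /* audio */
            return 0;
    }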
2438                                                  1328 
2439 static void put_prev_task_dl(struct rq *rq, s !! 1329         if (hrtick_enabled(rq))
2440 {                                             !! 1330                 start_hrtick_dl(rq, p);
2441         struct sched_dl_entity *dl_se = &p->d << 
2442         struct dl_rq *dl_rq = &rq->dl;        << 
2443                                                  1331 
2444         if (on_dl_rq(&p->dl))                 !! 1332         queue_push_tasks(rq);
2445                 update_stats_wait_start_dl(dl << 
2446                                                  1333 
                                                   >> 1334         return p;
                                                   >> 1335 }
                                                   >> 1336 
                                                   >> 1337 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
                                                   >> 1338 {
2447         update_curr_dl(rq);                      1339         update_curr_dl(rq);
2448                                                  1340 
2449         update_dl_rq_load_avg(rq_clock_pelt(r << 
2450         if (on_dl_rq(&p->dl) && p->nr_cpus_al    1341         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2451                 enqueue_pushable_dl_task(rq,     1342                 enqueue_pushable_dl_task(rq, p);
2452 }                                                1343 }
2453                                                  1344 
2454 /*                                            << 
2455  * scheduler tick hitting a task of our sched << 
2456  *                                            << 
2457  * NOTE: This function can be called remotely << 
2458  * goes along full dynticks. Therefore no loc << 
2459  * and everything must be accessed through th << 
2460  * parameters.                                << 
2461  */                                           << 
2462 static void task_tick_dl(struct rq *rq, struc    1345 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2463 {                                                1346 {
2464         update_curr_dl(rq);                      1347         update_curr_dl(rq);
2465                                                  1348 
2466         update_dl_rq_load_avg(rq_clock_pelt(r << 
2467         /*                                       1349         /*
2468          * Even when we have runtime, update_    1350          * Even when we have runtime, update_curr_dl() might have resulted in us
2469          * not being the leftmost task anymor    1351          * not being the leftmost task anymore. In that case NEED_RESCHED will
2470          * be set and schedule() will start a    1352          * be set and schedule() will start a new hrtick for the next task.
2471          */                                      1353          */
2472         if (hrtick_enabled_dl(rq) && queued & !! 1354         if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
2473             is_leftmost(&p->dl, &rq->dl))     !! 1355             is_leftmost(p, &rq->dl))
2474                 start_hrtick_dl(rq, &p->dl);  !! 1356                 start_hrtick_dl(rq, p);
2475 }                                                1357 }
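start_hrtick_dl() arms a high-resolution tick for the remaining runtime, so the task is interrupted right when its budget runs out rather than at the next regular tick. A rough userspace analogue using timerfd is sketched below; the remaining-budget value is an arbitrary example and the mapping to hrtick is only by analogy.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t remaining_runtime_ns = 1500000;        /* 1.5 ms left */
            struct itimerspec its = { 0 };
            uint64_t expirations;
            int tfd;

            tfd = timerfd_create(CLOCK_MONOTONIC, 0);
            if (tfd < 0) {
                    perror("timerfd_create");
                    return 1;
            }

            /* One-shot expiry after the remaining budget, like hrtick_start(). */
            its.it_value.tv_sec  = remaining_runtime_ns / 1000000000ULL;
            its.it_value.tv_nsec = remaining_runtime_ns % 1000000000ULL;
            timerfd_settime(tfd, 0, &its, NULL);

            /* ... run the job; here we just block until the "tick" fires ... */
            if (read(tfd, &expirations, sizeof(expirations)) < 0)
                    perror("read");
            printf("budget exhausted, would reschedule now\n");

            close(tfd);
            return 0;
    }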
2476                                                  1358 
2477 static void task_fork_dl(struct task_struct *    1359 static void task_fork_dl(struct task_struct *p)
2478 {                                                1360 {
2479         /*                                       1361         /*
2480          * SCHED_DEADLINE tasks cannot fork a    1362          * SCHED_DEADLINE tasks cannot fork and this is achieved through
2481          * sched_fork()                          1363          * sched_fork()
2482          */                                      1364          */
2483 }                                                1365 }
2484                                                  1366 
                                                   >> 1367 static void task_dead_dl(struct task_struct *p)
                                                   >> 1368 {
                                                   >> 1369         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
                                                   >> 1370 
                                                   >> 1371         /*
                                                   >> 1372          * Since we are TASK_DEAD we won't slip out of the domain!
                                                   >> 1373          */
                                                   >> 1374         raw_spin_lock_irq(&dl_b->lock);
                                                   >> 1375         /* XXX we should retain the bw until 0-lag */
                                                   >> 1376         dl_b->total_bw -= p->dl.dl_bw;
                                                   >> 1377         raw_spin_unlock_irq(&dl_b->lock);
                                                   >> 1378 }
                                                   >> 1379 
                                                   >> 1380 static void set_curr_task_dl(struct rq *rq)
                                                   >> 1381 {
                                                   >> 1382         struct task_struct *p = rq->curr;
                                                   >> 1383 
                                                   >> 1384         p->se.exec_start = rq_clock_task(rq);
                                                   >> 1385 
                                                   >> 1386         /* You can't push away the running task */
                                                   >> 1387         dequeue_pushable_dl_task(rq, p);
                                                   >> 1388 }
                                                   >> 1389 
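The 4.4-era task_dead_dl() above returns the task's bandwidth to the per-domain pool immediately (the comment notes it should really be retained until the 0-lag time). The underlying bookkeeping is a fixed-point runtime/period ratio summed into total_bw and compared against a cap. A toy sketch follows; the 20-bit shift mirrors the kernel's to_ratio(), but treat the exact constants and names here as assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_BW_SHIFT 20                 /* kernel uses a similar shift */
    #define TOY_BW_UNIT  (1ULL << TOY_BW_SHIFT)

    static uint64_t toy_to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << TOY_BW_SHIFT) / period;
    }

    struct toy_dl_bw {
            uint64_t bw;        /* cap, e.g. 95% of a CPU */
            uint64_t total_bw;  /* sum of admitted tasks' dl_bw */
    };

    static int toy_admit(struct toy_dl_bw *b, uint64_t task_bw)
    {
            if (b->total_bw + task_bw > b->bw)
                    return -1;              /* admission control says no */
            b->total_bw += task_bw;
            return 0;
    }

    static void toy_task_dead(struct toy_dl_bw *b, uint64_t task_bw)
    {
            b->total_bw -= task_bw;         /* give the bandwidth back */
    }

    int main(void)
    {
            struct toy_dl_bw b = { .bw = TOY_BW_UNIT * 95 / 100, .total_bw = 0 };
            uint64_t task_bw = toy_to_ratio(100000000, 30000000);  /* 30ms/100ms */

            printf("admit: %d\n", toy_admit(&b, task_bw));          /* 0 */
            toy_task_dead(&b, task_bw);
            printf("total_bw after exit: %llu\n",
                   (unsigned long long)b.total_bw);                 /* 0 */
            return 0;
    }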
2485 #ifdef CONFIG_SMP                                1390 #ifdef CONFIG_SMP
2486                                                  1391 
2487 /* Only try algorithms three times */            1392 /* Only try algorithms three times */
2488 #define DL_MAX_TRIES 3                           1393 #define DL_MAX_TRIES 3
2489                                                  1394 
2490 static int pick_dl_task(struct rq *rq, struct    1395 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2491 {                                                1396 {
2492         if (!task_on_cpu(rq, p) &&            !! 1397         if (!task_running(rq, p) &&
2493             cpumask_test_cpu(cpu, &p->cpus_ma !! 1398             cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
2494                 return 1;                        1399                 return 1;
2495         return 0;                                1400         return 0;
2496 }                                                1401 }
2497                                                  1402 
                                                   >> 1403 /* Returns the second earliest -deadline task, NULL otherwise */
                                                   >> 1404 static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
                                                   >> 1405 {
                                                   >> 1406         struct rb_node *next_node = rq->dl.rb_leftmost;
                                                   >> 1407         struct sched_dl_entity *dl_se;
                                                   >> 1408         struct task_struct *p = NULL;
                                                   >> 1409 
                                                   >> 1410 next_node:
                                                   >> 1411         next_node = rb_next(next_node);
                                                   >> 1412         if (next_node) {
                                                   >> 1413                 dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
                                                   >> 1414                 p = dl_task_of(dl_se);
                                                   >> 1415 
                                                   >> 1416                 if (pick_dl_task(rq, p, cpu))
                                                   >> 1417                         return p;
                                                   >> 1418 
                                                   >> 1419                 goto next_node;
                                                   >> 1420         }
                                                   >> 1421 
                                                   >> 1422         return NULL;
                                                   >> 1423 }
                                                   >> 1424 
2498 /*                                               1425 /*
2499  * Return the earliest pushable rq's task, wh    1426  * Return the earliest pushable rq's task, which is suitable to be executed
2500  * on the CPU, NULL otherwise:                   1427  * on the CPU, NULL otherwise:
2501  */                                              1428  */
2502 static struct task_struct *pick_earliest_push    1429 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2503 {                                                1430 {
                                                   >> 1431         struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
2504         struct task_struct *p = NULL;            1432         struct task_struct *p = NULL;
2505         struct rb_node *next_node;            << 
2506                                                  1433 
2507         if (!has_pushable_dl_tasks(rq))          1434         if (!has_pushable_dl_tasks(rq))
2508                 return NULL;                     1435                 return NULL;
2509                                                  1436 
2510         next_node = rb_first_cached(&rq->dl.p << 
2511                                               << 
2512 next_node:                                       1437 next_node:
2513         if (next_node) {                         1438         if (next_node) {
2514                 p = __node_2_pdl(next_node);  !! 1439                 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
2515                                                  1440 
2516                 if (pick_dl_task(rq, p, cpu))    1441                 if (pick_dl_task(rq, p, cpu))
2517                         return p;                1442                         return p;
2518                                                  1443 
2519                 next_node = rb_next(next_node    1444                 next_node = rb_next(next_node);
2520                 goto next_node;                  1445                 goto next_node;
2521         }                                        1446         }
2522                                                  1447 
2523         return NULL;                             1448         return NULL;
2524 }                                                1449 }
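pick_earliest_pushable_dl_task() walks the pushable tasks in deadline order and returns the first one that is not currently running and whose affinity allows the target CPU. A simplified sketch over a deadline-sorted array with a plain affinity bitmask:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_task {
            const char *name;
            uint64_t    deadline;       /* array below is sorted by this */
            uint64_t    cpus_allowed;   /* bit n set => may run on CPU n */
            int         on_cpu;         /* currently running somewhere   */
    };

    static const struct toy_task *toy_pick_pushable(const struct toy_task *v,
                                                    int n, int cpu)
    {
            for (int i = 0; i < n; i++) {
                    if (!v[i].on_cpu && (v[i].cpus_allowed & (1ULL << cpu)))
                            return &v[i];
            }
            return NULL;
    }

    int main(void)
    {
            /* Sorted by deadline, earliest first (like the leftmost rb node). */
            const struct toy_task pushable[] = {
                    { "A",  5000, 0x1, 0 },   /* earliest, but pinned to CPU0 */
                    { "B",  9000, 0x6, 0 },   /* allowed on CPU1 and CPU2     */
            };
            const struct toy_task *p = toy_pick_pushable(pushable, 2, 1);

            printf("push to CPU1: %s\n", p ? p->name : "(none)");   /* B */
            return 0;
    }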
2525                                                  1450 
2526 static DEFINE_PER_CPU(cpumask_var_t, local_cp    1451 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2527                                                  1452 
2528 static int find_later_rq(struct task_struct *    1453 static int find_later_rq(struct task_struct *task)
2529 {                                                1454 {
2530         struct sched_domain *sd;                 1455         struct sched_domain *sd;
2531         struct cpumask *later_mask = this_cpu    1456         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2532         int this_cpu = smp_processor_id();       1457         int this_cpu = smp_processor_id();
2533         int cpu = task_cpu(task);             !! 1458         int best_cpu, cpu = task_cpu(task);
2534                                                  1459 
2535         /* Make sure the mask is initialized     1460         /* Make sure the mask is initialized first */
2536         if (unlikely(!later_mask))               1461         if (unlikely(!later_mask))
2537                 return -1;                       1462                 return -1;
2538                                                  1463 
2539         if (task->nr_cpus_allowed == 1)          1464         if (task->nr_cpus_allowed == 1)
2540                 return -1;                       1465                 return -1;
2541                                                  1466 
2542         /*                                       1467         /*
2543          * We have to consider system topolog    1468          * We have to consider system topology and task affinity
2544          * first, then we can look for a suit !! 1469          * first, then we can look for a suitable cpu.
2545          */                                      1470          */
2546         if (!cpudl_find(&task_rq(task)->rd->c !! 1471         best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                                                   >> 1472                         task, later_mask);
                                                   >> 1473         if (best_cpu == -1)
2547                 return -1;                       1474                 return -1;
2548                                                  1475 
2549         /*                                       1476         /*
2550          * If we are here, some targets have  !! 1477          * If we are here, some target has been found,
2551          * the most suitable which is, among  !! 1478          * the most suitable of which is cached in best_cpu.
2552          * current tasks have later deadlines !! 1479          * This is, among the runqueues where the current tasks
2553          * rq with the latest possible one.   !! 1480          * have later deadlines than the task's one, the rq
                                                   >> 1481          * with the latest possible one.
2554          *                                       1482          *
2555          * Now we check how well this matches    1483          * Now we check how well this matches with task's
2556          * affinity and system topology.         1484          * affinity and system topology.
2557          *                                       1485          *
2558          * The last CPU where the task ran is !! 1486          * The last cpu where the task ran is our first
2559          * guess, since it is most likely cac    1487          * guess, since it is most likely cache-hot there.
2560          */                                      1488          */
2561         if (cpumask_test_cpu(cpu, later_mask)    1489         if (cpumask_test_cpu(cpu, later_mask))
2562                 return cpu;                      1490                 return cpu;
2563         /*                                       1491         /*
2564          * Check if this_cpu is to be skipped    1492          * Check if this_cpu is to be skipped (i.e., it is
2565          * not in the mask) or not.              1493          * not in the mask) or not.
2566          */                                      1494          */
2567         if (!cpumask_test_cpu(this_cpu, later    1495         if (!cpumask_test_cpu(this_cpu, later_mask))
2568                 this_cpu = -1;                   1496                 this_cpu = -1;
2569                                                  1497 
2570         rcu_read_lock();                         1498         rcu_read_lock();
2571         for_each_domain(cpu, sd) {               1499         for_each_domain(cpu, sd) {
2572                 if (sd->flags & SD_WAKE_AFFIN    1500                 if (sd->flags & SD_WAKE_AFFINE) {
2573                         int best_cpu;         << 
2574                                                  1501 
2575                         /*                       1502                         /*
2576                          * If possible, preem    1503                          * If possible, preempting this_cpu is
2577                          * cheaper than migra    1504                          * cheaper than migrating.
2578                          */                      1505                          */
2579                         if (this_cpu != -1 &&    1506                         if (this_cpu != -1 &&
2580                             cpumask_test_cpu(    1507                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2581                                 rcu_read_unlo    1508                                 rcu_read_unlock();
2582                                 return this_c    1509                                 return this_cpu;
2583                         }                        1510                         }
2584                                                  1511 
2585                         best_cpu = cpumask_an << 
2586                                               << 
2587                         /*                       1512                         /*
2588                          * Last chance: if a  !! 1513                          * Last chance: if best_cpu is valid and is
2589                          * and current sd spa !! 1514                          * in the mask, that becomes our choice.
2590                          * choice. Of course, << 
2591                          * already under cons << 
2592                          */                      1515                          */
2593                         if (best_cpu < nr_cpu !! 1516                         if (best_cpu < nr_cpu_ids &&
                                                   >> 1517                             cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
2594                                 rcu_read_unlo    1518                                 rcu_read_unlock();
2595                                 return best_c    1519                                 return best_cpu;
2596                         }                        1520                         }
2597                 }                                1521                 }
2598         }                                        1522         }
2599         rcu_read_unlock();                       1523         rcu_read_unlock();
2600                                                  1524 
2601         /*                                       1525         /*
2602          * At this point, all our guesses fai    1526          * At this point, all our guesses failed, we just return
2603          * 'something', and let the caller so    1527          * 'something', and let the caller sort the things out.
2604          */                                      1528          */
2605         if (this_cpu != -1)                      1529         if (this_cpu != -1)
2606                 return this_cpu;                 1530                 return this_cpu;
2607                                                  1531 
2608         cpu = cpumask_any_distribute(later_ma !! 1532         cpu = cpumask_any(later_mask);
2609         if (cpu < nr_cpu_ids)                    1533         if (cpu < nr_cpu_ids)
2610                 return cpu;                      1534                 return cpu;
2611                                                  1535 
2612         return -1;                               1536         return -1;
2613 }                                                1537 }
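On top of the candidate mask that cpudl_find() fills in, find_later_rq() applies a preference order: the task's previous CPU first (most likely cache-hot), then this_cpu when preempting it is cheaper than migrating, then whatever the sched-domain walk turns up, and finally anything left in the mask. The sketch below keeps only the first-guess ordering, with plain bitmasks standing in for cpumasks and the domain walk deliberately omitted.

    #include <stdint.h>
    #include <stdio.h>

    static int first_cpu(uint64_t mask)
    {
            for (int cpu = 0; cpu < 64; cpu++)
                    if (mask & (1ULL << cpu))
                            return cpu;
            return -1;
    }

    static int toy_find_later_cpu(uint64_t later_mask, int prev_cpu, int this_cpu)
    {
            if (later_mask == 0)
                    return -1;                      /* no candidate CPUs     */
            if (later_mask & (1ULL << prev_cpu))
                    return prev_cpu;                /* cache-hot first guess */
            if (later_mask & (1ULL << this_cpu))
                    return this_cpu;                /* preempting is cheaper */
            return first_cpu(later_mask);           /* anything else         */
    }

    int main(void)
    {
            /* Candidates are CPU2 and CPU5; the task last ran on CPU1. */
            int cpu = toy_find_later_cpu(0x24, 1, 2);

            printf("chosen CPU: %d\n", cpu);        /* 2 */
            return 0;
    }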
2614                                                  1538 
2615 /* Locks the rq it finds */                      1539 /* Locks the rq it finds */
2616 static struct rq *find_lock_later_rq(struct t    1540 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2617 {                                                1541 {
2618         struct rq *later_rq = NULL;              1542         struct rq *later_rq = NULL;
2619         int tries;                               1543         int tries;
2620         int cpu;                                 1544         int cpu;
2621                                                  1545 
2622         for (tries = 0; tries < DL_MAX_TRIES;    1546         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2623                 cpu = find_later_rq(task);       1547                 cpu = find_later_rq(task);
2624                                                  1548 
2625                 if ((cpu == -1) || (cpu == rq    1549                 if ((cpu == -1) || (cpu == rq->cpu))
2626                         break;                   1550                         break;
2627                                                  1551 
2628                 later_rq = cpu_rq(cpu);          1552                 later_rq = cpu_rq(cpu);
2629                                                  1553 
2630                 if (!dl_task_is_earliest_dead !! 1554                 if (later_rq->dl.dl_nr_running &&
                                                   >> 1555                     !dl_time_before(task->dl.deadline,
                                                   >> 1556                                         later_rq->dl.earliest_dl.curr)) {
2631                         /*                       1557                         /*
2632                          * Target rq has task    1558                          * Target rq has tasks of equal or earlier deadline,
2633                          * retrying does not     1559                          * retrying does not release any lock and is unlikely
2634                          * to yield a differe    1560                          * to yield a different result.
2635                          */                      1561                          */
2636                         later_rq = NULL;         1562                         later_rq = NULL;
2637                         break;                   1563                         break;
2638                 }                                1564                 }
2639                                                  1565 
2640                 /* Retry if something changed    1566                 /* Retry if something changed. */
2641                 if (double_lock_balance(rq, l    1567                 if (double_lock_balance(rq, later_rq)) {
2642                         if (unlikely(task_rq(    1568                         if (unlikely(task_rq(task) != rq ||
2643                                      !cpumask !! 1569                                      !cpumask_test_cpu(later_rq->cpu,
2644                                      task_on_ !! 1570                                                        &task->cpus_allowed) ||
2645                                      !dl_task !! 1571                                      task_running(rq, task) ||
2646                                      is_migra << 
2647                                      !task_on    1572                                      !task_on_rq_queued(task))) {
2648                                 double_unlock    1573                                 double_unlock_balance(rq, later_rq);
2649                                 later_rq = NU    1574                                 later_rq = NULL;
2650                                 break;           1575                                 break;
2651                         }                        1576                         }
2652                 }                                1577                 }
2653                                                  1578 
2654                 /*                               1579                 /*
2655                  * If the rq we found has no     1580                  * If the rq we found has no -deadline task, or
2656                  * its earliest one has a lat    1581                  * its earliest one has a later deadline than our
2657                  * task, the rq is a good one    1582                  * task, the rq is a good one.
2658                  */                              1583                  */
2659                 if (dl_task_is_earliest_deadl !! 1584                 if (!later_rq->dl.dl_nr_running ||
                                                   >> 1585                     dl_time_before(task->dl.deadline,
                                                   >> 1586                                    later_rq->dl.earliest_dl.curr))
2660                         break;                   1587                         break;
2661                                                  1588 
2662                 /* Otherwise we try again. */    1589                 /* Otherwise we try again. */
2663                 double_unlock_balance(rq, lat    1590                 double_unlock_balance(rq, later_rq);
2664                 later_rq = NULL;                 1591                 later_rq = NULL;
2665         }                                        1592         }
2666                                                  1593 
2667         return later_rq;                         1594         return later_rq;
2668 }                                                1595 }
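Because find_lock_later_rq() may drop and retake rq->lock to acquire both runqueue locks, everything is revalidated afterwards: the task may have migrated, changed affinity, started running, or been dequeued, in which case the caller retries. A pthread sketch of the same "lock both, then revalidate, else retry" pattern follows; the toy_rq type is ours, and address-ordered locking stands in for double_lock_balance().

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_rq {
            pthread_mutex_t lock;
            int             cpu;
    };

    struct toy_task {
            struct toy_rq *rq;      /* runqueue the task currently sits on */
    };

    /* Lock rqs in a global (address) order so two CPUs can't deadlock. */
    static void toy_double_lock(struct toy_rq *a, struct toy_rq *b)
    {
            if ((uintptr_t)a < (uintptr_t)b) {
                    pthread_mutex_lock(&a->lock);
                    pthread_mutex_lock(&b->lock);
            } else {
                    pthread_mutex_lock(&b->lock);
                    pthread_mutex_lock(&a->lock);
            }
    }

    static bool toy_lock_later_rq(struct toy_task *t, struct toy_rq *src,
                                  struct toy_rq *dst)
    {
            pthread_mutex_unlock(&src->lock);       /* may let others race in */
            toy_double_lock(src, dst);

            if (t->rq != src) {                     /* task moved meanwhile:  */
                    pthread_mutex_unlock(&dst->lock);
                    return false;                   /* caller must retry      */
            }
            return true;                            /* both locks held, valid */
    }

    int main(void)
    {
            struct toy_rq rq0 = { PTHREAD_MUTEX_INITIALIZER, 0 };
            struct toy_rq rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };
            struct toy_task t = { .rq = &rq0 };

            pthread_mutex_lock(&rq0.lock);          /* we start owning src */
            printf("locked and still valid: %d\n",
                   toy_lock_later_rq(&t, &rq0, &rq1));
            pthread_mutex_unlock(&rq1.lock);
            pthread_mutex_unlock(&rq0.lock);
            return 0;
    }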
2669                                                  1596 
2670 static struct task_struct *pick_next_pushable    1597 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2671 {                                                1598 {
2672         struct task_struct *p;                   1599         struct task_struct *p;
2673                                                  1600 
2674         if (!has_pushable_dl_tasks(rq))          1601         if (!has_pushable_dl_tasks(rq))
2675                 return NULL;                     1602                 return NULL;
2676                                                  1603 
2677         p = __node_2_pdl(rb_first_cached(&rq- !! 1604         p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
                                                   >> 1605                      struct task_struct, pushable_dl_tasks);
2678                                                  1606 
2679         WARN_ON_ONCE(rq->cpu != task_cpu(p)); !! 1607         BUG_ON(rq->cpu != task_cpu(p));
2680         WARN_ON_ONCE(task_current(rq, p));    !! 1608         BUG_ON(task_current(rq, p));
2681         WARN_ON_ONCE(p->nr_cpus_allowed <= 1) !! 1609         BUG_ON(p->nr_cpus_allowed <= 1);
2682                                                  1610 
2683         WARN_ON_ONCE(!task_on_rq_queued(p));  !! 1611         BUG_ON(!task_on_rq_queued(p));
2684         WARN_ON_ONCE(!dl_task(p));            !! 1612         BUG_ON(!dl_task(p));
2685                                                  1613 
2686         return p;                                1614         return p;
2687 }                                                1615 }
2688                                                  1616 
2689 /*                                               1617 /*
2690  * See if the non running -deadline tasks on     1618  * See if the non running -deadline tasks on this rq
2691  * can be sent to some other CPU where they c    1619  * can be sent to some other CPU where they can preempt
2692  * and start executing.                          1620  * and start executing.
2693  */                                              1621  */
2694 static int push_dl_task(struct rq *rq)           1622 static int push_dl_task(struct rq *rq)
2695 {                                                1623 {
2696         struct task_struct *next_task;           1624         struct task_struct *next_task;
2697         struct rq *later_rq;                     1625         struct rq *later_rq;
2698         int ret = 0;                             1626         int ret = 0;
2699                                                  1627 
                                                   >> 1628         if (!rq->dl.overloaded)
                                                   >> 1629                 return 0;
                                                   >> 1630 
2700         next_task = pick_next_pushable_dl_tas    1631         next_task = pick_next_pushable_dl_task(rq);
2701         if (!next_task)                          1632         if (!next_task)
2702                 return 0;                        1633                 return 0;
2703                                                  1634 
2704 retry:                                           1635 retry:
                                                   >> 1636         if (unlikely(next_task == rq->curr)) {
                                                   >> 1637                 WARN_ON(1);
                                                   >> 1638                 return 0;
                                                   >> 1639         }
                                                   >> 1640 
2705         /*                                       1641         /*
2706          * If next_task preempts rq->curr, an    1642          * If next_task preempts rq->curr, and rq->curr
2707          * can move away, it makes sense to j    1643          * can move away, it makes sense to just reschedule
2708          * without going further in pushing n    1644          * without going further in pushing next_task.
2709          */                                      1645          */
2710         if (dl_task(rq->curr) &&                 1646         if (dl_task(rq->curr) &&
2711             dl_time_before(next_task->dl.dead    1647             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2712             rq->curr->nr_cpus_allowed > 1) {     1648             rq->curr->nr_cpus_allowed > 1) {
2713                 resched_curr(rq);                1649                 resched_curr(rq);
2714                 return 0;                        1650                 return 0;
2715         }                                        1651         }
2716                                                  1652 
2717         if (is_migration_disabled(next_task)) << 
2718                 return 0;                     << 
2719                                               << 
2720         if (WARN_ON(next_task == rq->curr))   << 
2721                 return 0;                     << 
2722                                               << 
2723         /* We might release rq lock */           1653         /* We might release rq lock */
2724         get_task_struct(next_task);              1654         get_task_struct(next_task);
2725                                                  1655 
2726         /* Will lock the rq it'll find */        1656         /* Will lock the rq it'll find */
2727         later_rq = find_lock_later_rq(next_ta    1657         later_rq = find_lock_later_rq(next_task, rq);
2728         if (!later_rq) {                         1658         if (!later_rq) {
2729                 struct task_struct *task;        1659                 struct task_struct *task;
2730                                                  1660 
2731                 /*                               1661                 /*
2732                  * We must check all this aga    1662                  * We must check all this again, since
2733                  * find_lock_later_rq release    1663                  * find_lock_later_rq releases rq->lock and it is
2734                  * then possible that next_ta    1664                  * then possible that next_task has migrated.
2735                  */                              1665                  */
2736                 task = pick_next_pushable_dl_    1666                 task = pick_next_pushable_dl_task(rq);
2737                 if (task == next_task) {      !! 1667                 if (task_cpu(next_task) == rq->cpu && task == next_task) {
2738                         /*                       1668                         /*
2739                          * The task is still     1669                          * The task is still there. We don't try
2740                          * again, some other  !! 1670                          * again, some other cpu will pull it when ready.
2741                          */                      1671                          */
2742                         goto out;                1672                         goto out;
2743                 }                                1673                 }
2744                                                  1674 
2745                 if (!task)                       1675                 if (!task)
2746                         /* No more tasks */      1676                         /* No more tasks */
2747                         goto out;                1677                         goto out;
2748                                                  1678 
2749                 put_task_struct(next_task);      1679                 put_task_struct(next_task);
2750                 next_task = task;                1680                 next_task = task;
2751                 goto retry;                      1681                 goto retry;
2752         }                                        1682         }
2753                                                  1683 
2754         deactivate_task(rq, next_task, 0);       1684         deactivate_task(rq, next_task, 0);
2755         set_task_cpu(next_task, later_rq->cpu    1685         set_task_cpu(next_task, later_rq->cpu);
2756         activate_task(later_rq, next_task, 0)    1686         activate_task(later_rq, next_task, 0);
2757         ret = 1;                                 1687         ret = 1;
2758                                                  1688 
2759         resched_curr(later_rq);                  1689         resched_curr(later_rq);
2760                                                  1690 
2761         double_unlock_balance(rq, later_rq);     1691         double_unlock_balance(rq, later_rq);
2762                                                  1692 
2763 out:                                             1693 out:
2764         put_task_struct(next_task);              1694         put_task_struct(next_task);
2765                                                  1695 
2766         return ret;                              1696         return ret;
2767 }                                                1697 }
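
The push path above decides every preemption question by comparing absolute deadlines through dl_time_before(). A stand-alone sketch of that wrap-safe comparison, runnable outside the kernel (names prefixed "sketch_" are illustrative, not the kernel's):

/* Sketch: wrap-safe "earlier deadline" test modelled on dl_time_before(). */
#include <stdint.h>
#include <stdio.h>

/* Deadlines are u64 nanosecond timestamps; subtracting and testing the sign
 * of the signed result keeps the ordering correct across wrap-around. */
static inline int sketch_dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap  = UINT64_MAX - 100;	/* deadline just before wrap */
	uint64_t after_wrap = 50;		/* deadline just after wrap */

	/* A naive "<" would get this pair wrong; the signed difference does not. */
	printf("%d\n", sketch_dl_time_before(near_wrap, after_wrap));	/* 1 */
	printf("%d\n", sketch_dl_time_before(after_wrap, near_wrap));	/* 0 */
	return 0;
}

This is why the code never compares deadlines with a plain "<": under EDF only the relative order matters, and the signed difference keeps that order stable even when the clock value wraps.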
2768                                                  1698 
2769 static void push_dl_tasks(struct rq *rq)         1699 static void push_dl_tasks(struct rq *rq)
2770 {                                                1700 {
2771         /* push_dl_task() will return true if    1701         /* push_dl_task() will return true if it moved a -deadline task */
2772         while (push_dl_task(rq))                 1702         while (push_dl_task(rq))
2773                 ;                                1703                 ;
2774 }                                                1704 }
2775                                                  1705 
2776 static void pull_dl_task(struct rq *this_rq)     1706 static void pull_dl_task(struct rq *this_rq)
2777 {                                                1707 {
2778         int this_cpu = this_rq->cpu, cpu;        1708         int this_cpu = this_rq->cpu, cpu;
2779         struct task_struct *p, *push_task;    !! 1709         struct task_struct *p;
2780         bool resched = false;                    1710         bool resched = false;
2781         struct rq *src_rq;                       1711         struct rq *src_rq;
2782         u64 dmin = LONG_MAX;                     1712         u64 dmin = LONG_MAX;
2783                                                  1713 
2784         if (likely(!dl_overloaded(this_rq)))     1714         if (likely(!dl_overloaded(this_rq)))
2785                 return;                          1715                 return;
2786                                                  1716 
2787         /*                                       1717         /*
2788          * Match the barrier from dl_set_over    1718          * Match the barrier from dl_set_overloaded; this guarantees that if we
2789          * see overloaded we must also see th    1719          * see overloaded we must also see the dlo_mask bit.
2790          */                                      1720          */
2791         smp_rmb();                               1721         smp_rmb();
2792                                                  1722 
2793         for_each_cpu(cpu, this_rq->rd->dlo_ma    1723         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2794                 if (this_cpu == cpu)             1724                 if (this_cpu == cpu)
2795                         continue;                1725                         continue;
2796                                                  1726 
2797                 src_rq = cpu_rq(cpu);            1727                 src_rq = cpu_rq(cpu);
2798                                                  1728 
2799                 /*                               1729                 /*
2800                  * It looks racy, and it is!  !! 1730                  * It looks racy, abd it is! However, as in sched_rt.c,
2801                  * we are fine with this.        1731                  * we are fine with this.
2802                  */                              1732                  */
2803                 if (this_rq->dl.dl_nr_running    1733                 if (this_rq->dl.dl_nr_running &&
2804                     dl_time_before(this_rq->d    1734                     dl_time_before(this_rq->dl.earliest_dl.curr,
2805                                    src_rq->dl    1735                                    src_rq->dl.earliest_dl.next))
2806                         continue;                1736                         continue;
2807                                                  1737 
2808                 /* Might drop this_rq->lock *    1738                 /* Might drop this_rq->lock */
2809                 push_task = NULL;             << 
2810                 double_lock_balance(this_rq,     1739                 double_lock_balance(this_rq, src_rq);
2811                                                  1740 
2812                 /*                               1741                 /*
2813                  * If there are no more pulla    1742                  * If there are no more pullable tasks on the
2814                  * rq, we're done with it.       1743                  * rq, we're done with it.
2815                  */                              1744                  */
2816                 if (src_rq->dl.dl_nr_running     1745                 if (src_rq->dl.dl_nr_running <= 1)
2817                         goto skip;               1746                         goto skip;
2818                                                  1747 
2819                 p = pick_earliest_pushable_dl    1748                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2820                                                  1749 
2821                 /*                               1750                 /*
2822                  * We found a task to be pull    1751                  * We found a task to be pulled if:
2823                  *  - it preempts our current    1752                  *  - it preempts our current (if there's one),
2824                  *  - it will preempt the las    1753                  *  - it will preempt the last one we pulled (if any).
2825                  */                              1754                  */
2826                 if (p && dl_time_before(p->dl    1755                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2827                     dl_task_is_earliest_deadl !! 1756                     (!this_rq->dl.dl_nr_running ||
                                                   >> 1757                      dl_time_before(p->dl.deadline,
                                                   >> 1758                                     this_rq->dl.earliest_dl.curr))) {
2828                         WARN_ON(p == src_rq->    1759                         WARN_ON(p == src_rq->curr);
2829                         WARN_ON(!task_on_rq_q    1760                         WARN_ON(!task_on_rq_queued(p));
2830                                                  1761 
2831                         /*                       1762                         /*
2832                          * Then we pull iff p    1763                          * Then we pull iff p has actually an earlier
2833                          * deadline than the     1764                          * deadline than the current task of its runqueue.
2834                          */                      1765                          */
2835                         if (dl_time_before(p-    1766                         if (dl_time_before(p->dl.deadline,
2836                                            sr    1767                                            src_rq->curr->dl.deadline))
2837                                 goto skip;       1768                                 goto skip;
2838                                                  1769 
2839                         if (is_migration_disa !! 1770                         resched = true;
2840                                 push_task = g !! 1771 
2841                         } else {              !! 1772                         deactivate_task(src_rq, p, 0);
2842                                 deactivate_ta !! 1773                         set_task_cpu(p, this_cpu);
2843                                 set_task_cpu( !! 1774                         activate_task(this_rq, p, 0);
2844                                 activate_task !! 1775                         dmin = p->dl.deadline;
2845                                 dmin = p->dl. << 
2846                                 resched = tru << 
2847                         }                     << 
2848                                                  1776 
2849                         /* Is there any other    1777                         /* Is there any other task even earlier? */
2850                 }                                1778                 }
2851 skip:                                            1779 skip:
2852                 double_unlock_balance(this_rq    1780                 double_unlock_balance(this_rq, src_rq);
2853                                               << 
2854                 if (push_task) {              << 
2855                         preempt_disable();    << 
2856                         raw_spin_rq_unlock(th << 
2857                         stop_one_cpu_nowait(s << 
2858                                             p << 
2859                         preempt_enable();     << 
2860                         raw_spin_rq_lock(this << 
2861                 }                             << 
2862         }                                        1781         }
2863                                                  1782 
2864         if (resched)                             1783         if (resched)
2865                 resched_curr(this_rq);           1784                 resched_curr(this_rq);
2866 }                                                1785 }
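
The long if() in the pull loop packs its whole admission test into one condition: a candidate is only worth pulling if its deadline beats both the best candidate already pulled in this pass (dmin) and whatever this_rq would otherwise run, and if the source CPU is not about to schedule it anyway. A hedged restatement over a simplified runqueue model (types and names here are illustrative, not the kernel's):

/* Sketch: the "should we pull candidate p?" predicate from pull_dl_task(). */
#include <stdbool.h>
#include <stdint.h>

struct toy_dl_rq {
	unsigned int nr_running;	/* -deadline tasks queued here */
	uint64_t earliest_deadline;	/* meaningful only if nr_running > 0 */
};

static bool before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* wrap-safe, as in dl_time_before() */
}

/*
 * p_deadline: deadline of the pull candidate on the source rq
 * src_curr:   deadline of the task currently running on the source rq
 * dmin:       earliest deadline pulled so far in this pass
 */
static bool should_pull(const struct toy_dl_rq *this_rq,
			uint64_t p_deadline, uint64_t src_curr, uint64_t dmin)
{
	/* Must beat the best candidate we already pulled ... */
	if (!before(p_deadline, dmin))
		return false;

	/* ... and preempt whatever this_rq would otherwise run ... */
	if (this_rq->nr_running && !before(p_deadline, this_rq->earliest_deadline))
		return false;

	/* ... but if p would preempt the source CPU's own current task, that
	 * CPU will reschedule to it shortly by itself, so leave it there. */
	if (before(p_deadline, src_curr))
		return false;

	return true;
}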
2867                                                  1786 
2868 /*                                               1787 /*
2869  * Since the task is not running and a resche    1788  * Since the task is not running and a reschedule is not going to happen
2870  * anytime soon on its runqueue, we try pushi    1789  * anytime soon on its runqueue, we try pushing it away now.
2871  */                                              1790  */
2872 static void task_woken_dl(struct rq *rq, stru    1791 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2873 {                                                1792 {
2874         if (!task_on_cpu(rq, p) &&            !! 1793         if (!task_running(rq, p) &&
2875             !test_tsk_need_resched(rq->curr)     1794             !test_tsk_need_resched(rq->curr) &&
2876             p->nr_cpus_allowed > 1 &&            1795             p->nr_cpus_allowed > 1 &&
2877             dl_task(rq->curr) &&                 1796             dl_task(rq->curr) &&
2878             (rq->curr->nr_cpus_allowed < 2 ||    1797             (rq->curr->nr_cpus_allowed < 2 ||
2879              !dl_entity_preempt(&p->dl, &rq->    1798              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2880                 push_dl_tasks(rq);               1799                 push_dl_tasks(rq);
2881         }                                        1800         }
2882 }                                                1801 }
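
task_woken_dl() only bothers pushing a freshly woken task away when that task cannot preempt rq->curr. Under EDF "preempts" simply means "has a strictly earlier absolute deadline"; a toy illustration of that test follows (this is not the kernel's dl_entity_preempt(), which in recent kernels also special-cases certain kernel-internal entities):

/* Toy EDF preemption test: a preempts b iff a's absolute deadline is earlier. */
#include <stdbool.h>
#include <stdint.h>

struct toy_dl_entity {
	uint64_t deadline;	/* absolute deadline, ns */
};

static bool toy_dl_entity_preempt(const struct toy_dl_entity *a,
				  const struct toy_dl_entity *b)
{
	return (int64_t)(a->deadline - b->deadline) < 0;
}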
2883                                                  1802 
2884 static void set_cpus_allowed_dl(struct task_s    1803 static void set_cpus_allowed_dl(struct task_struct *p,
2885                                 struct affini !! 1804                                 const struct cpumask *new_mask)
2886 {                                                1805 {
2887         struct root_domain *src_rd;              1806         struct root_domain *src_rd;
2888         struct rq *rq;                           1807         struct rq *rq;
2889                                                  1808 
2890         WARN_ON_ONCE(!dl_task(p));            !! 1809         BUG_ON(!dl_task(p));
2891                                                  1810 
2892         rq = task_rq(p);                         1811         rq = task_rq(p);
2893         src_rd = rq->rd;                         1812         src_rd = rq->rd;
2894         /*                                       1813         /*
2895          * Migrating a SCHED_DEADLINE task be    1814          * Migrating a SCHED_DEADLINE task between exclusive
2896          * cpusets (different root_domains) e    1815          * cpusets (different root_domains) entails a bandwidth
2897          * update. We already made space for     1816          * update. We already made space for us in the destination
2898          * domain (see cpuset_can_attach()).     1817          * domain (see cpuset_can_attach()).
2899          */                                      1818          */
2900         if (!cpumask_intersects(src_rd->span, !! 1819         if (!cpumask_intersects(src_rd->span, new_mask)) {
2901                 struct dl_bw *src_dl_b;          1820                 struct dl_bw *src_dl_b;
2902                                                  1821 
2903                 src_dl_b = dl_bw_of(cpu_of(rq    1822                 src_dl_b = dl_bw_of(cpu_of(rq));
2904                 /*                               1823                 /*
2905                  * We now free resources of t    1824                  * We now free resources of the root_domain we are migrating
2906                  * off. In the worst case, sc    1825                  * off. In the worst case, sched_setattr() may temporary fail
2907                  * until we complete the upda    1826                  * until we complete the update.
2908                  */                              1827                  */
2909                 raw_spin_lock(&src_dl_b->lock    1828                 raw_spin_lock(&src_dl_b->lock);
2910                 __dl_sub(src_dl_b, p->dl.dl_b !! 1829                 __dl_clear(src_dl_b, p->dl.dl_bw);
2911                 raw_spin_unlock(&src_dl_b->lo    1830                 raw_spin_unlock(&src_dl_b->lock);
2912         }                                        1831         }
2913                                                  1832 
2914         set_cpus_allowed_common(p, ctx);      !! 1833         set_cpus_allowed_common(p, new_mask);
2915 }                                                1834 }
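
When the new affinity mask no longer intersects the old root_domain's span, the function above hands the task's bandwidth back to the source domain, relying on the destination having reserved space earlier in cpuset_can_attach(). A minimal model of that two-step bookkeeping (struct and helper names simplified; the real __dl_add()/__dl_sub() take extra arguments and do additional per-CPU accounting):

/* Sketch: per-root-domain DL bandwidth pool and the migration hand-off. */
#include <stdint.h>

struct toy_dl_bw {
	uint64_t bw;		/* admissible bandwidth per CPU (fixed point) */
	uint64_t total_bw;	/* sum of admitted tasks' dl_bw in this domain */
};

static void toy_dl_add(struct toy_dl_bw *b, uint64_t tsk_bw)
{
	b->total_bw += tsk_bw;
}

static void toy_dl_sub(struct toy_dl_bw *b, uint64_t tsk_bw)
{
	b->total_bw -= tsk_bw;
}

/* Migrating between exclusive cpusets: space is reserved in the destination
 * first (cf. cpuset_can_attach()), then released from the source, which is
 * the step performed by set_cpus_allowed_dl() above. */
static void toy_migrate(struct toy_dl_bw *src, struct toy_dl_bw *dst,
			uint64_t tsk_bw)
{
	toy_dl_add(dst, tsk_bw);	/* done earlier, at attach-check time */
	toy_dl_sub(src, tsk_bw);	/* done here */
}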
2916                                                  1835 
2917 /* Assumes rq->lock is held */                   1836 /* Assumes rq->lock is held */
2918 static void rq_online_dl(struct rq *rq)          1837 static void rq_online_dl(struct rq *rq)
2919 {                                                1838 {
2920         if (rq->dl.overloaded)                   1839         if (rq->dl.overloaded)
2921                 dl_set_overload(rq);             1840                 dl_set_overload(rq);
2922                                                  1841 
2923         cpudl_set_freecpu(&rq->rd->cpudl, rq-    1842         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2924         if (rq->dl.dl_nr_running > 0)            1843         if (rq->dl.dl_nr_running > 0)
2925                 cpudl_set(&rq->rd->cpudl, rq- !! 1844                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
2926 }                                                1845 }
2927                                                  1846 
2928 /* Assumes rq->lock is held */                   1847 /* Assumes rq->lock is held */
2929 static void rq_offline_dl(struct rq *rq)         1848 static void rq_offline_dl(struct rq *rq)
2930 {                                                1849 {
2931         if (rq->dl.overloaded)                   1850         if (rq->dl.overloaded)
2932                 dl_clear_overload(rq);           1851                 dl_clear_overload(rq);
2933                                                  1852 
2934         cpudl_clear(&rq->rd->cpudl, rq->cpu); !! 1853         cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
2935         cpudl_clear_freecpu(&rq->rd->cpudl, r    1854         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2936 }                                                1855 }
2937                                                  1856 
2938 void __init init_sched_dl_class(void)            1857 void __init init_sched_dl_class(void)
2939 {                                                1858 {
2940         unsigned int i;                          1859         unsigned int i;
2941                                                  1860 
2942         for_each_possible_cpu(i)                 1861         for_each_possible_cpu(i)
2943                 zalloc_cpumask_var_node(&per_    1862                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2944                                         GFP_K    1863                                         GFP_KERNEL, cpu_to_node(i));
2945 }                                                1864 }
2946                                                  1865 
2947 void dl_add_task_root_domain(struct task_stru << 
2948 {                                             << 
2949         struct rq_flags rf;                   << 
2950         struct rq *rq;                        << 
2951         struct dl_bw *dl_b;                   << 
2952                                               << 
2953         raw_spin_lock_irqsave(&p->pi_lock, rf << 
2954         if (!dl_task(p)) {                    << 
2955                 raw_spin_unlock_irqrestore(&p << 
2956                 return;                       << 
2957         }                                     << 
2958                                               << 
2959         rq = __task_rq_lock(p, &rf);          << 
2960                                               << 
2961         dl_b = &rq->rd->dl_bw;                << 
2962         raw_spin_lock(&dl_b->lock);           << 
2963                                               << 
2964         __dl_add(dl_b, p->dl.dl_bw, cpumask_w << 
2965                                               << 
2966         raw_spin_unlock(&dl_b->lock);         << 
2967                                               << 
2968         task_rq_unlock(rq, p, &rf);           << 
2969 }                                             << 
2970                                               << 
2971 void dl_clear_root_domain(struct root_domain  << 
2972 {                                             << 
2973         unsigned long flags;                  << 
2974                                               << 
2975         raw_spin_lock_irqsave(&rd->dl_bw.lock << 
2976         rd->dl_bw.total_bw = 0;               << 
2977         raw_spin_unlock_irqrestore(&rd->dl_bw << 
2978 }                                             << 
2979                                               << 
2980 #endif /* CONFIG_SMP */                          1866 #endif /* CONFIG_SMP */
2981                                                  1867 
2982 static void switched_from_dl(struct rq *rq, s    1868 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2983 {                                                1869 {
2984         /*                                       1870         /*
2985          * task_non_contending() can start th !! 1871          * Start the deadline timer; if we switch back to dl before this we'll
2986          * time is in the future). If the tas !! 1872          * continue consuming our current CBS slice. If we stay outside of
2987          * the "inactive timer" fires, it can !! 1873          * SCHED_DEADLINE until the deadline passes, the timer will reset the
2988          * runtime using its current deadline !! 1874          * task.
2989          * SCHED_DEADLINE until the 0-lag tim << 
2990          * will reset the task parameters.    << 
2991          */                                   << 
2992         if (task_on_rq_queued(p) && p->dl.dl_ << 
2993                 task_non_contending(&p->dl);  << 
2994                                               << 
2995         /*                                    << 
2996          * In case a task is setscheduled out << 
2997          * keep track of that on its cpuset ( << 
2998          */                                   << 
2999         dec_dl_tasks_cs(p);                   << 
3000                                               << 
3001         if (!task_on_rq_queued(p)) {          << 
3002                 /*                            << 
3003                  * Inactive timer is armed. H << 
3004                  * might migrate away from th << 
3005                  * some other class. We need  << 
3006                  * this rq running_bw now, or << 
3007                  */                           << 
3008                 if (p->dl.dl_non_contending)  << 
3009                         sub_running_bw(&p->dl << 
3010                 sub_rq_bw(&p->dl, &rq->dl);   << 
3011         }                                     << 
3012                                               << 
3013         /*                                    << 
3014          * We cannot use inactive_task_timer( << 
3015          * at the 0-lag time, because the tas << 
3016          * while SCHED_OTHER in the meanwhile << 
3017          */                                      1875          */
3018         if (p->dl.dl_non_contending)          !! 1876         if (!start_dl_timer(p))
3019                 p->dl.dl_non_contending = 0;  !! 1877                 __dl_clear_params(p);
3020                                                  1878 
3021         /*                                       1879         /*
3022          * Since this might be the only -dead    1880          * Since this might be the only -deadline task on the rq,
3023          * this is the right place to try to     1881          * this is the right place to try to pull some other one
3024          * from an overloaded CPU, if any.    !! 1882          * from an overloaded cpu, if any.
3025          */                                      1883          */
3026         if (!task_on_rq_queued(p) || rq->dl.d    1884         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3027                 return;                          1885                 return;
3028                                                  1886 
3029         deadline_queue_pull_task(rq);         !! 1887         queue_pull_task(rq);
3030 }                                                1888 }
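
switched_from_dl() keeps the task's bandwidth accounted until its "0-lag time": the instant at which the remaining runtime, if consumed at exactly the reserved rate dl_runtime/dl_period, would bring the task's lag to zero. A hedged sketch of that instant, following the proportional formula used by the non-contending path (names are illustrative, and the kernel works with a signed remaining runtime):

/* Sketch: 0-lag time of a CBS reservation.
 *
 * With remaining runtime r, reserved runtime R every period P, the task's
 * bandwidth is R/P and its lag reaches zero at:
 *
 *	t_zerolag = deadline - r * P / R
 */
#include <stdint.h>

static uint64_t toy_zerolag_time(uint64_t deadline, uint64_t runtime_left,
				 uint64_t dl_runtime, uint64_t dl_period)
{
	/* All values are nanoseconds, small enough that the product fits. */
	return deadline - (runtime_left * dl_period) / dl_runtime;
}

Keeping the bandwidth reserved until that point is what prevents a task from briefly leaving SCHED_DEADLINE and re-entering to obtain more than its share.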
3031                                                  1889 
3032 /*                                               1890 /*
3033  * When switching to -deadline, we may overlo    1891  * When switching to -deadline, we may overload the rq, then
3034  * we try to push someone off, if possible.      1892  * we try to push someone off, if possible.
3035  */                                              1893  */
3036 static void switched_to_dl(struct rq *rq, str    1894 static void switched_to_dl(struct rq *rq, struct task_struct *p)
3037 {                                                1895 {
3038         if (hrtimer_try_to_cancel(&p->dl.inac !! 1896         if (task_on_rq_queued(p) && rq->curr != p) {
3039                 put_task_struct(p);           << 
3040                                               << 
3041         /*                                    << 
3042          * In case a task is setscheduled to  << 
3043          * track of that on its cpuset (for c << 
3044          */                                   << 
3045         inc_dl_tasks_cs(p);                   << 
3046                                               << 
3047         /* If p is not queued we will update  << 
3048         if (!task_on_rq_queued(p)) {          << 
3049                 add_rq_bw(&p->dl, &rq->dl);   << 
3050                                               << 
3051                 return;                       << 
3052         }                                     << 
3053                                               << 
3054         if (rq->curr != p) {                  << 
3055 #ifdef CONFIG_SMP                                1897 #ifdef CONFIG_SMP
3056                 if (p->nr_cpus_allowed > 1 &&    1898                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
3057                         deadline_queue_push_t !! 1899                         queue_push_tasks(rq);
3058 #endif                                           1900 #endif
3059                 if (dl_task(rq->curr))           1901                 if (dl_task(rq->curr))
3060                         wakeup_preempt_dl(rq, !! 1902                         check_preempt_curr_dl(rq, p, 0);
3061                 else                             1903                 else
3062                         resched_curr(rq);        1904                         resched_curr(rq);
3063         } else {                              << 
3064                 update_dl_rq_load_avg(rq_cloc << 
3065         }                                        1905         }
3066 }                                                1906 }
3067                                                  1907 
3068 /*                                               1908 /*
3069  * If the scheduling parameters of a -deadlin    1909  * If the scheduling parameters of a -deadline task changed,
3070  * a push or pull operation might be needed.     1910  * a push or pull operation might be needed.
3071  */                                              1911  */
3072 static void prio_changed_dl(struct rq *rq, st    1912 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
3073                             int oldprio)         1913                             int oldprio)
3074 {                                                1914 {
3075         if (!task_on_rq_queued(p))            !! 1915         if (task_on_rq_queued(p) || rq->curr == p) {
3076                 return;                       << 
3077                                               << 
3078 #ifdef CONFIG_SMP                                1916 #ifdef CONFIG_SMP
3079         /*                                    !! 1917                 /*
3080          * This might be too much, but unfort !! 1918                  * This might be too much, but unfortunately
3081          * we don't have the old deadline val !! 1919                  * we don't have the old deadline value, and
3082          * we can't argue if the task is incr !! 1920                  * we can't argue if the task is increasing
3083          * or lowering its prio, so...        !! 1921                  * or lowering its prio, so...
3084          */                                   !! 1922                  */
3085         if (!rq->dl.overloaded)               !! 1923                 if (!rq->dl.overloaded)
3086                 deadline_queue_pull_task(rq); !! 1924                         queue_pull_task(rq);
3087                                                  1925 
3088         if (task_current(rq, p)) {            << 
3089                 /*                               1926                 /*
3090                  * If we now have a earlier d    1927                  * If we now have a earlier deadline task than p,
3091                  * then reschedule, provided     1928                  * then reschedule, provided p is still on this
3092                  * runqueue.                     1929                  * runqueue.
3093                  */                              1930                  */
3094                 if (dl_time_before(rq->dl.ear    1931                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3095                         resched_curr(rq);        1932                         resched_curr(rq);
3096         } else {                              !! 1933 #else
3097                 /*                               1934                 /*
3098                  * Current may not be deadlin !! 1935                  * Again, we don't know if p has a earlier
3099                  * have just replenished it ( !! 1936                  * or later deadline, so let's blindly set a
3100                  *                            !! 1937                  * (maybe not needed) rescheduling point.
3101                  * Otherwise, if p was given  << 
3102                  */                              1938                  */
3103                 if (!dl_task(rq->curr) ||     !! 1939                 resched_curr(rq);
3104                     dl_time_before(p->dl.dead !! 1940 #endif /* CONFIG_SMP */
3105                         resched_curr(rq);     !! 1941         } else
3106         }                                     !! 1942                 switched_to_dl(rq, p);
3107 #else                                         << 
3108         /*                                    << 
3109          * We don't know if p has a earlier o << 
3110          * set a (maybe not needed) reschedul << 
3111          */                                   << 
3112         resched_curr(rq);                     << 
3113 #endif                                        << 
3114 }                                             << 
3115                                               << 
3116 #ifdef CONFIG_SCHED_CORE                      << 
3117 static int task_is_throttled_dl(struct task_s << 
3118 {                                             << 
3119         return p->dl.dl_throttled;            << 
3120 }                                                1943 }
3121 #endif                                        << 
3122                                               << 
3123 DEFINE_SCHED_CLASS(dl) = {                    << 
3124                                                  1944 
                                                   >> 1945 const struct sched_class dl_sched_class = {
                                                   >> 1946         .next                   = &rt_sched_class,
3125         .enqueue_task           = enqueue_tas    1947         .enqueue_task           = enqueue_task_dl,
3126         .dequeue_task           = dequeue_tas    1948         .dequeue_task           = dequeue_task_dl,
3127         .yield_task             = yield_task_    1949         .yield_task             = yield_task_dl,
3128                                                  1950 
3129         .wakeup_preempt         = wakeup_pree !! 1951         .check_preempt_curr     = check_preempt_curr_dl,
3130                                                  1952 
3131         .pick_task              = pick_task_d !! 1953         .pick_next_task         = pick_next_task_dl,
3132         .put_prev_task          = put_prev_ta    1954         .put_prev_task          = put_prev_task_dl,
3133         .set_next_task          = set_next_ta << 
3134                                                  1955 
3135 #ifdef CONFIG_SMP                                1956 #ifdef CONFIG_SMP
3136         .balance                = balance_dl, << 
3137         .select_task_rq         = select_task    1957         .select_task_rq         = select_task_rq_dl,
3138         .migrate_task_rq        = migrate_tas << 
3139         .set_cpus_allowed       = set_cpus_al    1958         .set_cpus_allowed       = set_cpus_allowed_dl,
3140         .rq_online              = rq_online_d    1959         .rq_online              = rq_online_dl,
3141         .rq_offline             = rq_offline_    1960         .rq_offline             = rq_offline_dl,
3142         .task_woken             = task_woken_    1961         .task_woken             = task_woken_dl,
3143         .find_lock_rq           = find_lock_l << 
3144 #endif                                           1962 #endif
3145                                                  1963 
                                                   >> 1964         .set_curr_task          = set_curr_task_dl,
3146         .task_tick              = task_tick_d    1965         .task_tick              = task_tick_dl,
3147         .task_fork              = task_fork_d    1966         .task_fork              = task_fork_dl,
                                                   >> 1967         .task_dead              = task_dead_dl,
3148                                                  1968 
3149         .prio_changed           = prio_change    1969         .prio_changed           = prio_changed_dl,
3150         .switched_from          = switched_fr    1970         .switched_from          = switched_from_dl,
3151         .switched_to            = switched_to    1971         .switched_to            = switched_to_dl,
3152                                                  1972 
3153         .update_curr            = update_curr    1973         .update_curr            = update_curr_dl,
3154 #ifdef CONFIG_SCHED_CORE                      << 
3155         .task_is_throttled      = task_is_thr << 
3156 #endif                                        << 
3157 };                                            << 
3158                                               << 
3159 /* Used for dl_bw check and update, used unde << 
3160 static u64 dl_generation;                     << 
3161                                               << 
3162 int sched_dl_global_validate(void)            << 
3163 {                                             << 
3164         u64 runtime = global_rt_runtime();    << 
3165         u64 period = global_rt_period();      << 
3166         u64 new_bw = to_ratio(period, runtime << 
3167         u64 gen = ++dl_generation;            << 
3168         struct dl_bw *dl_b;                   << 
3169         int cpu, cpus, ret = 0;               << 
3170         unsigned long flags;                  << 
3171                                               << 
3172         /*                                    << 
3173          * Here we want to check the bandwidt << 
3174          * value smaller than the currently a << 
3175          * any of the root_domains.           << 
3176          */                                   << 
3177         for_each_possible_cpu(cpu) {          << 
3178                 rcu_read_lock_sched();        << 
3179                                               << 
3180                 if (dl_bw_visited(cpu, gen))  << 
3181                         goto next;            << 
3182                                               << 
3183                 dl_b = dl_bw_of(cpu);         << 
3184                 cpus = dl_bw_cpus(cpu);       << 
3185                                               << 
3186                 raw_spin_lock_irqsave(&dl_b-> << 
3187                 if (new_bw * cpus < dl_b->tot << 
3188                         ret = -EBUSY;         << 
3189                 raw_spin_unlock_irqrestore(&d << 
3190                                               << 
3191 next:                                         << 
3192                 rcu_read_unlock_sched();      << 
3193                                               << 
3194                 if (ret)                      << 
3195                         break;                << 
3196         }                                     << 
3197                                               << 
3198         return ret;                           << 
3199 }                                             << 
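
The validation above converts the global runtime/period pair into a fixed-point utilization with to_ratio() and rejects any new limit smaller than what some root domain has already promised (new_bw * cpus < total_bw). A stand-alone sketch of that arithmetic, assuming the 20-bit fixed point (BW_SHIFT) used by the scheduler:

/* Sketch: to_ratio()-style fixed-point bandwidth and the global check made
 * by sched_dl_global_validate(). The 20-bit shift is an assumption noted
 * above, matching the kernel's BW_SHIFT. */
#include <stdbool.h>
#include <stdint.h>

#define TOY_BW_SHIFT	20
#define TOY_BW_UNIT	(1ULL << TOY_BW_SHIFT)

static uint64_t toy_to_ratio(uint64_t period_ns, uint64_t runtime_ns)
{
	if (period_ns == 0)
		return 0;
	return (runtime_ns << TOY_BW_SHIFT) / period_ns;
}

/* Reject a new global limit that is smaller than what a root domain with
 * 'cpus' CPUs has already admitted. */
static bool toy_global_limit_ok(uint64_t new_period_ns, uint64_t new_runtime_ns,
				int cpus, uint64_t domain_total_bw)
{
	uint64_t new_bw = toy_to_ratio(new_period_ns, new_runtime_ns);

	return (uint64_t)cpus * new_bw >= domain_total_bw;
}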
3200                                               << 
3201 static void init_dl_rq_bw_ratio(struct dl_rq  << 
3202 {                                             << 
3203         if (global_rt_runtime() == RUNTIME_IN << 
3204                 dl_rq->bw_ratio = 1 << RATIO_ << 
3205                 dl_rq->max_bw = dl_rq->extra_ << 
3206         } else {                              << 
3207                 dl_rq->bw_ratio = to_ratio(gl << 
3208                           global_rt_period()) << 
3209                 dl_rq->max_bw = dl_rq->extra_ << 
3210                         to_ratio(global_rt_pe << 
3211         }                                     << 
3212 }                                             << 
3213                                               << 
3214 void sched_dl_do_global(void)                 << 
3215 {                                             << 
3216         u64 new_bw = -1;                      << 
3217         u64 gen = ++dl_generation;            << 
3218         struct dl_bw *dl_b;                   << 
3219         int cpu;                              << 
3220         unsigned long flags;                  << 
3221                                               << 
3222         if (global_rt_runtime() != RUNTIME_IN << 
3223                 new_bw = to_ratio(global_rt_p << 
3224                                               << 
3225         for_each_possible_cpu(cpu) {          << 
3226                 rcu_read_lock_sched();        << 
3227                                               << 
3228                 if (dl_bw_visited(cpu, gen))  << 
3229                         rcu_read_unlock_sched << 
3230                         continue;             << 
3231                 }                             << 
3232                                               << 
3233                 dl_b = dl_bw_of(cpu);         << 
3234                                               << 
3235                 raw_spin_lock_irqsave(&dl_b-> << 
3236                 dl_b->bw = new_bw;            << 
3237                 raw_spin_unlock_irqrestore(&d << 
3238                                               << 
3239                 rcu_read_unlock_sched();      << 
3240                 init_dl_rq_bw_ratio(&cpu_rq(c << 
3241         }                                     << 
3242 }                                             << 
3243                                               << 
3244 /*                                            << 
3245  * We must be sure that accepting a new task  << 
3246  * parameters of an existing one) is consiste << 
3247  * constraints. If yes, this function also ac << 
3248  * allocated bandwidth to reflect the new sit << 
3249  *                                            << 
3250  * This function is called while holding p's  << 
3251  */                                           << 
3252 int sched_dl_overflow(struct task_struct *p,  << 
3253                       const struct sched_attr << 
3254 {                                             << 
3255         u64 period = attr->sched_period ?: at << 
3256         u64 runtime = attr->sched_runtime;    << 
3257         u64 new_bw = dl_policy(policy) ? to_r << 
3258         int cpus, err = -1, cpu = task_cpu(p) << 
3259         struct dl_bw *dl_b = dl_bw_of(cpu);   << 
3260         unsigned long cap;                    << 
3261                                               << 
3262         if (attr->sched_flags & SCHED_FLAG_SU << 
3263                 return 0;                     << 
3264                                               << 
3265         /* !deadline task may carry old deadl << 
3266         if (new_bw == p->dl.dl_bw && task_has << 
3267                 return 0;                     << 
3268                                               << 
3269         /*                                    << 
3270          * Either if a task, enters, leave, o << 
3271          * its parameters, we may need to upd << 
3272          * allocated bandwidth of the contain << 
3273          */                                   << 
3274         raw_spin_lock(&dl_b->lock);           << 
3275         cpus = dl_bw_cpus(cpu);               << 
3276         cap = dl_bw_capacity(cpu);            << 
3277                                               << 
3278         if (dl_policy(policy) && !task_has_dl << 
3279             !__dl_overflow(dl_b, cap, 0, new_ << 
3280                 if (hrtimer_active(&p->dl.ina << 
3281                         __dl_sub(dl_b, p->dl. << 
3282                 __dl_add(dl_b, new_bw, cpus); << 
3283                 err = 0;                      << 
3284         } else if (dl_policy(policy) && task_ << 
3285                    !__dl_overflow(dl_b, cap,  << 
3286                 /*                            << 
3287                  * XXX this is slightly incor << 
3288                  * utilization decreases, we  << 
3289                  * utilization change until t << 
3290                  * But this would require to  << 
3291                  * timer" when the task is no << 
3292                  */                           << 
3293                 __dl_sub(dl_b, p->dl.dl_bw, c << 
3294                 __dl_add(dl_b, new_bw, cpus); << 
3295                 dl_change_utilization(p, new_ << 
3296                 err = 0;                      << 
3297         } else if (!dl_policy(policy) && task << 
3298                 /*                            << 
3299                  * Do not decrease the total  << 
3300                  * switched_from_dl() will ta << 
3301                  * (0-lag) time.              << 
3302                  */                           << 
3303                 err = 0;                      << 
3304         }                                     << 
3305         raw_spin_unlock(&dl_b->lock);         << 
3306                                               << 
3307         return err;                           << 
3308 }                                             << 
3309                                               << 
3310 /*                                            << 
3311  * This function initializes the sched_dl_ent << 
3312  * SCHED_DEADLINE task.                       << 
3313  *                                            << 
3314  * Only the static values are considered here << 
3315  * absolute deadline will be properly calcula << 
3316  * for the first time with its new policy.    << 
3317  */                                           << 
3318 void __setparam_dl(struct task_struct *p, con << 
3319 {                                             << 
3320         struct sched_dl_entity *dl_se = &p->d << 
3321                                               << 
3322         dl_se->dl_runtime = attr->sched_runti << 
3323         dl_se->dl_deadline = attr->sched_dead << 
3324         dl_se->dl_period = attr->sched_period << 
3325         dl_se->flags = attr->sched_flags & SC << 
3326         dl_se->dl_bw = to_ratio(dl_se->dl_per << 
3327         dl_se->dl_density = to_ratio(dl_se->d << 
3328 }                                             << 
3329                                               << 
3330 void __getparam_dl(struct task_struct *p, str << 
3331 {                                             << 
3332         struct sched_dl_entity *dl_se = &p->d << 
3333                                               << 
3334         attr->sched_priority = p->rt_priority << 
3335         attr->sched_runtime = dl_se->dl_runti << 
3336         attr->sched_deadline = dl_se->dl_dead << 
3337         attr->sched_period = dl_se->dl_period << 
3338         attr->sched_flags &= ~SCHED_DL_FLAGS; << 
3339         attr->sched_flags |= dl_se->flags;    << 
3340 }                                             << 
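
__setparam_dl() and __getparam_dl() are the kernel-side halves of the sched_setattr()/sched_getattr() system calls. For reference, a minimal userspace sketch that requests a 10 ms runtime every 100 ms period; the struct layout follows the sched_setattr(2) man page, the raw syscall is used because glibc has traditionally provided no wrapper, and SYS_sched_setattr is assumed to be exposed by <sys/syscall.h>:

/* Sketch: asking for a SCHED_DEADLINE reservation from userspace.
 * Runtime 10 ms, deadline == period == 100 ms. Needs CAP_SYS_NICE/root. */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6	/* UAPI policy value */
#endif

/* Shorter than recent kernels' struct sched_attr; the size field lets the
 * kernel accept this older layout. */
struct toy_sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct toy_sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/*  10 ms every ...   */
	attr.sched_deadline = 100 * 1000 * 1000;	/* ... 100 ms,        */
	attr.sched_period   = 100 * 1000 * 1000;	/* deadline == period */

	/* pid 0 == calling thread; flags must be 0. */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0) {
		perror("sched_setattr");	/* e.g. EBUSY: admission control refused */
		return 1;
	}

	/* This thread now runs under EDF + CBS with a 10% reservation. */
	return 0;
}

A failed call with EBUSY is exactly the admission-control path implemented by sched_dl_overflow() above.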
3341                                               << 
3342 /*                                            << 
3343  * This function validates the new parameters << 
3344  * We ask for the deadline not being zero, an << 
3345  * than the runtime, as well as the period of << 
3346  * greater than deadline. Furthermore, we hav << 
3347  * user parameters are above the internal res << 
3348  * check sched_runtime only since it is alway << 
3349  * below 2^63 ns (we have to check both sched << 
3350  * sched_period, as the latter can be zero).  << 
3351  */                                           << 
3352 bool __checkparam_dl(const struct sched_attr  << 
3353 {                                             << 
3354         u64 period, max, min;                 << 
3355                                               << 
3356         /* special dl tasks don't actually us << 
3357         if (attr->sched_flags & SCHED_FLAG_SU << 
3358                 return true;                  << 
3359                                               << 
3360         /* deadline != 0 */                   << 
3361         if (attr->sched_deadline == 0)        << 
3362                 return false;                 << 
3363                                               << 
3364         /*                                    << 
3365          * Since we truncate DL_SCALE bits, m << 
3366          * that big.                          << 
3367          */                                   << 
3368         if (attr->sched_runtime < (1ULL << DL << 
3369                 return false;                 << 
3370                                               << 
3371         /*                                    << 
3372          * Since we use the MSB for wrap-arou << 
3373          * sure it's not set (mind that perio << 
3374          */                                   << 
3375         if (attr->sched_deadline & (1ULL << 6 << 
3376             attr->sched_period & (1ULL << 63) << 
3377                 return false;                 << 
3378                                               << 
3379         period = attr->sched_period;          << 
3380         if (!period)                          << 
3381                 period = attr->sched_deadline << 
3382                                               << 
3383         /* runtime <= deadline <= period (if  << 
3384         if (period < attr->sched_deadline ||  << 
3385             attr->sched_deadline < attr->sche << 
3386                 return false;                 << 
3387                                               << 
3388         max = (u64)READ_ONCE(sysctl_sched_dl_ << 
3389         min = (u64)READ_ONCE(sysctl_sched_dl_ << 
3390                                               << 
3391         if (period < min || period > max)     << 
3392                 return false;                 << 
3393                                               << 
3394         return true;                          << 
3395 }                                             << 
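
The rules enforced above reduce to: a nonzero deadline, a runtime of at least 2^DL_SCALE ns (DL_SCALE is assumed to be 10, i.e. roughly 1 us), top bits clear so the wrap-around arithmetic stays valid, runtime <= deadline <= (effective) period, and the effective period inside the sysctl window. A compact restatement with the limits passed in rather than read from sysctls (names are illustrative):

/* Sketch: the shape of __checkparam_dl()'s admission rules. */
#include <stdbool.h>
#include <stdint.h>

#define TOY_DL_SCALE	10	/* assumed, matching the kernel's DL_SCALE */

static bool toy_checkparam_dl(uint64_t runtime, uint64_t deadline, uint64_t period,
			      uint64_t period_min, uint64_t period_max)
{
	if (deadline == 0)
		return false;

	/* Runtimes below ~1 us would be lost to DL_SCALE truncation. */
	if (runtime < (1ULL << TOY_DL_SCALE))
		return false;

	/* The MSB is reserved for wrap-around detection; period may be zero,
	 * so both fields are checked. */
	if ((deadline | period) & (1ULL << 63))
		return false;

	if (period == 0)
		period = deadline;	/* implicit-deadline task */

	/* runtime <= deadline <= period */
	if (period < deadline || deadline < runtime)
		return false;

	return period >= period_min && period <= period_max;
}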
3396                                               << 
3397 /*                                            << 
3398  * This function clears the sched_dl_entity s << 
3399  */                                           << 
3400 static void __dl_clear_params(struct sched_dl << 
3401 {                                             << 
3402         dl_se->dl_runtime               = 0;  << 
3403         dl_se->dl_deadline              = 0;  << 
3404         dl_se->dl_period                = 0;  << 
3405         dl_se->flags                    = 0;  << 
3406         dl_se->dl_bw                    = 0;  << 
3407         dl_se->dl_density               = 0;  << 
3408                                               << 
3409         dl_se->dl_throttled             = 0;  << 
3410         dl_se->dl_yielded               = 0;  << 
3411         dl_se->dl_non_contending        = 0;  << 
3412         dl_se->dl_overrun               = 0;  << 
3413         dl_se->dl_server                = 0;  << 
3414                                               << 
3415 #ifdef CONFIG_RT_MUTEXES                      << 
3416         dl_se->pi_se                    = dl_ << 
3417 #endif                                        << 
3418 }                                             << 
3419                                               << 
3420 void init_dl_entity(struct sched_dl_entity *d << 
3421 {                                             << 
3422         RB_CLEAR_NODE(&dl_se->rb_node);       << 
3423         init_dl_task_timer(dl_se);            << 
3424         init_dl_inactive_task_timer(dl_se);   << 
3425         __dl_clear_params(dl_se);             << 
3426 }                                             << 
3427                                               << 
3428 bool dl_param_changed(struct task_struct *p,  << 
3429 {                                             << 
3430         struct sched_dl_entity *dl_se = &p->d << 
3431                                               << 
3432         if (dl_se->dl_runtime != attr->sched_ << 
3433             dl_se->dl_deadline != attr->sched << 
3434             dl_se->dl_period != attr->sched_p << 
3435             dl_se->flags != (attr->sched_flag << 
3436                 return true;                  << 
3437                                               << 
3438         return false;                         << 
3439 }                                             << 
3440                                               << 
3441 #ifdef CONFIG_SMP                             << 
3442 int dl_cpuset_cpumask_can_shrink(const struct << 
3443                                  const struct << 
3444 {                                             << 
3445         unsigned long flags, cap;             << 
3446         struct dl_bw *cur_dl_b;               << 
3447         int ret = 1;                          << 
3448                                               << 
3449         rcu_read_lock_sched();                << 
3450         cur_dl_b = dl_bw_of(cpumask_any(cur)) << 
3451         cap = __dl_bw_capacity(trial);        << 
3452         raw_spin_lock_irqsave(&cur_dl_b->lock << 
3453         if (__dl_overflow(cur_dl_b, cap, 0, 0 << 
3454                 ret = 0;                      << 
3455         raw_spin_unlock_irqrestore(&cur_dl_b- << 
3456         rcu_read_unlock_sched();              << 
3457                                               << 
3458         return ret;                           << 
3459 }                                             << 
3460                                               << 
3461 enum dl_bw_request {                          << 
3462         dl_bw_req_check_overflow = 0,         << 
3463         dl_bw_req_alloc,                      << 
3464         dl_bw_req_free                        << 
3465 };                                               1974 };
3466                                                  1975 
3467 static int dl_bw_manage(enum dl_bw_request re << 
3468 {                                             << 
3469         unsigned long flags;                  << 
3470         struct dl_bw *dl_b;                   << 
3471         bool overflow = 0;                    << 
3472                                               << 
3473         rcu_read_lock_sched();                << 
3474         dl_b = dl_bw_of(cpu);                 << 
3475         raw_spin_lock_irqsave(&dl_b->lock, fl << 
3476                                               << 
3477         if (req == dl_bw_req_free) {          << 
3478                 __dl_sub(dl_b, dl_bw, dl_bw_c << 
3479         } else {                              << 
3480                 unsigned long cap = dl_bw_cap << 
3481                                               << 
3482                 overflow = __dl_overflow(dl_b << 
3483                                               << 
3484                 if (req == dl_bw_req_alloc && << 
3485                         /*                    << 
3486                          * We reserve space i << 
3487                          * root_domain, as we << 
3488                          * We will free resou << 
3489                          * later on (see set_ << 
3490                          */                   << 
3491                         __dl_add(dl_b, dl_bw, << 
3492                 }                             << 
3493         }                                     << 
3494                                               << 
3495         raw_spin_unlock_irqrestore(&dl_b->loc << 
3496         rcu_read_unlock_sched();              << 
3497                                               << 
3498         return overflow ? -EBUSY : 0;         << 
3499 }                                             << 
3500                                               << 
3501 int dl_bw_check_overflow(int cpu)             << 
3502 {                                             << 
3503         return dl_bw_manage(dl_bw_req_check_o << 
3504 }                                             << 
3505                                               << 
3506 int dl_bw_alloc(int cpu, u64 dl_bw)           << 
3507 {                                             << 
3508         return dl_bw_manage(dl_bw_req_alloc,  << 
3509 }                                             << 
3510                                               << 
3511 void dl_bw_free(int cpu, u64 dl_bw)           << 
3512 {                                             << 
3513         dl_bw_manage(dl_bw_req_free, cpu, dl_ << 
3514 }                                             << 
3515 #endif                                        << 
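
dl_bw_manage() answers all three request types with one overflow test: would total_bw, adjusted by the requested change, exceed the domain's admissible bandwidth scaled by its summed CPU capacity? A hedged sketch of that test; the capacity scaling by a 10-bit shift (SCHED_CAPACITY_SHIFT, 1024 per full-speed CPU) is an assumption, as is the exact shape of the real __dl_overflow():

/* Sketch: the admission test behind dl_bw_alloc()/dl_bw_check_overflow(). */
#include <stdbool.h>
#include <stdint.h>

#define TOY_CAPACITY_SHIFT	10	/* assumed SCHED_CAPACITY_SHIFT */

/*
 * bw_limit: per-unit-capacity bandwidth limit of the root domain
 * total_bw: bandwidth already admitted in the domain
 * cap:      summed capacity of the domain's CPUs
 * old_bw:   bandwidth being given back (0 for a pure allocation)
 * new_bw:   bandwidth being requested (0 for a pure check)
 */
static bool toy_dl_overflow(uint64_t bw_limit, uint64_t total_bw,
			    uint64_t cap, uint64_t old_bw, uint64_t new_bw)
{
	/* bw_limit == (u64)-1 means "no limit configured". */
	if (bw_limit == (uint64_t)-1)
		return false;

	return ((bw_limit * cap) >> TOY_CAPACITY_SHIFT) <
	       total_bw - old_bw + new_bw;
}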
3516                                               << 
3517 #ifdef CONFIG_SCHED_DEBUG                        1976 #ifdef CONFIG_SCHED_DEBUG
                                                   >> 1977 extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
                                                   >> 1978 
3518 void print_dl_stats(struct seq_file *m, int c    1979 void print_dl_stats(struct seq_file *m, int cpu)
3519 {                                                1980 {
3520         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl)    1981         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3521 }                                                1982 }
3522 #endif /* CONFIG_SCHED_DEBUG */                  1983 #endif /* CONFIG_SCHED_DEBUG */
3523                                                  1984 
