
TOMOYO Linux Cross Reference
Linux/kernel/sched/deadline.c


Diff markup

Differences between /kernel/sched/deadline.c (Version linux-6.12-rc7) and /kernel/sched/deadline.c (Version linux-5.9.16)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * Deadline Scheduling Class (SCHED_DEADLINE)       3  * Deadline Scheduling Class (SCHED_DEADLINE)
  4  *                                                  4  *
  5  * Earliest Deadline First (EDF) + Constant Ba      5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
  6  *                                                  6  *
  7  * Tasks that periodically executes their inst      7  * Tasks that periodically executes their instances for less than their
  8  * runtime won't miss any of their deadlines.       8  * runtime won't miss any of their deadlines.
  9  * Tasks that are not periodic or sporadic or       9  * Tasks that are not periodic or sporadic or that tries to execute more
 10  * than their reserved bandwidth will be slowe     10  * than their reserved bandwidth will be slowed down (and may potentially
 11  * miss some of their deadlines), and won't af     11  * miss some of their deadlines), and won't affect any other task.
 12  *                                                 12  *
 13  * Copyright (C) 2012 Dario Faggioli <raistlin     13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 14  *                    Juri Lelli <juri.lelli@g     14  *                    Juri Lelli <juri.lelli@gmail.com>,
 15  *                    Michael Trimarchi <micha     15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
 16  *                    Fabio Checconi <fcheccon     16  *                    Fabio Checconi <fchecconi@gmail.com>
 17  */                                                17  */
                                                   >>  18 #include "sched.h"
                                                   >>  19 #include "pelt.h"
 18                                                    20 
 19 #include <linux/cpuset.h>                      !!  21 struct dl_bandwidth def_dl_bandwidth;
 20                                                << 
 21 /*                                             << 
 22  * Default limits for DL period; on the top en << 
 23  * tasks still getting ridiculously long effec << 
 24  * guard against timer DoS.                    << 
 25  */                                            << 
 26 static unsigned int sysctl_sched_dl_period_max << 
 27 static unsigned int sysctl_sched_dl_period_min << 
 28 #ifdef CONFIG_SYSCTL                           << 
 29 static struct ctl_table sched_dl_sysctls[] = { << 
 30         {                                      << 
 31                 .procname       = "sched_deadl << 
 32                 .data           = &sysctl_sche << 
 33                 .maxlen         = sizeof(unsig << 
 34                 .mode           = 0644,        << 
 35                 .proc_handler   = proc_douintv << 
 36                 .extra1         = (void *)&sys << 
 37         },                                     << 
 38         {                                      << 
 39                 .procname       = "sched_deadl << 
 40                 .data           = &sysctl_sche << 
 41                 .maxlen         = sizeof(unsig << 
 42                 .mode           = 0644,        << 
 43                 .proc_handler   = proc_douintv << 
 44                 .extra2         = (void *)&sys << 
 45         },                                     << 
 46 };                                             << 
 47                                                << 
 48 static int __init sched_dl_sysctl_init(void)   << 
 49 {                                              << 
 50         register_sysctl_init("kernel", sched_d << 
 51         return 0;                              << 
 52 }                                              << 
 53 late_initcall(sched_dl_sysctl_init);           << 
 54 #endif                                         << 
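
The two sysctls above bound the admissible SCHED_DEADLINE period (values in microseconds); the comment explains why: overly long periods let tiny-utilization tasks obtain absurdly long effective runtimes, and overly short ones invite timer DoS. Below is a minimal userspace sketch of the bounds check; the default values and the nanosecond conversion are assumptions, since the initializers are clipped in the view above.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_USEC 1000ULL

    /* Assumed defaults; the real initializers are truncated in the diff above. */
    static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 s (us) */
    static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us    */

    /* Hypothetical helper: does a period given in nanoseconds fit the bounds? */
    static bool dl_period_in_bounds(uint64_t period_ns)
    {
            uint64_t max_ns = (uint64_t)sysctl_sched_dl_period_max * NSEC_PER_USEC;
            uint64_t min_ns = (uint64_t)sysctl_sched_dl_period_min * NSEC_PER_USEC;

            return period_ns >= min_ns && period_ns <= max_ns;
    }

    int main(void)
    {
            printf("%d\n", dl_period_in_bounds(10ULL * 1000 * 1000)); /* 10 ms -> 1 */
            printf("%d\n", dl_period_in_bounds(50ULL * 1000));        /* 50 us -> 0 */
            return 0;
    }
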
 55                                                << 
 56 static bool dl_server(struct sched_dl_entity * << 
 57 {                                              << 
 58         return dl_se->dl_server;               << 
 59 }                                              << 
 60                                                    22 
 61 static inline struct task_struct *dl_task_of(s     23 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
 62 {                                                  24 {
 63         BUG_ON(dl_server(dl_se));              << 
 64         return container_of(dl_se, struct task     25         return container_of(dl_se, struct task_struct, dl);
 65 }                                                  26 }
 66                                                    27 
 67 static inline struct rq *rq_of_dl_rq(struct dl     28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
 68 {                                                  29 {
 69         return container_of(dl_rq, struct rq,      30         return container_of(dl_rq, struct rq, dl);
 70 }                                                  31 }
 71                                                    32 
 72 static inline struct rq *rq_of_dl_se(struct sc << 
 73 {                                              << 
 74         struct rq *rq = dl_se->rq;             << 
 75                                                << 
 76         if (!dl_server(dl_se))                 << 
 77                 rq = task_rq(dl_task_of(dl_se) << 
 78                                                << 
 79         return rq;                             << 
 80 }                                              << 
 81                                                << 
 82 static inline struct dl_rq *dl_rq_of_se(struct     33 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
 83 {                                                  34 {
 84         return &rq_of_dl_se(dl_se)->dl;        !!  35         struct task_struct *p = dl_task_of(dl_se);
                                                   >>  36         struct rq *rq = task_rq(p);
                                                   >>  37 
                                                   >>  38         return &rq->dl;
 85 }                                                  39 }
 86                                                    40 
 87 static inline int on_dl_rq(struct sched_dl_ent     41 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 88 {                                                  42 {
 89         return !RB_EMPTY_NODE(&dl_se->rb_node)     43         return !RB_EMPTY_NODE(&dl_se->rb_node);
 90 }                                                  44 }
 91                                                    45 
 92 #ifdef CONFIG_RT_MUTEXES                       << 
 93 static inline struct sched_dl_entity *pi_of(st << 
 94 {                                              << 
 95         return dl_se->pi_se;                   << 
 96 }                                              << 
 97                                                << 
 98 static inline bool is_dl_boosted(struct sched_ << 
 99 {                                              << 
100         return pi_of(dl_se) != dl_se;          << 
101 }                                              << 
102 #else                                          << 
103 static inline struct sched_dl_entity *pi_of(st << 
104 {                                              << 
105         return dl_se;                          << 
106 }                                              << 
107                                                << 
108 static inline bool is_dl_boosted(struct sched_ << 
109 {                                              << 
110         return false;                          << 
111 }                                              << 
112 #endif                                         << 
113                                                << 
114 #ifdef CONFIG_SMP                                  46 #ifdef CONFIG_SMP
115 static inline struct dl_bw *dl_bw_of(int i)        47 static inline struct dl_bw *dl_bw_of(int i)
116 {                                                  48 {
117         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_     49         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
118                          "sched RCU must be he     50                          "sched RCU must be held");
119         return &cpu_rq(i)->rd->dl_bw;              51         return &cpu_rq(i)->rd->dl_bw;
120 }                                                  52 }
121                                                    53 
122 static inline int dl_bw_cpus(int i)                54 static inline int dl_bw_cpus(int i)
123 {                                                  55 {
124         struct root_domain *rd = cpu_rq(i)->rd     56         struct root_domain *rd = cpu_rq(i)->rd;
125         int cpus;                                  57         int cpus;
126                                                    58 
127         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_     59         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
128                          "sched RCU must be he     60                          "sched RCU must be held");
129                                                    61 
130         if (cpumask_subset(rd->span, cpu_activ     62         if (cpumask_subset(rd->span, cpu_active_mask))
131                 return cpumask_weight(rd->span     63                 return cpumask_weight(rd->span);
132                                                    64 
133         cpus = 0;                                  65         cpus = 0;
134                                                    66 
135         for_each_cpu_and(i, rd->span, cpu_acti     67         for_each_cpu_and(i, rd->span, cpu_active_mask)
136                 cpus++;                            68                 cpus++;
137                                                    69 
138         return cpus;                               70         return cpus;
139 }                                                  71 }
140                                                    72 
141 static inline unsigned long __dl_bw_capacity(c !!  73 static inline unsigned long __dl_bw_capacity(int i)
142 {                                                  74 {
                                                   >>  75         struct root_domain *rd = cpu_rq(i)->rd;
143         unsigned long cap = 0;                     76         unsigned long cap = 0;
144         int i;                                 << 
145                                                    77 
146         for_each_cpu_and(i, mask, cpu_active_m !!  78         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
147                 cap += arch_scale_cpu_capacity !!  79                          "sched RCU must be held");
                                                   >>  80 
                                                   >>  81         for_each_cpu_and(i, rd->span, cpu_active_mask)
                                                   >>  82                 cap += capacity_orig_of(i);
148                                                    83 
149         return cap;                                84         return cap;
150 }                                                  85 }
151                                                    86 
152 /*                                                 87 /*
153  * XXX Fix: If 'rq->rd == def_root_domain' per     88  * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
154  * of the CPU the task is running on rather rd     89  * of the CPU the task is running on rather rd's \Sum CPU capacity.
155  */                                                90  */
156 static inline unsigned long dl_bw_capacity(int     91 static inline unsigned long dl_bw_capacity(int i)
157 {                                                  92 {
158         if (!sched_asym_cpucap_active() &&     !!  93         if (!static_branch_unlikely(&sched_asym_cpucapacity) &&
159             arch_scale_cpu_capacity(i) == SCHE !!  94             capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
160                 return dl_bw_cpus(i) << SCHED_     95                 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
161         } else {                                   96         } else {
162                 RCU_LOCKDEP_WARN(!rcu_read_loc !!  97                 return __dl_bw_capacity(i);
163                                  "sched RCU mu << 
164                                                << 
165                 return __dl_bw_capacity(cpu_rq << 
166         }                                      << 
167 }                                              << 
168                                                << 
169 static inline bool dl_bw_visited(int cpu, u64  << 
170 {                                              << 
171         struct root_domain *rd = cpu_rq(cpu)-> << 
172                                                << 
173         if (rd->visit_gen == gen)              << 
174                 return true;                   << 
175                                                << 
176         rd->visit_gen = gen;                   << 
177         return false;                          << 
178 }                                              << 
179                                                << 
180 static inline                                  << 
181 void __dl_update(struct dl_bw *dl_b, s64 bw)   << 
182 {                                              << 
183         struct root_domain *rd = container_of( << 
184         int i;                                 << 
185                                                << 
186         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_ << 
187                          "sched RCU must be he << 
188         for_each_cpu_and(i, rd->span, cpu_acti << 
189                 struct rq *rq = cpu_rq(i);     << 
190                                                << 
191                 rq->dl.extra_bw += bw;         << 
192         }                                          98         }
193 }                                                  99 }
194 #else                                             100 #else
195 static inline struct dl_bw *dl_bw_of(int i)       101 static inline struct dl_bw *dl_bw_of(int i)
196 {                                                 102 {
197         return &cpu_rq(i)->dl.dl_bw;              103         return &cpu_rq(i)->dl.dl_bw;
198 }                                                 104 }
199                                                   105 
200 static inline int dl_bw_cpus(int i)               106 static inline int dl_bw_cpus(int i)
201 {                                                 107 {
202         return 1;                                 108         return 1;
203 }                                                 109 }
204                                                   110 
205 static inline unsigned long dl_bw_capacity(int    111 static inline unsigned long dl_bw_capacity(int i)
206 {                                                 112 {
207         return SCHED_CAPACITY_SCALE;              113         return SCHED_CAPACITY_SCALE;
208 }                                                 114 }
209                                                << 
210 static inline bool dl_bw_visited(int cpu, u64  << 
211 {                                              << 
212         return false;                          << 
213 }                                              << 
214                                                << 
215 static inline                                  << 
216 void __dl_update(struct dl_bw *dl_b, s64 bw)   << 
217 {                                              << 
218         struct dl_rq *dl = container_of(dl_b,  << 
219                                                << 
220         dl->extra_bw += bw;                    << 
221 }                                              << 
222 #endif                                            115 #endif
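
A note on dl_bw_capacity() above: when the system is capacity-symmetric and every CPU runs at full scale, summing per-CPU capacities is equivalent to shifting the active CPU count by SCHED_CAPACITY_SHIFT, which is exactly the fast path taken before falling back to __dl_bw_capacity(). A small standalone check of that equivalence; the constants 10 and 1024 are the usual scheduler values, assumed here rather than taken from this file.

    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10
    #define SCHED_CAPACITY_SCALE (1 << SCHED_CAPACITY_SHIFT) /* 1024 */

    int main(void)
    {
            int cpus = 4;
            unsigned long sum = 0;

            /* Slow path: add up per-CPU capacities (all equal when symmetric). */
            for (int i = 0; i < cpus; i++)
                    sum += SCHED_CAPACITY_SCALE;

            /* Fast path, as in dl_bw_capacity(): cpus << SCHED_CAPACITY_SHIFT. */
            printf("%lu == %d\n", sum, cpus << SCHED_CAPACITY_SHIFT); /* 4096 == 4096 */
            return 0;
    }
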
223                                                   116 
224 static inline                                     117 static inline
225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw,  << 
226 {                                              << 
227         dl_b->total_bw -= tsk_bw;              << 
228         __dl_update(dl_b, (s32)tsk_bw / cpus); << 
229 }                                              << 
230                                                << 
231 static inline                                  << 
232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw,  << 
233 {                                              << 
234         dl_b->total_bw += tsk_bw;              << 
235         __dl_update(dl_b, -((s32)tsk_bw / cpus << 
236 }                                              << 
237                                                << 
238 static inline bool                             << 
239 __dl_overflow(struct dl_bw *dl_b, unsigned lon << 
240 {                                              << 
241         return dl_b->bw != -1 &&               << 
242                cap_scale(dl_b->bw, cap) < dl_b << 
243 }                                              << 
244                                                << 
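
The bandwidth helpers above keep admission state in fixed point: each task contributes to_ratio(period, runtime), i.e. runtime/period scaled by 2^BW_SHIFT, and __dl_overflow() rejects a change when the allowed bandwidth, scaled by the root-domain capacity, cannot cover the requested total. A standalone sketch of that arithmetic follows; BW_SHIFT == 20 and the exact overflow expression are assumptions (the kernel line is truncated above), so read it as an approximation rather than the kernel code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BW_SHIFT             20               /* fixed-point shift (assumed) */
    #define BW_UNIT              (1ULL << BW_SHIFT)
    #define SCHED_CAPACITY_SHIFT 10

    /* Simplified to_ratio(): bandwidth = runtime / period in BW_UNIT fixed point. */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << BW_SHIFT) / period;
    }

    /* cap_scale(): scale a bandwidth by a capacity expressed against 1024. */
    static uint64_t cap_scale(uint64_t bw, uint64_t cap)
    {
            return (bw * cap) >> SCHED_CAPACITY_SHIFT;
    }

    /* Admission test in the spirit of __dl_overflow(): true means "reject". */
    static bool dl_overflow(uint64_t allowed_bw, uint64_t cap,
                            uint64_t total_bw, uint64_t old_bw, uint64_t new_bw)
    {
            return cap_scale(allowed_bw, cap) < total_bw - old_bw + new_bw;
    }

    int main(void)
    {
            /* Hypothetical task: 10 ms runtime every 100 ms -> utilization 0.1.  */
            uint64_t task_bw  = to_ratio(100000000ULL, 10000000ULL);
            /* Hypothetical global limit: 95% of each CPU (the usual RT default). */
            uint64_t limit_bw = to_ratio(1000000ULL, 950000ULL);
            uint64_t cap      = 4 << SCHED_CAPACITY_SHIFT;   /* four full CPUs */

            printf("task bw  = %llu/%llu\n",
                   (unsigned long long)task_bw, (unsigned long long)BW_UNIT);
            printf("rejected = %d\n",
                   dl_overflow(limit_bw, cap, 0, 0, task_bw)); /* 0: admitted */
            return 0;
    }
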
245 static inline                                  << 
246 void __add_running_bw(u64 dl_bw, struct dl_rq     118 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
247 {                                                 119 {
248         u64 old = dl_rq->running_bw;              120         u64 old = dl_rq->running_bw;
249                                                   121 
250         lockdep_assert_rq_held(rq_of_dl_rq(dl_ !! 122         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
251         dl_rq->running_bw += dl_bw;               123         dl_rq->running_bw += dl_bw;
252         SCHED_WARN_ON(dl_rq->running_bw < old)    124         SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
253         SCHED_WARN_ON(dl_rq->running_bw > dl_r    125         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
254         /* kick cpufreq (see the comment in ke    126         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
255         cpufreq_update_util(rq_of_dl_rq(dl_rq)    127         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
256 }                                                 128 }
257                                                   129 
258 static inline                                     130 static inline
259 void __sub_running_bw(u64 dl_bw, struct dl_rq     131 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
260 {                                                 132 {
261         u64 old = dl_rq->running_bw;              133         u64 old = dl_rq->running_bw;
262                                                   134 
263         lockdep_assert_rq_held(rq_of_dl_rq(dl_ !! 135         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
264         dl_rq->running_bw -= dl_bw;               136         dl_rq->running_bw -= dl_bw;
265         SCHED_WARN_ON(dl_rq->running_bw > old)    137         SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
266         if (dl_rq->running_bw > old)              138         if (dl_rq->running_bw > old)
267                 dl_rq->running_bw = 0;            139                 dl_rq->running_bw = 0;
268         /* kick cpufreq (see the comment in ke    140         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
269         cpufreq_update_util(rq_of_dl_rq(dl_rq)    141         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
270 }                                                 142 }
271                                                   143 
272 static inline                                     144 static inline
273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_r    145 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
274 {                                                 146 {
275         u64 old = dl_rq->this_bw;                 147         u64 old = dl_rq->this_bw;
276                                                   148 
277         lockdep_assert_rq_held(rq_of_dl_rq(dl_ !! 149         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
278         dl_rq->this_bw += dl_bw;                  150         dl_rq->this_bw += dl_bw;
279         SCHED_WARN_ON(dl_rq->this_bw < old); /    151         SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
280 }                                                 152 }
281                                                   153 
282 static inline                                     154 static inline
283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_r    155 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
284 {                                                 156 {
285         u64 old = dl_rq->this_bw;                 157         u64 old = dl_rq->this_bw;
286                                                   158 
287         lockdep_assert_rq_held(rq_of_dl_rq(dl_ !! 159         lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
288         dl_rq->this_bw -= dl_bw;                  160         dl_rq->this_bw -= dl_bw;
289         SCHED_WARN_ON(dl_rq->this_bw > old); /    161         SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
290         if (dl_rq->this_bw > old)                 162         if (dl_rq->this_bw > old)
291                 dl_rq->this_bw = 0;               163                 dl_rq->this_bw = 0;
292         SCHED_WARN_ON(dl_rq->running_bw > dl_r    164         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
293 }                                                 165 }
294                                                   166 
295 static inline                                     167 static inline
296 void add_rq_bw(struct sched_dl_entity *dl_se,     168 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
297 {                                                 169 {
298         if (!dl_entity_is_special(dl_se))         170         if (!dl_entity_is_special(dl_se))
299                 __add_rq_bw(dl_se->dl_bw, dl_r    171                 __add_rq_bw(dl_se->dl_bw, dl_rq);
300 }                                                 172 }
301                                                   173 
302 static inline                                     174 static inline
303 void sub_rq_bw(struct sched_dl_entity *dl_se,     175 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304 {                                                 176 {
305         if (!dl_entity_is_special(dl_se))         177         if (!dl_entity_is_special(dl_se))
306                 __sub_rq_bw(dl_se->dl_bw, dl_r    178                 __sub_rq_bw(dl_se->dl_bw, dl_rq);
307 }                                                 179 }
308                                                   180 
309 static inline                                     181 static inline
310 void add_running_bw(struct sched_dl_entity *dl    182 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311 {                                                 183 {
312         if (!dl_entity_is_special(dl_se))         184         if (!dl_entity_is_special(dl_se))
313                 __add_running_bw(dl_se->dl_bw,    185                 __add_running_bw(dl_se->dl_bw, dl_rq);
314 }                                                 186 }
315                                                   187 
316 static inline                                     188 static inline
317 void sub_running_bw(struct sched_dl_entity *dl    189 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
318 {                                                 190 {
319         if (!dl_entity_is_special(dl_se))         191         if (!dl_entity_is_special(dl_se))
320                 __sub_running_bw(dl_se->dl_bw,    192                 __sub_running_bw(dl_se->dl_bw, dl_rq);
321 }                                                 193 }
322                                                   194 
323 static void dl_rq_change_utilization(struct rq !! 195 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
324 {                                                 196 {
325         if (dl_se->dl_non_contending) {        !! 197         struct rq *rq;
326                 sub_running_bw(dl_se, &rq->dl) !! 198 
327                 dl_se->dl_non_contending = 0;  !! 199         BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);
                                                   >> 200 
                                                   >> 201         if (task_on_rq_queued(p))
                                                   >> 202                 return;
328                                                   203 
                                                   >> 204         rq = task_rq(p);
                                                   >> 205         if (p->dl.dl_non_contending) {
                                                   >> 206                 sub_running_bw(&p->dl, &rq->dl);
                                                   >> 207                 p->dl.dl_non_contending = 0;
329                 /*                                208                 /*
330                  * If the timer handler is cur    209                  * If the timer handler is currently running and the
331                  * timer cannot be canceled, i !! 210                  * timer cannot be cancelled, inactive_task_timer()
332                  * will see that dl_not_conten    211                  * will see that dl_not_contending is not set, and
333                  * will not touch the rq's act    212                  * will not touch the rq's active utilization,
334                  * so we are still safe.          213                  * so we are still safe.
335                  */                               214                  */
336                 if (hrtimer_try_to_cancel(&dl_ !! 215                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
337                         if (!dl_server(dl_se)) !! 216                         put_task_struct(p);
338                                 put_task_struc << 
339                 }                              << 
340         }                                         217         }
341         __sub_rq_bw(dl_se->dl_bw, &rq->dl);    !! 218         __sub_rq_bw(p->dl.dl_bw, &rq->dl);
342         __add_rq_bw(new_bw, &rq->dl);             219         __add_rq_bw(new_bw, &rq->dl);
343 }                                                 220 }
344                                                   221 
345 static void dl_change_utilization(struct task_ << 
346 {                                              << 
347         WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_ << 
348                                                << 
349         if (task_on_rq_queued(p))              << 
350                 return;                        << 
351                                                << 
352         dl_rq_change_utilization(task_rq(p), & << 
353 }                                              << 
354                                                << 
355 static void __dl_clear_params(struct sched_dl_ << 
356                                                << 
357 /*                                                222 /*
358  * The utilization of a task cannot be immedia    223  * The utilization of a task cannot be immediately removed from
359  * the rq active utilization (running_bw) when    224  * the rq active utilization (running_bw) when the task blocks.
360  * Instead, we have to wait for the so called     225  * Instead, we have to wait for the so called "0-lag time".
361  *                                                226  *
362  * If a task blocks before the "0-lag time", a    227  * If a task blocks before the "0-lag time", a timer (the inactive
363  * timer) is armed, and running_bw is decrease    228  * timer) is armed, and running_bw is decreased when the timer
364  * fires.                                         229  * fires.
365  *                                                230  *
366  * If the task wakes up again before the inact    231  * If the task wakes up again before the inactive timer fires,
367  * the timer is canceled, whereas if the task  !! 232  * the timer is cancelled, whereas if the task wakes up after the
368  * inactive timer fired (and running_bw has be    233  * inactive timer fired (and running_bw has been decreased) the
369  * task's utilization has to be added to runni    234  * task's utilization has to be added to running_bw again.
370  * A flag in the deadline scheduling entity (d    235  * A flag in the deadline scheduling entity (dl_non_contending)
371  * is used to avoid race conditions between th    236  * is used to avoid race conditions between the inactive timer handler
372  * and task wakeups.                              237  * and task wakeups.
373  *                                                238  *
374  * The following diagram shows how running_bw     239  * The following diagram shows how running_bw is updated. A task is
375  * "ACTIVE" when its utilization contributes t    240  * "ACTIVE" when its utilization contributes to running_bw; an
376  * "ACTIVE contending" task is in the TASK_RUN    241  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
377  * "ACTIVE non contending" task is a blocked t    242  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
378  * has not passed yet. An "INACTIVE" task is a    243  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
379  * time already passed, which does not contrib    244  * time already passed, which does not contribute to running_bw anymore.
380  *                              +-------------    245  *                              +------------------+
381  *             wakeup           |    ACTIVE       246  *             wakeup           |    ACTIVE        |
382  *          +------------------>+   contending    247  *          +------------------>+   contending     |
383  *          | add_running_bw    |                 248  *          | add_running_bw    |                  |
384  *          |                   +----+------+-    249  *          |                   +----+------+------+
385  *          |                        |      ^     250  *          |                        |      ^
386  *          |                dequeue |      |     251  *          |                dequeue |      |
387  * +--------+-------+                |      |     252  * +--------+-------+                |      |
388  * |                |   t >= 0-lag   |      |     253  * |                |   t >= 0-lag   |      | wakeup
389  * |    INACTIVE    |<---------------+      |     254  * |    INACTIVE    |<---------------+      |
390  * |                | sub_running_bw |      |     255  * |                | sub_running_bw |      |
391  * +--------+-------+                |      |     256  * +--------+-------+                |      |
392  *          ^                        |      |     257  *          ^                        |      |
393  *          |              t < 0-lag |      |     258  *          |              t < 0-lag |      |
394  *          |                        |      |     259  *          |                        |      |
395  *          |                        V      |     260  *          |                        V      |
396  *          |                   +----+------+-    261  *          |                   +----+------+------+
397  *          | sub_running_bw    |    ACTIVE       262  *          | sub_running_bw    |    ACTIVE        |
398  *          +-------------------+                 263  *          +-------------------+                  |
399  *            inactive timer    |  non contend    264  *            inactive timer    |  non contending  |
400  *            fired             +-------------    265  *            fired             +------------------+
401  *                                                266  *
402  * The task_non_contending() function is invok    267  * The task_non_contending() function is invoked when a task
403  * blocks, and checks if the 0-lag time alread    268  * blocks, and checks if the 0-lag time already passed or
404  * not (in the first case, it directly updates    269  * not (in the first case, it directly updates running_bw;
405  * in the second case, it arms the inactive ti    270  * in the second case, it arms the inactive timer).
406  *                                                271  *
407  * The task_contending() function is invoked w    272  * The task_contending() function is invoked when a task wakes
408  * up, and checks if the task is still in the     273  * up, and checks if the task is still in the "ACTIVE non contending"
409  * state or not (in the second case, it update    274  * state or not (in the second case, it updates running_bw).
410  */                                               275  */
411 static void task_non_contending(struct sched_d !! 276 static void task_non_contending(struct task_struct *p)
412 {                                                 277 {
                                                   >> 278         struct sched_dl_entity *dl_se = &p->dl;
413         struct hrtimer *timer = &dl_se->inacti    279         struct hrtimer *timer = &dl_se->inactive_timer;
414         struct rq *rq = rq_of_dl_se(dl_se);    !! 280         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
415         struct dl_rq *dl_rq = &rq->dl;         !! 281         struct rq *rq = rq_of_dl_rq(dl_rq);
416         s64 zerolag_time;                         282         s64 zerolag_time;
417                                                   283 
418         /*                                        284         /*
419          * If this is a non-deadline task that    285          * If this is a non-deadline task that has been boosted,
420          * do nothing                             286          * do nothing
421          */                                       287          */
422         if (dl_se->dl_runtime == 0)               288         if (dl_se->dl_runtime == 0)
423                 return;                           289                 return;
424                                                   290 
425         if (dl_entity_is_special(dl_se))          291         if (dl_entity_is_special(dl_se))
426                 return;                           292                 return;
427                                                   293 
428         WARN_ON(dl_se->dl_non_contending);        294         WARN_ON(dl_se->dl_non_contending);
429                                                   295 
430         zerolag_time = dl_se->deadline -          296         zerolag_time = dl_se->deadline -
431                  div64_long((dl_se->runtime *     297                  div64_long((dl_se->runtime * dl_se->dl_period),
432                         dl_se->dl_runtime);       298                         dl_se->dl_runtime);
433                                                   299 
434         /*                                        300         /*
435          * Using relative times instead of the    301          * Using relative times instead of the absolute "0-lag time"
436          * allows to simplify the code            302          * allows to simplify the code
437          */                                       303          */
438         zerolag_time -= rq_clock(rq);             304         zerolag_time -= rq_clock(rq);
439                                                   305 
440         /*                                        306         /*
441          * If the "0-lag time" already passed,    307          * If the "0-lag time" already passed, decrease the active
442          * utilization now, instead of startin    308          * utilization now, instead of starting a timer
443          */                                       309          */
444         if ((zerolag_time < 0) || hrtimer_acti    310         if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
445                 if (dl_server(dl_se)) {        !! 311                 if (dl_task(p))
446                         sub_running_bw(dl_se,     312                         sub_running_bw(dl_se, dl_rq);
447                 } else {                       !! 313                 if (!dl_task(p) || p->state == TASK_DEAD) {
448                         struct task_struct *p  !! 314                         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
449                                                   315 
450                         if (dl_task(p))        !! 316                         if (p->state == TASK_DEAD)
451                                 sub_running_bw !! 317                                 sub_rq_bw(&p->dl, &rq->dl);
452                                                !! 318                         raw_spin_lock(&dl_b->lock);
453                         if (!dl_task(p) || REA !! 319                         __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
454                                 struct dl_bw * !! 320                         __dl_clear_params(p);
455                                                !! 321                         raw_spin_unlock(&dl_b->lock);
456                                 if (READ_ONCE( << 
457                                         sub_rq << 
458                                 raw_spin_lock( << 
459                                 __dl_sub(dl_b, << 
460                                 raw_spin_unloc << 
461                                 __dl_clear_par << 
462                         }                      << 
463                 }                                 322                 }
464                                                   323 
465                 return;                           324                 return;
466         }                                         325         }
467                                                   326 
468         dl_se->dl_non_contending = 1;             327         dl_se->dl_non_contending = 1;
469         if (!dl_server(dl_se))                 !! 328         get_task_struct(p);
470                 get_task_struct(dl_task_of(dl_ << 
471                                                << 
472         hrtimer_start(timer, ns_to_ktime(zerol    329         hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
473 }                                                 330 }
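
task_non_contending() above arms the inactive timer for the "0-lag time": the instant at which the remaining runtime, consumed at the reserved rate dl_runtime/dl_period, would bring the entity's lag to zero. A worked numeric sketch of the same expression, with all values hypothetical:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            /* Reservation: 10 ms every 100 ms; 4 ms of budget still unused. */
            int64_t dl_runtime = 10000000;     /* ns                             */
            int64_t dl_period  = 100000000;    /* ns                             */
            int64_t runtime    = 4000000;      /* ns left in the current budget  */
            int64_t deadline   = 250000000;    /* absolute deadline (ns)         */
            int64_t now        = 190000000;    /* current rq clock (ns)          */

            /* Same shape as the div64_long() expression in task_non_contending(). */
            int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime - now;

            /* 250 ms - 40 ms - 190 ms = 20 ms: arm the timer 20 ms from now.
             * A negative result would mean the 0-lag time already passed, and
             * running_bw is decreased immediately instead. */
            printf("relative 0-lag time: %" PRId64 " ns\n", zerolag);
            return 0;
    }
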
474                                                   331 
475 static void task_contending(struct sched_dl_en    332 static void task_contending(struct sched_dl_entity *dl_se, int flags)
476 {                                                 333 {
477         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    334         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
478                                                   335 
479         /*                                        336         /*
480          * If this is a non-deadline task that    337          * If this is a non-deadline task that has been boosted,
481          * do nothing                             338          * do nothing
482          */                                       339          */
483         if (dl_se->dl_runtime == 0)               340         if (dl_se->dl_runtime == 0)
484                 return;                           341                 return;
485                                                   342 
486         if (flags & ENQUEUE_MIGRATED)             343         if (flags & ENQUEUE_MIGRATED)
487                 add_rq_bw(dl_se, dl_rq);          344                 add_rq_bw(dl_se, dl_rq);
488                                                   345 
489         if (dl_se->dl_non_contending) {           346         if (dl_se->dl_non_contending) {
490                 dl_se->dl_non_contending = 0;     347                 dl_se->dl_non_contending = 0;
491                 /*                                348                 /*
492                  * If the timer handler is cur    349                  * If the timer handler is currently running and the
493                  * timer cannot be canceled, i !! 350                  * timer cannot be cancelled, inactive_task_timer()
494                  * will see that dl_not_conten    351                  * will see that dl_not_contending is not set, and
495                  * will not touch the rq's act    352                  * will not touch the rq's active utilization,
496                  * so we are still safe.          353                  * so we are still safe.
497                  */                               354                  */
498                 if (hrtimer_try_to_cancel(&dl_ !! 355                 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
499                         if (!dl_server(dl_se)) !! 356                         put_task_struct(dl_task_of(dl_se));
500                                 put_task_struc << 
501                 }                              << 
502         } else {                                  357         } else {
503                 /*                                358                 /*
504                  * Since "dl_non_contending" i    359                  * Since "dl_non_contending" is not set, the
505                  * task's utilization has alre    360                  * task's utilization has already been removed from
506                  * active utilization (either     361                  * active utilization (either when the task blocked,
507                  * when the "inactive timer" f    362                  * when the "inactive timer" fired).
508                  * So, add it back.               363                  * So, add it back.
509                  */                               364                  */
510                 add_running_bw(dl_se, dl_rq);     365                 add_running_bw(dl_se, dl_rq);
511         }                                         366         }
512 }                                                 367 }
513                                                   368 
514 static inline int is_leftmost(struct sched_dl_ !! 369 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
515 {                                                 370 {
516         return rb_first_cached(&dl_rq->root) = !! 371         struct sched_dl_entity *dl_se = &p->dl;
                                                   >> 372 
                                                   >> 373         return dl_rq->root.rb_leftmost == &dl_se->rb_node;
517 }                                                 374 }
518                                                   375 
519 static void init_dl_rq_bw_ratio(struct dl_rq *    376 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
520                                                   377 
                                                   >> 378 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
                                                   >> 379 {
                                                   >> 380         raw_spin_lock_init(&dl_b->dl_runtime_lock);
                                                   >> 381         dl_b->dl_period = period;
                                                   >> 382         dl_b->dl_runtime = runtime;
                                                   >> 383 }
                                                   >> 384 
521 void init_dl_bw(struct dl_bw *dl_b)               385 void init_dl_bw(struct dl_bw *dl_b)
522 {                                                 386 {
523         raw_spin_lock_init(&dl_b->lock);          387         raw_spin_lock_init(&dl_b->lock);
                                                   >> 388         raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
524         if (global_rt_runtime() == RUNTIME_INF    389         if (global_rt_runtime() == RUNTIME_INF)
525                 dl_b->bw = -1;                    390                 dl_b->bw = -1;
526         else                                      391         else
527                 dl_b->bw = to_ratio(global_rt_    392                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
                                                   >> 393         raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
528         dl_b->total_bw = 0;                       394         dl_b->total_bw = 0;
529 }                                                 395 }
530                                                   396 
531 void init_dl_rq(struct dl_rq *dl_rq)              397 void init_dl_rq(struct dl_rq *dl_rq)
532 {                                                 398 {
533         dl_rq->root = RB_ROOT_CACHED;             399         dl_rq->root = RB_ROOT_CACHED;
534                                                   400 
535 #ifdef CONFIG_SMP                                 401 #ifdef CONFIG_SMP
536         /* zero means no -deadline tasks */       402         /* zero means no -deadline tasks */
537         dl_rq->earliest_dl.curr = dl_rq->earli    403         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
538                                                   404 
                                                   >> 405         dl_rq->dl_nr_migratory = 0;
539         dl_rq->overloaded = 0;                    406         dl_rq->overloaded = 0;
540         dl_rq->pushable_dl_tasks_root = RB_ROO    407         dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
541 #else                                             408 #else
542         init_dl_bw(&dl_rq->dl_bw);                409         init_dl_bw(&dl_rq->dl_bw);
543 #endif                                            410 #endif
544                                                   411 
545         dl_rq->running_bw = 0;                    412         dl_rq->running_bw = 0;
546         dl_rq->this_bw = 0;                       413         dl_rq->this_bw = 0;
547         init_dl_rq_bw_ratio(dl_rq);               414         init_dl_rq_bw_ratio(dl_rq);
548 }                                                 415 }
549                                                   416 
550 #ifdef CONFIG_SMP                                 417 #ifdef CONFIG_SMP
551                                                   418 
552 static inline int dl_overloaded(struct rq *rq)    419 static inline int dl_overloaded(struct rq *rq)
553 {                                                 420 {
554         return atomic_read(&rq->rd->dlo_count)    421         return atomic_read(&rq->rd->dlo_count);
555 }                                                 422 }
556                                                   423 
557 static inline void dl_set_overload(struct rq *    424 static inline void dl_set_overload(struct rq *rq)
558 {                                                 425 {
559         if (!rq->online)                          426         if (!rq->online)
560                 return;                           427                 return;
561                                                   428 
562         cpumask_set_cpu(rq->cpu, rq->rd->dlo_m    429         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
563         /*                                        430         /*
564          * Must be visible before the overload    431          * Must be visible before the overload count is
565          * set (as in sched_rt.c).                432          * set (as in sched_rt.c).
566          *                                        433          *
567          * Matched by the barrier in pull_dl_t    434          * Matched by the barrier in pull_dl_task().
568          */                                       435          */
569         smp_wmb();                                436         smp_wmb();
570         atomic_inc(&rq->rd->dlo_count);           437         atomic_inc(&rq->rd->dlo_count);
571 }                                                 438 }
572                                                   439 
573 static inline void dl_clear_overload(struct rq    440 static inline void dl_clear_overload(struct rq *rq)
574 {                                                 441 {
575         if (!rq->online)                          442         if (!rq->online)
576                 return;                           443                 return;
577                                                   444 
578         atomic_dec(&rq->rd->dlo_count);           445         atomic_dec(&rq->rd->dlo_count);
579         cpumask_clear_cpu(rq->cpu, rq->rd->dlo    446         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
580 }                                                 447 }
581                                                   448 
582 #define __node_2_pdl(node) \                   !! 449 static void update_dl_migration(struct dl_rq *dl_rq)
583         rb_entry((node), struct task_struct, p !! 450 {
                                                   >> 451         if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                                                   >> 452                 if (!dl_rq->overloaded) {
                                                   >> 453                         dl_set_overload(rq_of_dl_rq(dl_rq));
                                                   >> 454                         dl_rq->overloaded = 1;
                                                   >> 455                 }
                                                   >> 456         } else if (dl_rq->overloaded) {
                                                   >> 457                 dl_clear_overload(rq_of_dl_rq(dl_rq));
                                                   >> 458                 dl_rq->overloaded = 0;
                                                   >> 459         }
                                                   >> 460 }
584                                                   461 
585 static inline bool __pushable_less(struct rb_n !! 462 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
586 {                                                 463 {
587         return dl_entity_preempt(&__node_2_pdl !! 464         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 465 
                                                   >> 466         if (p->nr_cpus_allowed > 1)
                                                   >> 467                 dl_rq->dl_nr_migratory++;
                                                   >> 468 
                                                   >> 469         update_dl_migration(dl_rq);
588 }                                                 470 }
589                                                   471 
590 static inline int has_pushable_dl_tasks(struct !! 472 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
591 {                                                 473 {
592         return !RB_EMPTY_ROOT(&rq->dl.pushable !! 474         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 475 
                                                   >> 476         if (p->nr_cpus_allowed > 1)
                                                   >> 477                 dl_rq->dl_nr_migratory--;
                                                   >> 478 
                                                   >> 479         update_dl_migration(dl_rq);
593 }                                                 480 }
594                                                   481 
595 /*                                                482 /*
596  * The list of pushable -deadline task is not     483  * The list of pushable -deadline task is not a plist, like in
597  * sched_rt.c, it is an rb-tree with tasks ord    484  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
598  */                                               485  */
599 static void enqueue_pushable_dl_task(struct rq    486 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
600 {                                                 487 {
601         struct rb_node *leftmost;              !! 488         struct dl_rq *dl_rq = &rq->dl;
602                                                !! 489         struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
603         WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushab !! 490         struct rb_node *parent = NULL;
                                                   >> 491         struct task_struct *entry;
                                                   >> 492         bool leftmost = true;
                                                   >> 493 
                                                   >> 494         BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
                                                   >> 495 
                                                   >> 496         while (*link) {
                                                   >> 497                 parent = *link;
                                                   >> 498                 entry = rb_entry(parent, struct task_struct,
                                                   >> 499                                  pushable_dl_tasks);
                                                   >> 500                 if (dl_entity_preempt(&p->dl, &entry->dl))
                                                   >> 501                         link = &parent->rb_left;
                                                   >> 502                 else {
                                                   >> 503                         link = &parent->rb_right;
                                                   >> 504                         leftmost = false;
                                                   >> 505                 }
                                                   >> 506         }
604                                                   507 
605         leftmost = rb_add_cached(&p->pushable_ << 
606                                  &rq->dl.pusha << 
607                                  __pushable_le << 
608         if (leftmost)                             508         if (leftmost)
609                 rq->dl.earliest_dl.next = p->d !! 509                 dl_rq->earliest_dl.next = p->dl.deadline;
610                                                   510 
611         if (!rq->dl.overloaded) {              !! 511         rb_link_node(&p->pushable_dl_tasks, parent, link);
612                 dl_set_overload(rq);           !! 512         rb_insert_color_cached(&p->pushable_dl_tasks,
613                 rq->dl.overloaded = 1;         !! 513                                &dl_rq->pushable_dl_tasks_root, leftmost);
614         }                                      << 
615 }                                                 514 }
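
Both versions keep pushable tasks in an rb-tree ordered by absolute deadline through dl_entity_preempt(); the newer side merely replaces the hand-rolled insertion loop with rb_add_cached() plus the __pushable_less() comparator. Underneath, the ordering relies on a wraparound-safe comparison of 64-bit deadlines, as in dl_time_before() from the scheduler headers; a minimal sketch of that idiom:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "a is earlier than b" for monotonically increasing u64 clocks. */
    static bool dl_time_before(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
            /* Plain case: 100 comes before 200. */
            printf("%d\n", dl_time_before(100, 200));            /* 1 */

            /* Near wraparound: UINT64_MAX - 5 still sorts before 10, because the
             * unsigned difference reinterpreted as signed is negative. */
            printf("%d\n", dl_time_before(UINT64_MAX - 5, 10));  /* 1 */
            return 0;
    }
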
616                                                   515 
617 static void dequeue_pushable_dl_task(struct rq    516 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
618 {                                                 517 {
619         struct dl_rq *dl_rq = &rq->dl;            518         struct dl_rq *dl_rq = &rq->dl;
620         struct rb_root_cached *root = &dl_rq-> << 
621         struct rb_node *leftmost;              << 
622                                                   519 
623         if (RB_EMPTY_NODE(&p->pushable_dl_task    520         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
624                 return;                           521                 return;
625                                                   522 
626         leftmost = rb_erase_cached(&p->pushabl !! 523         if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
627         if (leftmost)                          !! 524                 struct rb_node *next_node;
628                 dl_rq->earliest_dl.next = __no !! 525 
                                                   >> 526                 next_node = rb_next(&p->pushable_dl_tasks);
                                                   >> 527                 if (next_node) {
                                                   >> 528                         dl_rq->earliest_dl.next = rb_entry(next_node,
                                                   >> 529                                 struct task_struct, pushable_dl_tasks)->dl.deadline;
                                                   >> 530                 }
                                                   >> 531         }
629                                                   532 
                                                   >> 533         rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
630         RB_CLEAR_NODE(&p->pushable_dl_tasks);     534         RB_CLEAR_NODE(&p->pushable_dl_tasks);
                                                   >> 535 }
631                                                   536 
632         if (!has_pushable_dl_tasks(rq) && rq-> !! 537 static inline int has_pushable_dl_tasks(struct rq *rq)
633                 dl_clear_overload(rq);         !! 538 {
634                 rq->dl.overloaded = 0;         !! 539         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
635         }                                      << 
636 }                                                 540 }
637                                                   541 
638 static int push_dl_task(struct rq *rq);           542 static int push_dl_task(struct rq *rq);
639                                                   543 
640 static inline bool need_pull_dl_task(struct rq    544 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
641 {                                                 545 {
642         return rq->online && dl_task(prev);    !! 546         return dl_task(prev);
643 }                                                 547 }
644                                                   548 
645 static DEFINE_PER_CPU(struct balance_callback, !! 549 static DEFINE_PER_CPU(struct callback_head, dl_push_head);
646 static DEFINE_PER_CPU(struct balance_callback, !! 550 static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
647                                                   551 
648 static void push_dl_tasks(struct rq *);           552 static void push_dl_tasks(struct rq *);
649 static void pull_dl_task(struct rq *);            553 static void pull_dl_task(struct rq *);
650                                                   554 
651 static inline void deadline_queue_push_tasks(s    555 static inline void deadline_queue_push_tasks(struct rq *rq)
652 {                                                 556 {
653         if (!has_pushable_dl_tasks(rq))           557         if (!has_pushable_dl_tasks(rq))
654                 return;                           558                 return;
655                                                   559 
656         queue_balance_callback(rq, &per_cpu(dl    560         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
657 }                                                 561 }
658                                                   562 
659 static inline void deadline_queue_pull_task(st    563 static inline void deadline_queue_pull_task(struct rq *rq)
660 {                                                 564 {
661         queue_balance_callback(rq, &per_cpu(dl    565         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
662 }                                                 566 }
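
These two helpers only record that balancing work is pending; the push/pull itself runs from a per-CPU callback once the rq lock can be dropped. Below is a rough, single-threaded userspace model of that defer-then-run pattern; the types and names are made up for illustration and are not the kernel's API:

#include <stdio.h>

struct balance_cb {
	void (*func)(void *arg);
	void *arg;
	int queued;
};

/* Remember one piece of work while the "lock" is held... */
static void queue_cb(struct balance_cb *cb, void (*func)(void *), void *arg)
{
	if (cb->queued)
		return;			/* already queued, like the per-CPU heads above */
	cb->func = func;
	cb->arg = arg;
	cb->queued = 1;
}

/* ...and run it once the lock has been dropped. */
static void run_cb(struct balance_cb *cb)
{
	if (!cb->queued)
		return;
	cb->queued = 0;
	cb->func(cb->arg);
}

static void push_work(void *arg)
{
	printf("pushing tasks away from CPU %d\n", *(int *)arg);
}

int main(void)
{
	struct balance_cb cb = { 0 };
	int cpu = 1;

	queue_cb(&cb, push_work, &cpu);	/* under the (conceptual) rq lock */
	run_cb(&cb);			/* after the lock is released */
	return 0;
}
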
663                                                   567 
664 static struct rq *find_lock_later_rq(struct ta    568 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
665                                                   569 
666 static struct rq *dl_task_offline_migration(st    570 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
667 {                                                 571 {
668         struct rq *later_rq = NULL;               572         struct rq *later_rq = NULL;
669         struct dl_bw *dl_b;                       573         struct dl_bw *dl_b;
670                                                   574 
671         later_rq = find_lock_later_rq(p, rq);     575         later_rq = find_lock_later_rq(p, rq);
672         if (!later_rq) {                          576         if (!later_rq) {
673                 int cpu;                          577                 int cpu;
674                                                   578 
675                 /*                                579                 /*
676                  * If we cannot preempt any rq    580                  * If we cannot preempt any rq, fall back to pick any
677                  * online CPU:                    581                  * online CPU:
678                  */                               582                  */
679                 cpu = cpumask_any_and(cpu_acti    583                 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
680                 if (cpu >= nr_cpu_ids) {          584                 if (cpu >= nr_cpu_ids) {
681                         /*                        585                         /*
682                          * Failed to find any     586                          * Failed to find any suitable CPU.
683                          * The task will never    587                          * The task will never come back!
684                          */                       588                          */
685                         WARN_ON_ONCE(dl_bandwi !! 589                         BUG_ON(dl_bandwidth_enabled());
686                                                   590 
687                         /*                        591                         /*
688                          * If admission contro    592                          * If admission control is disabled we
689                          * try a little harder    593                          * try a little harder to let the task
690                          * run.                   594                          * run.
691                          */                       595                          */
692                         cpu = cpumask_any(cpu_    596                         cpu = cpumask_any(cpu_active_mask);
693                 }                                 597                 }
694                 later_rq = cpu_rq(cpu);           598                 later_rq = cpu_rq(cpu);
695                 double_lock_balance(rq, later_    599                 double_lock_balance(rq, later_rq);
696         }                                         600         }
697                                                   601 
698         if (p->dl.dl_non_contending || p->dl.d    602         if (p->dl.dl_non_contending || p->dl.dl_throttled) {
699                 /*                                603                 /*
700                  * Inactive timer is armed (or    604                  * Inactive timer is armed (or callback is running, but
701                  * waiting for us to release r    605                  * waiting for us to release rq locks). In any case, when it
702                  * fires (or continues), it wi    606                  * fires (or continues), it will see running_bw of this
703                  * task migrated to later_rq (    607                  * task migrated to later_rq (and correctly handle it).
704                  */                               608                  */
705                 sub_running_bw(&p->dl, &rq->dl    609                 sub_running_bw(&p->dl, &rq->dl);
706                 sub_rq_bw(&p->dl, &rq->dl);       610                 sub_rq_bw(&p->dl, &rq->dl);
707                                                   611 
708                 add_rq_bw(&p->dl, &later_rq->d    612                 add_rq_bw(&p->dl, &later_rq->dl);
709                 add_running_bw(&p->dl, &later_    613                 add_running_bw(&p->dl, &later_rq->dl);
710         } else {                                  614         } else {
711                 sub_rq_bw(&p->dl, &rq->dl);       615                 sub_rq_bw(&p->dl, &rq->dl);
712                 add_rq_bw(&p->dl, &later_rq->d    616                 add_rq_bw(&p->dl, &later_rq->dl);
713         }                                         617         }
714                                                   618 
715         /*                                        619         /*
716          * And we finally need to fix up root_ !! 620          * And we finally need to fixup root_domain(s) bandwidth accounting,
717          * since p is still hanging out in the    621          * since p is still hanging out in the old (now moved to default) root
718          * domain.                                622          * domain.
719          */                                       623          */
720         dl_b = &rq->rd->dl_bw;                    624         dl_b = &rq->rd->dl_bw;
721         raw_spin_lock(&dl_b->lock);               625         raw_spin_lock(&dl_b->lock);
722         __dl_sub(dl_b, p->dl.dl_bw, cpumask_we    626         __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
723         raw_spin_unlock(&dl_b->lock);             627         raw_spin_unlock(&dl_b->lock);
724                                                   628 
725         dl_b = &later_rq->rd->dl_bw;              629         dl_b = &later_rq->rd->dl_bw;
726         raw_spin_lock(&dl_b->lock);               630         raw_spin_lock(&dl_b->lock);
727         __dl_add(dl_b, p->dl.dl_bw, cpumask_we    631         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
728         raw_spin_unlock(&dl_b->lock);             632         raw_spin_unlock(&dl_b->lock);
729                                                   633 
730         set_task_cpu(p, later_rq->cpu);           634         set_task_cpu(p, later_rq->cpu);
731         double_unlock_balance(later_rq, rq);      635         double_unlock_balance(later_rq, rq);
732                                                   636 
733         return later_rq;                          637         return later_rq;
734 }                                                 638 }
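
Stripped of the locking details, the root-domain fix-up above is "subtract the task's bandwidth from the pool it leaves, add it to the pool it joins, each under that pool's lock". A simplified userspace model (pthread mutexes instead of raw spinlocks, invented names, no per-CPU redistribution):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct dl_bw: only the allocated-bandwidth total. */
struct bw_pool {
	pthread_mutex_t lock;
	uint64_t total_bw;
};

/* Move one task's bandwidth between pools, mirroring the __dl_sub()/__dl_add()
 * pair above: each pool is updated under its own lock. */
static void bw_pool_move(struct bw_pool *from, struct bw_pool *to, uint64_t tsk_bw)
{
	pthread_mutex_lock(&from->lock);
	from->total_bw -= tsk_bw;
	pthread_mutex_unlock(&from->lock);

	pthread_mutex_lock(&to->lock);
	to->total_bw += tsk_bw;
	pthread_mutex_unlock(&to->lock);
}

int main(void)
{
	struct bw_pool old_rd = { PTHREAD_MUTEX_INITIALIZER, 500000 };
	struct bw_pool new_rd = { PTHREAD_MUTEX_INITIALIZER, 100000 };

	bw_pool_move(&old_rd, &new_rd, 50000);	/* bandwidth follows the migrating task */
	printf("old=%llu new=%llu\n", (unsigned long long)old_rd.total_bw,
	       (unsigned long long)new_rd.total_bw);
	return 0;
}
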
735                                                   639 
736 #else                                             640 #else
737                                                   641 
738 static inline                                     642 static inline
739 void enqueue_pushable_dl_task(struct rq *rq, s    643 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740 {                                                 644 {
741 }                                                 645 }
742                                                   646 
743 static inline                                     647 static inline
744 void dequeue_pushable_dl_task(struct rq *rq, s    648 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
745 {                                                 649 {
746 }                                                 650 }
747                                                   651 
748 static inline                                     652 static inline
749 void inc_dl_migration(struct sched_dl_entity *    653 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750 {                                                 654 {
751 }                                                 655 }
752                                                   656 
753 static inline                                     657 static inline
754 void dec_dl_migration(struct sched_dl_entity *    658 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
755 {                                                 659 {
756 }                                                 660 }
757                                                   661 
                                                   >> 662 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
                                                   >> 663 {
                                                   >> 664         return false;
                                                   >> 665 }
                                                   >> 666 
                                                   >> 667 static inline void pull_dl_task(struct rq *rq)
                                                   >> 668 {
                                                   >> 669 }
                                                   >> 670 
758 static inline void deadline_queue_push_tasks(s    671 static inline void deadline_queue_push_tasks(struct rq *rq)
759 {                                                 672 {
760 }                                                 673 }
761                                                   674 
762 static inline void deadline_queue_pull_task(st    675 static inline void deadline_queue_pull_task(struct rq *rq)
763 {                                                 676 {
764 }                                                 677 }
765 #endif /* CONFIG_SMP */                           678 #endif /* CONFIG_SMP */
766                                                   679 
767 static void                                    << 
768 enqueue_dl_entity(struct sched_dl_entity *dl_s << 
769 static void enqueue_task_dl(struct rq *rq, str    680 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
770 static void dequeue_dl_entity(struct sched_dl_ !! 681 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
771 static void wakeup_preempt_dl(struct rq *rq, s !! 682 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
772                                                << 
773 static inline void replenish_dl_new_period(str << 
774                                             st << 
775 {                                              << 
776         /* for non-boosted task, pi_of(dl_se)  << 
777         dl_se->deadline = rq_clock(rq) + pi_of << 
778         dl_se->runtime = pi_of(dl_se)->dl_runt << 
779                                                << 
780         /*                                     << 
781          * If it is a deferred reservation, an << 
782          * is not handling a starvation case,  <<
783          */                                    << 
784         if (dl_se->dl_defer & !dl_se->dl_defer << 
785                 dl_se->dl_throttled = 1;       << 
786                 dl_se->dl_defer_armed = 1;     << 
787         }                                      << 
788 }                                              << 
789                                                   683 
790 /*                                                684 /*
791  * We are being explicitly informed that a new    685  * We are being explicitly informed that a new instance is starting,
792  * and this means that:                           686  * and this means that:
793  *  - the absolute deadline of the entity has     687  *  - the absolute deadline of the entity has to be placed at
794  *    current time + relative deadline;           688  *    current time + relative deadline;
795  *  - the runtime of the entity has to be set     689  *  - the runtime of the entity has to be set to the maximum value.
796  *                                                690  *
797  * The capability of specifying such an event    691  * The capability of specifying such an event is useful whenever a -deadline
798  * entity wants to (try to!) synchronize its b    692  * entity wants to (try to!) synchronize its behaviour with the scheduler's
799  * one, and to (try to!) reconcile itself with    693  * one, and to (try to!) reconcile itself with its own scheduling
800  * parameters.                                    694  * parameters.
801  */                                               695  */
802 static inline void setup_new_dl_entity(struct     696 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
803 {                                                 697 {
804         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    698         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
805         struct rq *rq = rq_of_dl_rq(dl_rq);       699         struct rq *rq = rq_of_dl_rq(dl_rq);
806                                                   700 
807         WARN_ON(is_dl_boosted(dl_se));         !! 701         WARN_ON(dl_se->dl_boosted);
808         WARN_ON(dl_time_before(rq_clock(rq), d    702         WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
809                                                   703 
810         /*                                        704         /*
811          * We are racing with the deadline tim    705          * We are racing with the deadline timer. So, do nothing because
812          * the deadline timer handler will tak    706          * the deadline timer handler will take care of properly recharging
813          * the runtime and postponing the dead    707          * the runtime and postponing the deadline
814          */                                       708          */
815         if (dl_se->dl_throttled)                  709         if (dl_se->dl_throttled)
816                 return;                           710                 return;
817                                                   711 
818         /*                                        712         /*
819          * We use the regular wall clock time     713          * We use the regular wall clock time to set deadlines in the
820          * future; in fact, we must consider e    714          * future; in fact, we must consider execution overheads (time
821          * spent on hardirq context, etc.).       715          * spent on hardirq context, etc.).
822          */                                       716          */
823         replenish_dl_new_period(dl_se, rq);    !! 717         dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
                                                   >> 718         dl_se->runtime = dl_se->dl_runtime;
824 }                                                 719 }
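
Once the throttled case is excluded, this boils down to two assignments. A standalone sketch of that arithmetic, using a made-up struct that carries only the fields involved:

#include <stdint.h>

/* Only the sched_dl_entity fields this path touches, as a plain struct. */
struct dl_params {
	uint64_t dl_runtime;	/* maximum runtime per instance (ns) */
	uint64_t dl_deadline;	/* relative deadline (ns) */
	uint64_t deadline;	/* absolute deadline (ns) */
	int64_t  runtime;	/* remaining runtime (ns) */
};

/* A new instance starts: full budget, absolute deadline one relative
 * deadline away from "now" (the rq clock in the kernel code). */
static void start_new_instance(struct dl_params *se, uint64_t now_ns)
{
	se->deadline = now_ns + se->dl_deadline;
	se->runtime  = (int64_t)se->dl_runtime;
}
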
825                                                   720 
826 static int start_dl_timer(struct sched_dl_enti << 
827 static bool dl_entity_overflow(struct sched_dl << 
828                                                << 
829 /*                                                721 /*
830  * Pure Earliest Deadline First (EDF) scheduli    722  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
831  * possibility of an entity lasting more than     723  * possibility of an entity lasting more than what it declared, and thus
832  * exhausting its runtime.                        724  * exhausting its runtime.
833  *                                                725  *
834  * Here we are interested in making runtime ov    726  * Here we are interested in making runtime overrun possible, but we do
835  * not want an entity which is misbehaving to     727  * not want an entity which is misbehaving to affect the scheduling of all
836  * other entities.                                728  * other entities.
837  * Therefore, a budgeting strategy called Cons    729  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
838  * is used, in order to confine each entity wi    730  * is used, in order to confine each entity within its own bandwidth.
839  *                                                731  *
840  * This function deals exactly with that, and     732  * This function deals exactly with that, and ensures that when the runtime
841  * of an entity is replenished, its deadline     733  * of an entity is replenished, its deadline is also postponed. That ensures
842  * the overrunning entity can't interfere with    734  * the overrunning entity can't interfere with other entities in the system and
843  * can't make them miss their deadlines. Reaso    735  * can't make them miss their deadlines. Reasons why this kind of overrun
844  * could happen are, typically, an entity volu    736  * could happen are, typically, an entity voluntarily trying to overcome its
845  * runtime, or it just underestimated it durin    737  * runtime, or it just underestimated it during sched_setattr().
846  */                                               738  */
847 static void replenish_dl_entity(struct sched_d !! 739 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 740                                 struct sched_dl_entity *pi_se)
848 {                                                 741 {
849         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    742         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
850         struct rq *rq = rq_of_dl_rq(dl_rq);       743         struct rq *rq = rq_of_dl_rq(dl_rq);
851                                                   744 
852         WARN_ON_ONCE(pi_of(dl_se)->dl_runtime  !! 745         BUG_ON(pi_se->dl_runtime <= 0);
853                                                   746 
854         /*                                        747         /*
855          * This could be the case for a !-dl t    748          * This could be the case for a !-dl task that is boosted.
856          * Just go with full inherited paramet    749          * Just go with full inherited parameters.
857          *                                     << 
858          * Or, it could be the case of a defer << 
859          * was not able to consume its runtime << 
860          * reached this point with current u > << 
861          *                                     << 
862          * In both cases, set a new period.    << 
863          */                                       750          */
864         if (dl_se->dl_deadline == 0 ||         !! 751         if (dl_se->dl_deadline == 0) {
865             (dl_se->dl_defer_armed && dl_entit !! 752                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
866                 dl_se->deadline = rq_clock(rq) !! 753                 dl_se->runtime = pi_se->dl_runtime;
867                 dl_se->runtime = pi_of(dl_se)- << 
868         }                                         754         }
869                                                   755 
870         if (dl_se->dl_yielded && dl_se->runtim    756         if (dl_se->dl_yielded && dl_se->runtime > 0)
871                 dl_se->runtime = 0;               757                 dl_se->runtime = 0;
872                                                   758 
873         /*                                        759         /*
874          * We keep moving the deadline away un    760          * We keep moving the deadline away until we get some
875          * available runtime for the entity. T    761          * available runtime for the entity. This ensures correct
876          * handling of situations where the ru    762          * handling of situations where the runtime overrun is
877          * arbitrarily large.                     763          * arbitrarily large.
878          */                                       764          */
879         while (dl_se->runtime <= 0) {             765         while (dl_se->runtime <= 0) {
880                 dl_se->deadline += pi_of(dl_se !! 766                 dl_se->deadline += pi_se->dl_period;
881                 dl_se->runtime += pi_of(dl_se) !! 767                 dl_se->runtime += pi_se->dl_runtime;
882         }                                         768         }
883                                                   769 
884         /*                                        770         /*
885          * At this point, the deadline really     771          * At this point, the deadline really should be "in
886          * the future" with respect to rq->clo    772          * the future" with respect to rq->clock. If it's
887          * not, we are, for some reason, laggi    773          * not, we are, for some reason, lagging too much!
888          * Anyway, after having warned userspa    774          * Anyway, after having warned userspace about that,
889          * we still try to keep things running    775          * we still try to keep things running by
890          * resetting the deadline and the budg    776          * resetting the deadline and the budget of the
891          * entity.                                777          * entity.
892          */                                       778          */
893         if (dl_time_before(dl_se->deadline, rq    779         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
894                 printk_deferred_once("sched: D    780                 printk_deferred_once("sched: DL replenish lagged too much\n");
895                 replenish_dl_new_period(dl_se, !! 781                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                                                   >> 782                 dl_se->runtime = pi_se->dl_runtime;
896         }                                         783         }
897                                                   784 
898         if (dl_se->dl_yielded)                    785         if (dl_se->dl_yielded)
899                 dl_se->dl_yielded = 0;            786                 dl_se->dl_yielded = 0;
900         if (dl_se->dl_throttled)                  787         if (dl_se->dl_throttled)
901                 dl_se->dl_throttled = 0;          788                 dl_se->dl_throttled = 0;
902                                                << 
903         /*                                     << 
904          * If this is the replenishment of a d << 
905          * clear the flag and return.          << 
906          */                                    << 
907         if (dl_se->dl_defer_armed) {           << 
908                 dl_se->dl_defer_armed = 0;     << 
909                 return;                        << 
910         }                                      << 
911                                                << 
912         /*                                     << 
913          * A this point, if the deferred serve << 
914          * is in the future, if it is not runn << 
915          * and arm the defer timer.            << 
916          */                                    << 
917         if (dl_se->dl_defer && !dl_se->dl_defe << 
918             dl_time_before(rq_clock(dl_se->rq) << 
919                 if (!is_dl_boosted(dl_se) && d << 
920                                                << 
921                         /*                     << 
922                          * Set dl_se->dl_defer << 
923                          * inform the start_dl << 
924                          * activation.         << 
925                          */                    << 
926                         dl_se->dl_defer_armed  << 
927                         dl_se->dl_throttled =  << 
928                         if (!start_dl_timer(dl << 
929                                 /*             << 
930                                  * If for what << 
931                                  * queued but  << 
932                                  * deferrable  << 
933                                  */            << 
934                                 hrtimer_try_to << 
935                                 dl_se->dl_defe << 
936                                 dl_se->dl_thro << 
937                         }                      << 
938                 }                              << 
939         }                                      << 
940 }                                                 789 }
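
The core of the replenishment above, as a compact userspace sketch: it assumes an implicit-deadline task (dl_deadline == dl_period) and ignores the throttling flags, priority inheritance and the deferred-server cases; struct and function names are invented:

#include <stdint.h>
#include <stdio.h>

struct cbs {
	uint64_t dl_runtime;	/* budget per period (ns) */
	uint64_t dl_period;	/* period == relative deadline here (ns) */
	uint64_t deadline;	/* absolute deadline (ns) */
	int64_t  runtime;	/* remaining budget, negative after an overrun */
};

/* Postpone the deadline one period at a time while topping the budget up,
 * then fall back to a fresh period if we lagged past the deadline anyway. */
static void cbs_replenish(struct cbs *se, uint64_t now_ns)
{
	while (se->runtime <= 0) {
		se->deadline += se->dl_period;
		se->runtime  += (int64_t)se->dl_runtime;
	}

	if ((int64_t)(se->deadline - now_ns) < 0) {
		se->deadline = now_ns + se->dl_period;
		se->runtime  = (int64_t)se->dl_runtime;
	}
}

int main(void)
{
	struct cbs se = { 10000000, 100000000, 100000000, -25000000 };

	cbs_replenish(&se, 90000000);
	printf("deadline=%llu ns, runtime=%lld ns\n",
	       (unsigned long long)se.deadline, (long long)se.runtime);
	return 0;
}
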
941                                                   790 
942 /*                                                791 /*
943  * Here we check if --at time t-- an entity (w    792  * Here we check if --at time t-- an entity (which is probably being
944  * [re]activated or, in general, enqueued) can    793  * [re]activated or, in general, enqueued) can use its remaining runtime
945  * and its current deadline _without_ exceedin    794  * and its current deadline _without_ exceeding the bandwidth it is
946  * assigned (function returns true if it can't    795  * assigned (function returns true if it can't). We are in fact applying
947  * one of the CBS rules: when a task wakes up,    796  * one of the CBS rules: when a task wakes up, if the residual runtime
948  * over residual deadline fits within the allo    797  * over residual deadline fits within the allocated bandwidth, then we
949  * can keep the current (absolute) deadline an    798  * can keep the current (absolute) deadline and residual budget without
950  * disrupting the schedulability of the system    799  * disrupting the schedulability of the system. Otherwise, we should
951  * refill the runtime and set the deadline a p    800  * refill the runtime and set the deadline a period in the future,
952  * because keeping the current (absolute) dead    801  * because keeping the current (absolute) deadline of the task would
953  * result in breaking guarantees promised to o    802  * result in breaking guarantees promised to other tasks (refer to
954  * Documentation/scheduler/sched-deadline.rst     803  * Documentation/scheduler/sched-deadline.rst for more information).
955  *                                                804  *
956  * This function returns true if:                 805  * This function returns true if:
957  *                                                806  *
958  *   runtime / (deadline - t) > dl_runtime / d    807  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
959  *                                                808  *
960  * IOW we can't recycle current parameters.       809  * IOW we can't recycle current parameters.
961  *                                                810  *
962  * Notice that the bandwidth check is done aga    811  * Notice that the bandwidth check is done against the deadline. For
963  * task with deadline equal to period this is     812  * task with deadline equal to period this is the same of using
964  * dl_period instead of dl_deadline in the equ    813  * dl_period instead of dl_deadline in the equation above.
965  */                                               814  */
966 static bool dl_entity_overflow(struct sched_dl !! 815 static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                                                   >> 816                                struct sched_dl_entity *pi_se, u64 t)
967 {                                                 817 {
968         u64 left, right;                          818         u64 left, right;
969                                                   819 
970         /*                                        820         /*
971          * left and right are the two sides of    821          * left and right are the two sides of the equation above,
972          * after a bit of shuffling to use mul    822          * after a bit of shuffling to use multiplications instead
973          * of divisions.                          823          * of divisions.
974          *                                        824          *
975          * Note that none of the time values i    825          * Note that none of the time values involved in the two
976          * multiplications are absolute: dl_de    826          * multiplications are absolute: dl_deadline and dl_runtime
977          * are the relative deadline and the m    827          * are the relative deadline and the maximum runtime of each
978          * instance, runtime is the runtime le    828          * instance, runtime is the runtime left for the last instance
979          * and (deadline - t), since t is rq->    829          * and (deadline - t), since t is rq->clock, is the time left
980          * to the (absolute) deadline. Even if    830          * to the (absolute) deadline. Even if overflowing the u64 type
981          * is very unlikely to occur in both c    831          * is very unlikely to occur in both cases, here we scale down
982          * as we want to avoid that risk at al    832          * as we want to avoid that risk at all. Scaling down by 10
983          * means that we reduce granularity to    833          * means that we reduce granularity to 1us. We are fine with it,
984          * since this is only a true/false che    834          * since this is only a true/false check and, anyway, thinking
985          * of anything below microseconds reso    835          * of anything below microseconds resolution is actually fiction
986          * (but still we want to give the user    836          * (but still we want to give the user that illusion >;).
987          */                                       837          */
988         left = (pi_of(dl_se)->dl_deadline >> D !! 838         left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
989         right = ((dl_se->deadline - t) >> DL_S    839         right = ((dl_se->deadline - t) >> DL_SCALE) *
990                 (pi_of(dl_se)->dl_runtime >> D !! 840                 (pi_se->dl_runtime >> DL_SCALE);
991                                                   841 
992         return dl_time_before(right, left);       842         return dl_time_before(right, left);
993 }                                                 843 }
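
The same check as a standalone helper: scale both sides down by DL_SCALE and cross-multiply, so no division is needed and the u64 products cannot overflow. The function name is invented; the constant mirrors the roughly 1us granularity described in the comment above:

#include <stdint.h>
#include <stdbool.h>

#define DL_SCALE 10	/* drop ~10 bits: about 1us granularity */

/* True if runtime / (deadline - now) > dl_runtime / dl_deadline, evaluated
 * by cross-multiplying the scaled-down operands instead of dividing. */
static bool cbs_overflow(uint64_t runtime, uint64_t abs_deadline, uint64_t now,
			 uint64_t dl_runtime, uint64_t dl_deadline)
{
	uint64_t left  = (dl_deadline >> DL_SCALE) * (runtime >> DL_SCALE);
	uint64_t right = ((abs_deadline - now) >> DL_SCALE) *
			 (dl_runtime >> DL_SCALE);

	return (int64_t)(right - left) < 0;	/* "right before left" */
}
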
994                                                   844 
995 /*                                                845 /*
996  * Revised wakeup rule [1]: For self-suspendin    846  * Revised wakeup rule [1]: For self-suspending tasks, rather than
997  * re-initializing task's runtime and deadline    847  * re-initializing task's runtime and deadline, the revised wakeup
998  * rule adjusts the task's runtime to avoid th    848  * rule adjusts the task's runtime to avoid the task overrunning its
999  * density.                                       849  * density.
1000  *                                               850  *
1001  * Reasoning: a task may overrun the density     851  * Reasoning: a task may overrun the density if:
1002  *    runtime / (deadline - t) > dl_runtime /    852  *    runtime / (deadline - t) > dl_runtime / dl_deadline
1003  *                                               853  *
1004  * Therefore, runtime can be adjusted to:        854  * Therefore, runtime can be adjusted to:
1005  *     runtime = (dl_runtime / dl_deadline) *    855  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
1006  *                                               856  *
1007  * In such a way that runtime will be equal t    857  * In such a way that runtime will be equal to the maximum density
1008  * the task can use without breaking any rule    858  * the task can use without breaking any rule.
1009  *                                               859  *
1010  * [1] Luca Abeni, Giuseppe Lipari, and Juri     860  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
1011  * bandwidth server revisited. SIGBED Rev. 11    861  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
1012  */                                              862  */
1013 static void                                      863 static void
1014 update_dl_revised_wakeup(struct sched_dl_enti    864 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
1015 {                                                865 {
1016         u64 laxity = dl_se->deadline - rq_clo    866         u64 laxity = dl_se->deadline - rq_clock(rq);
1017                                                  867 
1018         /*                                       868         /*
1019          * If the task has deadline < period,    869          * If the task has deadline < period, and the deadline is in the past,
1020          * it should already be throttled bef    870          * it should already be throttled before this check.
1021          *                                       871          *
1022          * See update_dl_entity() comments fo    872          * See update_dl_entity() comments for further details.
1023          */                                      873          */
1024         WARN_ON(dl_time_before(dl_se->deadlin    874         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
1025                                                  875 
1026         dl_se->runtime = (dl_se->dl_density *    876         dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
1027 }                                                877 }
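
A standalone sketch of the revised rule: the task's density (dl_runtime / dl_deadline, kept in fixed point) scales the remaining laxity into the largest runtime that cannot overrun that density. The shift value and helper names here are assumptions made for the example:

#include <stdint.h>

#define BW_SHIFT 20	/* fixed-point shift assumed for the density value */

/* dl_runtime / dl_deadline kept in fixed point, computed once at admission. */
static uint64_t task_density(uint64_t dl_runtime, uint64_t dl_deadline)
{
	return (dl_runtime << BW_SHIFT) / dl_deadline;
}

/* Revised wakeup rule: the remaining laxity scaled by the density is the
 * largest runtime the task can consume without overrunning that density. */
static uint64_t revised_runtime(uint64_t density, uint64_t abs_deadline,
				uint64_t now)
{
	return (density * (abs_deadline - now)) >> BW_SHIFT;
}
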
1028                                                  878 
1029 /*                                               879 /*
1030  * Regarding the deadline, a task with implic    880  * Regarding the deadline, a task with implicit deadline has a relative
1031  * deadline == relative period. A task with c    881  * deadline == relative period. A task with constrained deadline has a
1032  * relative deadline <= relative period.         882  * relative deadline <= relative period.
1033  *                                               883  *
1034  * We support constrained deadline tasks. How    884  * We support constrained deadline tasks. However, there are some restrictions
1035  * applied only for tasks which do not have a    885  * applied only for tasks which do not have an implicit deadline. See
1036  * update_dl_entity() to know more about such    886  * update_dl_entity() to know more about such restrictions.
1037  *                                               887  *
1038  * The dl_is_implicit() returns true if the t    888  * The dl_is_implicit() returns true if the task has an implicit deadline.
1039  */                                              889  */
1040 static inline bool dl_is_implicit(struct sche    890 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1041 {                                                891 {
1042         return dl_se->dl_deadline == dl_se->d    892         return dl_se->dl_deadline == dl_se->dl_period;
1043 }                                                893 }
1044                                                  894 
1045 /*                                               895 /*
1046  * When a deadline entity is placed in the ru    896  * When a deadline entity is placed in the runqueue, its runtime and deadline
1047  * might need to be updated. This is done by     897  * might need to be updated. This is done by a CBS wake up rule. There are two
1048  * different rules: 1) the original CBS; and     898  * different rules: 1) the original CBS; and 2) the Revisited CBS.
1049  *                                               899  *
1050  * When the task is starting a new period, th    900  * When the task is starting a new period, the Original CBS is used. In this
1051  * case, the runtime is replenished and a new    901  * case, the runtime is replenished and a new absolute deadline is set.
1052  *                                               902  *
1053  * When a task is queued before the beginning    903  * When a task is queued before the beginning of the next period, using the
1054  * remaining runtime and deadline could make     904  * remaining runtime and deadline could make the entity overflow, see
1055  * dl_entity_overflow() to find more about ru    905  * dl_entity_overflow() to find more about runtime overflow. When such case
1056  * is detected, the runtime and deadline need    906  * is detected, the runtime and deadline need to be updated.
1057  *                                               907  *
1058  * If the task has an implicit deadline, i.e.    908  * If the task has an implicit deadline, i.e., deadline == period, the Original
1059  * CBS is applied. The runtime is replenished !! 909  * CBS is applied. the runtime is replenished and a new absolute deadline is
1060  * set, as in the previous cases.                910  * set, as in the previous cases.
1061  *                                               911  *
1062  * However, the Original CBS does not work pr    912  * However, the Original CBS does not work properly for tasks with
1063  * deadline < period, which are said to have     913  * deadline < period, which are said to have a constrained deadline. By
1064  * applying the Original CBS, a constrained d    914  * applying the Original CBS, a constrained deadline task would be able to run
1065  * runtime/deadline in a period. With deadlin    915  * runtime/deadline in a period. With deadline < period, the task would
1066  * overrun the runtime/period allowed bandwid    916  * overrun the runtime/period allowed bandwidth, breaking the admission test.
1067  *                                               917  *
1068  * In order to prevent this misbehavior, the     918  * In order to prevent this misbehavior, the Revisited CBS is used for
1069  * constrained deadline tasks when a runtime     919  * constrained deadline tasks when a runtime overflow is detected. In the
1070  * Revisited CBS, rather than replenishing &     920  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1071  * the remaining runtime of the task is reduc    921  * the remaining runtime of the task is reduced to avoid runtime overflow.
1072  * Please refer to the comments of the update    922  * Please refer to the comments of the update_dl_revised_wakeup() function to find
1073  * more about the Revised CBS rule.              923  * more about the Revised CBS rule.
1074  */                                              924  */
1075 static void update_dl_entity(struct sched_dl_ !! 925 static void update_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 926                              struct sched_dl_entity *pi_se)
1076 {                                                927 {
1077         struct rq *rq = rq_of_dl_se(dl_se);   !! 928         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
                                                   >> 929         struct rq *rq = rq_of_dl_rq(dl_rq);
1078                                                  930 
1079         if (dl_time_before(dl_se->deadline, r    931         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1080             dl_entity_overflow(dl_se, rq_cloc !! 932             dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
1081                                                  933 
1082                 if (unlikely(!dl_is_implicit(    934                 if (unlikely(!dl_is_implicit(dl_se) &&
1083                              !dl_time_before(    935                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1084                              !is_dl_boosted(d !! 936                              !dl_se->dl_boosted)){
1085                         update_dl_revised_wak    937                         update_dl_revised_wakeup(dl_se, rq);
1086                         return;                  938                         return;
1087                 }                                939                 }
1088                                                  940 
1089                 replenish_dl_new_period(dl_se !! 941                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1090         } else if (dl_server(dl_se) && dl_se- !! 942                 dl_se->runtime = pi_se->dl_runtime;
1091                 /*                            << 
1092                  * The server can still use i << 
1093                  * it left the dl_defer_runni << 
1094                  */                           << 
1095                 if (!dl_se->dl_defer_running) << 
1096                         dl_se->dl_defer_armed << 
1097                         dl_se->dl_throttled = << 
1098                 }                             << 
1099         }                                        943         }
1100 }                                                944 }
1101                                                  945 
1102 static inline u64 dl_next_period(struct sched    946 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1103 {                                                947 {
1104         return dl_se->deadline - dl_se->dl_de    948         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1105 }                                                949 }
1106                                                  950 
1107 /*                                               951 /*
1108  * If the entity depleted all its runtime, an    952  * If the entity depleted all its runtime, and if we want it to sleep
1109  * while waiting for some new execution time     953  * while waiting for some new execution time to become available, we
1110  * set the bandwidth replenishment timer to t    954  * set the bandwidth replenishment timer to the replenishment instant
1111  * and try to activate it.                       955  * and try to activate it.
1112  *                                               956  *
1113  * Notice that it is important for the caller    957  * Notice that it is important for the caller to know if the timer
1114  * actually started or not (i.e., the repleni    958  * actually started or not (i.e., the replenishment instant is in
1115  * the future or in the past).                   959  * the future or in the past).
1116  */                                              960  */
1117 static int start_dl_timer(struct sched_dl_ent !! 961 static int start_dl_timer(struct task_struct *p)
1118 {                                                962 {
                                                   >> 963         struct sched_dl_entity *dl_se = &p->dl;
1119         struct hrtimer *timer = &dl_se->dl_ti    964         struct hrtimer *timer = &dl_se->dl_timer;
1120         struct dl_rq *dl_rq = dl_rq_of_se(dl_ !! 965         struct rq *rq = task_rq(p);
1121         struct rq *rq = rq_of_dl_rq(dl_rq);   << 
1122         ktime_t now, act;                        966         ktime_t now, act;
1123         s64 delta;                               967         s64 delta;
1124                                                  968 
1125         lockdep_assert_rq_held(rq);           !! 969         lockdep_assert_held(&rq->lock);
1126                                                  970 
1127         /*                                       971         /*
1128          * We want the timer to fire at the d    972          * We want the timer to fire at the deadline, but considering
1129          * that it is actually coming from rq    973          * that it is actually coming from rq->clock and not from
1130          * hrtimer's time base reading.          974          * hrtimer's time base reading.
1131          *                                    !! 975          */
1132          * The deferred reservation will have !! 976         act = ns_to_ktime(dl_next_period(dl_se));
1133          * (deadline - runtime). At that poin << 
1134          * if the current deadline can be use << 
1135          * required to avoid add too much pre << 
1136          * (current u > U).                   << 
1137          */                                   << 
1138         if (dl_se->dl_defer_armed) {          << 
1139                 WARN_ON_ONCE(!dl_se->dl_throt << 
1140                 act = ns_to_ktime(dl_se->dead << 
1141         } else {                              << 
1142                 /* act = deadline - rel-deadl << 
1143                 act = ns_to_ktime(dl_next_per << 
1144         }                                     << 
1145                                               << 
1146         now = hrtimer_cb_get_time(timer);        977         now = hrtimer_cb_get_time(timer);
1147         delta = ktime_to_ns(now) - rq_clock(r    978         delta = ktime_to_ns(now) - rq_clock(rq);
1148         act = ktime_add_ns(act, delta);          979         act = ktime_add_ns(act, delta);
1149                                                  980 
1150         /*                                       981         /*
1151          * If the expiry time already passed,    982          * If the expiry time already passed, e.g., because the value
1152          * chosen as the deadline is too smal    983          * chosen as the deadline is too small, don't even try to
1153          * start the timer in the past!          984          * start the timer in the past!
1154          */                                      985          */
1155         if (ktime_us_delta(act, now) < 0)        986         if (ktime_us_delta(act, now) < 0)
1156                 return 0;                        987                 return 0;
1157                                                  988 
1158         /*                                       989         /*
1159          * !enqueued will guarantee another c    990          * !enqueued will guarantee another callback; even if one is already in
1160          * progress. This ensures a balanced     991          * progress. This ensures a balanced {get,put}_task_struct().
1161          *                                       992          *
1162          * The race against __run_timer() cle    993          * The race against __run_timer() clearing the enqueued state is
1163          * harmless because we're holding tas    994          * harmless because we're holding task_rq()->lock, therefore the timer
1164          * expiring after we've done the chec    995          * expiring after we've done the check will wait on its task_rq_lock()
1165          * and observe our state.                996          * and observe our state.
1166          */                                      997          */
1167         if (!hrtimer_is_queued(timer)) {         998         if (!hrtimer_is_queued(timer)) {
1168                 if (!dl_server(dl_se))        !! 999                 get_task_struct(p);
1169                         get_task_struct(dl_ta << 
1170                 hrtimer_start(timer, act, HRT    1000                 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1171         }                                        1001         }
1172                                                  1002 
1173         return 1;                                1003         return 1;
1174 }                                                1004 }
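
The clock-base juggling above, in isolation: the expiry is computed on the rq clock, so it is shifted by the current offset between the hrtimer base and rq_clock() before arming, and a target already in the past is simply not armed. A small sketch with invented names:

#include <stdint.h>

/* Translate an expiry expressed on the rq clock into the hrtimer's time base
 * by applying the current offset between the two clocks. Returns 0 if the
 * translated expiry is already in the past, mimicking the early return above. */
static uint64_t dl_timer_expiry(uint64_t target_rq_ns, uint64_t rq_clock_ns,
				uint64_t hrtimer_now_ns)
{
	int64_t delta = (int64_t)(hrtimer_now_ns - rq_clock_ns);
	uint64_t act = target_rq_ns + (uint64_t)delta;

	if ((int64_t)(act - hrtimer_now_ns) < 0)
		return 0;	/* too late: don't arm the timer */

	return act;
}
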
1175                                                  1005 
1176 static void __push_dl_task(struct rq *rq, str << 
1177 {                                             << 
1178 #ifdef CONFIG_SMP                             << 
1179         /*                                    << 
1180          * Queueing this task back might have << 
1181          * to kick someone away.              << 
1182          */                                   << 
1183         if (has_pushable_dl_tasks(rq)) {      << 
1184                 /*                            << 
1185                  * Nothing relies on rq->lock << 
1186                  * rq->lock.                  << 
1187                  */                           << 
1188                 rq_unpin_lock(rq, rf);        << 
1189                 push_dl_task(rq);             << 
1190                 rq_repin_lock(rq, rf);        << 
1191         }                                     << 
1192 #endif                                        << 
1193 }                                             << 
1194                                               << 
1195 /* a defer timer will not be reset if the run << 
1196 static const u64 dl_server_min_res = 1 * NSEC << 
1197                                               << 
1198 static enum hrtimer_restart dl_server_timer(s << 
1199 {                                             << 
1200         struct rq *rq = rq_of_dl_se(dl_se);   << 
1201         u64 fw;                               << 
1202                                               << 
1203         scoped_guard (rq_lock, rq) {          << 
1204                 struct rq_flags *rf = &scope. << 
1205                                               << 
1206                 if (!dl_se->dl_throttled || ! << 
1207                         return HRTIMER_NOREST << 
1208                                               << 
1209                 sched_clock_tick();           << 
1210                 update_rq_clock(rq);          << 
1211                                               << 
1212                 if (!dl_se->dl_runtime)       << 
1213                         return HRTIMER_NOREST << 
1214                                               << 
1215                 if (!dl_se->server_has_tasks( << 
1216                         replenish_dl_entity(d << 
1217                         return HRTIMER_NOREST << 
1218                 }                             << 
1219                                               << 
1220                 if (dl_se->dl_defer_armed) {  << 
1221                         /*                    << 
1222                          * First check if the << 
1223                          * If so, it is possi << 
1224                          * of time. The dl_se << 
1225                          * forwarding the tim << 
1226                          */                   << 
1227                         if (dl_time_before(rq << 
1228                                            (d << 
1229                                               << 
1230                                 /* reset the  << 
1231                                 fw = dl_se->d << 
1232                                               << 
1233                                 hrtimer_forwa << 
1234                                 return HRTIME << 
1235                         }                     << 
1236                                               << 
1237                         dl_se->dl_defer_runni << 
1238                 }                             << 
1239                                               << 
1240                 enqueue_dl_entity(dl_se, ENQU << 
1241                                               << 
1242                 if (!dl_task(dl_se->rq->curr) << 
1243                         resched_curr(rq);     << 
1244                                               << 
1245                 __push_dl_task(rq, rf);       << 
1246         }                                     << 
1247                                               << 
1248         return HRTIMER_NORESTART;             << 
1249 }                                             << 
1250                                               << 
1251 /*                                               1006 /*
1252  * This is the bandwidth enforcement timer ca    1007  * This is the bandwidth enforcement timer callback. If here, we know
1253  * a task is not on its dl_rq, since the fact    1008  * a task is not on its dl_rq, since the fact that the timer was running
1254  * means the task is throttled and needs a ru    1009  * means the task is throttled and needs a runtime replenishment.
1255  *                                               1010  *
1256  * However, what we actually do depends on wh    1011  * However, what we actually do depends on whether the task is active
1257  * (it is on its rq) or has been removed from    1012  * (it is on its rq) or has been removed from there by a call to
1258  * dequeue_task_dl(). In the former case we m    1013  * dequeue_task_dl(). In the former case we must issue the runtime
1259  * replenishment and add the task back to the    1014  * replenishment and add the task back to the dl_rq; in the latter, we just
1260  * do nothing but clearing dl_throttled, so t    1015  * do nothing but clearing dl_throttled, so that runtime and deadline
1261  * updating (and the queueing back to dl_rq)     1016  * updating (and the queueing back to dl_rq) will be done by the
1262  * next call to enqueue_task_dl().               1017  * next call to enqueue_task_dl().
1263  */                                              1018  */
1264 static enum hrtimer_restart dl_task_timer(str    1019 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1265 {                                                1020 {
1266         struct sched_dl_entity *dl_se = conta    1021         struct sched_dl_entity *dl_se = container_of(timer,
1267                                                  1022                                                      struct sched_dl_entity,
1268                                                  1023                                                      dl_timer);
1269         struct task_struct *p;                !! 1024         struct task_struct *p = dl_task_of(dl_se);
1270         struct rq_flags rf;                      1025         struct rq_flags rf;
1271         struct rq *rq;                           1026         struct rq *rq;
1272                                                  1027 
1273         if (dl_server(dl_se))                 << 
1274                 return dl_server_timer(timer, << 
1275                                               << 
1276         p = dl_task_of(dl_se);                << 
1277         rq = task_rq_lock(p, &rf);               1028         rq = task_rq_lock(p, &rf);
1278                                                  1029 
1279         /*                                       1030         /*
1280          * The task might have changed its sc    1031          * The task might have changed its scheduling policy to something
1281          * different than SCHED_DEADLINE (thr    1032          * different than SCHED_DEADLINE (through switched_from_dl()).
1282          */                                      1033          */
1283         if (!dl_task(p))                         1034         if (!dl_task(p))
1284                 goto unlock;                     1035                 goto unlock;
1285                                                  1036 
1286         /*                                       1037         /*
1287          * The task might have been boosted b    1038          * The task might have been boosted by someone else and might be in the
1288          * boosting/deboosting path, it's not     1039          * boosting/deboosting path, it's not throttled.
1289          */                                      1040          */
1290         if (is_dl_boosted(dl_se))             !! 1041         if (dl_se->dl_boosted)
1291                 goto unlock;                     1042                 goto unlock;
1292                                                  1043 
1293         /*                                       1044         /*
1294          * Spurious timer due to start_dl_tim    1045          * Spurious timer due to start_dl_timer() race; or we already received
1295          * a replenishment from rt_mutex_setp    1046          * a replenishment from rt_mutex_setprio().
1296          */                                      1047          */
1297         if (!dl_se->dl_throttled)                1048         if (!dl_se->dl_throttled)
1298                 goto unlock;                     1049                 goto unlock;
1299                                                  1050 
1300         sched_clock_tick();                      1051         sched_clock_tick();
1301         update_rq_clock(rq);                     1052         update_rq_clock(rq);
1302                                                  1053 
1303         /*                                       1054         /*
1304          * If the throttle happened during sc    1055          * If the throttle happened during sched-out; like:
1305          *                                       1056          *
1306          *   schedule()                          1057          *   schedule()
1307          *     deactivate_task()                 1058          *     deactivate_task()
1308          *       dequeue_task_dl()               1059          *       dequeue_task_dl()
1309          *         update_curr_dl()              1060          *         update_curr_dl()
1310          *           start_dl_timer()            1061          *           start_dl_timer()
1311          *         __dequeue_task_dl()           1062          *         __dequeue_task_dl()
1312          *     prev->on_rq = 0;                  1063          *     prev->on_rq = 0;
1313          *                                       1064          *
1314          * We can be both throttled and !queu    1065          * We can be both throttled and !queued. Replenish the counter
1315          * but do not enqueue -- wait for our    1066          * but do not enqueue -- wait for our wakeup to do that.
1316          */                                      1067          */
1317         if (!task_on_rq_queued(p)) {             1068         if (!task_on_rq_queued(p)) {
1318                 replenish_dl_entity(dl_se);   !! 1069                 replenish_dl_entity(dl_se, dl_se);
1319                 goto unlock;                     1070                 goto unlock;
1320         }                                        1071         }
1321                                                  1072 
1322 #ifdef CONFIG_SMP                                1073 #ifdef CONFIG_SMP
1323         if (unlikely(!rq->online)) {             1074         if (unlikely(!rq->online)) {
1324                 /*                               1075                 /*
1325                  * If the runqueue is no long    1076                  * If the runqueue is no longer available, migrate the
1326                  * task elsewhere. This neces    1077                  * task elsewhere. This necessarily changes rq.
1327                  */                              1078                  */
1328                 lockdep_unpin_lock(__rq_lockp !! 1079                 lockdep_unpin_lock(&rq->lock, rf.cookie);
1329                 rq = dl_task_offline_migratio    1080                 rq = dl_task_offline_migration(rq, p);
1330                 rf.cookie = lockdep_pin_lock( !! 1081                 rf.cookie = lockdep_pin_lock(&rq->lock);
1331                 update_rq_clock(rq);             1082                 update_rq_clock(rq);
1332                                                  1083 
1333                 /*                               1084                 /*
1334                  * Now that the task has been    1085                  * Now that the task has been migrated to the new RQ and we
1335                  * have that locked, proceed     1086                  * have that locked, proceed as normal and enqueue the task
1336                  * there.                        1087                  * there.
1337                  */                              1088                  */
1338         }                                        1089         }
1339 #endif                                           1090 #endif
1340                                                  1091 
1341         enqueue_task_dl(rq, p, ENQUEUE_REPLEN    1092         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1342         if (dl_task(rq->curr))                   1093         if (dl_task(rq->curr))
1343                 wakeup_preempt_dl(rq, p, 0);  !! 1094                 check_preempt_curr_dl(rq, p, 0);
1344         else                                     1095         else
1345                 resched_curr(rq);                1096                 resched_curr(rq);
1346                                                  1097 
1347         __push_dl_task(rq, &rf);              !! 1098 #ifdef CONFIG_SMP
                                                   >> 1099         /*
                                                   >> 1100          * Queueing this task back might have overloaded rq, check if we need
                                                   >> 1101          * to kick someone away.
                                                   >> 1102          */
                                                   >> 1103         if (has_pushable_dl_tasks(rq)) {
                                                   >> 1104                 /*
                                                   >> 1105                  * Nothing relies on rq->lock after this, so its safe to drop
                                                   >> 1106                  * rq->lock.
                                                   >> 1107                  */
                                                   >> 1108                 rq_unpin_lock(rq, &rf);
                                                   >> 1109                 push_dl_task(rq);
                                                   >> 1110                 rq_repin_lock(rq, &rf);
                                                   >> 1111         }
                                                   >> 1112 #endif
1348                                                  1113 
1349 unlock:                                          1114 unlock:
1350         task_rq_unlock(rq, p, &rf);              1115         task_rq_unlock(rq, p, &rf);
1351                                                  1116 
1352         /*                                       1117         /*
1353          * This can free the task_struct, inc    1118          * This can free the task_struct, including this hrtimer, do not touch
1354          * anything related to that after thi    1119          * anything related to that after this.
1355          */                                      1120          */
1356         put_task_struct(p);                      1121         put_task_struct(p);
1357                                                  1122 
1358         return HRTIMER_NORESTART;                1123         return HRTIMER_NORESTART;
1359 }                                                1124 }
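
The replenishment that this callback ultimately performs follows the usual CBS rule: keep pushing the deadline one period ahead and recharging one runtime's worth of budget until the budget is positive again. The stand-alone sketch below mirrors only that rule for an implicit-deadline reservation (deadline == period); the struct, helper name and sample values are invented for illustration and are not the kernel's replenish_dl_entity().

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the few sched_dl_entity fields involved. */
struct dl_params {
        int64_t  runtime;       /* remaining budget, ns (may be negative) */
        uint64_t deadline;      /* absolute scheduling deadline, ns       */
        uint64_t dl_runtime;    /* reserved budget per period, ns         */
        uint64_t dl_period;     /* reservation period, ns                 */
};

/* CBS-style replenishment: charge full periods until the budget is positive. */
static void replenish_sketch(struct dl_params *dl, uint64_t now)
{
        while (dl->runtime <= 0) {
                dl->deadline += dl->dl_period;
                dl->runtime  += (int64_t)dl->dl_runtime;
        }

        /*
         * If the deadline still ended up in the past (the entity was
         * throttled for a long time), start a fresh period from "now".
         */
        if (dl->deadline < now) {
                dl->deadline = now + dl->dl_period;
                dl->runtime  = (int64_t)dl->dl_runtime;
        }
}

int main(void)
{
        struct dl_params dl = {
                .runtime    = -500000,          /* overran by 0.5 ms        */
                .deadline   = 10000000,         /* old deadline at t = 10 ms */
                .dl_runtime = 2000000,          /* 2 ms every ...            */
                .dl_period  = 10000000,         /* ... 10 ms                 */
        };

        replenish_sketch(&dl, 12000000);        /* timer fires at t = 12 ms */
        printf("runtime=%lld ns deadline=%llu ns\n",
               (long long)dl.runtime, (unsigned long long)dl.deadline);
        return 0;
}

Compiled on its own, this shows a 0.5 ms overrun being absorbed: the entity leaves the timer with 1.5 ms of budget and a deadline one period further out.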
1360                                                  1125 
1361 static void init_dl_task_timer(struct sched_d !! 1126 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1362 {                                                1127 {
1363         struct hrtimer *timer = &dl_se->dl_ti    1128         struct hrtimer *timer = &dl_se->dl_timer;
1364                                                  1129 
1365         hrtimer_init(timer, CLOCK_MONOTONIC,     1130         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1366         timer->function = dl_task_timer;         1131         timer->function = dl_task_timer;
1367 }                                                1132 }
1368                                                  1133 
1369 /*                                               1134 /*
1370  * During the activation, CBS checks if it ca    1135  * During the activation, CBS checks if it can reuse the current task's
1371  * runtime and period. If the deadline of the    1136  * runtime and period. If the deadline of the task is in the past, CBS
1372  * cannot use the runtime, and so it replenis    1137  * cannot use the runtime, and so it replenishes the task. This rule
1373  * works fine for implicit deadline tasks (de    1138  * works fine for implicit deadline tasks (deadline == period), and the
1374  * CBS was designed for implicit deadline tas    1139  * CBS was designed for implicit deadline tasks. However, a task with
1375  * constrained deadline (deadline < period) m    1140  * constrained deadline (deadline < period) might be awakened after the
1376  * deadline, but before the next period. In t    1141  * deadline, but before the next period. In this case, replenishing the
1377  * task would allow it to run for runtime / d    1142  * task would allow it to run for runtime / deadline. As in this case
1378  * deadline < period, CBS enables a task to r    1143  * deadline < period, CBS enables a task to run for more than the
1379  * runtime / period. In a very loaded system,    1144  * runtime / period. In a very loaded system, this can cause a domino
1380  * effect, making other tasks miss their dead    1145  * effect, making other tasks miss their deadlines.
1381  *                                               1146  *
1382  * To avoid this problem, in the activation o    1147  * To avoid this problem, in the activation of a constrained deadline
1383  * task after the deadline but before the nex    1148  * task after the deadline but before the next period, throttle the
1384  * task and set the replenishing timer to the    1149  * task and set the replenishing timer to the begin of the next period,
1385  * unless it is boosted.                         1150  * unless it is boosted.
1386  */                                              1151  */
1387 static inline void dl_check_constrained_dl(st    1152 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1388 {                                                1153 {
1389         struct rq *rq = rq_of_dl_se(dl_se);   !! 1154         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 1155         struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1390                                                  1156 
1391         if (dl_time_before(dl_se->deadline, r    1157         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1392             dl_time_before(rq_clock(rq), dl_n    1158             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1393                 if (unlikely(is_dl_boosted(dl !! 1159                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1394                         return;                  1160                         return;
1395                 dl_se->dl_throttled = 1;         1161                 dl_se->dl_throttled = 1;
1396                 if (dl_se->runtime > 0)          1162                 if (dl_se->runtime > 0)
1397                         dl_se->runtime = 0;      1163                         dl_se->runtime = 0;
1398         }                                        1164         }
1399 }                                                1165 }
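
A concrete set of made-up parameters shows why the throttle above is needed. With runtime = 2 ms, deadline = 5 ms and period = 10 ms, admission control accounts for runtime/period = 0.2 of a CPU, but a replenishment granted on a wakeup that lands after the deadline and before the next period would let the task consume runtime/deadline = 0.4 over that window. The sketch below only prints those two ratios; none of the numbers come from the kernel.

#include <stdio.h>

int main(void)
{
        /* Hypothetical constrained-deadline reservation (deadline < period). */
        const double runtime_ms  = 2.0;
        const double deadline_ms = 5.0;
        const double period_ms   = 10.0;

        /* What admission control accounted for vs. what an unthrottled
         * post-deadline replenishment would actually allow. */
        printf("reserved  bandwidth: runtime/period   = %.2f\n",
               runtime_ms / period_ms);
        printf("effective bandwidth: runtime/deadline = %.2f\n",
               runtime_ms / deadline_ms);
        return 0;
}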
1400                                                  1166 
1401 static                                           1167 static
1402 int dl_runtime_exceeded(struct sched_dl_entit    1168 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1403 {                                                1169 {
1404         return (dl_se->runtime <= 0);            1170         return (dl_se->runtime <= 0);
1405 }                                                1171 }
1406                                                  1172 
                                                   >> 1173 extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
                                                   >> 1174 
1407 /*                                               1175 /*
1408  * This function implements the GRUB accounti !! 1176  * This function implements the GRUB accounting rule:
1409  * GRUB reclaiming algorithm, the runtime is  !! 1177  * according to the GRUB reclaiming algorithm, the runtime is
1410  * but as "dq = -(max{u, (Umax - Uinact - Uex !! 1178  * not decreased as "dq = -dt", but as
                                                   >> 1179  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1411  * where u is the utilization of the task, Um    1180  * where u is the utilization of the task, Umax is the maximum reclaimable
1412  * utilization, Uinact is the (per-runqueue)     1181  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1413  * as the difference between the "total runqu    1182  * as the difference between the "total runqueue utilization" and the
1414  * "runqueue active utilization", and Uextra  !! 1183  * runqueue active utilization, and Uextra is the (per runqueue) extra
1415  * reclaimable utilization.                      1184  * reclaimable utilization.
1416  * Since rq->dl.running_bw and rq->dl.this_bw !! 1185  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1417  * by 2^BW_SHIFT, the result has to be shifte !! 1186  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1418  * Since rq->dl.bw_ratio contains 1 / Umax mu !! 1187  * BW_SHIFT.
1419  * is multiplied by rq->dl.bw_ratio and shift !! 1188  * Since rq->dl.bw_ratio contains 1 / Umax multipled by 2^RATIO_SHIFT,
1420  * Since delta is a 64 bit variable, to have  !! 1189  * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1421  * larger than 2^(64 - 20 - 8), which is more !! 1190  * Since delta is a 64 bit variable, to have an overflow its value
1422  * not an issue here.                         !! 1191  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
                                                   >> 1192  * So, overflow is not an issue here.
1423  */                                              1193  */
1424 static u64 grub_reclaim(u64 delta, struct rq     1194 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1425 {                                                1195 {
1426         u64 u_act;                            << 
1427         u64 u_inact = rq->dl.this_bw - rq->dl    1196         u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
                                                   >> 1197         u64 u_act;
                                                   >> 1198         u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1428                                                  1199 
1429         /*                                       1200         /*
1430          * Instead of computing max{u, (u_max !! 1201          * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1431          * compare u_inact + u_extra with u_m !! 1202          * we compare u_inact + rq->dl.extra_bw with
1432          * can be larger than u_max. So, u_ma !! 1203          * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1433          * negative leading to wrong results. !! 1204          * u_inact + rq->dl.extra_bw can be larger than
                                                   >> 1205          * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative
                                                   >> 1206          * leading to wrong results)
1434          */                                      1207          */
1435         if (u_inact + rq->dl.extra_bw > rq->d !! 1208         if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1436                 u_act = dl_se->dl_bw;         !! 1209                 u_act = u_act_min;
1437         else                                     1210         else
1438                 u_act = rq->dl.max_bw - u_ina !! 1211                 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1439                                                  1212 
1440         u_act = (u_act * rq->dl.bw_ratio) >>  << 
1441         return (delta * u_act) >> BW_SHIFT;      1213         return (delta * u_act) >> BW_SHIFT;
1442 }                                                1214 }
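
Because the fixed-point shifts make the rule above easy to misread, here is a stand-alone mirror of the arithmetic with the rq and dl_se fields replaced by plain parameters. BW_SHIFT = 20 and RATIO_SHIFT = 8 follow from the 2^(64 - 20 - 8) overflow remark in the comment; the sample utilizations (u = 0.25, Umax = 0.95, Uextra = 0.70, Uinact = 0) are invented, and the helper is a sketch rather than the kernel's grub_reclaim().

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT        20
#define BW_UNIT         (1ULL << BW_SHIFT)
#define RATIO_SHIFT     8

/* dq = -max{u, (Umax - Uinact - Uextra)} / Umax * dt, in fixed point. */
static uint64_t grub_reclaim_sketch(uint64_t delta, uint64_t dl_bw,
                                    uint64_t u_inact, uint64_t extra_bw,
                                    uint64_t max_bw, uint64_t bw_ratio)
{
        uint64_t u_act;

        if (u_inact + extra_bw > max_bw - dl_bw)
                u_act = dl_bw;
        else
                u_act = max_bw - u_inact - extra_bw;

        u_act = (u_act * bw_ratio) >> RATIO_SHIFT;      /* divide by Umax */
        return (delta * u_act) >> BW_SHIFT;
}

int main(void)
{
        uint64_t u        = BW_UNIT / 4;                /* task uses 0.25  */
        uint64_t umax     = BW_UNIT * 95 / 100;         /* Umax     = 0.95 */
        uint64_t uextra   = BW_UNIT * 70 / 100;         /* Uextra   = 0.70 */
        uint64_t uinact   = 0;                          /* Uinact   = 0    */
        uint64_t bw_ratio = (BW_UNIT << RATIO_SHIFT) / umax;   /* 1/Umax  */

        /* One 1 ms tick of wall-clock execution ... */
        uint64_t charged = grub_reclaim_sketch(1000000ULL, u, uinact,
                                               uextra, umax, bw_ratio);

        /* ... is charged as roughly delta * u / Umax of runtime. */
        printf("charged %llu ns of runtime\n", (unsigned long long)charged);
        return 0;
}

With those inputs a 1 ms tick is charged as roughly delta * u / Umax, about 0.26 ms, which is how a lone reclaiming task gets to run up to Umax of the CPU.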
1443                                                  1215 
1444 s64 dl_scaled_delta_exec(struct rq *rq, struc !! 1216 /*
                                                   >> 1217  * Update the current task's runtime statistics (provided it is still
                                                   >> 1218  * a -deadline task and has not been removed from the dl_rq).
                                                   >> 1219  */
                                                   >> 1220 static void update_curr_dl(struct rq *rq)
1445 {                                                1221 {
1446         s64 scaled_delta_exec;                !! 1222         struct task_struct *curr = rq->curr;
                                                   >> 1223         struct sched_dl_entity *dl_se = &curr->dl;
                                                   >> 1224         u64 delta_exec, scaled_delta_exec;
                                                   >> 1225         int cpu = cpu_of(rq);
                                                   >> 1226         u64 now;
                                                   >> 1227 
                                                   >> 1228         if (!dl_task(curr) || !on_dl_rq(dl_se))
                                                   >> 1229                 return;
                                                   >> 1230 
                                                   >> 1231         /*
                                                   >> 1232          * Consumed budget is computed considering the time as
                                                   >> 1233          * observed by schedulable tasks (excluding time spent
                                                   >> 1234          * in hardirq context, etc.). Deadlines are instead
                                                   >> 1235          * computed using hard walltime. This seems to be the more
                                                   >> 1236          * natural solution, but the full ramifications of this
                                                   >> 1237          * approach need further study.
                                                   >> 1238          */
                                                   >> 1239         now = rq_clock_task(rq);
                                                   >> 1240         delta_exec = now - curr->se.exec_start;
                                                   >> 1241         if (unlikely((s64)delta_exec <= 0)) {
                                                   >> 1242                 if (unlikely(dl_se->dl_yielded))
                                                   >> 1243                         goto throttle;
                                                   >> 1244                 return;
                                                   >> 1245         }
                                                   >> 1246 
                                                   >> 1247         schedstat_set(curr->se.statistics.exec_max,
                                                   >> 1248                       max(curr->se.statistics.exec_max, delta_exec));
                                                   >> 1249 
                                                   >> 1250         curr->se.sum_exec_runtime += delta_exec;
                                                   >> 1251         account_group_exec_runtime(curr, delta_exec);
                                                   >> 1252 
                                                   >> 1253         curr->se.exec_start = now;
                                                   >> 1254         cgroup_account_cputime(curr, delta_exec);
                                                   >> 1255 
                                                   >> 1256         if (dl_entity_is_special(dl_se))
                                                   >> 1257                 return;
1447                                                  1258 
1448         /*                                       1259         /*
1449          * For tasks that participate in GRUB    1260          * For tasks that participate in GRUB, we implement GRUB-PA: the
1450          * spare reclaimed bandwidth is used     1261          * spare reclaimed bandwidth is used to clock down frequency.
1451          *                                       1262          *
1452          * For the others, we still need to s    1263          * For the others, we still need to scale reservation parameters
1453          * according to current frequency and    1264          * according to current frequency and CPU maximum capacity.
1454          */                                      1265          */
1455         if (unlikely(dl_se->flags & SCHED_FLA    1266         if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1456                 scaled_delta_exec = grub_recl !! 1267                 scaled_delta_exec = grub_reclaim(delta_exec,
                                                   >> 1268                                                  rq,
                                                   >> 1269                                                  &curr->dl);
1457         } else {                                 1270         } else {
1458                 int cpu = cpu_of(rq);         << 
1459                 unsigned long scale_freq = ar    1271                 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1460                 unsigned long scale_cpu = arc    1272                 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1461                                                  1273 
1462                 scaled_delta_exec = cap_scale    1274                 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1463                 scaled_delta_exec = cap_scale    1275                 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1464         }                                        1276         }
1465                                                  1277 
1466         return scaled_delta_exec;             << 
1467 }                                             << 
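
In the non-reclaiming branch above, the two cap_scale() steps are plain fixed-point multiplications by the frequency and CPU-capacity factors, both expressed out of 1024. The snippet below reproduces only that arithmetic with invented scale values; it is not the kernel macro.

#include <stdio.h>
#include <stdint.h>

#define CAPACITY_SHIFT  10      /* scale values are given out of 1024 */

/* Multiply a time delta by a capacity/frequency factor out of 1024. */
static uint64_t cap_scale_sketch(uint64_t delta, unsigned long scale)
{
        return (delta * scale) >> CAPACITY_SHIFT;
}

int main(void)
{
        uint64_t delta_exec = 1000000;  /* 1 ms of wall-clock execution */
        unsigned long scale_freq = 512; /* CPU running at half of fmax  */
        unsigned long scale_cpu = 1024; /* big core at full capacity    */

        uint64_t scaled = cap_scale_sketch(delta_exec, scale_freq);
        scaled = cap_scale_sketch(scaled, scale_cpu);

        printf("charged %llu ns\n", (unsigned long long)scaled);
        return 0;
}

Read this way, runtime is consumed in full-capacity units: the same wall-clock slice at half frequency drains only half the budget.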
1468                                               << 
1469 static inline void                            << 
1470 update_stats_dequeue_dl(struct dl_rq *dl_rq,  << 
1471                         int flags);           << 
1472 static void update_curr_dl_se(struct rq *rq,  << 
1473 {                                             << 
1474         s64 scaled_delta_exec;                << 
1475                                               << 
1476         if (unlikely(delta_exec <= 0)) {      << 
1477                 if (unlikely(dl_se->dl_yielde << 
1478                         goto throttle;        << 
1479                 return;                       << 
1480         }                                     << 
1481                                               << 
1482         if (dl_server(dl_se) && dl_se->dl_thr << 
1483                 return;                       << 
1484                                               << 
1485         if (dl_entity_is_special(dl_se))      << 
1486                 return;                       << 
1487                                               << 
1488         scaled_delta_exec = dl_scaled_delta_e << 
1489                                               << 
1490         dl_se->runtime -= scaled_delta_exec;     1278         dl_se->runtime -= scaled_delta_exec;
1491                                                  1279 
1492         /*                                    << 
1493          * The fair server can consume its ru << 
1494          * running as regular CFS).           << 
1495          *                                    << 
1496          * If the server consumes its entire  << 
1497          * is not required for the current pe << 
1498          * starting a new period, pushing the << 
1499          */                                   << 
1500         if (dl_se->dl_defer && dl_se->dl_thro << 
1501                 /*                            << 
1502                  * If the server was previous << 
1503                  * took place, at this point  << 
1504                  * was able to get runtime in << 
1505                  * state.                     << 
1506                  */                           << 
1507                 dl_se->dl_defer_running = 0;  << 
1508                                               << 
1509                 hrtimer_try_to_cancel(&dl_se- << 
1510                                               << 
1511                 replenish_dl_new_period(dl_se << 
1512                                               << 
1513                 /*                            << 
1514                  * Not being able to start th << 
1515                  * be started for whatever re << 
1516                  * and queue right away. Othe << 
1517                  * to what enqueue_dl_entity( << 
1518                  */                           << 
1519                 WARN_ON_ONCE(!start_dl_timer( << 
1520                                               << 
1521                 return;                       << 
1522         }                                     << 
1523                                               << 
1524 throttle:                                        1280 throttle:
1525         if (dl_runtime_exceeded(dl_se) || dl_    1281         if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1526                 dl_se->dl_throttled = 1;         1282                 dl_se->dl_throttled = 1;
1527                                                  1283 
1528                 /* If requested, inform the u    1284                 /* If requested, inform the user about runtime overruns. */
1529                 if (dl_runtime_exceeded(dl_se    1285                 if (dl_runtime_exceeded(dl_se) &&
1530                     (dl_se->flags & SCHED_FLA    1286                     (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1531                         dl_se->dl_overrun = 1    1287                         dl_se->dl_overrun = 1;
1532                                                  1288 
1533                 dequeue_dl_entity(dl_se, 0);  !! 1289                 __dequeue_task_dl(rq, curr, 0);
1534                 if (!dl_server(dl_se)) {      !! 1290                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1535                         update_stats_dequeue_ !! 1291                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1536                         dequeue_pushable_dl_t << 
1537                 }                             << 
1538                                                  1292 
1539                 if (unlikely(is_dl_boosted(dl !! 1293                 if (!is_leftmost(curr, &rq->dl))
1540                         if (dl_server(dl_se)) << 
1541                                 enqueue_dl_en << 
1542                         else                  << 
1543                                 enqueue_task_ << 
1544                 }                             << 
1545                                               << 
1546                 if (!is_leftmost(dl_se, &rq-> << 
1547                         resched_curr(rq);        1294                         resched_curr(rq);
1548         }                                        1295         }
1549                                                  1296 
1550         /*                                       1297         /*
1551          * The fair server (sole dl_server) d << 
1552          * workload because it is running fai << 
1553          */                                   << 
1554         if (dl_se == &rq->fair_server)        << 
1555                 return;                       << 
1556                                               << 
1557 #ifdef CONFIG_RT_GROUP_SCHED                  << 
1558         /*                                    << 
1559          * Because -- for now -- we share the    1298          * Because -- for now -- we share the rt bandwidth, we need to
1560          * account our runtime there too, oth    1299          * account our runtime there too, otherwise actual rt tasks
1561          * would be able to exceed the shared    1300          * would be able to exceed the shared quota.
1562          *                                       1301          *
1563          * Account to the root rt group for n    1302          * Account to the root rt group for now.
1564          *                                       1303          *
1565          * The solution we're working towards    1304          * The solution we're working towards is having the RT groups scheduled
1566          * using deadline servers -- however     1305          * using deadline servers -- however there's a few nasties to figure
1567          * out before that can happen.           1306          * out before that can happen.
1568          */                                      1307          */
1569         if (rt_bandwidth_enabled()) {            1308         if (rt_bandwidth_enabled()) {
1570                 struct rt_rq *rt_rq = &rq->rt    1309                 struct rt_rq *rt_rq = &rq->rt;
1571                                                  1310 
1572                 raw_spin_lock(&rt_rq->rt_runt    1311                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1573                 /*                               1312                 /*
1574                  * We'll let actual RT tasks     1313                  * We'll let actual RT tasks worry about the overflow here, we
1575                  * have our own CBS to keep u    1314                  * have our own CBS to keep us inline; only account when RT
1576                  * bandwidth is relevant.        1315                  * bandwidth is relevant.
1577                  */                              1316                  */
1578                 if (sched_rt_bandwidth_accoun    1317                 if (sched_rt_bandwidth_account(rt_rq))
1579                         rt_rq->rt_time += del    1318                         rt_rq->rt_time += delta_exec;
1580                 raw_spin_unlock(&rt_rq->rt_ru    1319                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1581         }                                        1320         }
1582 #endif                                        << 
1583 }                                             << 
1584                                               << 
1585 /*                                            << 
1586  * In the non-defer mode, the idle time is no << 
1587  * server provides a guarantee.               << 
1588  *                                            << 
1589  * If the dl_server is in defer mode, the idl << 
1590  * as time available for the fair server, avo << 
1591  * rt scheduler that did not consume that ti  << 
1592  */                                           << 
1593 void dl_server_update_idle_time(struct rq *rq << 
1594 {                                             << 
1595         s64 delta_exec, scaled_delta_exec;    << 
1596                                               << 
1597         if (!rq->fair_server.dl_defer)        << 
1598                 return;                       << 
1599                                               << 
1600         /* no need to discount more */        << 
1601         if (rq->fair_server.runtime < 0)      << 
1602                 return;                       << 
1603                                               << 
1604         delta_exec = rq_clock_task(rq) - p->s << 
1605         if (delta_exec < 0)                   << 
1606                 return;                       << 
1607                                               << 
1608         scaled_delta_exec = dl_scaled_delta_e << 
1609                                               << 
1610         rq->fair_server.runtime -= scaled_del << 
1611                                               << 
1612         if (rq->fair_server.runtime < 0) {    << 
1613                 rq->fair_server.dl_defer_runn << 
1614                 rq->fair_server.runtime = 0;  << 
1615         }                                     << 
1616                                               << 
1617         p->se.exec_start = rq_clock_task(rq); << 
1618 }                                             << 
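
For the defer case, the net effect of the function above is that idle wall-clock time drains the fair server's budget as if the server had been running, so idle gaps are not later paid for again by kicking the deferred server in. A rough, made-up illustration of that bookkeeping, ignoring the capacity scaling and the dl_defer_running handling:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t server_runtime = 50000000;      /* 50 ms budget per period  */
        int64_t idle_delta     = 10000000;      /* CPU just idled for 10 ms */

        /* Idle time is charged to the deferred server like execution time. */
        server_runtime -= idle_delta;
        if (server_runtime < 0)
                server_runtime = 0;             /* nothing left to defer    */

        printf("fair server budget left: %lld ns\n",
               (long long)server_runtime);
        return 0;
}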
1619                                               << 
1620 void dl_server_update(struct sched_dl_entity  << 
1621 {                                             << 
1622         /* 0 runtime = fair server disabled * << 
1623         if (dl_se->dl_runtime)                << 
1624                 update_curr_dl_se(dl_se->rq,  << 
1625 }                                             << 
1626                                               << 
1627 void dl_server_start(struct sched_dl_entity * << 
1628 {                                             << 
1629         struct rq *rq = dl_se->rq;            << 
1630                                               << 
1631         /*                                    << 
1632          * XXX: the apply does not work fine at << 
1633          * fair server because things are not << 
1634          * this before getting generic.       << 
1635          */                                   << 
1636         if (!dl_server(dl_se)) {              << 
1637                 u64 runtime =  50 * NSEC_PER_ << 
1638                 u64 period = 1000 * NSEC_PER_ << 
1639                                               << 
1640                 dl_server_apply_params(dl_se, << 
1641                                               << 
1642                 dl_se->dl_server = 1;         << 
1643                 dl_se->dl_defer = 1;          << 
1644                 setup_new_dl_entity(dl_se);   << 
1645         }                                     << 
1646                                               << 
1647         if (!dl_se->dl_runtime)               << 
1648                 return;                       << 
1649                                               << 
1650         enqueue_dl_entity(dl_se, ENQUEUE_WAKE << 
1651         if (!dl_task(dl_se->rq->curr) || dl_e << 
1652                 resched_curr(dl_se->rq);      << 
1653 }                                             << 
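
The hard-coded defaults above (50 ms of runtime every 1000 ms) amount to a 5% reservation once converted into the 2^BW_SHIFT fixed point used throughout this file. The helper below assumes the usual runtime/period conversion and is not the kernel's to_ratio(); it is only here to make the number concrete.

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT        20
#define BW_UNIT         (1ULL << BW_SHIFT)

/* Bandwidth as a fixed-point fraction: runtime / period in units of 2^-20. */
static uint64_t to_ratio_sketch(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        uint64_t runtime = 50ULL * 1000 * 1000;         /*   50 ms */
        uint64_t period  = 1000ULL * 1000 * 1000;       /* 1000 ms */
        uint64_t bw = to_ratio_sketch(period, runtime);

        printf("dl_bw = %llu (%.3f%% of BW_UNIT)\n",
               (unsigned long long)bw, 100.0 * bw / BW_UNIT);
        return 0;
}

It prints dl_bw = 52428, i.e. 0.05 * 2^20 rounded down.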
1654                                               << 
1655 void dl_server_stop(struct sched_dl_entity *d << 
1656 {                                             << 
1657         if (!dl_se->dl_runtime)               << 
1658                 return;                       << 
1659                                               << 
1660         dequeue_dl_entity(dl_se, DEQUEUE_SLEE << 
1661         hrtimer_try_to_cancel(&dl_se->dl_time << 
1662         dl_se->dl_defer_armed = 0;            << 
1663         dl_se->dl_throttled = 0;              << 
1664 }                                             << 
1665                                               << 
1666 void dl_server_init(struct sched_dl_entity *d << 
1667                     dl_server_has_tasks_f has << 
1668                     dl_server_pick_f pick_tas << 
1669 {                                             << 
1670         dl_se->rq = rq;                       << 
1671         dl_se->server_has_tasks = has_tasks;  << 
1672         dl_se->server_pick_task = pick_task;  << 
1673 }                                             << 
1674                                               << 
1675 void __dl_server_attach_root(struct sched_dl_ << 
1676 {                                             << 
1677         u64 new_bw = dl_se->dl_bw;            << 
1678         int cpu = cpu_of(rq);                 << 
1679         struct dl_bw *dl_b;                   << 
1680                                               << 
1681         dl_b = dl_bw_of(cpu_of(rq));          << 
1682         guard(raw_spinlock)(&dl_b->lock);     << 
1683                                               << 
1684         if (!dl_bw_cpus(cpu))                 << 
1685                 return;                       << 
1686                                               << 
1687         __dl_add(dl_b, new_bw, dl_bw_cpus(cpu << 
1688 }                                             << 
1689                                               << 
1690 int dl_server_apply_params(struct sched_dl_en << 
1691 {                                             << 
1692         u64 old_bw = init ? 0 : to_ratio(dl_s << 
1693         u64 new_bw = to_ratio(period, runtime << 
1694         struct rq *rq = dl_se->rq;            << 
1695         int cpu = cpu_of(rq);                 << 
1696         struct dl_bw *dl_b;                   << 
1697         unsigned long cap;                    << 
1698         int retval = 0;                       << 
1699         int cpus;                             << 
1700                                               << 
1701         dl_b = dl_bw_of(cpu);                 << 
1702         guard(raw_spinlock)(&dl_b->lock);     << 
1703                                               << 
1704         cpus = dl_bw_cpus(cpu);               << 
1705         cap = dl_bw_capacity(cpu);            << 
1706                                               << 
1707         if (__dl_overflow(dl_b, cap, old_bw,  << 
1708                 return -EBUSY;                << 
1709                                               << 
1710         if (init) {                           << 
1711                 __add_rq_bw(new_bw, &rq->dl); << 
1712                 __dl_add(dl_b, new_bw, cpus); << 
1713         } else {                              << 
1714                 __dl_sub(dl_b, dl_se->dl_bw,  << 
1715                 __dl_add(dl_b, new_bw, cpus); << 
1716                                               << 
1717                 dl_rq_change_utilization(rq,  << 
1718         }                                     << 
1719                                               << 
1720         dl_se->dl_runtime = runtime;          << 
1721         dl_se->dl_deadline = period;          << 
1722         dl_se->dl_period = period;            << 
1723                                               << 
1724         dl_se->runtime = 0;                   << 
1725         dl_se->deadline = 0;                  << 
1726                                               << 
1727         dl_se->dl_bw = to_ratio(dl_se->dl_per << 
1728         dl_se->dl_density = to_ratio(dl_se->d << 
1729                                               << 
1730         return retval;                        << 
1731 }                                             << 
1732                                               << 
1733 /*                                            << 
1734  * Update the current task's runtime statisti << 
1735  * a -deadline task and has not been removed  << 
1736  */                                           << 
1737 static void update_curr_dl(struct rq *rq)     << 
1738 {                                             << 
1739         struct task_struct *curr = rq->curr;  << 
1740         struct sched_dl_entity *dl_se = &curr << 
1741         s64 delta_exec;                       << 
1742                                               << 
1743         if (!dl_task(curr) || !on_dl_rq(dl_se << 
1744                 return;                       << 
1745                                               << 
1746         /*                                    << 
1747          * Consumed budget is computed consid << 
1748          * observed by schedulable tasks (exc << 
1749          * in hardirq context, etc.). Deadlin << 
1750          * computed using hard walltime. This << 
1751          * natural solution, but the full ram << 
1752          * approach need further study.       << 
1753          */                                   << 
1754         delta_exec = update_curr_common(rq);  << 
1755         update_curr_dl_se(rq, dl_se, delta_ex << 
1756 }                                                1321 }
1757                                                  1322 
1758 static enum hrtimer_restart inactive_task_tim    1323 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1759 {                                                1324 {
1760         struct sched_dl_entity *dl_se = conta    1325         struct sched_dl_entity *dl_se = container_of(timer,
1761                                                  1326                                                      struct sched_dl_entity,
1762                                                  1327                                                      inactive_timer);
1763         struct task_struct *p = NULL;         !! 1328         struct task_struct *p = dl_task_of(dl_se);
1764         struct rq_flags rf;                      1329         struct rq_flags rf;
1765         struct rq *rq;                           1330         struct rq *rq;
1766                                                  1331 
1767         if (!dl_server(dl_se)) {              !! 1332         rq = task_rq_lock(p, &rf);
1768                 p = dl_task_of(dl_se);        << 
1769                 rq = task_rq_lock(p, &rf);    << 
1770         } else {                              << 
1771                 rq = dl_se->rq;               << 
1772                 rq_lock(rq, &rf);             << 
1773         }                                     << 
1774                                                  1333 
1775         sched_clock_tick();                      1334         sched_clock_tick();
1776         update_rq_clock(rq);                     1335         update_rq_clock(rq);
1777                                                  1336 
1778         if (dl_server(dl_se))                 !! 1337         if (!dl_task(p) || p->state == TASK_DEAD) {
1779                 goto no_task;                 << 
1780                                               << 
1781         if (!dl_task(p) || READ_ONCE(p->__sta << 
1782                 struct dl_bw *dl_b = dl_bw_of    1338                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1783                                                  1339 
1784                 if (READ_ONCE(p->__state) ==  !! 1340                 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
1785                         sub_running_bw(&p->dl    1341                         sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1786                         sub_rq_bw(&p->dl, dl_    1342                         sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1787                         dl_se->dl_non_contend    1343                         dl_se->dl_non_contending = 0;
1788                 }                                1344                 }
1789                                                  1345 
1790                 raw_spin_lock(&dl_b->lock);      1346                 raw_spin_lock(&dl_b->lock);
1791                 __dl_sub(dl_b, p->dl.dl_bw, d    1347                 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1792                 raw_spin_unlock(&dl_b->lock);    1348                 raw_spin_unlock(&dl_b->lock);
1793                 __dl_clear_params(dl_se);     !! 1349                 __dl_clear_params(p);
1794                                                  1350 
1795                 goto unlock;                     1351                 goto unlock;
1796         }                                        1352         }
1797                                               << 
1798 no_task:                                      << 
1799         if (dl_se->dl_non_contending == 0)       1353         if (dl_se->dl_non_contending == 0)
1800                 goto unlock;                     1354                 goto unlock;
1801                                                  1355 
1802         sub_running_bw(dl_se, &rq->dl);          1356         sub_running_bw(dl_se, &rq->dl);
1803         dl_se->dl_non_contending = 0;            1357         dl_se->dl_non_contending = 0;
1804 unlock:                                          1358 unlock:
1805                                               !! 1359         task_rq_unlock(rq, p, &rf);
1806         if (!dl_server(dl_se)) {              !! 1360         put_task_struct(p);
1807                 task_rq_unlock(rq, p, &rf);   << 
1808                 put_task_struct(p);           << 
1809         } else {                              << 
1810                 rq_unlock(rq, &rf);           << 
1811         }                                     << 
1812                                                  1361 
1813         return HRTIMER_NORESTART;                1362         return HRTIMER_NORESTART;
1814 }                                                1363 }
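
When this timer is finally allowed to fire for an entity that is still non-contending, its bandwidth simply stops counting as running while it keeps counting toward the runqueue total. The toy bookkeeping below illustrates just that step; the structures and field names are invented, and the 0-lag deferral that decides when the timer fires is handled elsewhere.

#include <stdio.h>
#include <stdint.h>

/* Toy per-runqueue GRUB bookkeeping: utilizations in 2^20 fixed point. */
struct toy_dl_rq {
        uint64_t running_bw;    /* bandwidth of entities still "active" */
        uint64_t this_bw;       /* bandwidth of all entities on this rq */
};

struct toy_dl_se {
        uint64_t dl_bw;
        int      dl_non_contending;     /* set when the entity blocked and
                                         * the inactive timer was armed  */
};

/* What the inactive timer does once it is allowed to fire. */
static void inactive_timer_sketch(struct toy_dl_se *se, struct toy_dl_rq *rq)
{
        if (!se->dl_non_contending)
                return;                 /* woke up again in the meantime */

        rq->running_bw -= se->dl_bw;    /* stop counting it as running   */
        se->dl_non_contending = 0;
}

int main(void)
{
        struct toy_dl_rq rq = { .running_bw = 262144, .this_bw = 262144 };
        struct toy_dl_se se = { .dl_bw = 262144, .dl_non_contending = 1 };

        inactive_timer_sketch(&se, &rq);
        printf("running_bw=%llu this_bw=%llu\n",
               (unsigned long long)rq.running_bw,
               (unsigned long long)rq.this_bw);
        return 0;
}

running_bw drops to 0 while this_bw stays at 262144 (0.25 in 2^20 fixed point), mirroring the sub_running_bw()-only path above; only the TASK_DEAD branch also drops the rq total.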
1815                                                  1364 
1816 static void init_dl_inactive_task_timer(struc !! 1365 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1817 {                                                1366 {
1818         struct hrtimer *timer = &dl_se->inact    1367         struct hrtimer *timer = &dl_se->inactive_timer;
1819                                                  1368 
1820         hrtimer_init(timer, CLOCK_MONOTONIC,     1369         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1821         timer->function = inactive_task_timer    1370         timer->function = inactive_task_timer;
1822 }                                                1371 }
1823                                                  1372 
1824 #define __node_2_dle(node) \                  << 
1825         rb_entry((node), struct sched_dl_enti << 
1826                                               << 
1827 #ifdef CONFIG_SMP                                1373 #ifdef CONFIG_SMP
1828                                                  1374 
1829 static void inc_dl_deadline(struct dl_rq *dl_    1375 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1830 {                                                1376 {
1831         struct rq *rq = rq_of_dl_rq(dl_rq);      1377         struct rq *rq = rq_of_dl_rq(dl_rq);
1832                                                  1378 
1833         if (dl_rq->earliest_dl.curr == 0 ||      1379         if (dl_rq->earliest_dl.curr == 0 ||
1834             dl_time_before(deadline, dl_rq->e    1380             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1835                 if (dl_rq->earliest_dl.curr = << 
1836                         cpupri_set(&rq->rd->c << 
1837                 dl_rq->earliest_dl.curr = dea    1381                 dl_rq->earliest_dl.curr = deadline;
1838                 cpudl_set(&rq->rd->cpudl, rq-    1382                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1839         }                                        1383         }
1840 }                                                1384 }
1841                                                  1385 
1842 static void dec_dl_deadline(struct dl_rq *dl_    1386 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1843 {                                                1387 {
1844         struct rq *rq = rq_of_dl_rq(dl_rq);      1388         struct rq *rq = rq_of_dl_rq(dl_rq);
1845                                                  1389 
1846         /*                                       1390         /*
1847          * Since we may have removed our earl    1391          * Since we may have removed our earliest (and/or next earliest)
1848          * task we must recompute them.          1392          * task we must recompute them.
1849          */                                      1393          */
1850         if (!dl_rq->dl_nr_running) {             1394         if (!dl_rq->dl_nr_running) {
1851                 dl_rq->earliest_dl.curr = 0;     1395                 dl_rq->earliest_dl.curr = 0;
1852                 dl_rq->earliest_dl.next = 0;     1396                 dl_rq->earliest_dl.next = 0;
1853                 cpudl_clear(&rq->rd->cpudl, r    1397                 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1854                 cpupri_set(&rq->rd->cpupri, r << 
1855         } else {                                 1398         } else {
1856                 struct rb_node *leftmost = rb !! 1399                 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1857                 struct sched_dl_entity *entry !! 1400                 struct sched_dl_entity *entry;
1858                                                  1401 
                                                   >> 1402                 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1859                 dl_rq->earliest_dl.curr = ent    1403                 dl_rq->earliest_dl.curr = entry->deadline;
1860                 cpudl_set(&rq->rd->cpudl, rq-    1404                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1861         }                                        1405         }
1862 }                                                1406 }
1863                                                  1407 
1864 #else                                            1408 #else
1865                                                  1409 
1866 static inline void inc_dl_deadline(struct dl_    1410 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1867 static inline void dec_dl_deadline(struct dl_    1411 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1868                                                  1412 
1869 #endif /* CONFIG_SMP */                          1413 #endif /* CONFIG_SMP */
1870                                                  1414 
1871 static inline                                    1415 static inline
1872 void inc_dl_tasks(struct sched_dl_entity *dl_    1416 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1873 {                                                1417 {
                                                   >> 1418         int prio = dl_task_of(dl_se)->prio;
1874         u64 deadline = dl_se->deadline;          1419         u64 deadline = dl_se->deadline;
1875                                                  1420 
                                                   >> 1421         WARN_ON(!dl_prio(prio));
1876         dl_rq->dl_nr_running++;                  1422         dl_rq->dl_nr_running++;
1877         add_nr_running(rq_of_dl_rq(dl_rq), 1)    1423         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1878                                                  1424 
1879         inc_dl_deadline(dl_rq, deadline);        1425         inc_dl_deadline(dl_rq, deadline);
                                                   >> 1426         inc_dl_migration(dl_se, dl_rq);
1880 }                                                1427 }
1881                                                  1428 
1882 static inline                                    1429 static inline
1883 void dec_dl_tasks(struct sched_dl_entity *dl_    1430 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1884 {                                                1431 {
                                                   >> 1432         int prio = dl_task_of(dl_se)->prio;
                                                   >> 1433 
                                                   >> 1434         WARN_ON(!dl_prio(prio));
1885         WARN_ON(!dl_rq->dl_nr_running);          1435         WARN_ON(!dl_rq->dl_nr_running);
1886         dl_rq->dl_nr_running--;                  1436         dl_rq->dl_nr_running--;
1887         sub_nr_running(rq_of_dl_rq(dl_rq), 1)    1437         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1888                                                  1438 
1889         dec_dl_deadline(dl_rq, dl_se->deadlin    1439         dec_dl_deadline(dl_rq, dl_se->deadline);
1890 }                                             !! 1440         dec_dl_migration(dl_se, dl_rq);
1891                                               << 
1892 static inline bool __dl_less(struct rb_node * << 
1893 {                                             << 
1894         return dl_time_before(__node_2_dle(a) << 
1895 }                                             << 
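
This comparator keeps the deadline rb-tree in EDF order using dl_time_before(), which, like the scheduler's other clock comparisons, is assumed here to be the wrap-around-safe signed difference. The userspace sketch below shows why the signed cast matters; the helper mirrors that assumed definition rather than the kernel's.

#include <stdio.h>
#include <stdint.h>

/* Wrap-around-safe "a is earlier than b" for 64-bit monotonic timestamps. */
static int dl_time_before_sketch(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

int main(void)
{
        /* Two deadlines straddling a u64 wrap of the clock. */
        uint64_t before_wrap = UINT64_MAX - 1000;       /* just before wrap */
        uint64_t after_wrap  = 5000;                    /* just after wrap  */

        /* A plain "<" gets this backwards; the signed difference does not. */
        printf("naive   : %d\n", before_wrap < after_wrap);          /* 0 */
        printf("dl_time : %d\n", dl_time_before_sketch(before_wrap,
                                                       after_wrap)); /* 1 */
        return 0;
}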
1896                                               << 
1897 static __always_inline struct sched_statistic << 
1898 __schedstats_from_dl_se(struct sched_dl_entit << 
1899 {                                             << 
1900         if (!schedstat_enabled())             << 
1901                 return NULL;                  << 
1902                                               << 
1903         if (dl_server(dl_se))                 << 
1904                 return NULL;                  << 
1905                                               << 
1906         return &dl_task_of(dl_se)->stats;     << 
1907 }                                             << 
1908                                               << 
1909 static inline void                            << 
1910 update_stats_wait_start_dl(struct dl_rq *dl_r << 
1911 {                                             << 
1912         struct sched_statistics *stats = __sc << 
1913         if (stats)                            << 
1914                 __update_stats_wait_start(rq_ << 
1915 }                                             << 
1916                                               << 
1917 static inline void                            << 
1918 update_stats_wait_end_dl(struct dl_rq *dl_rq, << 
1919 {                                             << 
1920         struct sched_statistics *stats = __sc << 
1921         if (stats)                            << 
1922                 __update_stats_wait_end(rq_of << 
1923 }                                             << 
1924                                               << 
1925 static inline void                            << 
1926 update_stats_enqueue_sleeper_dl(struct dl_rq  << 
1927 {                                             << 
1928         struct sched_statistics *stats = __sc << 
1929         if (stats)                            << 
1930                 __update_stats_enqueue_sleepe << 
1931 }                                             << 
1932                                               << 
1933 static inline void                            << 
1934 update_stats_enqueue_dl(struct dl_rq *dl_rq,  << 
1935                         int flags)            << 
1936 {                                             << 
1937         if (!schedstat_enabled())             << 
1938                 return;                       << 
1939                                               << 
1940         if (flags & ENQUEUE_WAKEUP)           << 
1941                 update_stats_enqueue_sleeper_ << 
1942 }                                             << 
1943                                               << 
1944 static inline void                            << 
1945 update_stats_dequeue_dl(struct dl_rq *dl_rq,  << 
1946                         int flags)            << 
1947 {                                             << 
1948         struct task_struct *p = dl_task_of(dl << 
1949                                               << 
1950         if (!schedstat_enabled())             << 
1951                 return;                       << 
1952                                               << 
1953         if ((flags & DEQUEUE_SLEEP)) {        << 
1954                 unsigned int state;           << 
1955                                               << 
1956                 state = READ_ONCE(p->__state) << 
1957                 if (state & TASK_INTERRUPTIBL << 
1958                         __schedstat_set(p->st << 
1959                                         rq_cl << 
1960                                               << 
1961                 if (state & TASK_UNINTERRUPTI << 
1962                         __schedstat_set(p->st << 
1963                                         rq_cl << 
1964         }                                     << 
1965 }                                                1441 }
1966                                                  1442 
1967 static void __enqueue_dl_entity(struct sched_    1443 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1968 {                                                1444 {
1969         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1445         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
                                                   >> 1446         struct rb_node **link = &dl_rq->root.rb_root.rb_node;
                                                   >> 1447         struct rb_node *parent = NULL;
                                                   >> 1448         struct sched_dl_entity *entry;
                                                   >> 1449         int leftmost = 1;
                                                   >> 1450 
                                                   >> 1451         BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
                                                   >> 1452 
                                                   >> 1453         while (*link) {
                                                   >> 1454                 parent = *link;
                                                   >> 1455                 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                                                   >> 1456                 if (dl_time_before(dl_se->deadline, entry->deadline))
                                                   >> 1457                         link = &parent->rb_left;
                                                   >> 1458                 else {
                                                   >> 1459                         link = &parent->rb_right;
                                                   >> 1460                         leftmost = 0;
                                                   >> 1461                 }
                                                   >> 1462         }
1970                                                  1463 
1971         WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->r !! 1464         rb_link_node(&dl_se->rb_node, parent, link);
1972                                               !! 1465         rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
1973         rb_add_cached(&dl_se->rb_node, &dl_rq << 
1974                                                  1466 
1975         inc_dl_tasks(dl_se, dl_rq);              1467         inc_dl_tasks(dl_se, dl_rq);
1976 }                                                1468 }
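On the 6.12 side the open-coded rbtree walk kept on the 5.9 side is replaced by rb_add_cached() with the __dl_less() comparator above; in both versions the dl_rq stays ordered by absolute deadline through a wraparound-safe signed comparison, so the leftmost node is always the EDF choice. A minimal user-space sketch of that ordering rule, using toy types rather than the kernel's rbtree API:

/* Illustrative only: user-space model of the EDF ordering kept in the dl_rq.
 * Toy types; not the kernel API. */
#include <stdio.h>
#include <stdint.h>

/* Same idea as the kernel's dl_time_before(): wraparound-safe compare. */
static int toy_dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

struct toy_dl_se {
	const char *name;
	uint64_t deadline;		/* absolute deadline, ns */
};

/* The "leftmost" entity is the one every other deadline is after. */
static const struct toy_dl_se *toy_pick_earliest(const struct toy_dl_se *v, int n)
{
	const struct toy_dl_se *best = &v[0];

	for (int i = 1; i < n; i++)
		if (toy_dl_time_before(v[i].deadline, best->deadline))
			best = &v[i];
	return best;
}

int main(void)
{
	struct toy_dl_se rq[] = {
		{ "A", 3000000 }, { "B", 1500000 }, { "C", 2200000 },
	};

	printf("EDF pick: %s\n", toy_pick_earliest(rq, 3)->name);	/* B */
	return 0;
}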
1977                                                  1469 
1978 static void __dequeue_dl_entity(struct sched_    1470 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1979 {                                                1471 {
1980         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1472         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1981                                                  1473 
1982         if (RB_EMPTY_NODE(&dl_se->rb_node))      1474         if (RB_EMPTY_NODE(&dl_se->rb_node))
1983                 return;                          1475                 return;
1984                                                  1476 
1985         rb_erase_cached(&dl_se->rb_node, &dl_    1477         rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1986                                               << 
1987         RB_CLEAR_NODE(&dl_se->rb_node);          1478         RB_CLEAR_NODE(&dl_se->rb_node);
1988                                                  1479 
1989         dec_dl_tasks(dl_se, dl_rq);              1480         dec_dl_tasks(dl_se, dl_rq);
1990 }                                                1481 }
1991                                                  1482 
1992 static void                                      1483 static void
1993 enqueue_dl_entity(struct sched_dl_entity *dl_ !! 1484 enqueue_dl_entity(struct sched_dl_entity *dl_se,
                                                   >> 1485                   struct sched_dl_entity *pi_se, int flags)
                                                   >> 1486 {
                                                   >> 1487         BUG_ON(on_dl_rq(dl_se));
                                                   >> 1488 
                                                   >> 1489         /*
                                                   >> 1490          * If this is a wakeup or a new instance, the scheduling
                                                   >> 1491          * parameters of the task might need updating. Otherwise,
                                                   >> 1492          * we want a replenishment of its runtime.
                                                   >> 1493          */
                                                   >> 1494         if (flags & ENQUEUE_WAKEUP) {
                                                   >> 1495                 task_contending(dl_se, flags);
                                                   >> 1496                 update_dl_entity(dl_se, pi_se);
                                                   >> 1497         } else if (flags & ENQUEUE_REPLENISH) {
                                                   >> 1498                 replenish_dl_entity(dl_se, pi_se);
                                                   >> 1499         } else if ((flags & ENQUEUE_RESTORE) &&
                                                   >> 1500                   dl_time_before(dl_se->deadline,
                                                   >> 1501                                  rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
                                                   >> 1502                 setup_new_dl_entity(dl_se);
                                                   >> 1503         }
                                                   >> 1504 
                                                   >> 1505         __enqueue_dl_entity(dl_se);
                                                   >> 1506 }
                                                   >> 1507 
                                                   >> 1508 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
                                                   >> 1509 {
                                                   >> 1510         __dequeue_dl_entity(dl_se);
                                                   >> 1511 }
                                                   >> 1512 
                                                   >> 1513 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1994 {                                                1514 {
1995         WARN_ON_ONCE(on_dl_rq(dl_se));        !! 1515         struct task_struct *pi_task = rt_mutex_get_top_task(p);
                                                   >> 1516         struct sched_dl_entity *pi_se = &p->dl;
1996                                                  1517 
1997         update_stats_enqueue_dl(dl_rq_of_se(d !! 1518         /*
                                                   >> 1519          * Use the scheduling parameters of the top pi-waiter task if:
                                                   >> 1520          * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
                                                   >> 1521          * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
                                                   >> 1522          *   smaller than our deadline OR we are a !SCHED_DEADLINE task getting
                                                   >> 1523          *   boosted due to a SCHED_DEADLINE pi-waiter).
                                                   >> 1524          * Otherwise we keep our runtime and deadline.
                                                   >> 1525          */
                                                   >> 1526         if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
                                                   >> 1527                 pi_se = &pi_task->dl;
                                                   >> 1528         } else if (!dl_prio(p->normal_prio)) {
                                                   >> 1529                 /*
                                                   >> 1530                  * Special case in which we have a !SCHED_DEADLINE task
                                                   >> 1531                  * that is going to be deboosted, but exceeds its
                                                   >> 1532                  * runtime while doing so. No point in replenishing
                                                   >> 1533                  * it, as it's going to return back to its original
                                                   >> 1534                  * scheduling class after this.
                                                   >> 1535                  */
                                                   >> 1536                 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
                                                   >> 1537                 return;
                                                   >> 1538         }
1998                                                  1539 
1999         /*                                       1540         /*
2000          * Check if a constrained deadline ta    1541          * Check if a constrained deadline task was activated
2001          * after the deadline but before the     1542          * after the deadline but before the next period.
2002          * If that is the case, the task will    1543          * If that is the case, the task will be throttled and
2003          * the replenishment timer will be se    1544          * the replenishment timer will be set to the next period.
2004          */                                      1545          */
2005         if (!dl_se->dl_throttled && !dl_is_im !! 1546         if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
2006                 dl_check_constrained_dl(dl_se !! 1547                 dl_check_constrained_dl(&p->dl);
2007                                                  1548 
2008         if (flags & (ENQUEUE_RESTORE|ENQUEUE_ !! 1549         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
2009                 struct dl_rq *dl_rq = dl_rq_o !! 1550                 add_rq_bw(&p->dl, &rq->dl);
2010                                               !! 1551                 add_running_bw(&p->dl, &rq->dl);
2011                 add_rq_bw(dl_se, dl_rq);      << 
2012                 add_running_bw(dl_se, dl_rq); << 
2013         }                                        1552         }
2014                                                  1553 
2015         /*                                       1554         /*
2016          * If p is throttled, we do not enque    1555          * If p is throttled, we do not enqueue it. In fact, if it exhausted
2017          * its budget it needs a replenishmen    1556          * its budget it needs a replenishment and, since it now is on
2018          * its rq, the bandwidth timer callba    1557          * its rq, the bandwidth timer callback (which clearly has not
2019          * run yet) will take care of this.      1558          * run yet) will take care of this.
2020          * However, the active utilization do    1559          * However, the active utilization does not depend on whether
2021          * the task is on the runqueue (but de    1560          * the task is on the runqueue (but depends on the
2022          * task's state - in GRUB parlance, "    1561          * task's state - in GRUB parlance, "inactive" vs "active contending").
2023          * In other words, even if a task is     1562          * In other words, even if a task is throttled its utilization must
2024          * be counted in the active utilizati    1563          * be counted in the active utilization; hence, we need to call
2025          * add_running_bw().                     1564          * add_running_bw().
2026          */                                      1565          */
2027         if (!dl_se->dl_defer && dl_se->dl_thr !! 1566         if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
2028                 if (flags & ENQUEUE_WAKEUP)      1567                 if (flags & ENQUEUE_WAKEUP)
2029                         task_contending(dl_se !! 1568                         task_contending(&p->dl, flags);
2030                                                  1569 
2031                 return;                          1570                 return;
2032         }                                        1571         }
2033                                                  1572 
2034         /*                                    !! 1573         enqueue_dl_entity(&p->dl, pi_se, flags);
2035          * If this is a wakeup or a new insta << 
2036          * parameters of the task might need  << 
2037          * we want a replenishment of its run << 
2038          */                                   << 
2039         if (flags & ENQUEUE_WAKEUP) {         << 
2040                 task_contending(dl_se, flags) << 
2041                 update_dl_entity(dl_se);      << 
2042         } else if (flags & ENQUEUE_REPLENISH) << 
2043                 replenish_dl_entity(dl_se);   << 
2044         } else if ((flags & ENQUEUE_RESTORE)  << 
2045                    dl_time_before(dl_se->dead << 
2046                 setup_new_dl_entity(dl_se);   << 
2047         }                                     << 
2048                                               << 
2049         /*                                    << 
2050          * If the reservation is still thrott << 
2051          * deferred task and still got to wai << 
2052          */                                   << 
2053         if (dl_se->dl_throttled && start_dl_t << 
2054                 return;                       << 
2055                                                  1574 
2056         /*                                    !! 1575         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
2057          * We're about to enqueue, make sure  !! 1576                 enqueue_pushable_dl_task(rq, p);
2058          * In case the timer was not started, << 
2059          * has passed, mark as not throttled  << 
2060          * Also cancel earlier timers, since  << 
2061          */                                   << 
2062         if (dl_se->dl_throttled) {            << 
2063                 hrtimer_try_to_cancel(&dl_se- << 
2064                 dl_se->dl_defer_armed = 0;    << 
2065                 dl_se->dl_throttled = 0;      << 
2066         }                                     << 
2067                                               << 
2068         __enqueue_dl_entity(dl_se);           << 
2069 }                                                1577 }
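On the ENQUEUE_WAKEUP path above, update_dl_entity() applies the CBS rule: the current (deadline, runtime) pair is kept unless the deadline has already passed or using the pair would exceed the reserved bandwidth, in which case a fresh deadline (now + dl_deadline) and a full runtime are granted. A rough user-space sketch of that rule; the kernel's dl_entity_overflow() does the comparison with shifted operands (DL_SCALE) to avoid 64-bit overflow, while this sketch assumes plain arithmetic is enough:

/* Illustrative only: rough model of the CBS wakeup rule. */
#include <stdio.h>
#include <stdint.h>

struct toy_dl {
	int64_t  runtime;	/* remaining runtime, ns */
	uint64_t deadline;	/* current absolute deadline, ns */
	uint64_t dl_runtime;	/* reserved runtime per period, ns */
	uint64_t dl_deadline;	/* relative deadline, ns */
};

static void toy_cbs_wakeup(struct toy_dl *dl, uint64_t now)
{
	/* Overflow check: would runtime / (deadline - now) exceed the
	 * reserved bandwidth dl_runtime / dl_deadline? */
	int overflow = dl->deadline <= now || dl->runtime < 0 ||
		(uint64_t)dl->runtime * dl->dl_deadline >
			dl->dl_runtime * (dl->deadline - now);

	if (overflow) {
		/* Grant a fresh deadline and a full runtime. */
		dl->deadline = now + dl->dl_deadline;
		dl->runtime  = (int64_t)dl->dl_runtime;
	}
	/* else: recycle the current pair unchanged */
}

int main(void)
{
	struct toy_dl dl = {
		.runtime = 800000, .deadline = 10000000,
		.dl_runtime = 1000000, .dl_deadline = 4000000,
	};

	toy_cbs_wakeup(&dl, 9000000);	/* 0.8 ms left in 1 ms: overflow */
	printf("deadline=%llu runtime=%lld\n",
	       (unsigned long long)dl.deadline, (long long)dl.runtime);
	return 0;
}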
2070                                                  1578 
2071 static void dequeue_dl_entity(struct sched_dl !! 1579 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2072 {                                                1580 {
2073         __dequeue_dl_entity(dl_se);           !! 1581         dequeue_dl_entity(&p->dl);
                                                   >> 1582         dequeue_pushable_dl_task(rq, p);
                                                   >> 1583 }
2074                                                  1584 
2075         if (flags & (DEQUEUE_SAVE|DEQUEUE_MIG !! 1585 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2076                 struct dl_rq *dl_rq = dl_rq_o !! 1586 {
                                                   >> 1587         update_curr_dl(rq);
                                                   >> 1588         __dequeue_task_dl(rq, p, flags);
2077                                                  1589 
2078                 sub_running_bw(dl_se, dl_rq); !! 1590         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
2079                 sub_rq_bw(dl_se, dl_rq);      !! 1591                 sub_running_bw(&p->dl, &rq->dl);
                                                   >> 1592                 sub_rq_bw(&p->dl, &rq->dl);
2080         }                                        1593         }
2081                                                  1594 
2082         /*                                       1595         /*
2083          * This check allows us to start the    1596          * This check allows us to start the inactive timer (or to immediately
2084          * decrease the active utilization, i    1597          * decrease the active utilization, if needed) in two cases:
2085          * when the task blocks and when it i    1598          * when the task blocks and when it is terminating
2086          * (p->state == TASK_DEAD). We can ha    1599          * (p->state == TASK_DEAD). We can handle the two cases in the same
2087          * way, because from GRUB's point of     1600          * way, because from GRUB's point of view the same thing is happening
2088          * (the task moves from "active conte    1601          * (the task moves from "active contending" to "active non contending"
2089          * or "inactive")                        1602          * or "inactive")
2090          */                                      1603          */
2091         if (flags & DEQUEUE_SLEEP)               1604         if (flags & DEQUEUE_SLEEP)
2092                 task_non_contending(dl_se);   !! 1605                 task_non_contending(p);
2093 }                                             << 
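task_non_contending(), called from the DEQUEUE_SLEEP path above, does not drop the running bandwidth right away: it arms the inactive timer at the task's 0-lag time, roughly the latest instant from which the remaining runtime could still be consumed by the deadline at the reserved rate dl_runtime/dl_period. A small sketch of that computation with plain C arithmetic (the kernel uses div64_long(), and if the 0-lag time is already in the past the utilization is decreased immediately):

/* Illustrative only: the 0-lag time used to delay the GRUB transition from
 * "active non contending" to "inactive". Assumes a positive remaining runtime. */
#include <stdio.h>
#include <stdint.h>

static uint64_t toy_zero_lag_time(uint64_t deadline, uint64_t runtime,
				  uint64_t dl_period, uint64_t dl_runtime)
{
	return deadline - runtime * dl_period / dl_runtime;
}

int main(void)
{
	/* 2 ms of runtime left, 3 ms reserved every 10 ms, deadline at 40 ms */
	uint64_t zl = toy_zero_lag_time(40000000, 2000000, 10000000, 3000000);

	printf("inactive timer fires at t=%llu ns\n", (unsigned long long)zl);
	return 0;
}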
2094                                               << 
2095 static void enqueue_task_dl(struct rq *rq, st << 
2096 {                                             << 
2097         if (is_dl_boosted(&p->dl)) {          << 
2098                 /*                            << 
2099                  * Because of delays in the d << 
2100                  * thread's runtime, it might << 
2101                  * goes to sleep in a rt mute << 
2102                  * a consequence, the thread  << 
2103                  *                            << 
2104                  * While waiting for the mute << 
2105                  * boosted via PI, resulting  << 
2106                  * and boosted at the same ti << 
2107                  *                            << 
2108                  * In this case, the boost ov << 
2109                  */                           << 
2110                 if (p->dl.dl_throttled) {     << 
2111                         /*                    << 
2112                          * The replenish time << 
2113                          * problem if it fire << 
2114                          * are ignored in dl_ << 
2115                          *                    << 
2116                          * If the timer callb << 
2117                          * it will eventually << 
2118                          */                   << 
2119                         if (hrtimer_try_to_ca << 
2120                             !dl_server(&p->dl << 
2121                                 put_task_stru << 
2122                         p->dl.dl_throttled =  << 
2123                 }                             << 
2124         } else if (!dl_prio(p->normal_prio))  << 
2125                 /*                            << 
2126                  * Special case in which we h << 
2127                  * to be deboosted, but excee << 
2128                  * replenishing it, as it's g << 
2129                  * scheduling class after thi << 
2130                  * clear the flag, otherwise  << 
2131                  * being boosted again with n << 
2132                  * the throttle.              << 
2133                  */                           << 
2134                 p->dl.dl_throttled = 0;       << 
2135                 if (!(flags & ENQUEUE_REPLENI << 
2136                         printk_deferred_once( << 
2137                                               << 
2138                                               << 
2139                 return;                       << 
2140         }                                     << 
2141                                               << 
2142         check_schedstat_required();           << 
2143         update_stats_wait_start_dl(dl_rq_of_s << 
2144                                               << 
2145         if (p->on_rq == TASK_ON_RQ_MIGRATING) << 
2146                 flags |= ENQUEUE_MIGRATING;   << 
2147                                               << 
2148         enqueue_dl_entity(&p->dl, flags);     << 
2149                                               << 
2150         if (dl_server(&p->dl))                << 
2151                 return;                       << 
2152                                               << 
2153         if (!task_current(rq, p) && !p->dl.dl << 
2154                 enqueue_pushable_dl_task(rq,  << 
2155 }                                             << 
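The comments in enqueue_task_dl() above describe two corner cases of the PI/throttling interaction: a boosted task has its throttle overridden, while a deboosted non-deadline task that overran its donated runtime is not replenished at all. A toy decision model of just that branch; the names are invented for illustration, and the real code additionally cancels the replenishment hrtimer and drops the task reference it held:

/* Illustrative only: toy model of the boost/throttle decision at the top of
 * enqueue_task_dl(). Invented names; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	bool boosted;		/* inherited parameters from a DL pi-waiter */
	bool throttled;		/* out of runtime, replenishment timer armed */
	bool is_deadline;	/* normal policy is SCHED_DEADLINE */
};

/* Returns true if the entity should really be enqueued on the dl_rq. */
static bool toy_enqueue_decision(struct toy_task *p)
{
	if (p->boosted) {
		p->throttled = false;	/* the boost overrides the throttle */
		return true;
	}
	if (!p->is_deadline) {
		p->throttled = false;	/* deboosted overrun: no replenishment */
		return false;		/* it is leaving this class anyway */
	}
	return true;			/* ordinary SCHED_DEADLINE enqueue */
}

int main(void)
{
	struct toy_task p = { .boosted = true, .throttled = true, .is_deadline = true };

	printf("enqueue=%d throttled=%d\n", toy_enqueue_decision(&p), p.throttled);
	return 0;
}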
2156                                               << 
2157 static bool dequeue_task_dl(struct rq *rq, st << 
2158 {                                             << 
2159         update_curr_dl(rq);                   << 
2160                                               << 
2161         if (p->on_rq == TASK_ON_RQ_MIGRATING) << 
2162                 flags |= DEQUEUE_MIGRATING;   << 
2163                                               << 
2164         dequeue_dl_entity(&p->dl, flags);     << 
2165         if (!p->dl.dl_throttled && !dl_server << 
2166                 dequeue_pushable_dl_task(rq,  << 
2167                                               << 
2168         return true;                          << 
2169 }                                                1606 }
2170                                                  1607 
2171 /*                                               1608 /*
2172  * Yield task semantic for -deadline tasks is    1609  * Yield task semantic for -deadline tasks is:
2173  *                                               1610  *
2174  *   get off the CPU until our next instance    1611  *   get off the CPU until our next instance, with
2175  *   a new runtime. This is of little use now    1612  *   a new runtime. This is of little use now, since we
2176  *   don't have a bandwidth reclaiming mechan    1613  *   don't have a bandwidth reclaiming mechanism. Anyway,
2177  *   bandwidth reclaiming is planned for the     1614  *   bandwidth reclaiming is planned for the future, and
2178  *   yield_task_dl will indicate that some sp    1615  *   yield_task_dl will indicate that some spare budget
2179  *   is available for other task instances to    1616  *   is available for other task instances to use.
2180  */                                              1617  */
2181 static void yield_task_dl(struct rq *rq)         1618 static void yield_task_dl(struct rq *rq)
2182 {                                                1619 {
2183         /*                                       1620         /*
2184          * We make the task go to sleep until    1621          * We make the task go to sleep until its current deadline by
2185          * forcing its runtime to zero. This     1622          * forcing its runtime to zero. This way, update_curr_dl() stops
2186          * it and the bandwidth timer will wa    1623          * it and the bandwidth timer will wake it up and will give it
2187          * new scheduling parameters (thanks     1624          * new scheduling parameters (thanks to dl_yielded=1).
2188          */                                      1625          */
2189         rq->curr->dl.dl_yielded = 1;             1626         rq->curr->dl.dl_yielded = 1;
2190                                                  1627 
2191         update_rq_clock(rq);                     1628         update_rq_clock(rq);
2192         update_curr_dl(rq);                      1629         update_curr_dl(rq);
2193         /*                                       1630         /*
2194          * Tell update_rq_clock() that we've     1631          * Tell update_rq_clock() that we've just updated,
2195          * so we don't do microscopic update     1632          * so we don't do microscopic update in schedule()
2196          * and double the fastpath cost.         1633          * and double the fastpath cost.
2197          */                                      1634          */
2198         rq_clock_skip_update(rq);                1635         rq_clock_skip_update(rq);
2199 }                                                1636 }
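Seen from user space, the semantics described above are reached through sched_yield(): the task hands back whatever runtime is left in the current instance and is woken by the bandwidth timer with fresh parameters (dl_yielded). A minimal sketch based on the sched_setattr(2) and sched(7) man pages; struct dl_attr below just mirrors the documented sched_attr layout, error handling is trimmed, and running it typically requires CAP_SYS_NICE:

/* Illustrative only: a SCHED_DEADLINE task yielding the rest of its instance. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

struct dl_attr {		/* mirrors struct sched_attr, see sched_setattr(2) */
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct dl_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size           = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;		/* 10 ms ...       */
	attr.sched_deadline = 30 * 1000 * 1000;
	attr.sched_period   = 30 * 1000 * 1000;		/* ... every 30 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	for (int i = 0; i < 5; i++) {
		/* do a slice of work, then give the leftover runtime back
		 * until the next instance (the dl_yielded path above) */
		sched_yield();
	}
	return 0;
}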
2200                                                  1637 
2201 #ifdef CONFIG_SMP                                1638 #ifdef CONFIG_SMP
2202                                                  1639 
2203 static inline bool dl_task_is_earliest_deadli << 
2204                                               << 
2205 {                                             << 
2206         return (!rq->dl.dl_nr_running ||      << 
2207                 dl_time_before(p->dl.deadline << 
2208                                rq->dl.earlies << 
2209 }                                             << 
2210                                               << 
2211 static int find_later_rq(struct task_struct *    1640 static int find_later_rq(struct task_struct *task);
2212                                                  1641 
2213 static int                                       1642 static int
2214 select_task_rq_dl(struct task_struct *p, int  !! 1643 select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
2215 {                                                1644 {
2216         struct task_struct *curr;                1645         struct task_struct *curr;
2217         bool select_rq;                          1646         bool select_rq;
2218         struct rq *rq;                           1647         struct rq *rq;
2219                                                  1648 
2220         if (!(flags & WF_TTWU))               !! 1649         if (sd_flag != SD_BALANCE_WAKE)
2221                 goto out;                        1650                 goto out;
2222                                                  1651 
2223         rq = cpu_rq(cpu);                        1652         rq = cpu_rq(cpu);
2224                                                  1653 
2225         rcu_read_lock();                         1654         rcu_read_lock();
2226         curr = READ_ONCE(rq->curr); /* unlock    1655         curr = READ_ONCE(rq->curr); /* unlocked access */
2227                                                  1656 
2228         /*                                       1657         /*
2229          * If we are dealing with a -deadline    1658          * If we are dealing with a -deadline task, we must
2230          * decide where to wake it up.           1659          * decide where to wake it up.
2231          * If it has a later deadline and the    1660          * If it has a later deadline and the current task
2232          * on this rq can't move (provided th    1661          * on this rq can't move (provided the waking task
2233          * can!) we prefer to send it somewhe    1662          * can!) we prefer to send it somewhere else. On the
2234          * other hand, if it has a shorter de    1663          * other hand, if it has a shorter deadline, we
2235          * try to make it stay here; it might    1664          * try to make it stay here; it might be important.
2236          */                                      1665          */
2237         select_rq = unlikely(dl_task(curr)) &    1666         select_rq = unlikely(dl_task(curr)) &&
2238                     (curr->nr_cpus_allowed <     1667                     (curr->nr_cpus_allowed < 2 ||
2239                      !dl_entity_preempt(&p->d    1668                      !dl_entity_preempt(&p->dl, &curr->dl)) &&
2240                     p->nr_cpus_allowed > 1;      1669                     p->nr_cpus_allowed > 1;
2241                                                  1670 
2242         /*                                       1671         /*
2243          * Take the capacity of the CPU into     1672          * Take the capacity of the CPU into account to
2244          * ensure it fits the requirement of     1673          * ensure it fits the requirement of the task.
2245          */                                      1674          */
2246         if (sched_asym_cpucap_active())       !! 1675         if (static_branch_unlikely(&sched_asym_cpucapacity))
2247                 select_rq |= !dl_task_fits_ca    1676                 select_rq |= !dl_task_fits_capacity(p, cpu);
2248                                                  1677 
2249         if (select_rq) {                         1678         if (select_rq) {
2250                 int target = find_later_rq(p)    1679                 int target = find_later_rq(p);
2251                                                  1680 
2252                 if (target != -1 &&              1681                 if (target != -1 &&
2253                     dl_task_is_earliest_deadl !! 1682                                 (dl_time_before(p->dl.deadline,
                                                   >> 1683                                         cpu_rq(target)->dl.earliest_dl.curr) ||
                                                   >> 1684                                 (cpu_rq(target)->dl.dl_nr_running == 0)))
2254                         cpu = target;            1685                         cpu = target;
2255         }                                        1686         }
2256         rcu_read_unlock();                       1687         rcu_read_unlock();
2257                                                  1688 
2258 out:                                             1689 out:
2259         return cpu;                              1690         return cpu;
2260 }                                                1691 }
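On asymmetric CPU-capacity systems the sched_asym_cpucap_active() branch above additionally rejects CPUs that cannot fit the task: dl_task_fits_capacity() roughly checks that the task's bandwidth demand dl_runtime/dl_deadline does not exceed the CPU's share of a full-capacity CPU on the usual 0..1024 scale. A small sketch of that check (our own names; the kernel uses cap_scale() on arch_scale_cpu_capacity()):

/* Illustrative only: rough model of the capacity-fitness test on asymmetric
 * CPU-capacity systems. Capacities are on the usual 0..1024 scale. */
#include <stdint.h>
#include <stdio.h>

#define TOY_CAPACITY_SCALE	1024ULL

static int toy_dl_fits_capacity(uint64_t dl_runtime, uint64_t dl_deadline,
				uint64_t cpu_capacity)
{
	/* fits if runtime/deadline <= capacity/1024 */
	return dl_deadline * cpu_capacity / TOY_CAPACITY_SCALE >= dl_runtime;
}

int main(void)
{
	/* 5 ms every 10 ms needs at least half of a full-capacity CPU */
	printf("little(446): %d\n", toy_dl_fits_capacity(5000000, 10000000, 446));
	printf("big(1024):   %d\n", toy_dl_fits_capacity(5000000, 10000000, 1024));
	return 0;
}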
2261                                                  1692 
2262 static void migrate_task_rq_dl(struct task_st    1693 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
2263 {                                                1694 {
2264         struct rq_flags rf;                   << 
2265         struct rq *rq;                           1695         struct rq *rq;
2266                                                  1696 
2267         if (READ_ONCE(p->__state) != TASK_WAK !! 1697         if (p->state != TASK_WAKING)
2268                 return;                          1698                 return;
2269                                                  1699 
2270         rq = task_rq(p);                         1700         rq = task_rq(p);
2271         /*                                       1701         /*
2272          * Since p->state == TASK_WAKING, set    1702          * Since p->state == TASK_WAKING, set_task_cpu() has been called
2273          * from try_to_wake_up(). Hence, p->p    1703          * from try_to_wake_up(). Hence, p->pi_lock is locked, but
2274          * rq->lock is not... So, lock it        1704          * rq->lock is not... So, lock it
2275          */                                      1705          */
2276         rq_lock(rq, &rf);                     !! 1706         raw_spin_lock(&rq->lock);
2277         if (p->dl.dl_non_contending) {           1707         if (p->dl.dl_non_contending) {
2278                 update_rq_clock(rq);          << 
2279                 sub_running_bw(&p->dl, &rq->d    1708                 sub_running_bw(&p->dl, &rq->dl);
2280                 p->dl.dl_non_contending = 0;     1709                 p->dl.dl_non_contending = 0;
2281                 /*                               1710                 /*
2282                  * If the timer handler is cu    1711                  * If the timer handler is currently running and the
2283                  * timer cannot be canceled,  !! 1712                  * timer cannot be cancelled, inactive_task_timer()
2284                  * will see that dl_not_conte    1713                  * will see that dl_not_contending is not set, and
2285                  * will not touch the rq's ac    1714                  * will not touch the rq's active utilization,
2286                  * so we are still safe.         1715                  * so we are still safe.
2287                  */                              1716                  */
2288                 if (hrtimer_try_to_cancel(&p-    1717                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2289                         put_task_struct(p);      1718                         put_task_struct(p);
2290         }                                        1719         }
2291         sub_rq_bw(&p->dl, &rq->dl);              1720         sub_rq_bw(&p->dl, &rq->dl);
2292         rq_unlock(rq, &rf);                   !! 1721         raw_spin_unlock(&rq->lock);
2293 }                                                1722 }
2294                                                  1723 
2295 static void check_preempt_equal_dl(struct rq     1724 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2296 {                                                1725 {
2297         /*                                       1726         /*
2298          * Current can't be migrated, useless    1727          * Current can't be migrated, useless to reschedule,
2299          * let's hope p can move out.            1728          * let's hope p can move out.
2300          */                                      1729          */
2301         if (rq->curr->nr_cpus_allowed == 1 ||    1730         if (rq->curr->nr_cpus_allowed == 1 ||
2302             !cpudl_find(&rq->rd->cpudl, rq->c    1731             !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
2303                 return;                          1732                 return;
2304                                                  1733 
2305         /*                                       1734         /*
2306          * p is migratable, so let's not sche    1735          * p is migratable, so let's not schedule it and
2307          * see if it is pushed or pulled some    1736          * see if it is pushed or pulled somewhere else.
2308          */                                      1737          */
2309         if (p->nr_cpus_allowed != 1 &&           1738         if (p->nr_cpus_allowed != 1 &&
2310             cpudl_find(&rq->rd->cpudl, p, NUL    1739             cpudl_find(&rq->rd->cpudl, p, NULL))
2311                 return;                          1740                 return;
2312                                                  1741 
2313         resched_curr(rq);                        1742         resched_curr(rq);
2314 }                                                1743 }
2315                                                  1744 
2316 static int balance_dl(struct rq *rq, struct t    1745 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2317 {                                                1746 {
2318         if (!on_dl_rq(&p->dl) && need_pull_dl    1747         if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2319                 /*                               1748                 /*
2320                  * This is OK, because curren    1749                  * This is OK, because current is on_cpu, which avoids it being
2321                  * picked for load-balance an    1750                  * picked for load-balance and preemption/IRQs are still
2322                  * disabled, avoiding further    1751                  * disabled, avoiding further scheduler activity on it, and we've
2323                  * not yet started the pickin    1752                  * not yet started the picking loop.
2324                  */                              1753                  */
2325                 rq_unpin_lock(rq, rf);           1754                 rq_unpin_lock(rq, rf);
2326                 pull_dl_task(rq);                1755                 pull_dl_task(rq);
2327                 rq_repin_lock(rq, rf);           1756                 rq_repin_lock(rq, rf);
2328         }                                        1757         }
2329                                                  1758 
2330         return sched_stop_runnable(rq) || sch    1759         return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2331 }                                                1760 }
2332 #endif /* CONFIG_SMP */                          1761 #endif /* CONFIG_SMP */
2333                                                  1762 
2334 /*                                               1763 /*
2335  * Only called when both the current and waki    1764  * Only called when both the current and waking task are -deadline
2336  * tasks.                                        1765  * tasks.
2337  */                                              1766  */
2338 static void wakeup_preempt_dl(struct rq *rq,  !! 1767 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
2339                                   int flags)     1768                                   int flags)
2340 {                                                1769 {
2341         if (dl_entity_preempt(&p->dl, &rq->cu    1770         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2342                 resched_curr(rq);                1771                 resched_curr(rq);
2343                 return;                          1772                 return;
2344         }                                        1773         }
2345                                                  1774 
2346 #ifdef CONFIG_SMP                                1775 #ifdef CONFIG_SMP
2347         /*                                       1776         /*
2348          * In the unlikely case current and p    1777          * In the unlikely case current and p have the same deadline
2349          * let us try to decide what's the be    1778          * let us try to decide what's the best thing to do...
2350          */                                      1779          */
2351         if ((p->dl.deadline == rq->curr->dl.d    1780         if ((p->dl.deadline == rq->curr->dl.deadline) &&
2352             !test_tsk_need_resched(rq->curr))    1781             !test_tsk_need_resched(rq->curr))
2353                 check_preempt_equal_dl(rq, p)    1782                 check_preempt_equal_dl(rq, p);
2354 #endif /* CONFIG_SMP */                          1783 #endif /* CONFIG_SMP */
2355 }                                                1784 }
2356                                                  1785 
2357 #ifdef CONFIG_SCHED_HRTICK                       1786 #ifdef CONFIG_SCHED_HRTICK
2358 static void start_hrtick_dl(struct rq *rq, st !! 1787 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2359 {                                                1788 {
2360         hrtick_start(rq, dl_se->runtime);     !! 1789         hrtick_start(rq, p->dl.runtime);
2361 }                                                1790 }
2362 #else /* !CONFIG_SCHED_HRTICK */                 1791 #else /* !CONFIG_SCHED_HRTICK */
2363 static void start_hrtick_dl(struct rq *rq, st !! 1792 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2364 {                                                1793 {
2365 }                                                1794 }
2366 #endif                                           1795 #endif
2367                                                  1796 
2368 static void set_next_task_dl(struct rq *rq, s    1797 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2369 {                                                1798 {
2370         struct sched_dl_entity *dl_se = &p->d << 
2371         struct dl_rq *dl_rq = &rq->dl;        << 
2372                                               << 
2373         p->se.exec_start = rq_clock_task(rq);    1799         p->se.exec_start = rq_clock_task(rq);
2374         if (on_dl_rq(&p->dl))                 << 
2375                 update_stats_wait_end_dl(dl_r << 
2376                                                  1800 
2377         /* You can't push away the running ta    1801         /* You can't push away the running task */
2378         dequeue_pushable_dl_task(rq, p);         1802         dequeue_pushable_dl_task(rq, p);
2379                                                  1803 
2380         if (!first)                              1804         if (!first)
2381                 return;                          1805                 return;
2382                                                  1806 
                                                   >> 1807         if (hrtick_enabled(rq))
                                                   >> 1808                 start_hrtick_dl(rq, p);
                                                   >> 1809 
2383         if (rq->curr->sched_class != &dl_sche    1810         if (rq->curr->sched_class != &dl_sched_class)
2384                 update_dl_rq_load_avg(rq_cloc    1811                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2385                                                  1812 
2386         deadline_queue_push_tasks(rq);           1813         deadline_queue_push_tasks(rq);
2387                                               << 
2388         if (hrtick_enabled_dl(rq))            << 
2389                 start_hrtick_dl(rq, &p->dl);  << 
2390 }                                                1814 }
2391                                                  1815 
2392 static struct sched_dl_entity *pick_next_dl_e !! 1816 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
                                                   >> 1817                                                    struct dl_rq *dl_rq)
2393 {                                                1818 {
2394         struct rb_node *left = rb_first_cache    1819         struct rb_node *left = rb_first_cached(&dl_rq->root);
2395                                                  1820 
2396         if (!left)                               1821         if (!left)
2397                 return NULL;                     1822                 return NULL;
2398                                                  1823 
2399         return __node_2_dle(left);            !! 1824         return rb_entry(left, struct sched_dl_entity, rb_node);
2400 }                                                1825 }
2401                                                  1826 
2402 /*                                            !! 1827 static struct task_struct *pick_next_task_dl(struct rq *rq)
2403  * __pick_next_task_dl - Helper to pick the n << 
2404  * @rq: The runqueue to pick the next task fr << 
2405  */                                           << 
2406 static struct task_struct *__pick_task_dl(str << 
2407 {                                                1828 {
2408         struct sched_dl_entity *dl_se;           1829         struct sched_dl_entity *dl_se;
2409         struct dl_rq *dl_rq = &rq->dl;           1830         struct dl_rq *dl_rq = &rq->dl;
2410         struct task_struct *p;                   1831         struct task_struct *p;
2411                                                  1832 
2412 again:                                        << 
2413         if (!sched_dl_runnable(rq))              1833         if (!sched_dl_runnable(rq))
2414                 return NULL;                     1834                 return NULL;
2415                                                  1835 
2416         dl_se = pick_next_dl_entity(dl_rq);   !! 1836         dl_se = pick_next_dl_entity(rq, dl_rq);
2417         WARN_ON_ONCE(!dl_se);                 !! 1837         BUG_ON(!dl_se);
2418                                               !! 1838         p = dl_task_of(dl_se);
2419         if (dl_server(dl_se)) {               !! 1839         set_next_task_dl(rq, p, true);
2420                 p = dl_se->server_pick_task(d << 
2421                 if (!p) {                     << 
2422                         dl_se->dl_yielded = 1 << 
2423                         update_curr_dl_se(rq, << 
2424                         goto again;           << 
2425                 }                             << 
2426                 rq->dl_server = dl_se;        << 
2427         } else {                              << 
2428                 p = dl_task_of(dl_se);        << 
2429         }                                     << 
2430                                               << 
2431         return p;                                1840         return p;
2432 }                                                1841 }
2433                                                  1842 
2434 static struct task_struct *pick_task_dl(struc !! 1843 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2435 {                                                1844 {
2436         return __pick_task_dl(rq);            << 
2437 }                                             << 
2438                                               << 
2439 static void put_prev_task_dl(struct rq *rq, s << 
2440 {                                             << 
2441         struct sched_dl_entity *dl_se = &p->d << 
2442         struct dl_rq *dl_rq = &rq->dl;        << 
2443                                               << 
2444         if (on_dl_rq(&p->dl))                 << 
2445                 update_stats_wait_start_dl(dl << 
2446                                               << 
2447         update_curr_dl(rq);                      1845         update_curr_dl(rq);
2448                                                  1846 
2449         update_dl_rq_load_avg(rq_clock_pelt(r    1847         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2450         if (on_dl_rq(&p->dl) && p->nr_cpus_al    1848         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2451                 enqueue_pushable_dl_task(rq,     1849                 enqueue_pushable_dl_task(rq, p);
2452 }                                                1850 }
2453                                                  1851 
2454 /*                                               1852 /*
2455  * scheduler tick hitting a task of our sched    1853  * scheduler tick hitting a task of our scheduling class.
2456  *                                               1854  *
2457  * NOTE: This function can be called remotely    1855  * NOTE: This function can be called remotely by the tick offload that
2458  * goes along full dynticks. Therefore no loc    1856  * goes along full dynticks. Therefore no local assumption can be made
2459  * and everything must be accessed through th    1857  * and everything must be accessed through the @rq and @curr passed in
2460  * parameters.                                   1858  * parameters.
2461  */                                              1859  */
2462 static void task_tick_dl(struct rq *rq, struc    1860 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2463 {                                                1861 {
2464         update_curr_dl(rq);                      1862         update_curr_dl(rq);
2465                                                  1863 
2466         update_dl_rq_load_avg(rq_clock_pelt(r    1864         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2467         /*                                       1865         /*
2468          * Even when we have runtime, update_    1866          * Even when we have runtime, update_curr_dl() might have resulted in us
2469          * not being the leftmost task anymor    1867          * not being the leftmost task anymore. In that case NEED_RESCHED will
2470          * be set and schedule() will start a    1868          * be set and schedule() will start a new hrtick for the next task.
2471          */                                      1869          */
2472         if (hrtick_enabled_dl(rq) && queued & !! 1870         if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
2473             is_leftmost(&p->dl, &rq->dl))     !! 1871             is_leftmost(p, &rq->dl))
2474                 start_hrtick_dl(rq, &p->dl);  !! 1872                 start_hrtick_dl(rq, p);
2475 }                                                1873 }
2476                                                  1874 
2477 static void task_fork_dl(struct task_struct *    1875 static void task_fork_dl(struct task_struct *p)
2478 {                                                1876 {
2479         /*                                       1877         /*
2480          * SCHED_DEADLINE tasks cannot fork a    1878          * SCHED_DEADLINE tasks cannot fork and this is achieved through
2481          * sched_fork()                          1879          * sched_fork()
2482          */                                      1880          */
2483 }                                                1881 }
2484                                                  1882 
2485 #ifdef CONFIG_SMP                                1883 #ifdef CONFIG_SMP
2486                                                  1884 
2487 /* Only try algorithms three times */            1885 /* Only try algorithms three times */
2488 #define DL_MAX_TRIES 3                           1886 #define DL_MAX_TRIES 3
2489                                                  1887 
2490 static int pick_dl_task(struct rq *rq, struct    1888 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2491 {                                                1889 {
2492         if (!task_on_cpu(rq, p) &&            !! 1890         if (!task_running(rq, p) &&
2493             cpumask_test_cpu(cpu, &p->cpus_ma !! 1891             cpumask_test_cpu(cpu, p->cpus_ptr))
2494                 return 1;                        1892                 return 1;
2495         return 0;                                1893         return 0;
2496 }                                                1894 }
2497                                                  1895 
2498 /*                                               1896 /*
2499  * Return the earliest pushable rq's task, wh    1897  * Return the earliest pushable rq's task, which is suitable to be executed
2500  * on the CPU, or NULL otherwise:                1898  * on the CPU, or NULL otherwise:
2501  */                                              1899  */
2502 static struct task_struct *pick_earliest_push    1900 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2503 {                                                1901 {
                                                   >> 1902         struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
2504         struct task_struct *p = NULL;            1903         struct task_struct *p = NULL;
2505         struct rb_node *next_node;            << 
2506                                                  1904 
2507         if (!has_pushable_dl_tasks(rq))          1905         if (!has_pushable_dl_tasks(rq))
2508                 return NULL;                     1906                 return NULL;
2509                                                  1907 
2510         next_node = rb_first_cached(&rq->dl.p << 
2511                                               << 
2512 next_node:                                       1908 next_node:
2513         if (next_node) {                         1909         if (next_node) {
2514                 p = __node_2_pdl(next_node);  !! 1910                 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
2515                                                  1911 
2516                 if (pick_dl_task(rq, p, cpu))    1912                 if (pick_dl_task(rq, p, cpu))
2517                         return p;                1913                         return p;
2518                                                  1914 
2519                 next_node = rb_next(next_node    1915                 next_node = rb_next(next_node);
2520                 goto next_node;                  1916                 goto next_node;
2521         }                                        1917         }
2522                                                  1918 
2523         return NULL;                             1919         return NULL;
2524 }                                                1920 }
2525                                                  1921 
2526 static DEFINE_PER_CPU(cpumask_var_t, local_cp    1922 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2527                                                  1923 
2528 static int find_later_rq(struct task_struct *    1924 static int find_later_rq(struct task_struct *task)
2529 {                                                1925 {
2530         struct sched_domain *sd;                 1926         struct sched_domain *sd;
2531         struct cpumask *later_mask = this_cpu    1927         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2532         int this_cpu = smp_processor_id();       1928         int this_cpu = smp_processor_id();
2533         int cpu = task_cpu(task);                1929         int cpu = task_cpu(task);
2534                                                  1930 
2535         /* Make sure the mask is initialized     1931         /* Make sure the mask is initialized first */
2536         if (unlikely(!later_mask))               1932         if (unlikely(!later_mask))
2537                 return -1;                       1933                 return -1;
2538                                                  1934 
2539         if (task->nr_cpus_allowed == 1)          1935         if (task->nr_cpus_allowed == 1)
2540                 return -1;                       1936                 return -1;
2541                                                  1937 
2542         /*                                       1938         /*
2543          * We have to consider system topolog    1939          * We have to consider system topology and task affinity
2544          * first, then we can look for a suit    1940          * first, then we can look for a suitable CPU.
2545          */                                      1941          */
2546         if (!cpudl_find(&task_rq(task)->rd->c    1942         if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2547                 return -1;                       1943                 return -1;
2548                                                  1944 
2549         /*                                       1945         /*
2550          * If we are here, some targets have     1946          * If we are here, some targets have been found, including
2551          * the most suitable which is, among     1947          * the most suitable which is, among the runqueues where the
2552          * current tasks have later deadlines    1948          * current tasks have later deadlines than the task's one, the
2553          * rq with the latest possible one.      1949          * rq with the latest possible one.
2554          *                                       1950          *
2555          * Now we check how well this matches    1951          * Now we check how well this matches with task's
2556          * affinity and system topology.         1952          * affinity and system topology.
2557          *                                       1953          *
2558          * The last CPU where the task ran is    1954          * The last CPU where the task ran is our first
2559          * guess, since it is most likely cac    1955          * guess, since it is most likely cache-hot there.
2560          */                                      1956          */
2561         if (cpumask_test_cpu(cpu, later_mask)    1957         if (cpumask_test_cpu(cpu, later_mask))
2562                 return cpu;                      1958                 return cpu;
2563         /*                                       1959         /*
2564          * Check if this_cpu is to be skipped    1960          * Check if this_cpu is to be skipped (i.e., it is
2565          * not in the mask) or not.              1961          * not in the mask) or not.
2566          */                                      1962          */
2567         if (!cpumask_test_cpu(this_cpu, later    1963         if (!cpumask_test_cpu(this_cpu, later_mask))
2568                 this_cpu = -1;                   1964                 this_cpu = -1;
2569                                                  1965 
2570         rcu_read_lock();                         1966         rcu_read_lock();
2571         for_each_domain(cpu, sd) {               1967         for_each_domain(cpu, sd) {
2572                 if (sd->flags & SD_WAKE_AFFIN    1968                 if (sd->flags & SD_WAKE_AFFINE) {
2573                         int best_cpu;            1969                         int best_cpu;
2574                                                  1970 
2575                         /*                       1971                         /*
2576                          * If possible, preem    1972                          * If possible, preempting this_cpu is
2577                          * cheaper than migra    1973                          * cheaper than migrating.
2578                          */                      1974                          */
2579                         if (this_cpu != -1 &&    1975                         if (this_cpu != -1 &&
2580                             cpumask_test_cpu(    1976                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2581                                 rcu_read_unlo    1977                                 rcu_read_unlock();
2582                                 return this_c    1978                                 return this_cpu;
2583                         }                        1979                         }
2584                                                  1980 
2585                         best_cpu = cpumask_an !! 1981                         best_cpu = cpumask_first_and(later_mask,
2586                                               !! 1982                                                         sched_domain_span(sd));
2587                         /*                       1983                         /*
2588                          * Last chance: if a     1984                          * Last chance: if a CPU being in both later_mask
2589                          * and current sd spa    1985                          * and current sd span is valid, that becomes our
2590                          * choice. Of course,    1986                          * choice. Of course, the latest possible CPU is
2591                          * already under cons    1987                          * already under consideration through later_mask.
2592                          */                      1988                          */
2593                         if (best_cpu < nr_cpu    1989                         if (best_cpu < nr_cpu_ids) {
2594                                 rcu_read_unlo    1990                                 rcu_read_unlock();
2595                                 return best_c    1991                                 return best_cpu;
2596                         }                        1992                         }
2597                 }                                1993                 }
2598         }                                        1994         }
2599         rcu_read_unlock();                       1995         rcu_read_unlock();
2600                                                  1996 
2601         /*                                       1997         /*
2602          * At this point, all our guesses fai    1998          * At this point, all our guesses failed, we just return
2603          * 'something', and let the caller so    1999          * 'something', and let the caller sort the things out.
2604          */                                      2000          */
2605         if (this_cpu != -1)                      2001         if (this_cpu != -1)
2606                 return this_cpu;                 2002                 return this_cpu;
2607                                                  2003 
2608         cpu = cpumask_any_distribute(later_ma !! 2004         cpu = cpumask_any(later_mask);
2609         if (cpu < nr_cpu_ids)                    2005         if (cpu < nr_cpu_ids)
2610                 return cpu;                      2006                 return cpu;
2611                                                  2007 
2612         return -1;                               2008         return -1;
2613 }                                                2009 }
2614                                                  2010 
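find_later_rq() asks cpudl_find() for the CPUs whose running deadline is later than the task's, then ranks the candidates by expected cost: the task's previous CPU first (most likely cache-hot), then this_cpu when it lies inside a wake-affine domain around that CPU (preempting locally is cheaper than migrating), then any candidate inside that domain span, then this_cpu as a plain fallback, and finally any candidate at all. A rough user-space rendering of that priority order, with one flattened "domain" mask standing in for the sched_domain walk and gcc/clang builtins for the bit scans (names are illustrative):

#include <stdint.h>

/*
 * Choose a CPU from @later_mask (CPUs whose current deadline is later
 * than the task's), cheapest option first.  CPU numbers are assumed to
 * be below 64.
 *   @last_cpu:    CPU the task ran on last (cache-hot guess)
 *   @this_cpu:    CPU running the push/pull code
 *   @domain_mask: CPUs topologically close to @last_cpu
 * Returns a CPU number or -1 if there is no candidate.
 */
int pick_later_cpu(uint64_t later_mask, int last_cpu, int this_cpu,
		   uint64_t domain_mask)
{
	if (!later_mask)
		return -1;

	/* 1) Staying put is free if the old CPU qualifies. */
	if (later_mask & (1ULL << last_cpu))
		return last_cpu;

	/* this_cpu only matters if it is itself a candidate. */
	if (this_cpu >= 0 && !(later_mask & (1ULL << this_cpu)))
		this_cpu = -1;

	/* 2) Preempting this_cpu beats a far migration, provided it is
	 *    close to where the task last ran.  */
	if (this_cpu >= 0 && (domain_mask & (1ULL << this_cpu)))
		return this_cpu;

	/* 3) Otherwise any close-by candidate will do. */
	if (later_mask & domain_mask)
		return __builtin_ctzll(later_mask & domain_mask);

	/* 4) Last resorts: this_cpu, then anything in the mask. */
	if (this_cpu >= 0)
		return this_cpu;
	return __builtin_ctzll(later_mask);
}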
2615 /* Locks the rq it finds */                      2011 /* Locks the rq it finds */
2616 static struct rq *find_lock_later_rq(struct t    2012 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2617 {                                                2013 {
2618         struct rq *later_rq = NULL;              2014         struct rq *later_rq = NULL;
2619         int tries;                               2015         int tries;
2620         int cpu;                                 2016         int cpu;
2621                                                  2017 
2622         for (tries = 0; tries < DL_MAX_TRIES;    2018         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2623                 cpu = find_later_rq(task);       2019                 cpu = find_later_rq(task);
2624                                                  2020 
2625                 if ((cpu == -1) || (cpu == rq    2021                 if ((cpu == -1) || (cpu == rq->cpu))
2626                         break;                   2022                         break;
2627                                                  2023 
2628                 later_rq = cpu_rq(cpu);          2024                 later_rq = cpu_rq(cpu);
2629                                                  2025 
2630                 if (!dl_task_is_earliest_dead !! 2026                 if (later_rq->dl.dl_nr_running &&
                                                   >> 2027                     !dl_time_before(task->dl.deadline,
                                                   >> 2028                                         later_rq->dl.earliest_dl.curr)) {
2631                         /*                       2029                         /*
2632                          * Target rq has task    2030                          * Target rq has tasks of equal or earlier deadline,
2633                          * retrying does not     2031                          * retrying does not release any lock and is unlikely
2634                          * to yield a differe    2032                          * to yield a different result.
2635                          */                      2033                          */
2636                         later_rq = NULL;         2034                         later_rq = NULL;
2637                         break;                   2035                         break;
2638                 }                                2036                 }
2639                                                  2037 
2640                 /* Retry if something changed    2038                 /* Retry if something changed. */
2641                 if (double_lock_balance(rq, l    2039                 if (double_lock_balance(rq, later_rq)) {
2642                         if (unlikely(task_rq(    2040                         if (unlikely(task_rq(task) != rq ||
2643                                      !cpumask !! 2041                                      !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
2644                                      task_on_ !! 2042                                      task_running(rq, task) ||
2645                                      !dl_task    2043                                      !dl_task(task) ||
2646                                      is_migra << 
2647                                      !task_on    2044                                      !task_on_rq_queued(task))) {
2648                                 double_unlock    2045                                 double_unlock_balance(rq, later_rq);
2649                                 later_rq = NU    2046                                 later_rq = NULL;
2650                                 break;           2047                                 break;
2651                         }                        2048                         }
2652                 }                                2049                 }
2653                                                  2050 
2654                 /*                               2051                 /*
2655                  * If the rq we found has no     2052                  * If the rq we found has no -deadline task, or
2656                  * its earliest one has a lat    2053                  * its earliest one has a later deadline than our
2657                  * task, the rq is a good one    2054                  * task, the rq is a good one.
2658                  */                              2055                  */
2659                 if (dl_task_is_earliest_deadl !! 2056                 if (!later_rq->dl.dl_nr_running ||
                                                   >> 2057                     dl_time_before(task->dl.deadline,
                                                   >> 2058                                    later_rq->dl.earliest_dl.curr))
2660                         break;                   2059                         break;
2661                                                  2060 
2662                 /* Otherwise we try again. */    2061                 /* Otherwise we try again. */
2663                 double_unlock_balance(rq, lat    2062                 double_unlock_balance(rq, later_rq);
2664                 later_rq = NULL;                 2063                 later_rq = NULL;
2665         }                                        2064         }
2666                                                  2065 
2667         return later_rq;                         2066         return later_rq;
2668 }                                                2067 }
2669                                                  2068 
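find_lock_later_rq() must hold both runqueue locks before the task can be moved, and double_lock_balance() may drop rq->lock while acquiring them in the proper order; if that happened, the task may have migrated, changed class, become migration-disabled or started running, so everything is re-validated and the attempt is retried at most DL_MAX_TRIES times. A loose user-space analogue of that "lock both in a fixed order, re-validate, bounded retries" pattern, assuming items only move between queues with both queues' mutexes held (types and names are invented for the sketch):

#include <pthread.h>
#include <stdbool.h>

#define MAX_TRIES 3

struct queue {
	pthread_mutex_t lock;
	int id;
};

struct item {
	int owner;   /* id of the queue it sits on; changed only with
	              * both the source and destination locks held */
};

/*
 * Take @src's and @dst's locks in address order (so two movers cannot
 * deadlock), then confirm @it still belongs to @src.  On success both
 * locks are held and the caller may move the item; on failure both are
 * dropped and we try again, up to MAX_TRIES times.
 */
bool lock_pair_and_validate(struct queue *src, struct queue *dst,
			    struct item *it)
{
	struct queue *first = src < dst ? src : dst;
	struct queue *second = src < dst ? dst : src;

	for (int tries = 0; tries < MAX_TRIES; tries++) {
		pthread_mutex_lock(&first->lock);
		pthread_mutex_lock(&second->lock);

		if (it->owner == src->id)
			return true;

		/* The item moved meanwhile: release both locks and
		 * retry, like the retry loop in find_lock_later_rq().  */
		pthread_mutex_unlock(&second->lock);
		pthread_mutex_unlock(&first->lock);
	}
	return false;
}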
2670 static struct task_struct *pick_next_pushable    2069 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2671 {                                                2070 {
2672         struct task_struct *p;                   2071         struct task_struct *p;
2673                                                  2072 
2674         if (!has_pushable_dl_tasks(rq))          2073         if (!has_pushable_dl_tasks(rq))
2675                 return NULL;                     2074                 return NULL;
2676                                                  2075 
2677         p = __node_2_pdl(rb_first_cached(&rq- !! 2076         p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
                                                   >> 2077                      struct task_struct, pushable_dl_tasks);
2678                                                  2078 
2679         WARN_ON_ONCE(rq->cpu != task_cpu(p)); !! 2079         BUG_ON(rq->cpu != task_cpu(p));
2680         WARN_ON_ONCE(task_current(rq, p));    !! 2080         BUG_ON(task_current(rq, p));
2681         WARN_ON_ONCE(p->nr_cpus_allowed <= 1) !! 2081         BUG_ON(p->nr_cpus_allowed <= 1);
2682                                                  2082 
2683         WARN_ON_ONCE(!task_on_rq_queued(p));  !! 2083         BUG_ON(!task_on_rq_queued(p));
2684         WARN_ON_ONCE(!dl_task(p));            !! 2084         BUG_ON(!dl_task(p));
2685                                                  2085 
2686         return p;                                2086         return p;
2687 }                                                2087 }
2688                                                  2088 
2689 /*                                               2089 /*
2690  * See if the non running -deadline tasks on     2090  * See if the non running -deadline tasks on this rq
2691  * can be sent to some other CPU where they c    2091  * can be sent to some other CPU where they can preempt
2692  * and start executing.                          2092  * and start executing.
2693  */                                              2093  */
2694 static int push_dl_task(struct rq *rq)           2094 static int push_dl_task(struct rq *rq)
2695 {                                                2095 {
2696         struct task_struct *next_task;           2096         struct task_struct *next_task;
2697         struct rq *later_rq;                     2097         struct rq *later_rq;
2698         int ret = 0;                             2098         int ret = 0;
2699                                                  2099 
                                                   >> 2100         if (!rq->dl.overloaded)
                                                   >> 2101                 return 0;
                                                   >> 2102 
2700         next_task = pick_next_pushable_dl_tas    2103         next_task = pick_next_pushable_dl_task(rq);
2701         if (!next_task)                          2104         if (!next_task)
2702                 return 0;                        2105                 return 0;
2703                                                  2106 
2704 retry:                                           2107 retry:
                                                   >> 2108         if (WARN_ON(next_task == rq->curr))
                                                   >> 2109                 return 0;
                                                   >> 2110 
2705         /*                                       2111         /*
2706          * If next_task preempts rq->curr, an    2112          * If next_task preempts rq->curr, and rq->curr
2707          * can move away, it makes sense to j    2113          * can move away, it makes sense to just reschedule
2708          * without going further in pushing n    2114          * without going further in pushing next_task.
2709          */                                      2115          */
2710         if (dl_task(rq->curr) &&                 2116         if (dl_task(rq->curr) &&
2711             dl_time_before(next_task->dl.dead    2117             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2712             rq->curr->nr_cpus_allowed > 1) {     2118             rq->curr->nr_cpus_allowed > 1) {
2713                 resched_curr(rq);                2119                 resched_curr(rq);
2714                 return 0;                        2120                 return 0;
2715         }                                        2121         }
2716                                                  2122 
2717         if (is_migration_disabled(next_task)) << 
2718                 return 0;                     << 
2719                                               << 
2720         if (WARN_ON(next_task == rq->curr))   << 
2721                 return 0;                     << 
2722                                               << 
2723         /* We might release rq lock */           2123         /* We might release rq lock */
2724         get_task_struct(next_task);              2124         get_task_struct(next_task);
2725                                                  2125 
2726         /* Will lock the rq it'll find */        2126         /* Will lock the rq it'll find */
2727         later_rq = find_lock_later_rq(next_ta    2127         later_rq = find_lock_later_rq(next_task, rq);
2728         if (!later_rq) {                         2128         if (!later_rq) {
2729                 struct task_struct *task;        2129                 struct task_struct *task;
2730                                                  2130 
2731                 /*                               2131                 /*
2732                  * We must check all this aga    2132                  * We must check all this again, since
2733                  * find_lock_later_rq release    2133                  * find_lock_later_rq releases rq->lock and it is
2734                  * then possible that next_ta    2134                  * then possible that next_task has migrated.
2735                  */                              2135                  */
2736                 task = pick_next_pushable_dl_    2136                 task = pick_next_pushable_dl_task(rq);
2737                 if (task == next_task) {         2137                 if (task == next_task) {
2738                         /*                       2138                         /*
2739                          * The task is still     2139                          * The task is still there. We don't try
2740                          * again, some other     2140                          * again, some other CPU will pull it when ready.
2741                          */                      2141                          */
2742                         goto out;                2142                         goto out;
2743                 }                                2143                 }
2744                                                  2144 
2745                 if (!task)                       2145                 if (!task)
2746                         /* No more tasks */      2146                         /* No more tasks */
2747                         goto out;                2147                         goto out;
2748                                                  2148 
2749                 put_task_struct(next_task);      2149                 put_task_struct(next_task);
2750                 next_task = task;                2150                 next_task = task;
2751                 goto retry;                      2151                 goto retry;
2752         }                                        2152         }
2753                                                  2153 
2754         deactivate_task(rq, next_task, 0);       2154         deactivate_task(rq, next_task, 0);
2755         set_task_cpu(next_task, later_rq->cpu    2155         set_task_cpu(next_task, later_rq->cpu);
2756         activate_task(later_rq, next_task, 0) !! 2156 
                                                   >> 2157         /*
                                                   >> 2158          * Update the later_rq clock here, because the clock is used
                                                   >> 2159          * by the cpufreq_update_util() inside __add_running_bw().
                                                   >> 2160          */
                                                   >> 2161         update_rq_clock(later_rq);
                                                   >> 2162         activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
2757         ret = 1;                                 2163         ret = 1;
2758                                                  2164 
2759         resched_curr(later_rq);                  2165         resched_curr(later_rq);
2760                                                  2166 
2761         double_unlock_balance(rq, later_rq);     2167         double_unlock_balance(rq, later_rq);
2762                                                  2168 
2763 out:                                             2169 out:
2764         put_task_struct(next_task);              2170         put_task_struct(next_task);
2765                                                  2171 
2766         return ret;                              2172         return ret;
2767 }                                                2173 }
2768                                                  2174 
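Two details of push_dl_task() are easy to miss. Deadline comparisons go through dl_time_before(), which subtracts in 64 bits and tests the sign, so a wrapped clock still compares correctly. And a push is not attempted at all when a local reschedule suffices, namely when the candidate beats rq->curr and curr itself is free to migrate later. The sketch below shows the wrap-safe compare and that first decision only; the "find a later rq and migrate" half is what find_lock_later_rq() above provides (types are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "a is earlier than b", same idea as dl_time_before(). */
static inline bool deadline_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

enum push_action { PUSH_NOTHING, PUSH_RESCHED_LOCAL, PUSH_TRY_MIGRATE };

struct fake_task {
	bool     is_dl;            /* scheduled by SCHED_DEADLINE */
	uint64_t deadline;         /* absolute deadline, ns */
	int      nr_cpus_allowed;
};

/* First decision of push_dl_task(): reschedule here or try a push? */
enum push_action push_decision(const struct fake_task *next,
			       const struct fake_task *curr)
{
	if (!next)
		return PUSH_NOTHING;

	/*
	 * next would preempt curr anyway, and curr can move away on its
	 * own: a local reschedule is cheaper than migrating next.
	 */
	if (curr->is_dl &&
	    deadline_before(next->deadline, curr->deadline) &&
	    curr->nr_cpus_allowed > 1)
		return PUSH_RESCHED_LOCAL;

	return PUSH_TRY_MIGRATE;
}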
2769 static void push_dl_tasks(struct rq *rq)         2175 static void push_dl_tasks(struct rq *rq)
2770 {                                                2176 {
2771         /* push_dl_task() will return true if    2177         /* push_dl_task() will return true if it moved a -deadline task */
2772         while (push_dl_task(rq))                 2178         while (push_dl_task(rq))
2773                 ;                                2179                 ;
2774 }                                                2180 }
2775                                                  2181 
2776 static void pull_dl_task(struct rq *this_rq)     2182 static void pull_dl_task(struct rq *this_rq)
2777 {                                                2183 {
2778         int this_cpu = this_rq->cpu, cpu;        2184         int this_cpu = this_rq->cpu, cpu;
2779         struct task_struct *p, *push_task;    !! 2185         struct task_struct *p;
2780         bool resched = false;                    2186         bool resched = false;
2781         struct rq *src_rq;                       2187         struct rq *src_rq;
2782         u64 dmin = LONG_MAX;                     2188         u64 dmin = LONG_MAX;
2783                                                  2189 
2784         if (likely(!dl_overloaded(this_rq)))     2190         if (likely(!dl_overloaded(this_rq)))
2785                 return;                          2191                 return;
2786                                                  2192 
2787         /*                                       2193         /*
2788          * Match the barrier from dl_set_over    2194          * Match the barrier from dl_set_overloaded; this guarantees that if we
2789          * see overloaded we must also see th    2195          * see overloaded we must also see the dlo_mask bit.
2790          */                                      2196          */
2791         smp_rmb();                               2197         smp_rmb();
2792                                                  2198 
2793         for_each_cpu(cpu, this_rq->rd->dlo_ma    2199         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2794                 if (this_cpu == cpu)             2200                 if (this_cpu == cpu)
2795                         continue;                2201                         continue;
2796                                                  2202 
2797                 src_rq = cpu_rq(cpu);            2203                 src_rq = cpu_rq(cpu);
2798                                                  2204 
2799                 /*                               2205                 /*
2800                  * It looks racy, and it is!  !! 2206                  * It looks racy, and it is! However, as in sched_rt.c,
2801                  * we are fine with this.        2207                  * we are fine with this.
2802                  */                              2208                  */
2803                 if (this_rq->dl.dl_nr_running    2209                 if (this_rq->dl.dl_nr_running &&
2804                     dl_time_before(this_rq->d    2210                     dl_time_before(this_rq->dl.earliest_dl.curr,
2805                                    src_rq->dl    2211                                    src_rq->dl.earliest_dl.next))
2806                         continue;                2212                         continue;
2807                                                  2213 
2808                 /* Might drop this_rq->lock *    2214                 /* Might drop this_rq->lock */
2809                 push_task = NULL;             << 
2810                 double_lock_balance(this_rq,     2215                 double_lock_balance(this_rq, src_rq);
2811                                                  2216 
2812                 /*                               2217                 /*
2813                  * If there are no more pulla    2218                  * If there are no more pullable tasks on the
2814                  * rq, we're done with it.       2219                  * rq, we're done with it.
2815                  */                              2220                  */
2816                 if (src_rq->dl.dl_nr_running     2221                 if (src_rq->dl.dl_nr_running <= 1)
2817                         goto skip;               2222                         goto skip;
2818                                                  2223 
2819                 p = pick_earliest_pushable_dl    2224                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2820                                                  2225 
2821                 /*                               2226                 /*
2822                  * We found a task to be pull    2227                  * We found a task to be pulled if:
2823                  *  - it preempts our current    2228                  *  - it preempts our current (if there's one),
2824                  *  - it will preempt the las    2229                  *  - it will preempt the last one we pulled (if any).
2825                  */                              2230                  */
2826                 if (p && dl_time_before(p->dl    2231                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2827                     dl_task_is_earliest_deadl !! 2232                     (!this_rq->dl.dl_nr_running ||
                                                   >> 2233                      dl_time_before(p->dl.deadline,
                                                   >> 2234                                     this_rq->dl.earliest_dl.curr))) {
2828                         WARN_ON(p == src_rq->    2235                         WARN_ON(p == src_rq->curr);
2829                         WARN_ON(!task_on_rq_q    2236                         WARN_ON(!task_on_rq_queued(p));
2830                                                  2237 
2831                         /*                       2238                         /*
2832                          * Then we pull iff p    2239                          * Then we pull iff p has actually an earlier
2833                          * deadline than the     2240                          * deadline than the current task of its runqueue.
2834                          */                      2241                          */
2835                         if (dl_time_before(p-    2242                         if (dl_time_before(p->dl.deadline,
2836                                            sr    2243                                            src_rq->curr->dl.deadline))
2837                                 goto skip;       2244                                 goto skip;
2838                                                  2245 
2839                         if (is_migration_disa !! 2246                         resched = true;
2840                                 push_task = g !! 2247 
2841                         } else {              !! 2248                         deactivate_task(src_rq, p, 0);
2842                                 deactivate_ta !! 2249                         set_task_cpu(p, this_cpu);
2843                                 set_task_cpu( !! 2250                         activate_task(this_rq, p, 0);
2844                                 activate_task !! 2251                         dmin = p->dl.deadline;
2845                                 dmin = p->dl. << 
2846                                 resched = tru << 
2847                         }                     << 
2848                                                  2252 
2849                         /* Is there any other    2253                         /* Is there any other task even earlier? */
2850                 }                                2254                 }
2851 skip:                                            2255 skip:
2852                 double_unlock_balance(this_rq    2256                 double_unlock_balance(this_rq, src_rq);
2853                                               << 
2854                 if (push_task) {              << 
2855                         preempt_disable();    << 
2856                         raw_spin_rq_unlock(th << 
2857                         stop_one_cpu_nowait(s << 
2858                                             p << 
2859                         preempt_enable();     << 
2860                         raw_spin_rq_lock(this << 
2861                 }                             << 
2862         }                                        2257         }
2863                                                  2258 
2864         if (resched)                             2259         if (resched)
2865                 resched_curr(this_rq);           2260                 resched_curr(this_rq);
2866 }                                                2261 }
2867                                                  2262 
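pull_dl_task() visits each CPU set in rd->dlo_mask and considers its earliest pushable task. A candidate is taken only if it beats both this runqueue's earliest queued deadline and the best candidate seen so far (the dmin bookkeeping), and a candidate that is earlier than its own runqueue's current task is skipped, much as the RT pull path leaves a freshly woken higher-priority task where it is, on the assumption that its CPU is about to run it anyway. The sketch below covers the selection only and reports a single best source, whereas the real loop may pull (or hand off to the stopper thread) from several CPUs in one pass; types are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static inline bool deadline_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;   /* wrap-safe, as dl_time_before() */
}

struct fake_src {
	bool     has_candidate;   /* an earliest pushable task exists */
	uint64_t candidate;       /* its deadline */
	uint64_t curr;            /* deadline of the task running there */
};

/*
 * Index of the best source runqueue to pull from, or -1.  @this_has_dl
 * and @this_earliest describe the pulling runqueue.  INT64_MAX plays
 * the "infinitely late" role that LONG_MAX plays for dmin above: it
 * compares as later than any realistic deadline under the wrap-safe
 * test, whereas UINT64_MAX would not.
 */
int pick_pull_source(const struct fake_src *srcs, size_t nr,
		     bool this_has_dl, uint64_t this_earliest)
{
	uint64_t dmin = INT64_MAX;
	int best = -1;

	for (size_t i = 0; i < nr; i++) {
		const struct fake_src *src = &srcs[i];

		if (!src->has_candidate)
			continue;
		/* Must beat the best candidate found so far ... */
		if (!deadline_before(src->candidate, dmin))
			continue;
		/* ... and whatever is already queued here ... */
		if (this_has_dl &&
		    !deadline_before(src->candidate, this_earliest))
			continue;
		/* ... but leave alone a task that is earlier than its own
		 * rq's current: its CPU should pick it up by itself.  */
		if (deadline_before(src->candidate, src->curr))
			continue;

		dmin = src->candidate;
		best = (int)i;
	}
	return best;
}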
2868 /*                                               2263 /*
2869  * Since the task is not running and a resche    2264  * Since the task is not running and a reschedule is not going to happen
2870  * anytime soon on its runqueue, we try pushi    2265  * anytime soon on its runqueue, we try pushing it away now.
2871  */                                              2266  */
2872 static void task_woken_dl(struct rq *rq, stru    2267 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2873 {                                                2268 {
2874         if (!task_on_cpu(rq, p) &&            !! 2269         if (!task_running(rq, p) &&
2875             !test_tsk_need_resched(rq->curr)     2270             !test_tsk_need_resched(rq->curr) &&
2876             p->nr_cpus_allowed > 1 &&            2271             p->nr_cpus_allowed > 1 &&
2877             dl_task(rq->curr) &&                 2272             dl_task(rq->curr) &&
2878             (rq->curr->nr_cpus_allowed < 2 ||    2273             (rq->curr->nr_cpus_allowed < 2 ||
2879              !dl_entity_preempt(&p->dl, &rq->    2274              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2880                 push_dl_tasks(rq);               2275                 push_dl_tasks(rq);
2881         }                                        2276         }
2882 }                                                2277 }
2883                                                  2278 
2884 static void set_cpus_allowed_dl(struct task_s    2279 static void set_cpus_allowed_dl(struct task_struct *p,
2885                                 struct affini !! 2280                                 const struct cpumask *new_mask)
2886 {                                                2281 {
2887         struct root_domain *src_rd;              2282         struct root_domain *src_rd;
2888         struct rq *rq;                           2283         struct rq *rq;
2889                                                  2284 
2890         WARN_ON_ONCE(!dl_task(p));            !! 2285         BUG_ON(!dl_task(p));
2891                                                  2286 
2892         rq = task_rq(p);                         2287         rq = task_rq(p);
2893         src_rd = rq->rd;                         2288         src_rd = rq->rd;
2894         /*                                       2289         /*
2895          * Migrating a SCHED_DEADLINE task be    2290          * Migrating a SCHED_DEADLINE task between exclusive
2896          * cpusets (different root_domains) e    2291          * cpusets (different root_domains) entails a bandwidth
2897          * update. We already made space for     2292          * update. We already made space for us in the destination
2898          * domain (see cpuset_can_attach()).     2293          * domain (see cpuset_can_attach()).
2899          */                                      2294          */
2900         if (!cpumask_intersects(src_rd->span, !! 2295         if (!cpumask_intersects(src_rd->span, new_mask)) {
2901                 struct dl_bw *src_dl_b;          2296                 struct dl_bw *src_dl_b;
2902                                                  2297 
2903                 src_dl_b = dl_bw_of(cpu_of(rq    2298                 src_dl_b = dl_bw_of(cpu_of(rq));
2904                 /*                               2299                 /*
2905                  * We now free resources of t    2300                  * We now free resources of the root_domain we are migrating
2906                  * off. In the worst case, sc    2301                  * off. In the worst case, sched_setattr() may temporary fail
2907                  * until we complete the upda    2302                  * until we complete the update.
2908                  */                              2303                  */
2909                 raw_spin_lock(&src_dl_b->lock    2304                 raw_spin_lock(&src_dl_b->lock);
2910                 __dl_sub(src_dl_b, p->dl.dl_b    2305                 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2911                 raw_spin_unlock(&src_dl_b->lo    2306                 raw_spin_unlock(&src_dl_b->lock);
2912         }                                        2307         }
2913                                                  2308 
2914         set_cpus_allowed_common(p, ctx);      !! 2309         set_cpus_allowed_common(p, new_mask);
2915 }                                                2310 }
2916                                                  2311 
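The bandwidth side of set_cpus_allowed_dl() is easy to lose among the locking: only when the new mask no longer intersects the old root domain's span is the task really leaving that domain, and only then is its utilization subtracted from the source dl_bw ledger; the destination was already charged at cpuset_can_attach() time, as the comment says. A toy ledger showing just that condition (types and names are invented; the real __dl_sub() lives elsewhere in the scheduler):

#include <stdbool.h>
#include <stdint.h>

struct fake_dl_bw {
	uint64_t total_bw;   /* sum of admitted utilizations, fixed point */
};

/*
 * Affinity change for a task of utilization @task_bw: release the
 * bandwidth from the source domain only if the new mask shares no CPU
 * with the old domain's span, i.e. the task is truly moving away.
 */
void release_src_bandwidth(struct fake_dl_bw *src_dl_b, uint64_t src_span,
			   uint64_t new_mask, uint64_t task_bw)
{
	if ((src_span & new_mask) == 0)
		src_dl_b->total_bw -= task_bw;
}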
2917 /* Assumes rq->lock is held */                   2312 /* Assumes rq->lock is held */
2918 static void rq_online_dl(struct rq *rq)          2313 static void rq_online_dl(struct rq *rq)
2919 {                                                2314 {
2920         if (rq->dl.overloaded)                   2315         if (rq->dl.overloaded)
2921                 dl_set_overload(rq);             2316                 dl_set_overload(rq);
2922                                                  2317 
2923         cpudl_set_freecpu(&rq->rd->cpudl, rq-    2318         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2924         if (rq->dl.dl_nr_running > 0)            2319         if (rq->dl.dl_nr_running > 0)
2925                 cpudl_set(&rq->rd->cpudl, rq-    2320                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2926 }                                                2321 }
2927                                                  2322 
2928 /* Assumes rq->lock is held */                   2323 /* Assumes rq->lock is held */
2929 static void rq_offline_dl(struct rq *rq)         2324 static void rq_offline_dl(struct rq *rq)
2930 {                                                2325 {
2931         if (rq->dl.overloaded)                   2326         if (rq->dl.overloaded)
2932                 dl_clear_overload(rq);           2327                 dl_clear_overload(rq);
2933                                                  2328 
2934         cpudl_clear(&rq->rd->cpudl, rq->cpu);    2329         cpudl_clear(&rq->rd->cpudl, rq->cpu);
2935         cpudl_clear_freecpu(&rq->rd->cpudl, r    2330         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2936 }                                                2331 }
2937                                                  2332 
2938 void __init init_sched_dl_class(void)            2333 void __init init_sched_dl_class(void)
2939 {                                                2334 {
2940         unsigned int i;                          2335         unsigned int i;
2941                                                  2336 
2942         for_each_possible_cpu(i)                 2337         for_each_possible_cpu(i)
2943                 zalloc_cpumask_var_node(&per_    2338                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2944                                         GFP_K    2339                                         GFP_KERNEL, cpu_to_node(i));
2945 }                                                2340 }
2946                                                  2341 
2947 void dl_add_task_root_domain(struct task_stru    2342 void dl_add_task_root_domain(struct task_struct *p)
2948 {                                                2343 {
2949         struct rq_flags rf;                      2344         struct rq_flags rf;
2950         struct rq *rq;                           2345         struct rq *rq;
2951         struct dl_bw *dl_b;                      2346         struct dl_bw *dl_b;
2952                                                  2347 
2953         raw_spin_lock_irqsave(&p->pi_lock, rf !! 2348         rq = task_rq_lock(p, &rf);
2954         if (!dl_task(p)) {                    !! 2349         if (!dl_task(p))
2955                 raw_spin_unlock_irqrestore(&p !! 2350                 goto unlock;
2956                 return;                       << 
2957         }                                     << 
2958                                               << 
2959         rq = __task_rq_lock(p, &rf);          << 
2960                                                  2351 
2961         dl_b = &rq->rd->dl_bw;                   2352         dl_b = &rq->rd->dl_bw;
2962         raw_spin_lock(&dl_b->lock);              2353         raw_spin_lock(&dl_b->lock);
2963                                                  2354 
2964         __dl_add(dl_b, p->dl.dl_bw, cpumask_w    2355         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2965                                                  2356 
2966         raw_spin_unlock(&dl_b->lock);            2357         raw_spin_unlock(&dl_b->lock);
2967                                                  2358 
                                                   >> 2359 unlock:
2968         task_rq_unlock(rq, p, &rf);              2360         task_rq_unlock(rq, p, &rf);
2969 }                                                2361 }
2970                                                  2362 
2971 void dl_clear_root_domain(struct root_domain     2363 void dl_clear_root_domain(struct root_domain *rd)
2972 {                                                2364 {
2973         unsigned long flags;                     2365         unsigned long flags;
2974                                                  2366 
2975         raw_spin_lock_irqsave(&rd->dl_bw.lock    2367         raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2976         rd->dl_bw.total_bw = 0;                  2368         rd->dl_bw.total_bw = 0;
2977         raw_spin_unlock_irqrestore(&rd->dl_bw    2369         raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2978 }                                                2370 }
2979                                                  2371 
2980 #endif /* CONFIG_SMP */                          2372 #endif /* CONFIG_SMP */
2981                                                  2373 
2982 static void switched_from_dl(struct rq *rq, s    2374 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2983 {                                                2375 {
2984         /*                                       2376         /*
2985          * task_non_contending() can start th    2377          * task_non_contending() can start the "inactive timer" (if the 0-lag
2986          * time is in the future). If the tas    2378          * time is in the future). If the task switches back to dl before
2987          * the "inactive timer" fires, it can    2379          * the "inactive timer" fires, it can continue to consume its current
2988          * runtime using its current deadline    2380          * runtime using its current deadline. If it stays outside of
2989          * SCHED_DEADLINE until the 0-lag tim    2381          * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2990          * will reset the task parameters.       2382          * will reset the task parameters.
2991          */                                      2383          */
2992         if (task_on_rq_queued(p) && p->dl.dl_    2384         if (task_on_rq_queued(p) && p->dl.dl_runtime)
2993                 task_non_contending(&p->dl);  !! 2385                 task_non_contending(p);
2994                                               << 
2995         /*                                    << 
2996          * In case a task is setscheduled out << 
2997          * keep track of that on its cpuset ( << 
2998          */                                   << 
2999         dec_dl_tasks_cs(p);                   << 
3000                                                  2386 
3001         if (!task_on_rq_queued(p)) {             2387         if (!task_on_rq_queued(p)) {
3002                 /*                               2388                 /*
3003                  * Inactive timer is armed. H    2389                  * Inactive timer is armed. However, p is leaving DEADLINE and
3004                  * might migrate away from th    2390                  * might migrate away from this rq while continuing to run on
3005                  * some other class. We need     2391                  * some other class. We need to remove its contribution from
3006                  * this rq running_bw now, or    2392                  * this rq running_bw now, or sub_rq_bw (below) will complain.
3007                  */                              2393                  */
3008                 if (p->dl.dl_non_contending)     2394                 if (p->dl.dl_non_contending)
3009                         sub_running_bw(&p->dl    2395                         sub_running_bw(&p->dl, &rq->dl);
3010                 sub_rq_bw(&p->dl, &rq->dl);      2396                 sub_rq_bw(&p->dl, &rq->dl);
3011         }                                        2397         }
3012                                                  2398 
3013         /*                                       2399         /*
3014          * We cannot use inactive_task_timer(    2400          * We cannot use inactive_task_timer() to invoke sub_running_bw()
3015          * at the 0-lag time, because the tas    2401          * at the 0-lag time, because the task could have been migrated
3016          * while SCHED_OTHER in the meanwhile    2402          * while SCHED_OTHER in the meanwhile.
3017          */                                      2403          */
3018         if (p->dl.dl_non_contending)             2404         if (p->dl.dl_non_contending)
3019                 p->dl.dl_non_contending = 0;     2405                 p->dl.dl_non_contending = 0;
3020                                                  2406 
3021         /*                                       2407         /*
3022          * Since this might be the only -dead    2408          * Since this might be the only -deadline task on the rq,
3023          * this is the right place to try to     2409          * this is the right place to try to pull some other one
3024          * from an overloaded CPU, if any.       2410          * from an overloaded CPU, if any.
3025          */                                      2411          */
3026         if (!task_on_rq_queued(p) || rq->dl.d    2412         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3027                 return;                          2413                 return;
3028                                                  2414 
3029         deadline_queue_pull_task(rq);            2415         deadline_queue_pull_task(rq);
3030 }                                                2416 }
3031                                                  2417 
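switched_from_dl() leans on the "0-lag time" idea: the task's bandwidth may only be released once its remaining runtime, consumed at exactly the reserved rate dl_runtime/dl_period, would have reached zero; until then the inactive timer keeps it in running_bw. task_non_contending(), defined earlier in this file and not shown here, computes that instant as roughly deadline - runtime * dl_period / dl_runtime; the helper below merely restates that arithmetic for illustration, ignoring overflow and the corner cases the kernel has to handle:

#include <stdint.h>

/*
 * 0-lag time of a deadline entity: the moment at which its remaining
 * @runtime, if consumed at the reserved rate dl_runtime/dl_period,
 * would be exhausted.  Releasing the bandwidth before this point could
 * let the task exceed its reservation after a quick sleep/wake cycle.
 */
uint64_t zero_lag_time(uint64_t deadline, uint64_t runtime,
		       uint64_t dl_runtime, uint64_t dl_period)
{
	/* time still needed to serve @runtime at the reserved rate */
	uint64_t needed = runtime * dl_period / dl_runtime;

	return deadline - needed;
}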
3032 /*                                               2418 /*
3033  * When switching to -deadline, we may overlo    2419  * When switching to -deadline, we may overload the rq, then
3034  * we try to push someone off, if possible.      2420  * we try to push someone off, if possible.
3035  */                                              2421  */
3036 static void switched_to_dl(struct rq *rq, str    2422 static void switched_to_dl(struct rq *rq, struct task_struct *p)
3037 {                                                2423 {
3038         if (hrtimer_try_to_cancel(&p->dl.inac    2424         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
3039                 put_task_struct(p);              2425                 put_task_struct(p);
3040                                                  2426 
3041         /*                                    << 
3042          * In case a task is setscheduled to  << 
3043          * track of that on its cpuset (for c << 
3044          */                                   << 
3045         inc_dl_tasks_cs(p);                   << 
3046                                               << 
3047         /* If p is not queued we will update     2427         /* If p is not queued we will update its parameters at next wakeup. */
3048         if (!task_on_rq_queued(p)) {             2428         if (!task_on_rq_queued(p)) {
3049                 add_rq_bw(&p->dl, &rq->dl);      2429                 add_rq_bw(&p->dl, &rq->dl);
3050                                                  2430 
3051                 return;                          2431                 return;
3052         }                                        2432         }
3053                                                  2433 
3054         if (rq->curr != p) {                     2434         if (rq->curr != p) {
3055 #ifdef CONFIG_SMP                                2435 #ifdef CONFIG_SMP
3056                 if (p->nr_cpus_allowed > 1 &&    2436                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
3057                         deadline_queue_push_t    2437                         deadline_queue_push_tasks(rq);
3058 #endif                                           2438 #endif
3059                 if (dl_task(rq->curr))           2439                 if (dl_task(rq->curr))
3060                         wakeup_preempt_dl(rq, !! 2440                         check_preempt_curr_dl(rq, p, 0);
3061                 else                             2441                 else
3062                         resched_curr(rq);        2442                         resched_curr(rq);
3063         } else {                              << 
3064                 update_dl_rq_load_avg(rq_cloc << 
3065         }                                        2443         }
3066 }                                                2444 }
3067                                                  2445 
3068 /*                                               2446 /*
3069  * If the scheduling parameters of a -deadlin    2447  * If the scheduling parameters of a -deadline task changed,
3070  * a push or pull operation might be needed.     2448  * a push or pull operation might be needed.
3071  */                                              2449  */
3072 static void prio_changed_dl(struct rq *rq, st    2450 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
3073                             int oldprio)         2451                             int oldprio)
3074 {                                                2452 {
3075         if (!task_on_rq_queued(p))            !! 2453         if (task_on_rq_queued(p) || rq->curr == p) {
3076                 return;                       << 
3077                                               << 
3078 #ifdef CONFIG_SMP                                2454 #ifdef CONFIG_SMP
3079         /*                                    !! 2455                 /*
3080          * This might be too much, but unfort !! 2456                  * This might be too much, but unfortunately
3081          * we don't have the old deadline val !! 2457                  * we don't have the old deadline value, and
3082          * we can't argue if the task is incr !! 2458                  * we can't argue if the task is increasing
3083          * or lowering its prio, so...        !! 2459                  * or lowering its prio, so...
3084          */                                   !! 2460                  */
3085         if (!rq->dl.overloaded)               !! 2461                 if (!rq->dl.overloaded)
3086                 deadline_queue_pull_task(rq); !! 2462                         deadline_queue_pull_task(rq);
3087                                                  2463 
3088         if (task_current(rq, p)) {            << 
3089                 /*                               2464                 /*
3090                  * If we now have a earlier d    2465                  * If we now have a earlier deadline task than p,
3091                  * then reschedule, provided     2466                  * then reschedule, provided p is still on this
3092                  * runqueue.                     2467                  * runqueue.
3093                  */                              2468                  */
3094                 if (dl_time_before(rq->dl.ear    2469                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3095                         resched_curr(rq);        2470                         resched_curr(rq);
3096         } else {                              !! 2471 #else
3097                 /*                               2472                 /*
3098                  * Current may not be deadlin !! 2473                  * Again, we don't know if p has a earlier
3099                  * have just replenished it ( !! 2474                  * or later deadline, so let's blindly set a
3100                  *                            !! 2475                  * (maybe not needed) rescheduling point.
3101                  * Otherwise, if p was given  << 
3102                  */                              2476                  */
3103                 if (!dl_task(rq->curr) ||     !! 2477                 resched_curr(rq);
3104                     dl_time_before(p->dl.dead !! 2478 #endif /* CONFIG_SMP */
3105                         resched_curr(rq);     << 
3106         }                                        2479         }
3107 #else                                         << 
3108         /*                                    << 
3109          * We don't know if p has a earlier o << 
3110          * set a (maybe not needed) reschedul << 
3111          */                                   << 
3112         resched_curr(rq);                     << 
3113 #endif                                        << 
3114 }                                             << 
3115                                               << 
3116 #ifdef CONFIG_SCHED_CORE                      << 
3117 static int task_is_throttled_dl(struct task_s << 
3118 {                                             << 
3119         return p->dl.dl_throttled;            << 
3120 }                                                2480 }
3121 #endif                                        << 
3122                                               << 
3123 DEFINE_SCHED_CLASS(dl) = {                    << 
3124                                                  2481 
                                                   >> 2482 const struct sched_class dl_sched_class
                                                   >> 2483         __attribute__((section("__dl_sched_class"))) = {
3125         .enqueue_task           = enqueue_tas    2484         .enqueue_task           = enqueue_task_dl,
3126         .dequeue_task           = dequeue_tas    2485         .dequeue_task           = dequeue_task_dl,
3127         .yield_task             = yield_task_    2486         .yield_task             = yield_task_dl,
3128                                                  2487 
3129         .wakeup_preempt         = wakeup_pree !! 2488         .check_preempt_curr     = check_preempt_curr_dl,
3130                                                  2489 
3131         .pick_task              = pick_task_d !! 2490         .pick_next_task         = pick_next_task_dl,
3132         .put_prev_task          = put_prev_ta    2491         .put_prev_task          = put_prev_task_dl,
3133         .set_next_task          = set_next_ta    2492         .set_next_task          = set_next_task_dl,
3134                                                  2493 
3135 #ifdef CONFIG_SMP                                2494 #ifdef CONFIG_SMP
3136         .balance                = balance_dl,    2495         .balance                = balance_dl,
3137         .select_task_rq         = select_task    2496         .select_task_rq         = select_task_rq_dl,
3138         .migrate_task_rq        = migrate_tas    2497         .migrate_task_rq        = migrate_task_rq_dl,
3139         .set_cpus_allowed       = set_cpus_al    2498         .set_cpus_allowed       = set_cpus_allowed_dl,
3140         .rq_online              = rq_online_d    2499         .rq_online              = rq_online_dl,
3141         .rq_offline             = rq_offline_    2500         .rq_offline             = rq_offline_dl,
3142         .task_woken             = task_woken_    2501         .task_woken             = task_woken_dl,
3143         .find_lock_rq           = find_lock_l << 
3144 #endif                                           2502 #endif
3145                                                  2503 
3146         .task_tick              = task_tick_d    2504         .task_tick              = task_tick_dl,
3147         .task_fork              = task_fork_d    2505         .task_fork              = task_fork_dl,
3148                                                  2506 
3149         .prio_changed           = prio_change    2507         .prio_changed           = prio_changed_dl,
3150         .switched_from          = switched_fr    2508         .switched_from          = switched_from_dl,
3151         .switched_to            = switched_to    2509         .switched_to            = switched_to_dl,
3152                                                  2510 
3153         .update_curr            = update_curr    2511         .update_curr            = update_curr_dl,
3154 #ifdef CONFIG_SCHED_CORE                      << 
3155         .task_is_throttled      = task_is_thr << 
3156 #endif                                        << 
3157 };                                               2512 };
3158                                                  2513 
3159 /* Used for dl_bw check and update, used unde << 
3160 static u64 dl_generation;                     << 
3161                                               << 
3162 int sched_dl_global_validate(void)               2514 int sched_dl_global_validate(void)
3163 {                                                2515 {
3164         u64 runtime = global_rt_runtime();       2516         u64 runtime = global_rt_runtime();
3165         u64 period = global_rt_period();         2517         u64 period = global_rt_period();
3166         u64 new_bw = to_ratio(period, runtime    2518         u64 new_bw = to_ratio(period, runtime);
3167         u64 gen = ++dl_generation;            << 
3168         struct dl_bw *dl_b;                      2519         struct dl_bw *dl_b;
3169         int cpu, cpus, ret = 0;               !! 2520         int cpu, ret = 0;
3170         unsigned long flags;                     2521         unsigned long flags;
3171                                                  2522 
3172         /*                                       2523         /*
3173          * Here we want to check the bandwidth not being set to some       2524          * Here we want to check the bandwidth not being set to some
3174          * value smaller than the currently allocated bandwidth in          2525          * value smaller than the currently allocated bandwidth in
3175          * any of the root_domains.              2526          * any of the root_domains.
                                                   >> 2527          *
                                                   >> 2528          * FIXME: Cycling on all the CPUs is overdoing, but simpler than
                                                   >> 2529          * cycling on root_domains... Discussion on different/better
                                                   >> 2530          * solutions is welcome!
3176          */                                      2531          */
3177         for_each_possible_cpu(cpu) {             2532         for_each_possible_cpu(cpu) {
3178                 rcu_read_lock_sched();           2533                 rcu_read_lock_sched();
3179                                               << 
3180                 if (dl_bw_visited(cpu, gen))  << 
3181                         goto next;            << 
3182                                               << 
3183                 dl_b = dl_bw_of(cpu);            2534                 dl_b = dl_bw_of(cpu);
3184                 cpus = dl_bw_cpus(cpu);       << 
3185                                                  2535 
3186                 raw_spin_lock_irqsave(&dl_b->lock, flags);            2536                 raw_spin_lock_irqsave(&dl_b->lock, flags);
3187                 if (new_bw * cpus < dl_b->total_bw)                !! 2537                 if (new_bw < dl_b->total_bw)
3188                         ret = -EBUSY;                                 2538                         ret = -EBUSY;
3189                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);       2539                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3190                                                  2540 
3191 next:                                         << 
3192                 rcu_read_unlock_sched();         2541                 rcu_read_unlock_sched();
3193                                                  2542 
3194                 if (ret)                         2543                 if (ret)
3195                         break;                   2544                         break;
3196         }                                        2545         }
3197                                                  2546 
3198         return ret;                              2547         return ret;
3199 }                                                2548 }
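
The check above boils down to fixed-point arithmetic: to_ratio() turns runtime/period into a Q20 fraction (assuming BW_SHIFT is 20, as in kernel/sched/sched.h), and the new global limit scaled by the number of CPUs in the root domain must still cover the bandwidth already reserved there. A minimal userspace sketch of that arithmetic, for illustration only (not part of deadline.c):

/* Illustrative sketch only -- mirrors the new_bw * cpus < total_bw test above. */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20                     /* fixed-point shift, as in kernel/sched/sched.h */

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	/* bandwidth as a Q20 fixed-point fraction: runtime / period */
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	/* Defaults: sched_rt_period_us = 1000000, sched_rt_runtime_us = 950000 */
	uint64_t new_bw   = to_ratio(1000000, 950000);  /* ~0.95 * 2^20 = 996147 */
	uint64_t total_bw = 3 * to_ratio(100, 40);      /* three 40%-utilization DL tasks */
	int cpus = 2;                                   /* CPUs in the root domain */

	/* Same shape as the check in sched_dl_global_validate() */
	if (new_bw * cpus < total_bw)
		printf("rejected: -EBUSY\n");
	else
		printf("accepted\n");
	return 0;
}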
3200                                                  2549 
3201 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)                  2550 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
3202 {                                                                     2551 {
3203         if (global_rt_runtime() == RUNTIME_INF) {                     2552         if (global_rt_runtime() == RUNTIME_INF) {
3204                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;                   2553                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
3205                 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;   !! 2554                 dl_rq->extra_bw = 1 << BW_SHIFT;
3206         } else {                                                      2555         } else {
3207                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),       2556                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
3208                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);    2557                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
3209                 dl_rq->max_bw = dl_rq->extra_bw =                  !! 2558                 dl_rq->extra_bw = to_ratio(global_rt_period(),
3210                         to_ratio(global_rt_period(), global_rt_runtime());    !! 2559                                                     global_rt_runtime());
3211         }                                        2560         }
3212 }                                                2561 }
3213                                                  2562 
3214 void sched_dl_do_global(void)                    2563 void sched_dl_do_global(void)
3215 {                                                2564 {
3216         u64 new_bw = -1;                         2565         u64 new_bw = -1;
3217         u64 gen = ++dl_generation;            << 
3218         struct dl_bw *dl_b;                      2566         struct dl_bw *dl_b;
3219         int cpu;                                 2567         int cpu;
3220         unsigned long flags;                     2568         unsigned long flags;
3221                                                  2569 
                                                   >> 2570         def_dl_bandwidth.dl_period = global_rt_period();
                                                   >> 2571         def_dl_bandwidth.dl_runtime = global_rt_runtime();
                                                   >> 2572 
3222         if (global_rt_runtime() != RUNTIME_INF)                            2573         if (global_rt_runtime() != RUNTIME_INF)
3223                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());    2574                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
3224                                                  2575 
                                                   >> 2576         /*
                                                   >> 2577          * FIXME: As above...
                                                   >> 2578          */
3225         for_each_possible_cpu(cpu) {             2579         for_each_possible_cpu(cpu) {
3226                 rcu_read_lock_sched();           2580                 rcu_read_lock_sched();
3227                                               << 
3228                 if (dl_bw_visited(cpu, gen)) {        <<
3229                         rcu_read_unlock_sched();      <<
3230                         continue;             << 
3231                 }                             << 
3232                                               << 
3233                 dl_b = dl_bw_of(cpu);            2581                 dl_b = dl_bw_of(cpu);
3234                                                  2582 
3235                 raw_spin_lock_irqsave(&dl_b->lock, flags);            2583                 raw_spin_lock_irqsave(&dl_b->lock, flags);
3236                 dl_b->bw = new_bw;                                    2584                 dl_b->bw = new_bw;
3237                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);       2585                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3238                                                  2586 
3239                 rcu_read_unlock_sched();         2587                 rcu_read_unlock_sched();
3240                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);                2588                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
3241         }                                        2589         }
3242 }                                                2590 }
3243                                                  2591 
3244 /*                                               2592 /*
3245  * We must be sure that accepting a new task (or allowing changing the       2593  * We must be sure that accepting a new task (or allowing changing the
3246  * parameters of an existing one) is consistent with the bandwidth           2594  * parameters of an existing one) is consistent with the bandwidth
3247  * constraints. If yes, this function also accordingly updates the currently    2595  * constraints. If yes, this function also accordingly updates the currently
3248  * allocated bandwidth to reflect the new situation.                         2596  * allocated bandwidth to reflect the new situation.
3249  *                                                                           2597  *
3250  * This function is called while holding p's rq->lock.                       2598  * This function is called while holding p's rq->lock.
3251  */                                                                          2599  */
3252 int sched_dl_overflow(struct task_struct *p, int policy,                     2600 int sched_dl_overflow(struct task_struct *p, int policy,
3253                       const struct sched_attr *attr)                        2601                       const struct sched_attr *attr)
3254 {                                                                            2602 {
3255         u64 period = attr->sched_period ?: attr->sched_deadline;             2603         u64 period = attr->sched_period ?: attr->sched_deadline;
3256         u64 runtime = attr->sched_runtime;                                   2604         u64 runtime = attr->sched_runtime;
3257         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;      2605         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
3258         int cpus, err = -1, cpu = task_cpu(p);                               2606         int cpus, err = -1, cpu = task_cpu(p);
3259         struct dl_bw *dl_b = dl_bw_of(cpu);      2607         struct dl_bw *dl_b = dl_bw_of(cpu);
3260         unsigned long cap;                       2608         unsigned long cap;
3261                                                  2609 
3262         if (attr->sched_flags & SCHED_FLAG_SUGOV)                     2610         if (attr->sched_flags & SCHED_FLAG_SUGOV)
3263                 return 0;                                             2611                 return 0;
3264                                                                       2612 
3265         /* !deadline task may carry old deadline bandwidth */         2613         /* !deadline task may carry old deadline bandwidth */
3266         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))           2614         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
3267                 return 0;                        2615                 return 0;
3268                                                  2616 
3269         /*                                       2617         /*
3270          * Whether a task enters, leaves, or stays -deadline but changes its    2618          * Whether a task enters, leaves, or stays -deadline but changes its
3271          * parameters, we may need to update the total allocated bandwidth      2619          * parameters, we may need to update the total allocated bandwidth
3272          * of the container accordingly.                                        2620          * of the container accordingly.
3273          */                                      2621          */
3274         raw_spin_lock(&dl_b->lock);              2622         raw_spin_lock(&dl_b->lock);
3275         cpus = dl_bw_cpus(cpu);                  2623         cpus = dl_bw_cpus(cpu);
3276         cap = dl_bw_capacity(cpu);               2624         cap = dl_bw_capacity(cpu);
3277                                                  2625 
3278         if (dl_policy(policy) && !task_has_dl_policy(p) &&                2626         if (dl_policy(policy) && !task_has_dl_policy(p) &&
3279             !__dl_overflow(dl_b, cap, 0, new_bw)) {                       2627             !__dl_overflow(dl_b, cap, 0, new_bw)) {
3280                 if (hrtimer_active(&p->dl.inactive_timer))                2628                 if (hrtimer_active(&p->dl.inactive_timer))
3281                         __dl_sub(dl_b, p->dl.dl_bw, cpus);                2629                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
3282                 __dl_add(dl_b, new_bw, cpus);                             2630                 __dl_add(dl_b, new_bw, cpus);
3283                 err = 0;                                                  2631                 err = 0;
3284         } else if (dl_policy(policy) && task_has_dl_policy(p) &&          2632         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
3285                    !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {      2633                    !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
3286                 /*                                                        2634                 /*
3287                  * XXX this is slightly incorrect: when the task          2635                  * XXX this is slightly incorrect: when the task
3288                  * utilization decreases, we should delay the total       2636                  * utilization decreases, we should delay the total
3289                  * utilization change until the task's 0-lag point.       2637                  * utilization change until the task's 0-lag point.
3290                  * But this would require to set the task's "inactive     2638                  * But this would require to set the task's "inactive
3291                  * timer" when the task is not inactive.                  2639                  * timer" when the task is not inactive.
3292                  */                                                       2640                  */
3293                 __dl_sub(dl_b, p->dl.dl_bw, cpus);                        2641                 __dl_sub(dl_b, p->dl.dl_bw, cpus);
3294                 __dl_add(dl_b, new_bw, cpus);                             2642                 __dl_add(dl_b, new_bw, cpus);
3295                 dl_change_utilization(p, new_bw);                         2643                 dl_change_utilization(p, new_bw);
3296                 err = 0;                                                  2644                 err = 0;
3297         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {         2645         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
3298                 /*                                                        2646                 /*
3299                  * Do not decrease the total deadline utilization here,   2647                  * Do not decrease the total deadline utilization here,
3300                  * switched_from_dl() will take care to do it at the correct    2648                  * switched_from_dl() will take care to do it at the correct
3301                  * (0-lag) time.                 2649                  * (0-lag) time.
3302                  */                              2650                  */
3303                 err = 0;                         2651                 err = 0;
3304         }                                        2652         }
3305         raw_spin_unlock(&dl_b->lock);            2653         raw_spin_unlock(&dl_b->lock);
3306                                                  2654 
3307         return err;                              2655         return err;
3308 }                                                2656 }
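
sched_dl_overflow() is the admission-control step behind sched_setattr(2): if the requested bandwidth does not fit in the root domain, the syscall fails and userspace sees EBUSY. A minimal userspace sketch of requesting SCHED_DEADLINE (illustrative; there is no glibc wrapper, so it goes through syscall(2), and it needs CAP_SYS_NICE/root):

/* Minimal sketch: request SCHED_DEADLINE from userspace; EBUSY here means the
 * bandwidth check in sched_dl_overflow() (via __dl_overflow()) refused the task. */
#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	/* SCHED_DEADLINE parameters, in nanoseconds */
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_runtime  = 10 * 1000 * 1000,     /* 10 ms  */
		.sched_deadline = 30 * 1000 * 1000,     /* 30 ms  */
		.sched_period   = 100 * 1000 * 1000,    /* 100 ms */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0) < 0) {
		/* EBUSY: admission control (bandwidth) rejected the reservation */
		fprintf(stderr, "sched_setattr: %s\n", strerror(errno));
		return 1;
	}
	puts("admitted as SCHED_DEADLINE");
	return 0;
}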
3309                                                  2657 
3310 /*                                               2658 /*
3311  * This function initializes the sched_dl_entity of a newly becoming         2659  * This function initializes the sched_dl_entity of a newly becoming
3312  * SCHED_DEADLINE task.                                                      2660  * SCHED_DEADLINE task.
3313  *                                                                           2661  *
3314  * Only the static values are considered here, the actual runtime and the    2662  * Only the static values are considered here, the actual runtime and the
3315  * absolute deadline will be properly calculated when the task is enqueued   2663  * absolute deadline will be properly calculated when the task is enqueued
3316  * for the first time with its new policy.                                   2664  * for the first time with its new policy.
3317  */                                                                          2665  */
3318 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)     2666 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3319 {                                                                            2667 {
3320         struct sched_dl_entity *dl_se = &p->dl;                              2668         struct sched_dl_entity *dl_se = &p->dl;
3321                                                                              2669 
3322         dl_se->dl_runtime = attr->sched_runtime;                             2670         dl_se->dl_runtime = attr->sched_runtime;
3323         dl_se->dl_deadline = attr->sched_deadline;                           2671         dl_se->dl_deadline = attr->sched_deadline;
3324         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;         2672         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3325         dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;                !! 2673         dl_se->flags = attr->sched_flags;
3326         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);        2674         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3327         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);    2675         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3328 }                                                2676 }
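
For a concrete feel of the two fixed-point values derived above: with runtime 10 ms, deadline 30 ms and period 100 ms, dl_bw is runtime/period (~0.10) and dl_density is runtime/deadline (~0.33), both stored as Q20 fractions (assuming BW_SHIFT is 20, as in kernel/sched/sched.h). A small illustrative sketch, not kernel code:

/* Sketch of the fixed-point parameters __setparam_dl() derives.  Assumes
 * BW_SHIFT == 20 (kernel/sched/sched.h); userspace illustration only. */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t runtime  = 10000000;   /* 10 ms  */
	uint64_t deadline = 30000000;   /* 30 ms  */
	uint64_t period   = 100000000;  /* 100 ms */

	/* dl_bw: runtime/period, dl_density: runtime/deadline, both in Q20 */
	printf("dl_bw      = %llu (~%.2f)\n",
	       (unsigned long long)to_ratio(period, runtime),
	       (double)runtime / period);
	printf("dl_density = %llu (~%.2f)\n",
	       (unsigned long long)to_ratio(deadline, runtime),
	       (double)runtime / deadline);
	return 0;
}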
3329                                                  2677 
3330 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)    2678 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3331 {                                                                     2679 {
3332         struct sched_dl_entity *dl_se = &p->dl;                       2680         struct sched_dl_entity *dl_se = &p->dl;
3333                                                                       2681 
3334         attr->sched_priority = p->rt_priority;                        2682         attr->sched_priority = p->rt_priority;
3335         attr->sched_runtime = dl_se->dl_runtime;                      2683         attr->sched_runtime = dl_se->dl_runtime;
3336         attr->sched_deadline = dl_se->dl_deadline;                    2684         attr->sched_deadline = dl_se->dl_deadline;
3337         attr->sched_period = dl_se->dl_period;                        2685         attr->sched_period = dl_se->dl_period;
3338         attr->sched_flags &= ~SCHED_DL_FLAGS; !! 2686         attr->sched_flags = dl_se->flags;
3339         attr->sched_flags |= dl_se->flags;    << 
3340 }                                                2687 }
3341                                                  2688 
3342 /*                                               2689 /*
                                                   >> 2690  * Default limits for DL period; on the top end we guard against small util
                                                   >> 2691  * tasks still getting ridiculously long effective runtimes, on the bottom end we
                                                   >> 2692  * guard against timer DoS.
                                                   >> 2693  */
                                                   >> 2694 unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
                                                   >> 2695 unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
                                                   >> 2696 
                                                   >> 2697 /*
3343  * This function validates the new parameters of a -deadline task.           2698  * This function validates the new parameters of a -deadline task.
3344  * We ask for the deadline not being zero and greater than or equal          2699  * We ask for the deadline not being zero and greater than or equal
3345  * to the runtime, and for the period to be either zero or greater           2700  * to the runtime, and for the period to be either zero or greater
3346  * than or equal to the deadline. Furthermore, we have to be sure            2701  * than or equal to the deadline. Furthermore, we have to be sure
3347  * that user parameters are above the internal resolution of 1us (we         2702  * that user parameters are above the internal resolution of 1us (we
3348  * check sched_runtime only since it is always the smaller one) and          2703  * check sched_runtime only since it is always the smaller one) and
3349  * below 2^63 ns (we have to check both sched_deadline and                   2704  * below 2^63 ns (we have to check both sched_deadline and
3350  * sched_period, as the latter can be zero).     2705  * sched_period, as the latter can be zero).
3351  */                                              2706  */
3352 bool __checkparam_dl(const struct sched_attr *attr)                   2707 bool __checkparam_dl(const struct sched_attr *attr)
3353 {                                                2708 {
3354         u64 period, max, min;                    2709         u64 period, max, min;
3355                                                  2710 
3356         /* special dl tasks don't actually use any parameter */       2711         /* special dl tasks don't actually use any parameter */
3357         if (attr->sched_flags & SCHED_FLAG_SUGOV)                     2712         if (attr->sched_flags & SCHED_FLAG_SUGOV)
3358                 return true;                     2713                 return true;
3359                                                  2714 
3360         /* deadline != 0 */                      2715         /* deadline != 0 */
3361         if (attr->sched_deadline == 0)           2716         if (attr->sched_deadline == 0)
3362                 return false;                    2717                 return false;
3363                                                  2718 
3364         /*                                       2719         /*
3365          * Since we truncate DL_SCALE bits, make sure we're at least    2720          * Since we truncate DL_SCALE bits, make sure we're at least
3366          * that big.                                                    2721          * that big.
3367          */                                                             2722          */
3368         if (attr->sched_runtime < (1ULL << DL_SCALE))                   2723         if (attr->sched_runtime < (1ULL << DL_SCALE))
3369                 return false;                    2724                 return false;
3370                                                  2725 
3371         /*                                       2726         /*
3372          * Since we use the MSB for wrap-around and sign issues, make    2727          * Since we use the MSB for wrap-around and sign issues, make
3373          * sure it's not set (mind that period can be equal to zero).    2728          * sure it's not set (mind that period can be equal to zero).
3374          */                                                              2729          */
3375         if (attr->sched_deadline & (1ULL << 63) ||                       2730         if (attr->sched_deadline & (1ULL << 63) ||
3376             attr->sched_period & (1ULL << 63))                           2731             attr->sched_period & (1ULL << 63))
3377                 return false;                    2732                 return false;
3378                                                  2733 
3379         period = attr->sched_period;             2734         period = attr->sched_period;
3380         if (!period)                             2735         if (!period)
3381                 period = attr->sched_deadline;                        2736                 period = attr->sched_deadline;
3382                                                                       2737 
3383         /* runtime <= deadline <= period (if period != 0) */          2738         /* runtime <= deadline <= period (if period != 0) */
3384         if (period < attr->sched_deadline ||                          2739         if (period < attr->sched_deadline ||
3385             attr->sched_deadline < attr->sched_runtime)               2740             attr->sched_deadline < attr->sched_runtime)
3386                 return false;                    2741                 return false;
3387                                                  2742 
3388         max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;    2743         max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3389         min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;    2744         min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3390                                                  2745 
3391         if (period < min || period > max)        2746         if (period < min || period > max)
3392                 return false;                    2747                 return false;
3393                                                  2748 
3394         return true;                             2749         return true;
3395 }                                                2750 }
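
Putting the rules together, a parameter set passes only if 0 < runtime <= deadline <= period (with period defaulting to deadline), runtime is at least 1 << DL_SCALE ns, the top bit of deadline and period is clear, and the period falls inside the sysctl bounds shown above. A userspace mirror of those checks, for illustration only (assumes DL_SCALE == 10 and the default sysctl values):

/* Userspace mirror of __checkparam_dl()'s constraints -- an illustration only.
 * Assumes DL_SCALE == 10 and the default sysctl period bounds quoted above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DL_SCALE        10                      /* runtime must be >= 1 << 10 ns (~1us) */
#define PERIOD_MIN      (100ULL * 1000)         /* sysctl_sched_dl_period_min: 100 us, in ns */
#define PERIOD_MAX      ((1ULL << 22) * 1000)   /* sysctl_sched_dl_period_max: ~4 s, in ns */

static bool checkparam_dl(uint64_t runtime, uint64_t deadline, uint64_t period)
{
	if (deadline == 0)
		return false;
	if (runtime < (1ULL << DL_SCALE))
		return false;
	if ((deadline | period) & (1ULL << 63))
		return false;
	if (!period)
		period = deadline;
	/* runtime <= deadline <= period */
	if (period < deadline || deadline < runtime)
		return false;
	return period >= PERIOD_MIN && period <= PERIOD_MAX;
}

int main(void)
{
	/* 10ms/30ms/100ms: valid */
	printf("%d\n", checkparam_dl(10000000, 30000000, 100000000));
	/* runtime below the 1us resolution: rejected */
	printf("%d\n", checkparam_dl(500, 30000000, 100000000));
	/* deadline > period: rejected */
	printf("%d\n", checkparam_dl(10000000, 200000000, 100000000));
	return 0;
}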
3396                                                  2751 
3397 /*                                               2752 /*
3398  * This function clears the sched_dl_entity s    2753  * This function clears the sched_dl_entity static params.
3399  */                                              2754  */
3400 static void __dl_clear_params(struct sched_dl !! 2755 void __dl_clear_params(struct task_struct *p)
3401 {                                                2756 {
                                                   >> 2757         struct sched_dl_entity *dl_se = &p->dl;
                                                   >> 2758 
3402         dl_se->dl_runtime               = 0;     2759         dl_se->dl_runtime               = 0;
3403         dl_se->dl_deadline              = 0;     2760         dl_se->dl_deadline              = 0;
3404         dl_se->dl_period                = 0;     2761         dl_se->dl_period                = 0;
3405         dl_se->flags                    = 0;     2762         dl_se->flags                    = 0;
3406         dl_se->dl_bw                    = 0;     2763         dl_se->dl_bw                    = 0;
3407         dl_se->dl_density               = 0;     2764         dl_se->dl_density               = 0;
3408                                                  2765 
                                                   >> 2766         dl_se->dl_boosted               = 0;
3409         dl_se->dl_throttled             = 0;     2767         dl_se->dl_throttled             = 0;
3410         dl_se->dl_yielded               = 0;     2768         dl_se->dl_yielded               = 0;
3411         dl_se->dl_non_contending        = 0;     2769         dl_se->dl_non_contending        = 0;
3412         dl_se->dl_overrun               = 0;     2770         dl_se->dl_overrun               = 0;
3413         dl_se->dl_server                = 0;  << 
3414                                               << 
3415 #ifdef CONFIG_RT_MUTEXES                      << 
3416         dl_se->pi_se                    = dl_se;      <<
3417 #endif                                        << 
3418 }                                             << 
3419                                               << 
3420 void init_dl_entity(struct sched_dl_entity *dl_se)    <<
3421 {                                             << 
3422         RB_CLEAR_NODE(&dl_se->rb_node);       << 
3423         init_dl_task_timer(dl_se);            << 
3424         init_dl_inactive_task_timer(dl_se);   << 
3425         __dl_clear_params(dl_se);             << 
3426 }                                                2771 }
3427                                                  2772 
3428 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)    2773 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3429 {                                                                     2774 {
3430         struct sched_dl_entity *dl_se = &p->dl;                       2775         struct sched_dl_entity *dl_se = &p->dl;
3431                                                                       2776 
3432         if (dl_se->dl_runtime != attr->sched_runtime ||               2777         if (dl_se->dl_runtime != attr->sched_runtime ||
3433             dl_se->dl_deadline != attr->sched_deadline ||             2778             dl_se->dl_deadline != attr->sched_deadline ||
3434             dl_se->dl_period != attr->sched_period ||                 2779             dl_se->dl_period != attr->sched_period ||
3435             dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))  !! 2780             dl_se->flags != attr->sched_flags)
3436                 return true;                     2781                 return true;
3437                                                  2782 
3438         return false;                            2783         return false;
3439 }                                                2784 }
3440                                                  2785 
3441 #ifdef CONFIG_SMP                                2786 #ifdef CONFIG_SMP
                                                   >> 2787 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
                                                   >> 2788 {
                                                   >> 2789         unsigned long flags, cap;
                                                   >> 2790         unsigned int dest_cpu;
                                                   >> 2791         struct dl_bw *dl_b;
                                                   >> 2792         bool overflow;
                                                   >> 2793         int ret;
                                                   >> 2794 
                                                   >> 2795         dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
                                                   >> 2796 
                                                   >> 2797         rcu_read_lock_sched();
                                                   >> 2798         dl_b = dl_bw_of(dest_cpu);
                                                   >> 2799         raw_spin_lock_irqsave(&dl_b->lock, flags);
                                                   >> 2800         cap = dl_bw_capacity(dest_cpu);
                                                   >> 2801         overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
                                                   >> 2802         if (overflow) {
                                                   >> 2803                 ret = -EBUSY;
                                                   >> 2804         } else {
                                                   >> 2805                 /*
                                                   >> 2806                  * We reserve space for this task in the destination
                                                   >> 2807                  * root_domain, as we can't fail after this point.
                                                   >> 2808                  * We will free resources in the source root_domain
                                                   >> 2809                  * later on (see set_cpus_allowed_dl()).
                                                   >> 2810                  */
                                                   >> 2811                 int cpus = dl_bw_cpus(dest_cpu);
                                                   >> 2812 
                                                   >> 2813                 __dl_add(dl_b, p->dl.dl_bw, cpus);
                                                   >> 2814                 ret = 0;
                                                   >> 2815         }
                                                   >> 2816         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
                                                   >> 2817         rcu_read_unlock_sched();
                                                   >> 2818 
                                                   >> 2819         return ret;
                                                   >> 2820 }
                                                   >> 2821 
3442 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,           2822 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3443                                  const struct cpumask *trial)        2823                                  const struct cpumask *trial)
3444 {                                                2824 {
3445         unsigned long flags, cap;             !! 2825         int ret = 1, trial_cpus;
3446         struct dl_bw *cur_dl_b;                  2826         struct dl_bw *cur_dl_b;
3447         int ret = 1;                          !! 2827         unsigned long flags;
3448                                                  2828 
3449         rcu_read_lock_sched();                   2829         rcu_read_lock_sched();
3450         cur_dl_b = dl_bw_of(cpumask_any(cur));                        2830         cur_dl_b = dl_bw_of(cpumask_any(cur));
3451         cap = __dl_bw_capacity(trial);        !! 2831         trial_cpus = cpumask_weight(trial);
                                                   >> 2832 
3452         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);                2833         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3453         if (__dl_overflow(cur_dl_b, cap, 0, 0))                    !! 2834         if (cur_dl_b->bw != -1 &&
                                                   >> 2835             cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
3454                 ret = 0;                         2836                 ret = 0;
3455         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);           2837         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3456         rcu_read_unlock_sched();                 2838         rcu_read_unlock_sched();
3457                                                  2839 
3458         return ret;                              2840         return ret;
3459 }                                                2841 }
3460                                                  2842 
3461 enum dl_bw_request {                          !! 2843 bool dl_cpu_busy(unsigned int cpu)
3462         dl_bw_req_check_overflow = 0,         << 
3463         dl_bw_req_alloc,                      << 
3464         dl_bw_req_free                        << 
3465 };                                            << 
3466                                               << 
3467 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)    <<
3468 {                                                2844 {
3469         unsigned long flags;                  !! 2845         unsigned long flags, cap;
3470         struct dl_bw *dl_b;                      2846         struct dl_bw *dl_b;
3471         bool overflow = 0;                    !! 2847         bool overflow;
3472                                                  2848 
3473         rcu_read_lock_sched();                   2849         rcu_read_lock_sched();
3474         dl_b = dl_bw_of(cpu);                    2850         dl_b = dl_bw_of(cpu);
3475         raw_spin_lock_irqsave(&dl_b->lock, fl    2851         raw_spin_lock_irqsave(&dl_b->lock, flags);
3476                                               !! 2852         cap = dl_bw_capacity(cpu);
3477         if (req == dl_bw_req_free) {          !! 2853         overflow = __dl_overflow(dl_b, cap, 0, 0);
3478                 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));                 <<
3479         } else {                                                        <<
3480                 unsigned long cap = dl_bw_capacity(cpu);                <<
3481                                                                         <<
3482                 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);          <<
3483                                                                         <<
3484                 if (req == dl_bw_req_alloc && !overflow) {              <<
3485                         /*                                              <<
3486                          * We reserve space in the destination          <<
3487                          * root_domain, as we can't fail after this point.    <<
3488                          * We will free resources in the source root_domain   <<
3489                          * later on (see set_cpus_allowed_dl()).        <<
3490                          */                                             <<
3491                         __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));         <<
3492                 }                             << 
3493         }                                     << 
3494                                               << 
3495         raw_spin_unlock_irqrestore(&dl_b->lock, flags);               2854         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3496         rcu_read_unlock_sched();                 2855         rcu_read_unlock_sched();
3497                                                  2856 
3498         return overflow ? -EBUSY : 0;         !! 2857         return overflow;
3499 }                                             << 
3500                                               << 
3501 int dl_bw_check_overflow(int cpu)             << 
3502 {                                             << 
3503         return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);      <<
3504 }                                             << 
3505                                               << 
3506 int dl_bw_alloc(int cpu, u64 dl_bw)           << 
3507 {                                             << 
3508         return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);           <<
3509 }                                             << 
3510                                               << 
3511 void dl_bw_free(int cpu, u64 dl_bw)           << 
3512 {                                             << 
3513         dl_bw_manage(dl_bw_req_free, cpu, dl_bw);                   <<
3514 }                                                2858 }
3515 #endif                                           2859 #endif
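
dl_bw_manage() folds the three external entry points (check, alloc, free) into one helper operating on the root domain's dl_bw accounting. The sketch below is a self-contained userspace model of that dispatch (the fake_dl_bw type and helper are hypothetical, not the kernel's), just to show how reservations accumulate against the domain capacity and why dl_bw_alloc() can report -EBUSY:

/* Userspace model of the dl_bw_manage() request dispatch above.  Illustration
 * only; values are Q20 fixed-point utilizations on a fake root domain. */
#include <stdint.h>
#include <stdio.h>

enum dl_bw_request { DL_BW_CHECK, DL_BW_ALLOC, DL_BW_FREE };

struct fake_dl_bw {
	uint64_t cap;           /* capacity of the root domain (Q20, summed over CPUs) */
	uint64_t total_bw;      /* bandwidth already reserved */
};

static int fake_dl_bw_manage(struct fake_dl_bw *b, enum dl_bw_request req, uint64_t bw)
{
	if (req == DL_BW_FREE) {
		b->total_bw -= bw;
		return 0;
	}
	/* check (bw == 0) or alloc: does the new bandwidth still fit? */
	if (b->total_bw + bw > b->cap)
		return -1;                      /* models -EBUSY */
	if (req == DL_BW_ALLOC)
		b->total_bw += bw;
	return 0;
}

int main(void)
{
	struct fake_dl_bw rd = { .cap = 2 << 20, .total_bw = 0 };       /* 2 CPUs worth */

	printf("alloc 1.5 CPUs: %d\n", fake_dl_bw_manage(&rd, DL_BW_ALLOC, 3 << 19));
	printf("alloc 1.0 CPUs: %d\n", fake_dl_bw_manage(&rd, DL_BW_ALLOC, 1 << 20));  /* refused */
	printf("free  0.5 CPUs: %d\n", fake_dl_bw_manage(&rd, DL_BW_FREE,  1 << 19));
	printf("alloc 1.0 CPUs: %d\n", fake_dl_bw_manage(&rd, DL_BW_ALLOC, 1 << 20));  /* now fits */
	return 0;
}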
3516                                                  2860 
3517 #ifdef CONFIG_SCHED_DEBUG                        2861 #ifdef CONFIG_SCHED_DEBUG
3518 void print_dl_stats(struct seq_file *m, int cpu)                     2862 void print_dl_stats(struct seq_file *m, int cpu)
3519 {                                                                    2863 {
3520         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);                       2864         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3521 }                                                2865 }
3522 #endif /* CONFIG_SCHED_DEBUG */                  2866 #endif /* CONFIG_SCHED_DEBUG */
3523                                                  2867 
