TOMOYO Linux Cross Reference
Linux/kernel/sched/deadline.c

Diff markup

Differences between /kernel/sched/deadline.c (Version linux-6.12-rc7) and /kernel/sched/deadline.c (Version linux-6.1.116)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * Deadline Scheduling Class (SCHED_DEADLINE)       3  * Deadline Scheduling Class (SCHED_DEADLINE)
  4  *                                                  4  *
  5  * Earliest Deadline First (EDF) + Constant Ba      5  * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
  6  *                                                  6  *
  7  * Tasks that periodically executes their inst      7  * Tasks that periodically executes their instances for less than their
  8  * runtime won't miss any of their deadlines.       8  * runtime won't miss any of their deadlines.
  9  * Tasks that are not periodic or sporadic or       9  * Tasks that are not periodic or sporadic or that tries to execute more
 10  * than their reserved bandwidth will be slowe     10  * than their reserved bandwidth will be slowed down (and may potentially
 11  * miss some of their deadlines), and won't af     11  * miss some of their deadlines), and won't affect any other task.
 12  *                                                 12  *
 13  * Copyright (C) 2012 Dario Faggioli <raistlin     13  * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 14  *                    Juri Lelli <juri.lelli@g     14  *                    Juri Lelli <juri.lelli@gmail.com>,
 15  *                    Michael Trimarchi <micha     15  *                    Michael Trimarchi <michael@amarulasolutions.com>,
 16  *                    Fabio Checconi <fcheccon     16  *                    Fabio Checconi <fchecconi@gmail.com>
 17  */                                                17  */
 18                                                    18 
 19 #include <linux/cpuset.h>                          19 #include <linux/cpuset.h>
 20                                                    20 
 21 /*                                                 21 /*
 22  * Default limits for DL period; on the top en     22  * Default limits for DL period; on the top end we guard against small util
 23  * tasks still getting ridiculously long effec     23  * tasks still getting ridiculously long effective runtimes, on the bottom end we
 24  * guard against timer DoS.                        24  * guard against timer DoS.
 25  */                                                25  */
 26 static unsigned int sysctl_sched_dl_period_max     26 static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
 27 static unsigned int sysctl_sched_dl_period_min     27 static unsigned int sysctl_sched_dl_period_min = 100;     /* 100 us */
 28 #ifdef CONFIG_SYSCTL                               28 #ifdef CONFIG_SYSCTL
 29 static struct ctl_table sched_dl_sysctls[] = {     29 static struct ctl_table sched_dl_sysctls[] = {
 30         {                                          30         {
 31                 .procname       = "sched_deadl     31                 .procname       = "sched_deadline_period_max_us",
 32                 .data           = &sysctl_sche     32                 .data           = &sysctl_sched_dl_period_max,
 33                 .maxlen         = sizeof(unsig     33                 .maxlen         = sizeof(unsigned int),
 34                 .mode           = 0644,            34                 .mode           = 0644,
 35                 .proc_handler   = proc_douintv     35                 .proc_handler   = proc_douintvec_minmax,
 36                 .extra1         = (void *)&sys     36                 .extra1         = (void *)&sysctl_sched_dl_period_min,
 37         },                                         37         },
 38         {                                          38         {
 39                 .procname       = "sched_deadl     39                 .procname       = "sched_deadline_period_min_us",
 40                 .data           = &sysctl_sche     40                 .data           = &sysctl_sched_dl_period_min,
 41                 .maxlen         = sizeof(unsig     41                 .maxlen         = sizeof(unsigned int),
 42                 .mode           = 0644,            42                 .mode           = 0644,
 43                 .proc_handler   = proc_douintv     43                 .proc_handler   = proc_douintvec_minmax,
 44                 .extra2         = (void *)&sys     44                 .extra2         = (void *)&sysctl_sched_dl_period_max,
 45         },                                         45         },
                                                   >>  46         {}
 46 };                                                 47 };
 47                                                    48 
 48 static int __init sched_dl_sysctl_init(void)       49 static int __init sched_dl_sysctl_init(void)
 49 {                                                  50 {
 50         register_sysctl_init("kernel", sched_d     51         register_sysctl_init("kernel", sched_dl_sysctls);
 51         return 0;                                  52         return 0;
 52 }                                                  53 }
 53 late_initcall(sched_dl_sysctl_init);               54 late_initcall(sched_dl_sysctl_init);
 54 #endif                                             55 #endif
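
The table above registers both limits under the "kernel" sysctl directory, so with the usual procfs layout they should appear as /proc/sys/kernel/sched_deadline_period_max_us and /proc/sys/kernel/sched_deadline_period_min_us (the names come from the .procname fields; the exact paths are an assumption, not verified here). A minimal userspace sketch that reads the current limits:

#include <stdio.h>

/* Illustrative only: read one sysctl value, return -1 on any error. */
static long read_sysctl(const char *path)
{
        FILE *f = fopen(path, "r");
        long val = -1;

        if (!f)
                return -1;
        if (fscanf(f, "%ld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        printf("sched_deadline_period_max_us = %ld\n",
               read_sysctl("/proc/sys/kernel/sched_deadline_period_max_us"));
        printf("sched_deadline_period_min_us = %ld\n",
               read_sysctl("/proc/sys/kernel/sched_deadline_period_min_us"));
        return 0;
}
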
 55                                                    56 
 56 static bool dl_server(struct sched_dl_entity * << 
 57 {                                              << 
 58         return dl_se->dl_server;               << 
 59 }                                              << 
 60                                                << 
 61 static inline struct task_struct *dl_task_of(s     57 static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
 62 {                                                  58 {
 63         BUG_ON(dl_server(dl_se));              << 
 64         return container_of(dl_se, struct task     59         return container_of(dl_se, struct task_struct, dl);
 65 }                                                  60 }
 66                                                    61 
 67 static inline struct rq *rq_of_dl_rq(struct dl     62 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
 68 {                                                  63 {
 69         return container_of(dl_rq, struct rq,      64         return container_of(dl_rq, struct rq, dl);
 70 }                                                  65 }
 71                                                    66 
 72 static inline struct rq *rq_of_dl_se(struct sc << 
 73 {                                              << 
 74         struct rq *rq = dl_se->rq;             << 
 75                                                << 
 76         if (!dl_server(dl_se))                 << 
 77                 rq = task_rq(dl_task_of(dl_se) << 
 78                                                << 
 79         return rq;                             << 
 80 }                                              << 
 81                                                << 
 82 static inline struct dl_rq *dl_rq_of_se(struct     67 static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
 83 {                                                  68 {
 84         return &rq_of_dl_se(dl_se)->dl;        !!  69         struct task_struct *p = dl_task_of(dl_se);
                                                   >>  70         struct rq *rq = task_rq(p);
                                                   >>  71 
                                                   >>  72         return &rq->dl;
 85 }                                                  73 }
 86                                                    74 
 87 static inline int on_dl_rq(struct sched_dl_ent     75 static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 88 {                                                  76 {
 89         return !RB_EMPTY_NODE(&dl_se->rb_node)     77         return !RB_EMPTY_NODE(&dl_se->rb_node);
 90 }                                                  78 }
 91                                                    79 
 92 #ifdef CONFIG_RT_MUTEXES                           80 #ifdef CONFIG_RT_MUTEXES
 93 static inline struct sched_dl_entity *pi_of(st     81 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
 94 {                                                  82 {
 95         return dl_se->pi_se;                       83         return dl_se->pi_se;
 96 }                                                  84 }
 97                                                    85 
 98 static inline bool is_dl_boosted(struct sched_     86 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
 99 {                                                  87 {
100         return pi_of(dl_se) != dl_se;              88         return pi_of(dl_se) != dl_se;
101 }                                                  89 }
102 #else                                              90 #else
103 static inline struct sched_dl_entity *pi_of(st     91 static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
104 {                                                  92 {
105         return dl_se;                              93         return dl_se;
106 }                                                  94 }
107                                                    95 
108 static inline bool is_dl_boosted(struct sched_     96 static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
109 {                                                  97 {
110         return false;                              98         return false;
111 }                                                  99 }
112 #endif                                            100 #endif
113                                                   101 
114 #ifdef CONFIG_SMP                                 102 #ifdef CONFIG_SMP
115 static inline struct dl_bw *dl_bw_of(int i)       103 static inline struct dl_bw *dl_bw_of(int i)
116 {                                                 104 {
117         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_    105         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
118                          "sched RCU must be he    106                          "sched RCU must be held");
119         return &cpu_rq(i)->rd->dl_bw;             107         return &cpu_rq(i)->rd->dl_bw;
120 }                                                 108 }
121                                                   109 
122 static inline int dl_bw_cpus(int i)               110 static inline int dl_bw_cpus(int i)
123 {                                                 111 {
124         struct root_domain *rd = cpu_rq(i)->rd    112         struct root_domain *rd = cpu_rq(i)->rd;
125         int cpus;                                 113         int cpus;
126                                                   114 
127         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_    115         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
128                          "sched RCU must be he    116                          "sched RCU must be held");
129                                                   117 
130         if (cpumask_subset(rd->span, cpu_activ    118         if (cpumask_subset(rd->span, cpu_active_mask))
131                 return cpumask_weight(rd->span    119                 return cpumask_weight(rd->span);
132                                                   120 
133         cpus = 0;                                 121         cpus = 0;
134                                                   122 
135         for_each_cpu_and(i, rd->span, cpu_acti    123         for_each_cpu_and(i, rd->span, cpu_active_mask)
136                 cpus++;                           124                 cpus++;
137                                                   125 
138         return cpus;                              126         return cpus;
139 }                                                 127 }
140                                                   128 
141 static inline unsigned long __dl_bw_capacity(c    129 static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
142 {                                                 130 {
143         unsigned long cap = 0;                    131         unsigned long cap = 0;
144         int i;                                    132         int i;
145                                                   133 
146         for_each_cpu_and(i, mask, cpu_active_m    134         for_each_cpu_and(i, mask, cpu_active_mask)
147                 cap += arch_scale_cpu_capacity !! 135                 cap += capacity_orig_of(i);
148                                                   136 
149         return cap;                               137         return cap;
150 }                                                 138 }
151                                                   139 
152 /*                                                140 /*
153  * XXX Fix: If 'rq->rd == def_root_domain' per    141  * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
154  * of the CPU the task is running on rather rd    142  * of the CPU the task is running on rather rd's \Sum CPU capacity.
155  */                                               143  */
156 static inline unsigned long dl_bw_capacity(int    144 static inline unsigned long dl_bw_capacity(int i)
157 {                                                 145 {
158         if (!sched_asym_cpucap_active() &&        146         if (!sched_asym_cpucap_active() &&
159             arch_scale_cpu_capacity(i) == SCHE !! 147             capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
160                 return dl_bw_cpus(i) << SCHED_    148                 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
161         } else {                                  149         } else {
162                 RCU_LOCKDEP_WARN(!rcu_read_loc    150                 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
163                                  "sched RCU mu    151                                  "sched RCU must be held");
164                                                   152 
165                 return __dl_bw_capacity(cpu_rq    153                 return __dl_bw_capacity(cpu_rq(i)->rd->span);
166         }                                         154         }
167 }                                                 155 }
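
When every active CPU in the root domain runs at SCHED_CAPACITY_SCALE, the fast path above is simply dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT; otherwise __dl_bw_capacity() sums the per-CPU capacities (arch_scale_cpu_capacity() in 6.12, capacity_orig_of() in 6.1). A toy userspace illustration with made-up capacity values:

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
        /* Hypothetical 4 big + 4 LITTLE system; the LITTLE value is invented. */
        unsigned long caps[] = { 1024, 1024, 1024, 1024, 446, 446, 446, 446 };
        unsigned long cap = 0;
        unsigned int i;

        /* Same summation as __dl_bw_capacity(), just over a fixed array. */
        for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
                cap += caps[i];

        printf("asymmetric capacity sum = %lu\n", cap);                      /* 5880 */
        printf("symmetric fast path     = %lu\n", 8 * SCHED_CAPACITY_SCALE); /* 8192 */
        return 0;
}
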
168                                                   156 
169 static inline bool dl_bw_visited(int cpu, u64     157 static inline bool dl_bw_visited(int cpu, u64 gen)
170 {                                                 158 {
171         struct root_domain *rd = cpu_rq(cpu)->    159         struct root_domain *rd = cpu_rq(cpu)->rd;
172                                                   160 
173         if (rd->visit_gen == gen)                 161         if (rd->visit_gen == gen)
174                 return true;                      162                 return true;
175                                                   163 
176         rd->visit_gen = gen;                      164         rd->visit_gen = gen;
177         return false;                             165         return false;
178 }                                                 166 }
179                                                   167 
180 static inline                                     168 static inline
181 void __dl_update(struct dl_bw *dl_b, s64 bw)      169 void __dl_update(struct dl_bw *dl_b, s64 bw)
182 {                                                 170 {
183         struct root_domain *rd = container_of(    171         struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
184         int i;                                    172         int i;
185                                                   173 
186         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_    174         RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
187                          "sched RCU must be he    175                          "sched RCU must be held");
188         for_each_cpu_and(i, rd->span, cpu_acti    176         for_each_cpu_and(i, rd->span, cpu_active_mask) {
189                 struct rq *rq = cpu_rq(i);        177                 struct rq *rq = cpu_rq(i);
190                                                   178 
191                 rq->dl.extra_bw += bw;            179                 rq->dl.extra_bw += bw;
192         }                                         180         }
193 }                                                 181 }
194 #else                                             182 #else
195 static inline struct dl_bw *dl_bw_of(int i)       183 static inline struct dl_bw *dl_bw_of(int i)
196 {                                                 184 {
197         return &cpu_rq(i)->dl.dl_bw;              185         return &cpu_rq(i)->dl.dl_bw;
198 }                                                 186 }
199                                                   187 
200 static inline int dl_bw_cpus(int i)               188 static inline int dl_bw_cpus(int i)
201 {                                                 189 {
202         return 1;                                 190         return 1;
203 }                                                 191 }
204                                                   192 
205 static inline unsigned long dl_bw_capacity(int    193 static inline unsigned long dl_bw_capacity(int i)
206 {                                                 194 {
207         return SCHED_CAPACITY_SCALE;              195         return SCHED_CAPACITY_SCALE;
208 }                                                 196 }
209                                                   197 
210 static inline bool dl_bw_visited(int cpu, u64     198 static inline bool dl_bw_visited(int cpu, u64 gen)
211 {                                                 199 {
212         return false;                             200         return false;
213 }                                                 201 }
214                                                   202 
215 static inline                                     203 static inline
216 void __dl_update(struct dl_bw *dl_b, s64 bw)      204 void __dl_update(struct dl_bw *dl_b, s64 bw)
217 {                                                 205 {
218         struct dl_rq *dl = container_of(dl_b,     206         struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
219                                                   207 
220         dl->extra_bw += bw;                       208         dl->extra_bw += bw;
221 }                                                 209 }
222 #endif                                            210 #endif
223                                                   211 
224 static inline                                     212 static inline
225 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw,     213 void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
226 {                                                 214 {
227         dl_b->total_bw -= tsk_bw;                 215         dl_b->total_bw -= tsk_bw;
228         __dl_update(dl_b, (s32)tsk_bw / cpus);    216         __dl_update(dl_b, (s32)tsk_bw / cpus);
229 }                                                 217 }
230                                                   218 
231 static inline                                     219 static inline
232 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw,     220 void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
233 {                                                 221 {
234         dl_b->total_bw += tsk_bw;                 222         dl_b->total_bw += tsk_bw;
235         __dl_update(dl_b, -((s32)tsk_bw / cpus    223         __dl_update(dl_b, -((s32)tsk_bw / cpus));
236 }                                                 224 }
237                                                   225 
238 static inline bool                                226 static inline bool
239 __dl_overflow(struct dl_bw *dl_b, unsigned lon    227 __dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
240 {                                                 228 {
241         return dl_b->bw != -1 &&                  229         return dl_b->bw != -1 &&
242                cap_scale(dl_b->bw, cap) < dl_b    230                cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
243 }                                                 231 }
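
In this check, dl_b->bw is the global SCHED_DEADLINE limit in to_ratio() fixed point (runtime << 20 / period, with -1 meaning "no limit"), cap is the root domain capacity, and cap_scale() is assumed here to be the usual (bw * cap) >> SCHED_CAPACITY_SHIFT scaling. A worked userspace example of the admission test, with illustrative numbers only:

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT                20
#define SCHED_CAPACITY_SHIFT    10

/* Same fixed point as the kernel's to_ratio(). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        uint64_t bw       = to_ratio(1000000, 950000);   /* 95% global limit   */
        uint64_t cap      = 4 << SCHED_CAPACITY_SHIFT;   /* 4 full-size CPUs   */
        uint64_t total_bw = 5 * to_ratio(100000, 50000); /* 5 tasks at 50%     */
        uint64_t new_bw   = to_ratio(100000, 30000);     /* new: 30ms / 100ms  */
        uint64_t old_bw   = 0;                           /* not replacing one  */

        uint64_t limit = (bw * cap) >> SCHED_CAPACITY_SHIFT;
        int overflow = limit < total_bw - old_bw + new_bw;

        printf("limit=%llu requested=%llu -> %s\n",
               (unsigned long long)limit,
               (unsigned long long)(total_bw - old_bw + new_bw),
               overflow ? "reject" : "admit");
        return 0;
}
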
244                                                   232 
245 static inline                                     233 static inline
246 void __add_running_bw(u64 dl_bw, struct dl_rq     234 void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
247 {                                                 235 {
248         u64 old = dl_rq->running_bw;              236         u64 old = dl_rq->running_bw;
249                                                   237 
250         lockdep_assert_rq_held(rq_of_dl_rq(dl_    238         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
251         dl_rq->running_bw += dl_bw;               239         dl_rq->running_bw += dl_bw;
252         SCHED_WARN_ON(dl_rq->running_bw < old)    240         SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
253         SCHED_WARN_ON(dl_rq->running_bw > dl_r    241         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
254         /* kick cpufreq (see the comment in ke    242         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
255         cpufreq_update_util(rq_of_dl_rq(dl_rq)    243         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
256 }                                                 244 }
257                                                   245 
258 static inline                                     246 static inline
259 void __sub_running_bw(u64 dl_bw, struct dl_rq     247 void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
260 {                                                 248 {
261         u64 old = dl_rq->running_bw;              249         u64 old = dl_rq->running_bw;
262                                                   250 
263         lockdep_assert_rq_held(rq_of_dl_rq(dl_    251         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
264         dl_rq->running_bw -= dl_bw;               252         dl_rq->running_bw -= dl_bw;
265         SCHED_WARN_ON(dl_rq->running_bw > old)    253         SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
266         if (dl_rq->running_bw > old)              254         if (dl_rq->running_bw > old)
267                 dl_rq->running_bw = 0;            255                 dl_rq->running_bw = 0;
268         /* kick cpufreq (see the comment in ke    256         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
269         cpufreq_update_util(rq_of_dl_rq(dl_rq)    257         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
270 }                                                 258 }
271                                                   259 
272 static inline                                     260 static inline
273 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_r    261 void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
274 {                                                 262 {
275         u64 old = dl_rq->this_bw;                 263         u64 old = dl_rq->this_bw;
276                                                   264 
277         lockdep_assert_rq_held(rq_of_dl_rq(dl_    265         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
278         dl_rq->this_bw += dl_bw;                  266         dl_rq->this_bw += dl_bw;
279         SCHED_WARN_ON(dl_rq->this_bw < old); /    267         SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
280 }                                                 268 }
281                                                   269 
282 static inline                                     270 static inline
283 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_r    271 void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
284 {                                                 272 {
285         u64 old = dl_rq->this_bw;                 273         u64 old = dl_rq->this_bw;
286                                                   274 
287         lockdep_assert_rq_held(rq_of_dl_rq(dl_    275         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
288         dl_rq->this_bw -= dl_bw;                  276         dl_rq->this_bw -= dl_bw;
289         SCHED_WARN_ON(dl_rq->this_bw > old); /    277         SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
290         if (dl_rq->this_bw > old)                 278         if (dl_rq->this_bw > old)
291                 dl_rq->this_bw = 0;               279                 dl_rq->this_bw = 0;
292         SCHED_WARN_ON(dl_rq->running_bw > dl_r    280         SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
293 }                                                 281 }
294                                                   282 
295 static inline                                     283 static inline
296 void add_rq_bw(struct sched_dl_entity *dl_se,     284 void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
297 {                                                 285 {
298         if (!dl_entity_is_special(dl_se))         286         if (!dl_entity_is_special(dl_se))
299                 __add_rq_bw(dl_se->dl_bw, dl_r    287                 __add_rq_bw(dl_se->dl_bw, dl_rq);
300 }                                                 288 }
301                                                   289 
302 static inline                                     290 static inline
303 void sub_rq_bw(struct sched_dl_entity *dl_se,     291 void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304 {                                                 292 {
305         if (!dl_entity_is_special(dl_se))         293         if (!dl_entity_is_special(dl_se))
306                 __sub_rq_bw(dl_se->dl_bw, dl_r    294                 __sub_rq_bw(dl_se->dl_bw, dl_rq);
307 }                                                 295 }
308                                                   296 
309 static inline                                     297 static inline
310 void add_running_bw(struct sched_dl_entity *dl    298 void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311 {                                                 299 {
312         if (!dl_entity_is_special(dl_se))         300         if (!dl_entity_is_special(dl_se))
313                 __add_running_bw(dl_se->dl_bw,    301                 __add_running_bw(dl_se->dl_bw, dl_rq);
314 }                                                 302 }
315                                                   303 
316 static inline                                     304 static inline
317 void sub_running_bw(struct sched_dl_entity *dl    305 void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
318 {                                                 306 {
319         if (!dl_entity_is_special(dl_se))         307         if (!dl_entity_is_special(dl_se))
320                 __sub_running_bw(dl_se->dl_bw,    308                 __sub_running_bw(dl_se->dl_bw, dl_rq);
321 }                                                 309 }
322                                                   310 
323 static void dl_rq_change_utilization(struct rq !! 311 static void dl_change_utilization(struct task_struct *p, u64 new_bw)
324 {                                                 312 {
325         if (dl_se->dl_non_contending) {        !! 313         struct rq *rq;
326                 sub_running_bw(dl_se, &rq->dl) !! 314 
327                 dl_se->dl_non_contending = 0;  !! 315         WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
                                                   >> 316 
                                                   >> 317         if (task_on_rq_queued(p))
                                                   >> 318                 return;
328                                                   319 
                                                   >> 320         rq = task_rq(p);
                                                   >> 321         if (p->dl.dl_non_contending) {
                                                   >> 322                 sub_running_bw(&p->dl, &rq->dl);
                                                   >> 323                 p->dl.dl_non_contending = 0;
329                 /*                                324                 /*
330                  * If the timer handler is cur    325                  * If the timer handler is currently running and the
331                  * timer cannot be canceled, i    326                  * timer cannot be canceled, inactive_task_timer()
332                  * will see that dl_not_conten    327                  * will see that dl_not_contending is not set, and
333                  * will not touch the rq's act    328                  * will not touch the rq's active utilization,
334                  * so we are still safe.          329                  * so we are still safe.
335                  */                               330                  */
336                 if (hrtimer_try_to_cancel(&dl_ !! 331                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
337                         if (!dl_server(dl_se)) !! 332                         put_task_struct(p);
338                                 put_task_struc << 
339                 }                              << 
340         }                                         333         }
341         __sub_rq_bw(dl_se->dl_bw, &rq->dl);    !! 334         __sub_rq_bw(p->dl.dl_bw, &rq->dl);
342         __add_rq_bw(new_bw, &rq->dl);             335         __add_rq_bw(new_bw, &rq->dl);
343 }                                                 336 }
344                                                   337 
345 static void dl_change_utilization(struct task_ << 
346 {                                              << 
347         WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_ << 
348                                                << 
349         if (task_on_rq_queued(p))              << 
350                 return;                        << 
351                                                << 
352         dl_rq_change_utilization(task_rq(p), & << 
353 }                                              << 
354                                                << 
355 static void __dl_clear_params(struct sched_dl_ << 
356                                                << 
357 /*                                                338 /*
358  * The utilization of a task cannot be immedia    339  * The utilization of a task cannot be immediately removed from
359  * the rq active utilization (running_bw) when    340  * the rq active utilization (running_bw) when the task blocks.
360  * Instead, we have to wait for the so called     341  * Instead, we have to wait for the so called "0-lag time".
361  *                                                342  *
362  * If a task blocks before the "0-lag time", a    343  * If a task blocks before the "0-lag time", a timer (the inactive
363  * timer) is armed, and running_bw is decrease    344  * timer) is armed, and running_bw is decreased when the timer
364  * fires.                                         345  * fires.
365  *                                                346  *
366  * If the task wakes up again before the inact    347  * If the task wakes up again before the inactive timer fires,
367  * the timer is canceled, whereas if the task     348  * the timer is canceled, whereas if the task wakes up after the
368  * inactive timer fired (and running_bw has be    349  * inactive timer fired (and running_bw has been decreased) the
369  * task's utilization has to be added to runni    350  * task's utilization has to be added to running_bw again.
370  * A flag in the deadline scheduling entity (d    351  * A flag in the deadline scheduling entity (dl_non_contending)
371  * is used to avoid race conditions between th    352  * is used to avoid race conditions between the inactive timer handler
372  * and task wakeups.                              353  * and task wakeups.
373  *                                                354  *
374  * The following diagram shows how running_bw     355  * The following diagram shows how running_bw is updated. A task is
375  * "ACTIVE" when its utilization contributes t    356  * "ACTIVE" when its utilization contributes to running_bw; an
376  * "ACTIVE contending" task is in the TASK_RUN    357  * "ACTIVE contending" task is in the TASK_RUNNING state, while an
377  * "ACTIVE non contending" task is a blocked t    358  * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
378  * has not passed yet. An "INACTIVE" task is a    359  * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
379  * time already passed, which does not contrib    360  * time already passed, which does not contribute to running_bw anymore.
380  *                              +-------------    361  *                              +------------------+
381  *             wakeup           |    ACTIVE       362  *             wakeup           |    ACTIVE        |
382  *          +------------------>+   contending    363  *          +------------------>+   contending     |
383  *          | add_running_bw    |                 364  *          | add_running_bw    |                  |
384  *          |                   +----+------+-    365  *          |                   +----+------+------+
385  *          |                        |      ^     366  *          |                        |      ^
386  *          |                dequeue |      |     367  *          |                dequeue |      |
387  * +--------+-------+                |      |     368  * +--------+-------+                |      |
388  * |                |   t >= 0-lag   |      |     369  * |                |   t >= 0-lag   |      | wakeup
389  * |    INACTIVE    |<---------------+      |     370  * |    INACTIVE    |<---------------+      |
390  * |                | sub_running_bw |      |     371  * |                | sub_running_bw |      |
391  * +--------+-------+                |      |     372  * +--------+-------+                |      |
392  *          ^                        |      |     373  *          ^                        |      |
393  *          |              t < 0-lag |      |     374  *          |              t < 0-lag |      |
394  *          |                        |      |     375  *          |                        |      |
395  *          |                        V      |     376  *          |                        V      |
396  *          |                   +----+------+-    377  *          |                   +----+------+------+
397  *          | sub_running_bw    |    ACTIVE       378  *          | sub_running_bw    |    ACTIVE        |
398  *          +-------------------+                 379  *          +-------------------+                  |
399  *            inactive timer    |  non contend    380  *            inactive timer    |  non contending  |
400  *            fired             +-------------    381  *            fired             +------------------+
401  *                                                382  *
402  * The task_non_contending() function is invok    383  * The task_non_contending() function is invoked when a task
403  * blocks, and checks if the 0-lag time alread    384  * blocks, and checks if the 0-lag time already passed or
404  * not (in the first case, it directly updates    385  * not (in the first case, it directly updates running_bw;
405  * in the second case, it arms the inactive ti    386  * in the second case, it arms the inactive timer).
406  *                                                387  *
407  * The task_contending() function is invoked w    388  * The task_contending() function is invoked when a task wakes
408  * up, and checks if the task is still in the     389  * up, and checks if the task is still in the "ACTIVE non contending"
409  * state or not (in the second case, it update    390  * state or not (in the second case, it updates running_bw).
410  */                                               391  */
411 static void task_non_contending(struct sched_d !! 392 static void task_non_contending(struct task_struct *p)
412 {                                                 393 {
                                                   >> 394         struct sched_dl_entity *dl_se = &p->dl;
413         struct hrtimer *timer = &dl_se->inacti    395         struct hrtimer *timer = &dl_se->inactive_timer;
414         struct rq *rq = rq_of_dl_se(dl_se);    !! 396         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
415         struct dl_rq *dl_rq = &rq->dl;         !! 397         struct rq *rq = rq_of_dl_rq(dl_rq);
416         s64 zerolag_time;                         398         s64 zerolag_time;
417                                                   399 
418         /*                                        400         /*
419          * If this is a non-deadline task that    401          * If this is a non-deadline task that has been boosted,
420          * do nothing                             402          * do nothing
421          */                                       403          */
422         if (dl_se->dl_runtime == 0)               404         if (dl_se->dl_runtime == 0)
423                 return;                           405                 return;
424                                                   406 
425         if (dl_entity_is_special(dl_se))          407         if (dl_entity_is_special(dl_se))
426                 return;                           408                 return;
427                                                   409 
428         WARN_ON(dl_se->dl_non_contending);        410         WARN_ON(dl_se->dl_non_contending);
429                                                   411 
430         zerolag_time = dl_se->deadline -          412         zerolag_time = dl_se->deadline -
431                  div64_long((dl_se->runtime *     413                  div64_long((dl_se->runtime * dl_se->dl_period),
432                         dl_se->dl_runtime);       414                         dl_se->dl_runtime);
433                                                   415 
434         /*                                        416         /*
435          * Using relative times instead of the    417          * Using relative times instead of the absolute "0-lag time"
436          * allows to simplify the code            418          * allows to simplify the code
437          */                                       419          */
438         zerolag_time -= rq_clock(rq);             420         zerolag_time -= rq_clock(rq);
439                                                   421 
440         /*                                        422         /*
441          * If the "0-lag time" already passed,    423          * If the "0-lag time" already passed, decrease the active
442          * utilization now, instead of startin    424          * utilization now, instead of starting a timer
443          */                                       425          */
444         if ((zerolag_time < 0) || hrtimer_acti    426         if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
445                 if (dl_server(dl_se)) {        !! 427                 if (dl_task(p))
446                         sub_running_bw(dl_se,     428                         sub_running_bw(dl_se, dl_rq);
447                 } else {                       !! 429                 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
448                         struct task_struct *p  !! 430                         struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
449                                                << 
450                         if (dl_task(p))        << 
451                                 sub_running_bw << 
452                                                   431 
453                         if (!dl_task(p) || REA !! 432                         if (READ_ONCE(p->__state) == TASK_DEAD)
454                                 struct dl_bw * !! 433                                 sub_rq_bw(&p->dl, &rq->dl);
455                                                !! 434                         raw_spin_lock(&dl_b->lock);
456                                 if (READ_ONCE( !! 435                         __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
457                                         sub_rq !! 436                         raw_spin_unlock(&dl_b->lock);
458                                 raw_spin_lock( !! 437                         __dl_clear_params(p);
459                                 __dl_sub(dl_b, << 
460                                 raw_spin_unloc << 
461                                 __dl_clear_par << 
462                         }                      << 
463                 }                                 438                 }
464                                                   439 
465                 return;                           440                 return;
466         }                                         441         }
467                                                   442 
468         dl_se->dl_non_contending = 1;             443         dl_se->dl_non_contending = 1;
469         if (!dl_server(dl_se))                 !! 444         get_task_struct(p);
470                 get_task_struct(dl_task_of(dl_ << 
471                                                << 
472         hrtimer_start(timer, ns_to_ktime(zerol    445         hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
473 }                                                 446 }
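
The zerolag_time above is the "0-lag time" from the state diagram: the absolute deadline minus runtime * dl_period / dl_runtime, then made relative to rq_clock(). A standalone sketch of that arithmetic, with invented numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Reservation: 10ms every 100ms (all values in nanoseconds, made up). */
        int64_t dl_runtime = 10000000;
        int64_t dl_period  = 100000000;

        int64_t now      = 0;                /* stands in for rq_clock()         */
        int64_t deadline = now + 30000000;   /* current absolute deadline        */
        int64_t runtime  = 4000000;          /* budget left when the task blocks */

        int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime - now;

        if (zerolag < 0)
                printf("0-lag already passed: sub_running_bw() right away\n");
        else
                printf("arm inactive_timer to fire in %lld ns\n",
                       (long long)zerolag);
        return 0;
}

With 4ms of budget left the 0-lag point is 40ms before the deadline, i.e. 10ms in the past, so the utilization is removed immediately; with only 2ms left it would be 10ms in the future and the inactive timer would be armed instead.
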
474                                                   447 
475 static void task_contending(struct sched_dl_en    448 static void task_contending(struct sched_dl_entity *dl_se, int flags)
476 {                                                 449 {
477         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    450         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
478                                                   451 
479         /*                                        452         /*
480          * If this is a non-deadline task that    453          * If this is a non-deadline task that has been boosted,
481          * do nothing                             454          * do nothing
482          */                                       455          */
483         if (dl_se->dl_runtime == 0)               456         if (dl_se->dl_runtime == 0)
484                 return;                           457                 return;
485                                                   458 
486         if (flags & ENQUEUE_MIGRATED)             459         if (flags & ENQUEUE_MIGRATED)
487                 add_rq_bw(dl_se, dl_rq);          460                 add_rq_bw(dl_se, dl_rq);
488                                                   461 
489         if (dl_se->dl_non_contending) {           462         if (dl_se->dl_non_contending) {
490                 dl_se->dl_non_contending = 0;     463                 dl_se->dl_non_contending = 0;
491                 /*                                464                 /*
492                  * If the timer handler is cur    465                  * If the timer handler is currently running and the
493                  * timer cannot be canceled, i    466                  * timer cannot be canceled, inactive_task_timer()
494                  * will see that dl_not_conten    467                  * will see that dl_not_contending is not set, and
495                  * will not touch the rq's act    468                  * will not touch the rq's active utilization,
496                  * so we are still safe.          469                  * so we are still safe.
497                  */                               470                  */
498                 if (hrtimer_try_to_cancel(&dl_ !! 471                 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
499                         if (!dl_server(dl_se)) !! 472                         put_task_struct(dl_task_of(dl_se));
500                                 put_task_struc << 
501                 }                              << 
502         } else {                                  473         } else {
503                 /*                                474                 /*
504                  * Since "dl_non_contending" i    475                  * Since "dl_non_contending" is not set, the
505                  * task's utilization has alre    476                  * task's utilization has already been removed from
506                  * active utilization (either     477                  * active utilization (either when the task blocked,
507                  * when the "inactive timer" f    478                  * when the "inactive timer" fired).
508                  * So, add it back.               479                  * So, add it back.
509                  */                               480                  */
510                 add_running_bw(dl_se, dl_rq);     481                 add_running_bw(dl_se, dl_rq);
511         }                                         482         }
512 }                                                 483 }
513                                                   484 
514 static inline int is_leftmost(struct sched_dl_ !! 485 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
515 {                                                 486 {
                                                   >> 487         struct sched_dl_entity *dl_se = &p->dl;
                                                   >> 488 
516         return rb_first_cached(&dl_rq->root) =    489         return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
517 }                                                 490 }
518                                                   491 
519 static void init_dl_rq_bw_ratio(struct dl_rq *    492 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
520                                                   493 
                                                   >> 494 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
                                                   >> 495 {
                                                   >> 496         raw_spin_lock_init(&dl_b->dl_runtime_lock);
                                                   >> 497         dl_b->dl_period = period;
                                                   >> 498         dl_b->dl_runtime = runtime;
                                                   >> 499 }
                                                   >> 500 
521 void init_dl_bw(struct dl_bw *dl_b)               501 void init_dl_bw(struct dl_bw *dl_b)
522 {                                                 502 {
523         raw_spin_lock_init(&dl_b->lock);          503         raw_spin_lock_init(&dl_b->lock);
524         if (global_rt_runtime() == RUNTIME_INF    504         if (global_rt_runtime() == RUNTIME_INF)
525                 dl_b->bw = -1;                    505                 dl_b->bw = -1;
526         else                                      506         else
527                 dl_b->bw = to_ratio(global_rt_    507                 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
528         dl_b->total_bw = 0;                       508         dl_b->total_bw = 0;
529 }                                                 509 }
530                                                   510 
531 void init_dl_rq(struct dl_rq *dl_rq)              511 void init_dl_rq(struct dl_rq *dl_rq)
532 {                                                 512 {
533         dl_rq->root = RB_ROOT_CACHED;             513         dl_rq->root = RB_ROOT_CACHED;
534                                                   514 
535 #ifdef CONFIG_SMP                                 515 #ifdef CONFIG_SMP
536         /* zero means no -deadline tasks */       516         /* zero means no -deadline tasks */
537         dl_rq->earliest_dl.curr = dl_rq->earli    517         dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
538                                                   518 
                                                   >> 519         dl_rq->dl_nr_migratory = 0;
539         dl_rq->overloaded = 0;                    520         dl_rq->overloaded = 0;
540         dl_rq->pushable_dl_tasks_root = RB_ROO    521         dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
541 #else                                             522 #else
542         init_dl_bw(&dl_rq->dl_bw);                523         init_dl_bw(&dl_rq->dl_bw);
543 #endif                                            524 #endif
544                                                   525 
545         dl_rq->running_bw = 0;                    526         dl_rq->running_bw = 0;
546         dl_rq->this_bw = 0;                       527         dl_rq->this_bw = 0;
547         init_dl_rq_bw_ratio(dl_rq);               528         init_dl_rq_bw_ratio(dl_rq);
548 }                                                 529 }
549                                                   530 
550 #ifdef CONFIG_SMP                                 531 #ifdef CONFIG_SMP
551                                                   532 
552 static inline int dl_overloaded(struct rq *rq)    533 static inline int dl_overloaded(struct rq *rq)
553 {                                                 534 {
554         return atomic_read(&rq->rd->dlo_count)    535         return atomic_read(&rq->rd->dlo_count);
555 }                                                 536 }
556                                                   537 
557 static inline void dl_set_overload(struct rq *    538 static inline void dl_set_overload(struct rq *rq)
558 {                                                 539 {
559         if (!rq->online)                          540         if (!rq->online)
560                 return;                           541                 return;
561                                                   542 
562         cpumask_set_cpu(rq->cpu, rq->rd->dlo_m    543         cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
563         /*                                        544         /*
564          * Must be visible before the overload    545          * Must be visible before the overload count is
565          * set (as in sched_rt.c).                546          * set (as in sched_rt.c).
566          *                                        547          *
567          * Matched by the barrier in pull_dl_t    548          * Matched by the barrier in pull_dl_task().
568          */                                       549          */
569         smp_wmb();                                550         smp_wmb();
570         atomic_inc(&rq->rd->dlo_count);           551         atomic_inc(&rq->rd->dlo_count);
571 }                                                 552 }
572                                                   553 
573 static inline void dl_clear_overload(struct rq    554 static inline void dl_clear_overload(struct rq *rq)
574 {                                                 555 {
575         if (!rq->online)                          556         if (!rq->online)
576                 return;                           557                 return;
577                                                   558 
578         atomic_dec(&rq->rd->dlo_count);           559         atomic_dec(&rq->rd->dlo_count);
579         cpumask_clear_cpu(rq->cpu, rq->rd->dlo    560         cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
580 }                                                 561 }
581                                                   562 
                                                   >> 563 static void update_dl_migration(struct dl_rq *dl_rq)
                                                   >> 564 {
                                                   >> 565         if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                                                   >> 566                 if (!dl_rq->overloaded) {
                                                   >> 567                         dl_set_overload(rq_of_dl_rq(dl_rq));
                                                   >> 568                         dl_rq->overloaded = 1;
                                                   >> 569                 }
                                                   >> 570         } else if (dl_rq->overloaded) {
                                                   >> 571                 dl_clear_overload(rq_of_dl_rq(dl_rq));
                                                   >> 572                 dl_rq->overloaded = 0;
                                                   >> 573         }
                                                   >> 574 }
                                                   >> 575 
                                                   >> 576 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
                                                   >> 577 {
                                                   >> 578         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 579 
                                                   >> 580         if (p->nr_cpus_allowed > 1)
                                                   >> 581                 dl_rq->dl_nr_migratory++;
                                                   >> 582 
                                                   >> 583         update_dl_migration(dl_rq);
                                                   >> 584 }
                                                   >> 585 
                                                   >> 586 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
                                                   >> 587 {
                                                   >> 588         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 589 
                                                   >> 590         if (p->nr_cpus_allowed > 1)
                                                   >> 591                 dl_rq->dl_nr_migratory--;
                                                   >> 592 
                                                   >> 593         update_dl_migration(dl_rq);
                                                   >> 594 }
                                                   >> 595 
582 #define __node_2_pdl(node) \                      596 #define __node_2_pdl(node) \
583         rb_entry((node), struct task_struct, p    597         rb_entry((node), struct task_struct, pushable_dl_tasks)
584                                                   598 
585 static inline bool __pushable_less(struct rb_n    599 static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
586 {                                                 600 {
587         return dl_entity_preempt(&__node_2_pdl    601         return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
588 }                                                 602 }
589                                                   603 
590 static inline int has_pushable_dl_tasks(struct << 
591 {                                              << 
592         return !RB_EMPTY_ROOT(&rq->dl.pushable << 
593 }                                              << 
594                                                << 
595 /*                                                604 /*
596  * The list of pushable -deadline task is not     605  * The list of pushable -deadline task is not a plist, like in
597  * sched_rt.c, it is an rb-tree with tasks ord    606  * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
598  */                                               607  */
599 static void enqueue_pushable_dl_task(struct rq    608 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
600 {                                                 609 {
601         struct rb_node *leftmost;                 610         struct rb_node *leftmost;
602                                                   611 
603         WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushab    612         WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
604                                                   613 
605         leftmost = rb_add_cached(&p->pushable_    614         leftmost = rb_add_cached(&p->pushable_dl_tasks,
606                                  &rq->dl.pusha    615                                  &rq->dl.pushable_dl_tasks_root,
607                                  __pushable_le    616                                  __pushable_less);
608         if (leftmost)                             617         if (leftmost)
609                 rq->dl.earliest_dl.next = p->d    618                 rq->dl.earliest_dl.next = p->dl.deadline;
610                                                << 
611         if (!rq->dl.overloaded) {              << 
612                 dl_set_overload(rq);           << 
613                 rq->dl.overloaded = 1;         << 
614         }                                      << 
615 }                                                 619 }
616                                                   620 
617 static void dequeue_pushable_dl_task(struct rq    621 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
618 {                                                 622 {
619         struct dl_rq *dl_rq = &rq->dl;            623         struct dl_rq *dl_rq = &rq->dl;
620         struct rb_root_cached *root = &dl_rq->    624         struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
621         struct rb_node *leftmost;                 625         struct rb_node *leftmost;
622                                                   626 
623         if (RB_EMPTY_NODE(&p->pushable_dl_task    627         if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
624                 return;                           628                 return;
625                                                   629 
626         leftmost = rb_erase_cached(&p->pushabl    630         leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
627         if (leftmost)                             631         if (leftmost)
628                 dl_rq->earliest_dl.next = __no    632                 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
629                                                   633 
630         RB_CLEAR_NODE(&p->pushable_dl_tasks);     634         RB_CLEAR_NODE(&p->pushable_dl_tasks);
                                                   >> 635 }
631                                                   636 
632         if (!has_pushable_dl_tasks(rq) && rq-> !! 637 static inline int has_pushable_dl_tasks(struct rq *rq)
633                 dl_clear_overload(rq);         !! 638 {
634                 rq->dl.overloaded = 0;         !! 639         return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
635         }                                      << 
636 }                                                 640 }
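
/*
 * Illustrative sketch (not part of deadline.c): as the comment above says,
 * the pushable set is an rb-tree ordered by absolute deadline, earliest
 * first, with the leftmost deadline cached in rq->dl.earliest_dl.next.
 * The user-space stand-in below reproduces only that ordering with qsort();
 * the toy_* names and the sample deadlines are assumptions made for the
 * example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_task {
        const char *name;
        uint64_t deadline;      /* absolute deadline, in ns */
};

/* Mirrors __pushable_less(): the earlier deadline sorts first. */
static int by_deadline(const void *a, const void *b)
{
        const struct toy_task *ta = a, *tb = b;

        return (ta->deadline > tb->deadline) - (ta->deadline < tb->deadline);
}

int main(void)
{
        struct toy_task pushable[] = {
                { "A", 300000 }, { "B", 100000 }, { "C", 200000 },
        };

        qsort(pushable, 3, sizeof(pushable[0]), by_deadline);

        /* The "earliest_dl.next" equivalent: the leftmost task's deadline. */
        printf("next pushable: %s (deadline=%llu)\n",
               pushable[0].name, (unsigned long long)pushable[0].deadline);
        return 0;
}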
637                                                   641 
638 static int push_dl_task(struct rq *rq);           642 static int push_dl_task(struct rq *rq);
639                                                   643 
640 static inline bool need_pull_dl_task(struct rq    644 static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
641 {                                                 645 {
642         return rq->online && dl_task(prev);       646         return rq->online && dl_task(prev);
643 }                                                 647 }
644                                                   648 
645 static DEFINE_PER_CPU(struct balance_callback,    649 static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
646 static DEFINE_PER_CPU(struct balance_callback,    650 static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
647                                                   651 
648 static void push_dl_tasks(struct rq *);           652 static void push_dl_tasks(struct rq *);
649 static void pull_dl_task(struct rq *);            653 static void pull_dl_task(struct rq *);
650                                                   654 
651 static inline void deadline_queue_push_tasks(s    655 static inline void deadline_queue_push_tasks(struct rq *rq)
652 {                                                 656 {
653         if (!has_pushable_dl_tasks(rq))           657         if (!has_pushable_dl_tasks(rq))
654                 return;                           658                 return;
655                                                   659 
656         queue_balance_callback(rq, &per_cpu(dl    660         queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
657 }                                                 661 }
658                                                   662 
659 static inline void deadline_queue_pull_task(st    663 static inline void deadline_queue_pull_task(struct rq *rq)
660 {                                                 664 {
661         queue_balance_callback(rq, &per_cpu(dl    665         queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
662 }                                                 666 }
663                                                   667 
664 static struct rq *find_lock_later_rq(struct ta    668 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
665                                                   669 
666 static struct rq *dl_task_offline_migration(st    670 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
667 {                                                 671 {
668         struct rq *later_rq = NULL;               672         struct rq *later_rq = NULL;
669         struct dl_bw *dl_b;                       673         struct dl_bw *dl_b;
670                                                   674 
671         later_rq = find_lock_later_rq(p, rq);     675         later_rq = find_lock_later_rq(p, rq);
672         if (!later_rq) {                          676         if (!later_rq) {
673                 int cpu;                          677                 int cpu;
674                                                   678 
675                 /*                                679                 /*
676                  * If we cannot preempt any rq    680                  * If we cannot preempt any rq, fall back to pick any
677                  * online CPU:                    681                  * online CPU:
678                  */                               682                  */
679                 cpu = cpumask_any_and(cpu_acti    683                 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
680                 if (cpu >= nr_cpu_ids) {          684                 if (cpu >= nr_cpu_ids) {
681                         /*                        685                         /*
682                          * Failed to find any     686                          * Failed to find any suitable CPU.
683                          * The task will never    687                          * The task will never come back!
684                          */                       688                          */
685                         WARN_ON_ONCE(dl_bandwi    689                         WARN_ON_ONCE(dl_bandwidth_enabled());
686                                                   690 
687                         /*                        691                         /*
688                          * If admission contro    692                          * If admission control is disabled we
689                          * try a little harder    693                          * try a little harder to let the task
690                          * run.                   694                          * run.
691                          */                       695                          */
692                         cpu = cpumask_any(cpu_    696                         cpu = cpumask_any(cpu_active_mask);
693                 }                                 697                 }
694                 later_rq = cpu_rq(cpu);           698                 later_rq = cpu_rq(cpu);
695                 double_lock_balance(rq, later_    699                 double_lock_balance(rq, later_rq);
696         }                                         700         }
697                                                   701 
698         if (p->dl.dl_non_contending || p->dl.d    702         if (p->dl.dl_non_contending || p->dl.dl_throttled) {
699                 /*                                703                 /*
700                  * Inactive timer is armed (or    704                  * Inactive timer is armed (or callback is running, but
701                  * waiting for us to release r    705                  * waiting for us to release rq locks). In any case, when it
 702                  * fires (or continues), it wi    706                  * fires (or continues), it will see running_bw of this
703                  * task migrated to later_rq (    707                  * task migrated to later_rq (and correctly handle it).
704                  */                               708                  */
705                 sub_running_bw(&p->dl, &rq->dl    709                 sub_running_bw(&p->dl, &rq->dl);
706                 sub_rq_bw(&p->dl, &rq->dl);       710                 sub_rq_bw(&p->dl, &rq->dl);
707                                                   711 
708                 add_rq_bw(&p->dl, &later_rq->d    712                 add_rq_bw(&p->dl, &later_rq->dl);
709                 add_running_bw(&p->dl, &later_    713                 add_running_bw(&p->dl, &later_rq->dl);
710         } else {                                  714         } else {
711                 sub_rq_bw(&p->dl, &rq->dl);       715                 sub_rq_bw(&p->dl, &rq->dl);
712                 add_rq_bw(&p->dl, &later_rq->d    716                 add_rq_bw(&p->dl, &later_rq->dl);
713         }                                         717         }
714                                                   718 
715         /*                                        719         /*
716          * And we finally need to fix up root_ !! 720          * And we finally need to fixup root_domain(s) bandwidth accounting,
717          * since p is still hanging out in the    721          * since p is still hanging out in the old (now moved to default) root
718          * domain.                                722          * domain.
719          */                                       723          */
720         dl_b = &rq->rd->dl_bw;                    724         dl_b = &rq->rd->dl_bw;
721         raw_spin_lock(&dl_b->lock);               725         raw_spin_lock(&dl_b->lock);
722         __dl_sub(dl_b, p->dl.dl_bw, cpumask_we    726         __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
723         raw_spin_unlock(&dl_b->lock);             727         raw_spin_unlock(&dl_b->lock);
724                                                   728 
725         dl_b = &later_rq->rd->dl_bw;              729         dl_b = &later_rq->rd->dl_bw;
726         raw_spin_lock(&dl_b->lock);               730         raw_spin_lock(&dl_b->lock);
727         __dl_add(dl_b, p->dl.dl_bw, cpumask_we    731         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
728         raw_spin_unlock(&dl_b->lock);             732         raw_spin_unlock(&dl_b->lock);
729                                                   733 
730         set_task_cpu(p, later_rq->cpu);           734         set_task_cpu(p, later_rq->cpu);
731         double_unlock_balance(later_rq, rq);      735         double_unlock_balance(later_rq, rq);
732                                                   736 
733         return later_rq;                          737         return later_rq;
734 }                                                 738 }
735                                                   739 
736 #else                                             740 #else
737                                                   741 
738 static inline                                     742 static inline
739 void enqueue_pushable_dl_task(struct rq *rq, s    743 void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740 {                                                 744 {
741 }                                                 745 }
742                                                   746 
743 static inline                                     747 static inline
744 void dequeue_pushable_dl_task(struct rq *rq, s    748 void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
745 {                                                 749 {
746 }                                                 750 }
747                                                   751 
748 static inline                                     752 static inline
749 void inc_dl_migration(struct sched_dl_entity *    753 void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750 {                                                 754 {
751 }                                                 755 }
752                                                   756 
753 static inline                                     757 static inline
754 void dec_dl_migration(struct sched_dl_entity *    758 void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
755 {                                                 759 {
756 }                                                 760 }
757                                                   761 
758 static inline void deadline_queue_push_tasks(s    762 static inline void deadline_queue_push_tasks(struct rq *rq)
759 {                                                 763 {
760 }                                                 764 }
761                                                   765 
762 static inline void deadline_queue_pull_task(st    766 static inline void deadline_queue_pull_task(struct rq *rq)
763 {                                                 767 {
764 }                                                 768 }
765 #endif /* CONFIG_SMP */                           769 #endif /* CONFIG_SMP */
766                                                   770 
767 static void                                    << 
768 enqueue_dl_entity(struct sched_dl_entity *dl_s << 
769 static void enqueue_task_dl(struct rq *rq, str    771 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
770 static void dequeue_dl_entity(struct sched_dl_ !! 772 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
771 static void wakeup_preempt_dl(struct rq *rq, s !! 773 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);
772                                                   774 
773 static inline void replenish_dl_new_period(str    775 static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
774                                             st    776                                             struct rq *rq)
775 {                                                 777 {
776         /* for non-boosted task, pi_of(dl_se)     778         /* for non-boosted task, pi_of(dl_se) == dl_se */
777         dl_se->deadline = rq_clock(rq) + pi_of    779         dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
778         dl_se->runtime = pi_of(dl_se)->dl_runt    780         dl_se->runtime = pi_of(dl_se)->dl_runtime;
779                                                << 
780         /*                                     << 
781          * If it is a deferred reservation, an << 
 782          * is not handling a starvation case,  << 
783          */                                    << 
784         if (dl_se->dl_defer & !dl_se->dl_defer << 
785                 dl_se->dl_throttled = 1;       << 
786                 dl_se->dl_defer_armed = 1;     << 
787         }                                      << 
788 }                                                 781 }
789                                                   782 
790 /*                                                783 /*
791  * We are being explicitly informed that a new    784  * We are being explicitly informed that a new instance is starting,
792  * and this means that:                           785  * and this means that:
793  *  - the absolute deadline of the entity has     786  *  - the absolute deadline of the entity has to be placed at
794  *    current time + relative deadline;           787  *    current time + relative deadline;
795  *  - the runtime of the entity has to be set     788  *  - the runtime of the entity has to be set to the maximum value.
796  *                                                789  *
 797  * The capability of specifying such an event     790  * The capability of specifying such an event is useful whenever a -deadline
798  * entity wants to (try to!) synchronize its b    791  * entity wants to (try to!) synchronize its behaviour with the scheduler's
799  * one, and to (try to!) reconcile itself with    792  * one, and to (try to!) reconcile itself with its own scheduling
800  * parameters.                                    793  * parameters.
801  */                                               794  */
802 static inline void setup_new_dl_entity(struct     795 static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
803 {                                                 796 {
804         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    797         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
805         struct rq *rq = rq_of_dl_rq(dl_rq);       798         struct rq *rq = rq_of_dl_rq(dl_rq);
806                                                   799 
807         WARN_ON(is_dl_boosted(dl_se));            800         WARN_ON(is_dl_boosted(dl_se));
808         WARN_ON(dl_time_before(rq_clock(rq), d    801         WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
809                                                   802 
810         /*                                        803         /*
811          * We are racing with the deadline tim    804          * We are racing with the deadline timer. So, do nothing because
812          * the deadline timer handler will tak    805          * the deadline timer handler will take care of properly recharging
813          * the runtime and postponing the dead    806          * the runtime and postponing the deadline
814          */                                       807          */
815         if (dl_se->dl_throttled)                  808         if (dl_se->dl_throttled)
816                 return;                           809                 return;
817                                                   810 
818         /*                                        811         /*
819          * We use the regular wall clock time     812          * We use the regular wall clock time to set deadlines in the
820          * future; in fact, we must consider e    813          * future; in fact, we must consider execution overheads (time
821          * spent on hardirq context, etc.).       814          * spent on hardirq context, etc.).
822          */                                       815          */
823         replenish_dl_new_period(dl_se, rq);       816         replenish_dl_new_period(dl_se, rq);
824 }                                                 817 }
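
/*
 * Illustrative sketch (not part of deadline.c): the arithmetic performed by
 * replenish_dl_new_period()/setup_new_dl_entity() above when a new instance
 * starts: absolute deadline = rq_clock + relative deadline, runtime refilled
 * to the maximum. All numeric values below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t now_ns      = 1000000000ULL;   /* stands in for rq_clock(rq) */
        uint64_t dl_deadline =   30000000ULL;   /* relative deadline: 30 ms */
        uint64_t dl_runtime  =   10000000ULL;   /* maximum runtime: 10 ms */

        uint64_t deadline = now_ns + dl_deadline;       /* 1.03 s */
        uint64_t runtime  = dl_runtime;                 /* full 10 ms budget */

        printf("deadline=%llu runtime=%llu\n",
               (unsigned long long)deadline, (unsigned long long)runtime);
        return 0;
}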
825                                                   818 
826 static int start_dl_timer(struct sched_dl_enti << 
827 static bool dl_entity_overflow(struct sched_dl << 
828                                                << 
829 /*                                                819 /*
830  * Pure Earliest Deadline First (EDF) scheduli    820  * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 831  * possibility of an entity lasting more than     821  * possibility of an entity lasting more than what it declared, and thus
832  * exhausting its runtime.                        822  * exhausting its runtime.
833  *                                                823  *
834  * Here we are interested in making runtime ov    824  * Here we are interested in making runtime overrun possible, but we do
 835  * not want an entity which is misbehaving to     825  * not want an entity which is misbehaving to affect the scheduling of all
836  * other entities.                                826  * other entities.
837  * Therefore, a budgeting strategy called Cons    827  * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
838  * is used, in order to confine each entity wi    828  * is used, in order to confine each entity within its own bandwidth.
839  *                                                829  *
840  * This function deals exactly with that, and     830  * This function deals exactly with that, and ensures that when the runtime
 841  * of an entity is replenished, its deadline i    831  * of an entity is replenished, its deadline is also postponed. That ensures
 842  * the overrunning entity can't interfere with    832  * the overrunning entity can't interfere with other entities in the system and
 843  * can't make them miss their deadlines. Reaso    833  * can't make them miss their deadlines. Reasons why this kind of overrun
 844  * could happen are, typically, an entity volu    834  * could happen are, typically, an entity voluntarily trying to exceed its
845  * runtime, or it just underestimated it durin    835  * runtime, or it just underestimated it during sched_setattr().
846  */                                               836  */
847 static void replenish_dl_entity(struct sched_d    837 static void replenish_dl_entity(struct sched_dl_entity *dl_se)
848 {                                                 838 {
849         struct dl_rq *dl_rq = dl_rq_of_se(dl_s    839         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
850         struct rq *rq = rq_of_dl_rq(dl_rq);       840         struct rq *rq = rq_of_dl_rq(dl_rq);
851                                                   841 
852         WARN_ON_ONCE(pi_of(dl_se)->dl_runtime     842         WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
853                                                   843 
854         /*                                        844         /*
855          * This could be the case for a !-dl t    845          * This could be the case for a !-dl task that is boosted.
856          * Just go with full inherited paramet    846          * Just go with full inherited parameters.
857          *                                     << 
858          * Or, it could be the case of a defer << 
859          * was not able to consume its runtime << 
860          * reached this point with current u > << 
861          *                                     << 
862          * In both cases, set a new period.    << 
863          */                                       847          */
864         if (dl_se->dl_deadline == 0 ||         !! 848         if (dl_se->dl_deadline == 0)
865             (dl_se->dl_defer_armed && dl_entit !! 849                 replenish_dl_new_period(dl_se, rq);
866                 dl_se->deadline = rq_clock(rq) << 
867                 dl_se->runtime = pi_of(dl_se)- << 
868         }                                      << 
869                                                   850 
870         if (dl_se->dl_yielded && dl_se->runtim    851         if (dl_se->dl_yielded && dl_se->runtime > 0)
871                 dl_se->runtime = 0;               852                 dl_se->runtime = 0;
872                                                   853 
873         /*                                        854         /*
874          * We keep moving the deadline away un    855          * We keep moving the deadline away until we get some
875          * available runtime for the entity. T    856          * available runtime for the entity. This ensures correct
876          * handling of situations where the ru    857          * handling of situations where the runtime overrun is
 877          * arbitrarily large.                     858          * arbitrarily large.
878          */                                       859          */
879         while (dl_se->runtime <= 0) {             860         while (dl_se->runtime <= 0) {
880                 dl_se->deadline += pi_of(dl_se    861                 dl_se->deadline += pi_of(dl_se)->dl_period;
881                 dl_se->runtime += pi_of(dl_se)    862                 dl_se->runtime += pi_of(dl_se)->dl_runtime;
882         }                                         863         }
883                                                   864 
884         /*                                        865         /*
885          * At this point, the deadline really     866          * At this point, the deadline really should be "in
886          * the future" with respect to rq->clo    867          * the future" with respect to rq->clock. If it's
887          * not, we are, for some reason, laggi    868          * not, we are, for some reason, lagging too much!
 888          * Anyway, after having warned userspa    869          * Anyway, after having warned userspace about that,
 889          * we still try to keep things running    870          * we still try to keep things running by
890          * resetting the deadline and the budg    871          * resetting the deadline and the budget of the
891          * entity.                                872          * entity.
892          */                                       873          */
893         if (dl_time_before(dl_se->deadline, rq    874         if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
894                 printk_deferred_once("sched: D    875                 printk_deferred_once("sched: DL replenish lagged too much\n");
895                 replenish_dl_new_period(dl_se,    876                 replenish_dl_new_period(dl_se, rq);
896         }                                         877         }
897                                                   878 
898         if (dl_se->dl_yielded)                    879         if (dl_se->dl_yielded)
899                 dl_se->dl_yielded = 0;            880                 dl_se->dl_yielded = 0;
900         if (dl_se->dl_throttled)                  881         if (dl_se->dl_throttled)
901                 dl_se->dl_throttled = 0;          882                 dl_se->dl_throttled = 0;
902                                                << 
903         /*                                     << 
904          * If this is the replenishment of a d << 
905          * clear the flag and return.          << 
906          */                                    << 
907         if (dl_se->dl_defer_armed) {           << 
908                 dl_se->dl_defer_armed = 0;     << 
909                 return;                        << 
910         }                                      << 
911                                                << 
912         /*                                     << 
 913          * At this point, if the deferred serv << 
914          * is in the future, if it is not runn << 
915          * and arm the defer timer.            << 
916          */                                    << 
917         if (dl_se->dl_defer && !dl_se->dl_defe << 
918             dl_time_before(rq_clock(dl_se->rq) << 
919                 if (!is_dl_boosted(dl_se) && d << 
920                                                << 
921                         /*                     << 
922                          * Set dl_se->dl_defer << 
923                          * inform the start_dl << 
924                          * activation.         << 
925                          */                    << 
926                         dl_se->dl_defer_armed  << 
927                         dl_se->dl_throttled =  << 
928                         if (!start_dl_timer(dl << 
929                                 /*             << 
930                                  * If for what << 
931                                  * queued but  << 
932                                  * deferrable  << 
933                                  */            << 
934                                 hrtimer_try_to << 
935                                 dl_se->dl_defe << 
936                                 dl_se->dl_thro << 
937                         }                      << 
938                 }                              << 
939         }                                      << 
940 }                                                 883 }
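
/*
 * Illustrative sketch (not part of deadline.c): the postponement loop at the
 * heart of replenish_dl_entity() above. With a 10 ms runtime, a 100 ms period
 * and an accumulated overrun of 25 ms, three replenishments are needed before
 * the budget turns positive, so the deadline moves three periods ahead. The
 * numbers are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t  runtime          = -25000000;          /* 25 ms of overrun */
        uint64_t deadline         = 500000000;          /* current absolute deadline */
        const uint64_t dl_period  = 100000000;          /* 100 ms */
        const uint64_t dl_runtime =  10000000;          /* 10 ms per period */

        while (runtime <= 0) {
                deadline += dl_period;
                runtime  += dl_runtime;
        }

        /* deadline=800000000 (three periods later), runtime=5000000 (5 ms) */
        printf("deadline=%llu runtime=%lld\n",
               (unsigned long long)deadline, (long long)runtime);
        return 0;
}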
941                                                   884 
942 /*                                                885 /*
943  * Here we check if --at time t-- an entity (w    886  * Here we check if --at time t-- an entity (which is probably being
944  * [re]activated or, in general, enqueued) can    887  * [re]activated or, in general, enqueued) can use its remaining runtime
945  * and its current deadline _without_ exceedin    888  * and its current deadline _without_ exceeding the bandwidth it is
946  * assigned (function returns true if it can't    889  * assigned (function returns true if it can't). We are in fact applying
947  * one of the CBS rules: when a task wakes up,    890  * one of the CBS rules: when a task wakes up, if the residual runtime
948  * over residual deadline fits within the allo    891  * over residual deadline fits within the allocated bandwidth, then we
949  * can keep the current (absolute) deadline an    892  * can keep the current (absolute) deadline and residual budget without
950  * disrupting the schedulability of the system    893  * disrupting the schedulability of the system. Otherwise, we should
951  * refill the runtime and set the deadline a p    894  * refill the runtime and set the deadline a period in the future,
952  * because keeping the current (absolute) dead    895  * because keeping the current (absolute) deadline of the task would
953  * result in breaking guarantees promised to o    896  * result in breaking guarantees promised to other tasks (refer to
954  * Documentation/scheduler/sched-deadline.rst     897  * Documentation/scheduler/sched-deadline.rst for more information).
955  *                                                898  *
956  * This function returns true if:                 899  * This function returns true if:
957  *                                                900  *
958  *   runtime / (deadline - t) > dl_runtime / d    901  *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
959  *                                                902  *
960  * IOW we can't recycle current parameters.       903  * IOW we can't recycle current parameters.
961  *                                                904  *
 962  * Notice that the bandwidth check is done aga    905  * Notice that the bandwidth check is done against the deadline. For a
 963  * task with deadline equal to period this is     906  * task with deadline equal to period this is the same as using
964  * dl_period instead of dl_deadline in the equ    907  * dl_period instead of dl_deadline in the equation above.
965  */                                               908  */
966 static bool dl_entity_overflow(struct sched_dl    909 static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
967 {                                                 910 {
968         u64 left, right;                          911         u64 left, right;
969                                                   912 
970         /*                                        913         /*
971          * left and right are the two sides of    914          * left and right are the two sides of the equation above,
972          * after a bit of shuffling to use mul    915          * after a bit of shuffling to use multiplications instead
973          * of divisions.                          916          * of divisions.
974          *                                        917          *
975          * Note that none of the time values i    918          * Note that none of the time values involved in the two
976          * multiplications are absolute: dl_de    919          * multiplications are absolute: dl_deadline and dl_runtime
977          * are the relative deadline and the m    920          * are the relative deadline and the maximum runtime of each
978          * instance, runtime is the runtime le    921          * instance, runtime is the runtime left for the last instance
979          * and (deadline - t), since t is rq->    922          * and (deadline - t), since t is rq->clock, is the time left
980          * to the (absolute) deadline. Even if    923          * to the (absolute) deadline. Even if overflowing the u64 type
981          * is very unlikely to occur in both c    924          * is very unlikely to occur in both cases, here we scale down
982          * as we want to avoid that risk at al    925          * as we want to avoid that risk at all. Scaling down by 10
983          * means that we reduce granularity to    926          * means that we reduce granularity to 1us. We are fine with it,
984          * since this is only a true/false che    927          * since this is only a true/false check and, anyway, thinking
985          * of anything below microseconds reso    928          * of anything below microseconds resolution is actually fiction
986          * (but still we want to give the user    929          * (but still we want to give the user that illusion >;).
987          */                                       930          */
988         left = (pi_of(dl_se)->dl_deadline >> D    931         left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
989         right = ((dl_se->deadline - t) >> DL_S    932         right = ((dl_se->deadline - t) >> DL_SCALE) *
990                 (pi_of(dl_se)->dl_runtime >> D    933                 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
991                                                   934 
992         return dl_time_before(right, left);       935         return dl_time_before(right, left);
993 }                                                 936 }
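
/*
 * Illustrative sketch (not part of deadline.c): the cross-multiplied check
 * done by dl_entity_overflow() above, i.e.
 *
 *   runtime / (deadline - t)  >  dl_runtime / dl_deadline
 *
 * rewritten as dl_deadline * runtime > (deadline - t) * dl_runtime, with both
 * sides shifted down by DL_SCALE to keep the products well inside u64. The
 * toy_* names and all numeric values are assumptions for the example, and
 * DL_SCALE is taken to be 10 here, matching the kernel's definition at the
 * time of writing.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_DL_SCALE 10

static bool toy_dl_entity_overflow(uint64_t dl_deadline, uint64_t dl_runtime,
                                   uint64_t runtime, uint64_t deadline, uint64_t t)
{
        uint64_t left  = (dl_deadline >> TOY_DL_SCALE) * (runtime >> TOY_DL_SCALE);
        uint64_t right = ((deadline - t) >> TOY_DL_SCALE) *
                         (dl_runtime >> TOY_DL_SCALE);

        return (int64_t)(right - left) < 0;     /* dl_time_before(right, left) */
}

int main(void)
{
        /* Reservation: 10 ms every 100 ms. 6 ms of budget left, 20 ms to go. */
        bool overflow = toy_dl_entity_overflow(100000000ULL, 10000000ULL,
                                               6000000ULL, 1020000000ULL,
                                               1000000000ULL);

        /* 6ms/20ms = 30% > 10% reserved: the old parameters can't be kept. */
        printf("overflow=%d\n", overflow);      /* 1 */
        return 0;
}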
994                                                   937 
995 /*                                                938 /*
 996  * Revised wakeup rule [1]: For self-suspendin    939  * Revised wakeup rule [1]: For self-suspending tasks, rather than
 997  * re-initializing the task's runtime and dead    940  * re-initializing the task's runtime and deadline, the revised wakeup
 998  * rule adjusts the task's runtime so that the    941  * rule adjusts the task's runtime so that the task does not overrun its
999  * density.                                       942  * density.
1000  *                                               943  *
1001  * Reasoning: a task may overrun the density     944  * Reasoning: a task may overrun the density if:
1002  *    runtime / (deadline - t) > dl_runtime /    945  *    runtime / (deadline - t) > dl_runtime / dl_deadline
1003  *                                               946  *
1004  * Therefore, runtime can be adjusted to:        947  * Therefore, runtime can be adjusted to:
1005  *     runtime = (dl_runtime / dl_deadline) *    948  *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
1006  *                                               949  *
 1007  * In such a way that runtime will be equal t    950  * In such a way that runtime will be equal to the maximum density
1008  * the task can use without breaking any rule    951  * the task can use without breaking any rule.
1009  *                                               952  *
1010  * [1] Luca Abeni, Giuseppe Lipari, and Juri     953  * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
1011  * bandwidth server revisited. SIGBED Rev. 11    954  * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
1012  */                                              955  */
1013 static void                                      956 static void
1014 update_dl_revised_wakeup(struct sched_dl_enti    957 update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
1015 {                                                958 {
1016         u64 laxity = dl_se->deadline - rq_clo    959         u64 laxity = dl_se->deadline - rq_clock(rq);
1017                                                  960 
1018         /*                                       961         /*
1019          * If the task has deadline < period,    962          * If the task has deadline < period, and the deadline is in the past,
1020          * it should already be throttled bef    963          * it should already be throttled before this check.
1021          *                                       964          *
1022          * See update_dl_entity() comments fo    965          * See update_dl_entity() comments for further details.
1023          */                                      966          */
1024         WARN_ON(dl_time_before(dl_se->deadlin    967         WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
1025                                                  968 
1026         dl_se->runtime = (dl_se->dl_density *    969         dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
1027 }                                                970 }
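
/*
 * Illustrative sketch (not part of deadline.c): the clamping performed by
 * update_dl_revised_wakeup() above. dl_density is the fixed-point ratio
 * dl_runtime/dl_deadline (left-shifted by BW_SHIFT, taken to be 20 here to
 * match the kernel's fixed-point convention), so the new runtime is at most
 * that fraction of the laxity left until the deadline. The toy_* names and
 * the numbers are assumptions for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_BW_SHIFT 20

int main(void)
{
        uint64_t dl_runtime  = 10000000ULL;     /* 10 ms */
        uint64_t dl_deadline = 50000000ULL;     /* 50 ms relative deadline */
        uint64_t laxity      = 20000000ULL;     /* deadline - rq_clock: 20 ms */

        /* Fixed-point density: runtime/deadline = 0.2 */
        uint64_t dl_density = (dl_runtime << TOY_BW_SHIFT) / dl_deadline;

        /* Revised wakeup rule: runtime = density * laxity. */
        uint64_t runtime = (dl_density * laxity) >> TOY_BW_SHIFT;

        /* Roughly 4 ms: 20% of the 20 ms of laxity that remains. */
        printf("new runtime = %llu ns\n", (unsigned long long)runtime);
        return 0;
}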
1028                                                  971 
1029 /*                                               972 /*
1030  * Regarding the deadline, a task with implic    973  * Regarding the deadline, a task with implicit deadline has a relative
1031  * deadline == relative period. A task with c    974  * deadline == relative period. A task with constrained deadline has a
1032  * relative deadline <= relative period.         975  * relative deadline <= relative period.
1033  *                                               976  *
1034  * We support constrained deadline tasks. How    977  * We support constrained deadline tasks. However, there are some restrictions
1035  * applied only for tasks which do not have a    978  * applied only for tasks which do not have an implicit deadline. See
1036  * update_dl_entity() to know more about such    979  * update_dl_entity() to know more about such restrictions.
1037  *                                               980  *
 1038  * dl_is_implicit() returns true if the task     981  * dl_is_implicit() returns true if the task has an implicit deadline.
1039  */                                              982  */
1040 static inline bool dl_is_implicit(struct sche    983 static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1041 {                                                984 {
1042         return dl_se->dl_deadline == dl_se->d    985         return dl_se->dl_deadline == dl_se->dl_period;
1043 }                                                986 }
1044                                                  987 
1045 /*                                               988 /*
1046  * When a deadline entity is placed in the ru    989  * When a deadline entity is placed in the runqueue, its runtime and deadline
1047  * might need to be updated. This is done by     990  * might need to be updated. This is done by a CBS wake up rule. There are two
1048  * different rules: 1) the original CBS; and     991  * different rules: 1) the original CBS; and 2) the Revisited CBS.
1049  *                                               992  *
1050  * When the task is starting a new period, th    993  * When the task is starting a new period, the Original CBS is used. In this
1051  * case, the runtime is replenished and a new    994  * case, the runtime is replenished and a new absolute deadline is set.
1052  *                                               995  *
 1053  * When a task is queued before the beginning    996  * When a task is queued before the beginning of the next period, using the
 1054  * remaining runtime and deadline could make     997  * remaining runtime and deadline could make the entity overflow, see
 1055  * dl_entity_overflow() to find more about ru    998  * dl_entity_overflow() to find more about runtime overflow. When such a case
1056  * is detected, the runtime and deadline need    999  * is detected, the runtime and deadline need to be updated.
1057  *                                               1000  *
1058  * If the task has an implicit deadline, i.e.    1001  * If the task has an implicit deadline, i.e., deadline == period, the Original
1059  * CBS is applied. The runtime is replenished !! 1002  * CBS is applied. the runtime is replenished and a new absolute deadline is
1060  * set, as in the previous cases.                1003  * set, as in the previous cases.
1061  *                                               1004  *
1062  * However, the Original CBS does not work pr    1005  * However, the Original CBS does not work properly for tasks with
1063  * deadline < period, which are said to have     1006  * deadline < period, which are said to have a constrained deadline. By
1064  * applying the Original CBS, a constrained d    1007  * applying the Original CBS, a constrained deadline task would be able to run
1065  * runtime/deadline in a period. With deadlin    1008  * runtime/deadline in a period. With deadline < period, the task would
1066  * overrun the runtime/period allowed bandwid    1009  * overrun the runtime/period allowed bandwidth, breaking the admission test.
1067  *                                               1010  *
 1068  * In order to prevent this misbehaviour, the    1011  * In order to prevent this misbehaviour, the Revisited CBS is used for
1069  * constrained deadline tasks when a runtime     1012  * constrained deadline tasks when a runtime overflow is detected. In the
1070  * Revisited CBS, rather than replenishing &     1013  * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1071  * the remaining runtime of the task is reduc    1014  * the remaining runtime of the task is reduced to avoid runtime overflow.
1072  * Please refer to the comments update_dl_rev    1015  * Please refer to the comments update_dl_revised_wakeup() function to find
1073  * more about the Revised CBS rule.              1016  * more about the Revised CBS rule.
1074  */                                              1017  */
1075 static void update_dl_entity(struct sched_dl_    1018 static void update_dl_entity(struct sched_dl_entity *dl_se)
1076 {                                                1019 {
1077         struct rq *rq = rq_of_dl_se(dl_se);   !! 1020         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
                                                   >> 1021         struct rq *rq = rq_of_dl_rq(dl_rq);
1078                                                  1022 
1079         if (dl_time_before(dl_se->deadline, r    1023         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1080             dl_entity_overflow(dl_se, rq_cloc    1024             dl_entity_overflow(dl_se, rq_clock(rq))) {
1081                                                  1025 
1082                 if (unlikely(!dl_is_implicit(    1026                 if (unlikely(!dl_is_implicit(dl_se) &&
1083                              !dl_time_before(    1027                              !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1084                              !is_dl_boosted(d    1028                              !is_dl_boosted(dl_se))) {
1085                         update_dl_revised_wak    1029                         update_dl_revised_wakeup(dl_se, rq);
1086                         return;                  1030                         return;
1087                 }                                1031                 }
1088                                                  1032 
1089                 replenish_dl_new_period(dl_se    1033                 replenish_dl_new_period(dl_se, rq);
1090         } else if (dl_server(dl_se) && dl_se- << 
1091                 /*                            << 
1092                  * The server can still use i << 
1093                  * it left the dl_defer_runni << 
1094                  */                           << 
1095                 if (!dl_se->dl_defer_running) << 
1096                         dl_se->dl_defer_armed << 
1097                         dl_se->dl_throttled = << 
1098                 }                             << 
1099         }                                        1034         }
1100 }                                                1035 }
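
/*
 * Illustrative sketch (not part of deadline.c): the decision update_dl_entity()
 * above takes at enqueue/wakeup time, written as a standalone helper over
 * plain booleans. The toy_* names, the enum and the sample values are
 * assumptions made for the example; the overflow test itself is the one
 * sketched after dl_entity_overflow().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum toy_wakeup_action {
        TOY_KEEP_PARAMETERS,    /* residual runtime/deadline can be recycled */
        TOY_REVISED_WAKEUP,     /* constrained task: only clamp the runtime */
        TOY_REPLENISH_NEW,      /* new deadline + full runtime (original CBS) */
};

static enum toy_wakeup_action
toy_update_dl_entity(uint64_t now, uint64_t deadline, bool overflow,
                     bool implicit, bool boosted)
{
        bool deadline_in_past = deadline < now;

        if (!deadline_in_past && !overflow)
                return TOY_KEEP_PARAMETERS;

        if (!implicit && !deadline_in_past && !boosted)
                return TOY_REVISED_WAKEUP;

        return TOY_REPLENISH_NEW;
}

int main(void)
{
        /* Constrained task, deadline still ahead, residual bandwidth too high. */
        printf("%d\n", toy_update_dl_entity(100, 150, true, false, false));
        /* -> TOY_REVISED_WAKEUP (1) */

        /* Implicit task that slept past its deadline. */
        printf("%d\n", toy_update_dl_entity(200, 150, true, true, false));
        /* -> TOY_REPLENISH_NEW (2) */
        return 0;
}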
1101                                                  1036 
1102 static inline u64 dl_next_period(struct sched    1037 static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1103 {                                                1038 {
1104         return dl_se->deadline - dl_se->dl_de    1039         return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1105 }                                                1040 }
1106                                                  1041 
1107 /*                                               1042 /*
1108  * If the entity depleted all its runtime, an    1043  * If the entity depleted all its runtime, and if we want it to sleep
1109  * while waiting for some new execution time     1044  * while waiting for some new execution time to become available, we
1110  * set the bandwidth replenishment timer to t    1045  * set the bandwidth replenishment timer to the replenishment instant
1111  * and try to activate it.                       1046  * and try to activate it.
1112  *                                               1047  *
1113  * Notice that it is important for the caller    1048  * Notice that it is important for the caller to know if the timer
1114  * actually started or not (i.e., the repleni    1049  * actually started or not (i.e., the replenishment instant is in
1115  * the future or in the past).                   1050  * the future or in the past).
1116  */                                              1051  */
1117 static int start_dl_timer(struct sched_dl_ent !! 1052 static int start_dl_timer(struct task_struct *p)
1118 {                                                1053 {
                                                   >> 1054         struct sched_dl_entity *dl_se = &p->dl;
1119         struct hrtimer *timer = &dl_se->dl_ti    1055         struct hrtimer *timer = &dl_se->dl_timer;
1120         struct dl_rq *dl_rq = dl_rq_of_se(dl_ !! 1056         struct rq *rq = task_rq(p);
1121         struct rq *rq = rq_of_dl_rq(dl_rq);   << 
1122         ktime_t now, act;                        1057         ktime_t now, act;
1123         s64 delta;                               1058         s64 delta;
1124                                                  1059 
1125         lockdep_assert_rq_held(rq);              1060         lockdep_assert_rq_held(rq);
1126                                                  1061 
1127         /*                                       1062         /*
1128          * We want the timer to fire at the d    1063          * We want the timer to fire at the deadline, but considering
1129          * that it is actually coming from rq    1064          * that it is actually coming from rq->clock and not from
1130          * hrtimer's time base reading.          1065          * hrtimer's time base reading.
1131          *                                    !! 1066          */
1132          * The deferred reservation will have !! 1067         act = ns_to_ktime(dl_next_period(dl_se));
1133          * (deadline - runtime). At that poin << 
1134          * if the current deadline can be use << 
1135          * required to avoid add too much pre << 
1136          * (current u > U).                   << 
1137          */                                   << 
1138         if (dl_se->dl_defer_armed) {          << 
1139                 WARN_ON_ONCE(!dl_se->dl_throt << 
1140                 act = ns_to_ktime(dl_se->dead << 
1141         } else {                              << 
1142                 /* act = deadline - rel-deadl << 
1143                 act = ns_to_ktime(dl_next_per << 
1144         }                                     << 
1145                                               << 
1146         now = hrtimer_cb_get_time(timer);        1068         now = hrtimer_cb_get_time(timer);
1147         delta = ktime_to_ns(now) - rq_clock(r    1069         delta = ktime_to_ns(now) - rq_clock(rq);
1148         act = ktime_add_ns(act, delta);          1070         act = ktime_add_ns(act, delta);
1149                                                  1071 
1150         /*                                       1072         /*
1151          * If the expiry time already passed,    1073          * If the expiry time already passed, e.g., because the value
1152          * chosen as the deadline is too smal    1074          * chosen as the deadline is too small, don't even try to
1153          * start the timer in the past!          1075          * start the timer in the past!
1154          */                                      1076          */
1155         if (ktime_us_delta(act, now) < 0)        1077         if (ktime_us_delta(act, now) < 0)
1156                 return 0;                        1078                 return 0;
1157                                                  1079 
1158         /*                                       1080         /*
1159          * !enqueued will guarantee another c    1081          * !enqueued will guarantee another callback; even if one is already in
1160          * progress. This ensures a balanced     1082          * progress. This ensures a balanced {get,put}_task_struct().
1161          *                                       1083          *
1162          * The race against __run_timer() cle    1084          * The race against __run_timer() clearing the enqueued state is
1163          * harmless because we're holding tas    1085          * harmless because we're holding task_rq()->lock, therefore the timer
1164          * expiring after we've done the chec    1086          * expiring after we've done the check will wait on its task_rq_lock()
1165          * and observe our state.                1087          * and observe our state.
1166          */                                      1088          */
1167         if (!hrtimer_is_queued(timer)) {         1089         if (!hrtimer_is_queued(timer)) {
1168                 if (!dl_server(dl_se))        !! 1090                 get_task_struct(p);
1169                         get_task_struct(dl_ta << 
1170                 hrtimer_start(timer, act, HRT    1091                 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
1171         }                                        1092         }
1172                                                  1093 
1173         return 1;                                1094         return 1;
1174 }                                                1095 }
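
/*
 * Illustrative sketch (not part of deadline.c): how start_dl_timer() above
 * picks the expiry in the non-deferred case. The replenishment instant is
 * dl_next_period() = deadline - dl_deadline + dl_period on the rq_clock
 * timeline; it is then shifted by the offset between the hrtimer base's
 * notion of "now" and rq_clock, and the timer is not armed if the result is
 * already in the past. All values below are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t deadline    = 1030000000ULL;   /* absolute deadline (rq_clock ns) */
        uint64_t dl_deadline =   30000000ULL;   /* relative deadline: 30 ms */
        uint64_t dl_period   =  100000000ULL;   /* period: 100 ms */

        uint64_t rq_clock    = 1010000000ULL;   /* scheduler clock "now" */
        uint64_t hrtimer_now = 1010005000ULL;   /* hrtimer base runs 5 us ahead */

        /* dl_next_period(): start of the next period, on the rq_clock timeline. */
        uint64_t act = deadline - dl_deadline + dl_period;      /* 1.1 s */

        /* Re-express it on the hrtimer timeline. */
        act += hrtimer_now - rq_clock;

        if ((int64_t)(act - hrtimer_now) < 0)
                printf("expiry already passed, don't start the timer\n");
        else
                printf("arm the timer %llu ns from now\n",
                       (unsigned long long)(act - hrtimer_now));
        return 0;
}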
1175                                                  1096 
1176 static void __push_dl_task(struct rq *rq, str << 
1177 {                                             << 
1178 #ifdef CONFIG_SMP                             << 
1179         /*                                    << 
1180          * Queueing this task back might have << 
1181          * to kick someone away.              << 
1182          */                                   << 
1183         if (has_pushable_dl_tasks(rq)) {      << 
1184                 /*                            << 
1185                  * Nothing relies on rq->lock << 
1186                  * rq->lock.                  << 
1187                  */                           << 
1188                 rq_unpin_lock(rq, rf);        << 
1189                 push_dl_task(rq);             << 
1190                 rq_repin_lock(rq, rf);        << 
1191         }                                     << 
1192 #endif                                        << 
1193 }                                             << 
1194                                               << 
1195 /* a defer timer will not be reset if the run << 
1196 static const u64 dl_server_min_res = 1 * NSEC << 
1197                                               << 
1198 static enum hrtimer_restart dl_server_timer(s << 
1199 {                                             << 
1200         struct rq *rq = rq_of_dl_se(dl_se);   << 
1201         u64 fw;                               << 
1202                                               << 
1203         scoped_guard (rq_lock, rq) {          << 
1204                 struct rq_flags *rf = &scope. << 
1205                                               << 
1206                 if (!dl_se->dl_throttled || ! << 
1207                         return HRTIMER_NOREST << 
1208                                               << 
1209                 sched_clock_tick();           << 
1210                 update_rq_clock(rq);          << 
1211                                               << 
1212                 if (!dl_se->dl_runtime)       << 
1213                         return HRTIMER_NOREST << 
1214                                               << 
1215                 if (!dl_se->server_has_tasks( << 
1216                         replenish_dl_entity(d << 
1217                         return HRTIMER_NOREST << 
1218                 }                             << 
1219                                               << 
1220                 if (dl_se->dl_defer_armed) {  << 
1221                         /*                    << 
1222                          * First check if the << 
1223                          * If so, it is possi << 
1224                          * of time. The dl_se << 
1225                          * forwarding the tim << 
1226                          */                   << 
1227                         if (dl_time_before(rq << 
1228                                            (d << 
1229                                               << 
1230                                 /* reset the  << 
1231                                 fw = dl_se->d << 
1232                                               << 
1233                                 hrtimer_forwa << 
1234                                 return HRTIME << 
1235                         }                     << 
1236                                               << 
1237                         dl_se->dl_defer_runni << 
1238                 }                             << 
1239                                               << 
1240                 enqueue_dl_entity(dl_se, ENQU << 
1241                                               << 
1242                 if (!dl_task(dl_se->rq->curr) << 
1243                         resched_curr(rq);     << 
1244                                               << 
1245                 __push_dl_task(rq, rf);       << 
1246         }                                     << 
1247                                               << 
1248         return HRTIMER_NORESTART;             << 
1249 }                                             << 
1250                                               << 
1251 /*                                               1097 /*
1252  * This is the bandwidth enforcement timer ca    1098  * This is the bandwidth enforcement timer callback. If here, we know
1253  * a task is not on its dl_rq, since the fact    1099  * a task is not on its dl_rq, since the fact that the timer was running
1254  * means the task is throttled and needs a ru    1100  * means the task is throttled and needs a runtime replenishment.
1255  *                                               1101  *
1256  * However, what we actually do depends on th    1102  * However, what we actually do depends on the fact the task is active,
1257  * (it is on its rq) or has been removed from    1103  * (it is on its rq) or has been removed from there by a call to
1258  * dequeue_task_dl(). In the former case we m    1104  * dequeue_task_dl(). In the former case we must issue the runtime
1259  * replenishment and add the task back to the    1105  * replenishment and add the task back to the dl_rq; in the latter, we just
1260  * do nothing but clearing dl_throttled, so t    1106  * do nothing but clearing dl_throttled, so that runtime and deadline
1261  * updating (and the queueing back to dl_rq)     1107  * updating (and the queueing back to dl_rq) will be done by the
1262  * next call to enqueue_task_dl().               1108  * next call to enqueue_task_dl().
1263  */                                              1109  */
1264 static enum hrtimer_restart dl_task_timer(str    1110 static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1265 {                                                1111 {
1266         struct sched_dl_entity *dl_se = conta    1112         struct sched_dl_entity *dl_se = container_of(timer,
1267                                                  1113                                                      struct sched_dl_entity,
1268                                                  1114                                                      dl_timer);
1269         struct task_struct *p;                !! 1115         struct task_struct *p = dl_task_of(dl_se);
1270         struct rq_flags rf;                      1116         struct rq_flags rf;
1271         struct rq *rq;                           1117         struct rq *rq;
1272                                                  1118 
1273         if (dl_server(dl_se))                 << 
1274                 return dl_server_timer(timer, << 
1275                                               << 
1276         p = dl_task_of(dl_se);                << 
1277         rq = task_rq_lock(p, &rf);               1119         rq = task_rq_lock(p, &rf);
1278                                                  1120 
1279         /*                                       1121         /*
1280          * The task might have changed its sc    1122          * The task might have changed its scheduling policy to something
1281          * different than SCHED_DEADLINE (thr    1123          * different than SCHED_DEADLINE (through switched_from_dl()).
1282          */                                      1124          */
1283         if (!dl_task(p))                         1125         if (!dl_task(p))
1284                 goto unlock;                     1126                 goto unlock;
1285                                                  1127 
1286         /*                                       1128         /*
1287          * The task might have been boosted b    1129          * The task might have been boosted by someone else and might be in the
1288          * boosting/deboosting path, its not     1130          * boosting/deboosting path, its not throttled.
1289          */                                      1131          */
1290         if (is_dl_boosted(dl_se))                1132         if (is_dl_boosted(dl_se))
1291                 goto unlock;                     1133                 goto unlock;
1292                                                  1134 
1293         /*                                       1135         /*
1294          * Spurious timer due to start_dl_tim    1136          * Spurious timer due to start_dl_timer() race; or we already received
1295          * a replenishment from rt_mutex_setp    1137          * a replenishment from rt_mutex_setprio().
1296          */                                      1138          */
1297         if (!dl_se->dl_throttled)                1139         if (!dl_se->dl_throttled)
1298                 goto unlock;                     1140                 goto unlock;
1299                                                  1141 
1300         sched_clock_tick();                      1142         sched_clock_tick();
1301         update_rq_clock(rq);                     1143         update_rq_clock(rq);
1302                                                  1144 
1303         /*                                       1145         /*
1304          * If the throttle happened during sc    1146          * If the throttle happened during sched-out; like:
1305          *                                       1147          *
1306          *   schedule()                          1148          *   schedule()
1307          *     deactivate_task()                 1149          *     deactivate_task()
1308          *       dequeue_task_dl()               1150          *       dequeue_task_dl()
1309          *         update_curr_dl()              1151          *         update_curr_dl()
1310          *           start_dl_timer()            1152          *           start_dl_timer()
1311          *         __dequeue_task_dl()           1153          *         __dequeue_task_dl()
1312          *     prev->on_rq = 0;                  1154          *     prev->on_rq = 0;
1313          *                                       1155          *
1314          * We can be both throttled and !queu    1156          * We can be both throttled and !queued. Replenish the counter
1315          * but do not enqueue -- wait for our    1157          * but do not enqueue -- wait for our wakeup to do that.
1316          */                                      1158          */
1317         if (!task_on_rq_queued(p)) {             1159         if (!task_on_rq_queued(p)) {
1318                 replenish_dl_entity(dl_se);      1160                 replenish_dl_entity(dl_se);
1319                 goto unlock;                     1161                 goto unlock;
1320         }                                        1162         }
1321                                                  1163 
1322 #ifdef CONFIG_SMP                                1164 #ifdef CONFIG_SMP
1323         if (unlikely(!rq->online)) {             1165         if (unlikely(!rq->online)) {
1324                 /*                               1166                 /*
1325                  * If the runqueue is no long    1167                  * If the runqueue is no longer available, migrate the
1326                  * task elsewhere. This neces    1168                  * task elsewhere. This necessarily changes rq.
1327                  */                              1169                  */
1328                 lockdep_unpin_lock(__rq_lockp    1170                 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
1329                 rq = dl_task_offline_migratio    1171                 rq = dl_task_offline_migration(rq, p);
1330                 rf.cookie = lockdep_pin_lock(    1172                 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
1331                 update_rq_clock(rq);             1173                 update_rq_clock(rq);
1332                                                  1174 
1333                 /*                               1175                 /*
1334                  * Now that the task has been    1176                  * Now that the task has been migrated to the new RQ and we
1335                  * have that locked, proceed     1177                  * have that locked, proceed as normal and enqueue the task
1336                  * there.                        1178                  * there.
1337                  */                              1179                  */
1338         }                                        1180         }
1339 #endif                                           1181 #endif
1340                                                  1182 
1341         enqueue_task_dl(rq, p, ENQUEUE_REPLEN    1183         enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1342         if (dl_task(rq->curr))                   1184         if (dl_task(rq->curr))
1343                 wakeup_preempt_dl(rq, p, 0);  !! 1185                 check_preempt_curr_dl(rq, p, 0);
1344         else                                     1186         else
1345                 resched_curr(rq);                1187                 resched_curr(rq);
1346                                                  1188 
1347         __push_dl_task(rq, &rf);              !! 1189 #ifdef CONFIG_SMP
                                                   >> 1190         /*
                                                   >> 1191          * Queueing this task back might have overloaded rq, check if we need
                                                   >> 1192          * to kick someone away.
                                                   >> 1193          */
                                                   >> 1194         if (has_pushable_dl_tasks(rq)) {
                                                   >> 1195                 /*
                                                   >> 1196                  * Nothing relies on rq->lock after this, so its safe to drop
                                                   >> 1197                  * rq->lock.
                                                   >> 1198                  */
                                                   >> 1199                 rq_unpin_lock(rq, &rf);
                                                   >> 1200                 push_dl_task(rq);
                                                   >> 1201                 rq_repin_lock(rq, &rf);
                                                   >> 1202         }
                                                   >> 1203 #endif
1348                                                  1204 
1349 unlock:                                          1205 unlock:
1350         task_rq_unlock(rq, p, &rf);              1206         task_rq_unlock(rq, p, &rf);
1351                                                  1207 
1352         /*                                       1208         /*
1353          * This can free the task_struct, inc    1209          * This can free the task_struct, including this hrtimer, do not touch
1354          * anything related to that after thi    1210          * anything related to that after this.
1355          */                                      1211          */
1356         put_task_struct(p);                      1212         put_task_struct(p);
1357                                                  1213 
1358         return HRTIMER_NORESTART;                1214         return HRTIMER_NORESTART;
1359 }                                                1215 }
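For context, the reservation that dl_task_timer() replenishes is what userspace asks for when it switches a task to SCHED_DEADLINE with sched_setattr(2). A self-contained sketch with arbitrary example parameters (10 ms of runtime within a 30 ms relative deadline, every 100 ms); glibc provides no wrapper, so the raw syscall is used:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* minimal sched_attr layout understood by sched_setattr(2) */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* ns */
	uint64_t sched_deadline;	/* ns */
	uint64_t sched_period;		/* ns */
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  = 10 * 1000 * 1000;		/* 10 ms of budget ...     */
	attr.sched_deadline = 30 * 1000 * 1000;		/* ... within 30 ms ...    */
	attr.sched_period   = 100 * 1000 * 1000;	/* ... out of every 100 ms */

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");	/* e.g. EBUSY if admission control refuses */
		return 1;
	}

	/* from here on, overruns are throttled and replenished by dl_task_timer() */
	pause();
	return 0;
}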
1360                                                  1216 
1361 static void init_dl_task_timer(struct sched_d !! 1217 void init_dl_task_timer(struct sched_dl_entity *dl_se)
1362 {                                                1218 {
1363         struct hrtimer *timer = &dl_se->dl_ti    1219         struct hrtimer *timer = &dl_se->dl_timer;
1364                                                  1220 
1365         hrtimer_init(timer, CLOCK_MONOTONIC,     1221         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1366         timer->function = dl_task_timer;         1222         timer->function = dl_task_timer;
1367 }                                                1223 }
1368                                                  1224 
1369 /*                                               1225 /*
1370  * During the activation, CBS checks if it ca    1226  * During the activation, CBS checks if it can reuse the current task's
1371  * runtime and period. If the deadline of the    1227  * runtime and period. If the deadline of the task is in the past, CBS
1372  * cannot use the runtime, and so it replenis    1228  * cannot use the runtime, and so it replenishes the task. This rule
1373  * works fine for implicit deadline tasks (de    1229  * works fine for implicit deadline tasks (deadline == period), and the
1374  * CBS was designed for implicit deadline tas    1230  * CBS was designed for implicit deadline tasks. However, a task with
1375  * constrained deadline (deadline < period) m    1231  * constrained deadline (deadline < period) might be awakened after the
1376  * deadline, but before the next period. In t    1232  * deadline, but before the next period. In this case, replenishing the
1377  * task would allow it to run for runtime / d    1233  * task would allow it to run for runtime / deadline. As in this case
1378  * deadline < period, CBS enables a task to r    1234  * deadline < period, CBS enables a task to run for more than the
1379  * runtime / period. In a very loaded system,    1235  * runtime / period. In a very loaded system, this can cause a domino
1380  * effect, making other tasks miss their dead    1236  * effect, making other tasks miss their deadlines.
1381  *                                               1237  *
1382  * To avoid this problem, in the activation o    1238  * To avoid this problem, in the activation of a constrained deadline
1383  * task after the deadline but before the nex    1239  * task after the deadline but before the next period, throttle the
1384  * task and set the replenishing timer to the    1240  * task and set the replenishing timer to the begin of the next period,
1385  * unless it is boosted.                         1241  * unless it is boosted.
1386  */                                              1242  */
1387 static inline void dl_check_constrained_dl(st    1243 static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1388 {                                                1244 {
1389         struct rq *rq = rq_of_dl_se(dl_se);   !! 1245         struct task_struct *p = dl_task_of(dl_se);
                                                   >> 1246         struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));
1390                                                  1247 
1391         if (dl_time_before(dl_se->deadline, r    1248         if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1392             dl_time_before(rq_clock(rq), dl_n    1249             dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1393                 if (unlikely(is_dl_boosted(dl !! 1250                 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1394                         return;                  1251                         return;
1395                 dl_se->dl_throttled = 1;         1252                 dl_se->dl_throttled = 1;
1396                 if (dl_se->runtime > 0)          1253                 if (dl_se->runtime > 0)
1397                         dl_se->runtime = 0;      1254                         dl_se->runtime = 0;
1398         }                                        1255         }
1399 }                                                1256 }
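A worked example of the problem described in the comment above, with hypothetical parameters (runtime 5 ms, deadline 10 ms, period 100 ms): replenishing on a wakeup that lands after the deadline but before the next period would let the task approach runtime/deadline of a CPU instead of the runtime/period it was admitted with:

#include <stdio.h>

int main(void)
{
	const double runtime_ms = 5.0, deadline_ms = 10.0, period_ms = 100.0;

	/* bandwidth accounted by admission control */
	printf("admitted:  runtime/period   = %.2f%% CPU\n",
	       100.0 * runtime_ms / period_ms);

	/*
	 * If every wakeup between the deadline and the next period triggered
	 * a replenishment, the task could run up to runtime every deadline.
	 */
	printf("possible:  runtime/deadline = %.2f%% CPU\n",
	       100.0 * runtime_ms / deadline_ms);
	return 0;
}

With these numbers that is 5% versus 50%, which is why the constrained task is throttled until the next period instead of being replenished.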
1400                                                  1257 
1401 static                                           1258 static
1402 int dl_runtime_exceeded(struct sched_dl_entit    1259 int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
1403 {                                                1260 {
1404         return (dl_se->runtime <= 0);            1261         return (dl_se->runtime <= 0);
1405 }                                                1262 }
1406                                                  1263 
1407 /*                                               1264 /*
1408  * This function implements the GRUB accounti !! 1265  * This function implements the GRUB accounting rule:
1409  * GRUB reclaiming algorithm, the runtime is  !! 1266  * according to the GRUB reclaiming algorithm, the runtime is
1410  * but as "dq = -(max{u, (Umax - Uinact - Uex !! 1267  * not decreased as "dq = -dt", but as
                                                   >> 1268  * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
1411  * where u is the utilization of the task, Um    1269  * where u is the utilization of the task, Umax is the maximum reclaimable
1412  * utilization, Uinact is the (per-runqueue)     1270  * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1413  * as the difference between the "total runqu    1271  * as the difference between the "total runqueue utilization" and the
1414  * "runqueue active utilization", and Uextra  !! 1272  * runqueue active utilization, and Uextra is the (per runqueue) extra
1415  * reclaimable utilization.                      1273  * reclaimable utilization.
1416  * Since rq->dl.running_bw and rq->dl.this_bw !! 1274  * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
1417  * by 2^BW_SHIFT, the result has to be shifte !! 1275  * multiplied by 2^BW_SHIFT, the result has to be shifted right by
1418  * Since rq->dl.bw_ratio contains 1 / Umax mu !! 1276  * BW_SHIFT.
1419  * is multiplied by rq->dl.bw_ratio and shift !! 1277  * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
1420  * Since delta is a 64 bit variable, to have  !! 1278  * dl_bw is multiped by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1421  * larger than 2^(64 - 20 - 8), which is more !! 1279  * Since delta is a 64 bit variable, to have an overflow its value
1422  * not an issue here.                         !! 1280  * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
                                                   >> 1281  * So, overflow is not an issue here.
1423  */                                              1282  */
1424 static u64 grub_reclaim(u64 delta, struct rq     1283 static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
1425 {                                                1284 {
1426         u64 u_act;                            << 
1427         u64 u_inact = rq->dl.this_bw - rq->dl    1285         u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
                                                   >> 1286         u64 u_act;
                                                   >> 1287         u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;
1428                                                  1288 
1429         /*                                       1289         /*
1430          * Instead of computing max{u, (u_max !! 1290          * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
1431          * compare u_inact + u_extra with u_m !! 1291          * we compare u_inact + rq->dl.extra_bw with
1432          * can be larger than u_max. So, u_ma !! 1292          * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
1433          * negative leading to wrong results. !! 1293          * u_inact + rq->dl.extra_bw can be larger than
                                                   >> 1294          * 1 * (so, 1 - u_inact - rq->dl.extra_bw would be negative
                                                   >> 1295          * leading to wrong results)
1434          */                                      1296          */
1435         if (u_inact + rq->dl.extra_bw > rq->d !! 1297         if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
1436                 u_act = dl_se->dl_bw;         !! 1298                 u_act = u_act_min;
1437         else                                     1299         else
1438                 u_act = rq->dl.max_bw - u_ina !! 1300                 u_act = BW_UNIT - u_inact - rq->dl.extra_bw;
1439                                                  1301 
1440         u_act = (u_act * rq->dl.bw_ratio) >>  << 
1441         return (delta * u_act) >> BW_SHIFT;      1302         return (delta * u_act) >> BW_SHIFT;
1442 }                                                1303 }
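The 6.12-rc7 body of grub_reclaim() is truncated in the left column; the sketch below redoes the same fixed-point arithmetic in userspace so the rule reconstructed from the truncated comment, dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt, can be read in one piece. It assumes the usual BW_SHIFT = 20 and RATIO_SHIFT = 8 and takes the field names (max_bw, extra_bw, bw_ratio) on trust from the truncated listing; the function and parameter names here are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)
#define RATIO_SHIFT	8

/*
 * delta:      elapsed time in ns
 * dl_bw:      task utilization u            (<< BW_SHIFT)
 * this_bw:    total rq utilization Utot     (<< BW_SHIFT)
 * running_bw: active rq utilization Uact    (<< BW_SHIFT)
 * extra_bw:   extra reclaimable util Uextra (<< BW_SHIFT)
 * max_bw:     max reclaimable util Umax     (<< BW_SHIFT)
 * bw_ratio:   1 / Umax                      (<< RATIO_SHIFT)
 */
static uint64_t grub_reclaim_demo(uint64_t delta, uint64_t dl_bw,
				  uint64_t this_bw, uint64_t running_bw,
				  uint64_t extra_bw, uint64_t max_bw,
				  uint64_t bw_ratio)
{
	uint64_t u_inact = this_bw - running_bw;	/* Uinact = Utot - Uact */
	uint64_t u_act;

	/* max{u, Umax - Uinact - Uextra}, avoiding a negative intermediate */
	if (u_inact + extra_bw > max_bw - dl_bw)
		u_act = dl_bw;
	else
		u_act = max_bw - u_inact - extra_bw;

	u_act = (u_act * bw_ratio) >> RATIO_SHIFT;	/* divide by Umax */

	return (delta * u_act) >> BW_SHIFT;		/* dq for this dt */
}

int main(void)
{
	uint64_t u      = BW_UNIT / 4;			/* task utilization 0.25 */
	uint64_t utot   = BW_UNIT / 2;			/* total rq utilization  */
	uint64_t uact   = BW_UNIT / 4;			/* running utilization   */
	uint64_t uextra = 0;
	uint64_t umax   = (uint64_t)(0.95 * BW_UNIT);	/* 95% reclaimable       */
	uint64_t ratio  = ((uint64_t)1 << (BW_SHIFT + RATIO_SHIFT)) / umax;

	/* how much budget is charged for 1 ms of wall time? */
	printf("charged %llu ns per 1000000 ns\n",
	       (unsigned long long)grub_reclaim_demo(1000000, u, utot, uact,
						     uextra, umax, ratio));
	return 0;
}

With these inputs the task is charged roughly 0.74 ms of budget per millisecond of wall time, which is how it reclaims bandwidth left unused by the rest of the runqueue.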
1443                                                  1304 
1444 s64 dl_scaled_delta_exec(struct rq *rq, struc !! 1305 /*
                                                   >> 1306  * Update the current task's runtime statistics (provided it is still
                                                   >> 1307  * a -deadline task and has not been removed from the dl_rq).
                                                   >> 1308  */
                                                   >> 1309 static void update_curr_dl(struct rq *rq)
1445 {                                                1310 {
1446         s64 scaled_delta_exec;                !! 1311         struct task_struct *curr = rq->curr;
                                                   >> 1312         struct sched_dl_entity *dl_se = &curr->dl;
                                                   >> 1313         u64 delta_exec, scaled_delta_exec;
                                                   >> 1314         int cpu = cpu_of(rq);
                                                   >> 1315         u64 now;
                                                   >> 1316 
                                                   >> 1317         if (!dl_task(curr) || !on_dl_rq(dl_se))
                                                   >> 1318                 return;
                                                   >> 1319 
                                                   >> 1320         /*
                                                   >> 1321          * Consumed budget is computed considering the time as
                                                   >> 1322          * observed by schedulable tasks (excluding time spent
                                                   >> 1323          * in hardirq context, etc.). Deadlines are instead
                                                   >> 1324          * computed using hard walltime. This seems to be the more
                                                   >> 1325          * natural solution, but the full ramifications of this
                                                   >> 1326          * approach need further study.
                                                   >> 1327          */
                                                   >> 1328         now = rq_clock_task(rq);
                                                   >> 1329         delta_exec = now - curr->se.exec_start;
                                                   >> 1330         if (unlikely((s64)delta_exec <= 0)) {
                                                   >> 1331                 if (unlikely(dl_se->dl_yielded))
                                                   >> 1332                         goto throttle;
                                                   >> 1333                 return;
                                                   >> 1334         }
                                                   >> 1335 
                                                   >> 1336         schedstat_set(curr->stats.exec_max,
                                                   >> 1337                       max(curr->stats.exec_max, delta_exec));
                                                   >> 1338 
                                                   >> 1339         trace_sched_stat_runtime(curr, delta_exec, 0);
                                                   >> 1340 
                                                   >> 1341         update_current_exec_runtime(curr, now, delta_exec);
                                                   >> 1342 
                                                   >> 1343         if (dl_entity_is_special(dl_se))
                                                   >> 1344                 return;
1447                                                  1345 
1448         /*                                       1346         /*
1449          * For tasks that participate in GRUB    1347          * For tasks that participate in GRUB, we implement GRUB-PA: the
1450          * spare reclaimed bandwidth is used     1348          * spare reclaimed bandwidth is used to clock down frequency.
1451          *                                       1349          *
1452          * For the others, we still need to s    1350          * For the others, we still need to scale reservation parameters
1453          * according to current frequency and    1351          * according to current frequency and CPU maximum capacity.
1454          */                                      1352          */
1455         if (unlikely(dl_se->flags & SCHED_FLA    1353         if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1456                 scaled_delta_exec = grub_recl !! 1354                 scaled_delta_exec = grub_reclaim(delta_exec,
                                                   >> 1355                                                  rq,
                                                   >> 1356                                                  &curr->dl);
1457         } else {                                 1357         } else {
1458                 int cpu = cpu_of(rq);         << 
1459                 unsigned long scale_freq = ar    1358                 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
1460                 unsigned long scale_cpu = arc    1359                 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1461                                                  1360 
1462                 scaled_delta_exec = cap_scale    1361                 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1463                 scaled_delta_exec = cap_scale    1362                 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1464         }                                        1363         }
1465                                                  1364 
1466         return scaled_delta_exec;             << 
1467 }                                             << 
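For the non-reclaiming branch above, consumed time is scaled by the current frequency and by the CPU's capacity, so running on a slow or throttled CPU drains the budget more slowly. A small demo of the cap_scale() arithmetic, assuming the conventional SCHED_CAPACITY_SHIFT of 10 (capacities expressed against 1024):

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10

/* scale a value by a capacity expressed against 1 << SCHED_CAPACITY_SHIFT */
static uint64_t cap_scale(uint64_t v, unsigned long cap)
{
	return (v * cap) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
	uint64_t delta_exec = 1000000;		/* 1 ms of task clock                 */
	unsigned long scale_freq = 512;		/* CPU running at half its max freq   */
	unsigned long scale_cpu  = 768;		/* little core: 75% of the biggest CPU */

	uint64_t scaled = cap_scale(cap_scale(delta_exec, scale_freq), scale_cpu);

	/* 1 ms at half frequency on a 75% core only burns ~0.375 ms of budget */
	printf("scaled_delta_exec = %llu ns\n", (unsigned long long)scaled);
	return 0;
}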
1468                                               << 
1469 static inline void                            << 
1470 update_stats_dequeue_dl(struct dl_rq *dl_rq,  << 
1471                         int flags);           << 
1472 static void update_curr_dl_se(struct rq *rq,  << 
1473 {                                             << 
1474         s64 scaled_delta_exec;                << 
1475                                               << 
1476         if (unlikely(delta_exec <= 0)) {      << 
1477                 if (unlikely(dl_se->dl_yielde << 
1478                         goto throttle;        << 
1479                 return;                       << 
1480         }                                     << 
1481                                               << 
1482         if (dl_server(dl_se) && dl_se->dl_thr << 
1483                 return;                       << 
1484                                               << 
1485         if (dl_entity_is_special(dl_se))      << 
1486                 return;                       << 
1487                                               << 
1488         scaled_delta_exec = dl_scaled_delta_e << 
1489                                               << 
1490         dl_se->runtime -= scaled_delta_exec;     1365         dl_se->runtime -= scaled_delta_exec;
1491                                                  1366 
1492         /*                                    << 
1493          * The fair server can consume its ru << 
1494          * running as regular CFS).           << 
1495          *                                    << 
1496          * If the server consumes its entire  << 
1497          * is not required for the current pe << 
1498          * starting a new period, pushing the << 
1499          */                                   << 
1500         if (dl_se->dl_defer && dl_se->dl_thro << 
1501                 /*                            << 
1502                  * If the server was previous << 
1503                  * took place, it this point  << 
1504                  * was able to get runtime in << 
1505                  * state.                     << 
1506                  */                           << 
1507                 dl_se->dl_defer_running = 0;  << 
1508                                               << 
1509                 hrtimer_try_to_cancel(&dl_se- << 
1510                                               << 
1511                 replenish_dl_new_period(dl_se << 
1512                                               << 
1513                 /*                            << 
1514                  * Not being able to start th << 
1515                  * be started for whatever re << 
1516                  * and queue right away. Othe << 
1517                  * to what enqueue_dl_entity( << 
1518                  */                           << 
1519                 WARN_ON_ONCE(!start_dl_timer( << 
1520                                               << 
1521                 return;                       << 
1522         }                                     << 
1523                                               << 
1524 throttle:                                        1367 throttle:
1525         if (dl_runtime_exceeded(dl_se) || dl_    1368         if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1526                 dl_se->dl_throttled = 1;         1369                 dl_se->dl_throttled = 1;
1527                                                  1370 
1528                 /* If requested, inform the u    1371                 /* If requested, inform the user about runtime overruns. */
1529                 if (dl_runtime_exceeded(dl_se    1372                 if (dl_runtime_exceeded(dl_se) &&
1530                     (dl_se->flags & SCHED_FLA    1373                     (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1531                         dl_se->dl_overrun = 1    1374                         dl_se->dl_overrun = 1;
1532                                                  1375 
1533                 dequeue_dl_entity(dl_se, 0);  !! 1376                 __dequeue_task_dl(rq, curr, 0);
1534                 if (!dl_server(dl_se)) {      !! 1377                 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1535                         update_stats_dequeue_ !! 1378                         enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1536                         dequeue_pushable_dl_t << 
1537                 }                             << 
1538                                               << 
1539                 if (unlikely(is_dl_boosted(dl << 
1540                         if (dl_server(dl_se)) << 
1541                                 enqueue_dl_en << 
1542                         else                  << 
1543                                 enqueue_task_ << 
1544                 }                             << 
1545                                                  1379 
1546                 if (!is_leftmost(dl_se, &rq-> !! 1380                 if (!is_leftmost(curr, &rq->dl))
1547                         resched_curr(rq);        1381                         resched_curr(rq);
1548         }                                        1382         }
1549                                                  1383 
1550         /*                                       1384         /*
1551          * The fair server (sole dl_server) d << 
1552          * workload because it is running fai << 
1553          */                                   << 
1554         if (dl_se == &rq->fair_server)        << 
1555                 return;                       << 
1556                                               << 
1557 #ifdef CONFIG_RT_GROUP_SCHED                  << 
1558         /*                                    << 
1559          * Because -- for now -- we share the    1385          * Because -- for now -- we share the rt bandwidth, we need to
1560          * account our runtime there too, oth    1386          * account our runtime there too, otherwise actual rt tasks
1561          * would be able to exceed the shared    1387          * would be able to exceed the shared quota.
1562          *                                       1388          *
1563          * Account to the root rt group for n    1389          * Account to the root rt group for now.
1564          *                                       1390          *
1565          * The solution we're working towards    1391          * The solution we're working towards is having the RT groups scheduled
1566          * using deadline servers -- however     1392          * using deadline servers -- however there's a few nasties to figure
1567          * out before that can happen.           1393          * out before that can happen.
1568          */                                      1394          */
1569         if (rt_bandwidth_enabled()) {            1395         if (rt_bandwidth_enabled()) {
1570                 struct rt_rq *rt_rq = &rq->rt    1396                 struct rt_rq *rt_rq = &rq->rt;
1571                                                  1397 
1572                 raw_spin_lock(&rt_rq->rt_runt    1398                 raw_spin_lock(&rt_rq->rt_runtime_lock);
1573                 /*                               1399                 /*
1574                  * We'll let actual RT tasks     1400                  * We'll let actual RT tasks worry about the overflow here, we
1575                  * have our own CBS to keep u    1401                  * have our own CBS to keep us inline; only account when RT
1576                  * bandwidth is relevant.        1402                  * bandwidth is relevant.
1577                  */                              1403                  */
1578                 if (sched_rt_bandwidth_accoun    1404                 if (sched_rt_bandwidth_account(rt_rq))
1579                         rt_rq->rt_time += del    1405                         rt_rq->rt_time += delta_exec;
1580                 raw_spin_unlock(&rt_rq->rt_ru    1406                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1581         }                                        1407         }
1582 #endif                                        << 
1583 }                                             << 
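When the scaled runtime goes negative the entity is throttled and, if the task asked for it, dl_overrun is set so that a SIGXCPU is later delivered to the task. A hedged, self-contained userspace sketch of observing one's own overruns via SCHED_FLAG_DL_OVERRUN (it repeats the sched_attr boilerplate from the earlier sketch; the parameters are arbitrary and the busy loop is only there to force an overrun):

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE		6
#endif
#ifndef SCHED_FLAG_DL_OVERRUN
#define SCHED_FLAG_DL_OVERRUN	0x04
#endif

/* minimal sched_attr layout understood by sched_setattr(2) */
struct sched_attr {
	uint32_t size, sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime, sched_deadline, sched_period;	/* all in ns */
};

static volatile sig_atomic_t overruns;

static void on_xcpu(int sig)
{
	(void)sig;
	overruns++;		/* SIGXCPU is delivered when an overrun is detected */
}

int main(void)
{
	struct sched_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_DL_OVERRUN,
		.sched_runtime	=  2 * 1000 * 1000,	/*  2 ms budget   */
		.sched_deadline	= 10 * 1000 * 1000,	/* 10 ms deadline */
		.sched_period	= 10 * 1000 * 1000,	/* 10 ms period   */
	};

	signal(SIGXCPU, on_xcpu);

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	/* burn CPU: this will regularly exceed the 2 ms budget and get throttled */
	for (unsigned long i = 0; i < 500000000UL; i++)
		__asm__ __volatile__("" ::: "memory");

	printf("runtime overruns signalled: %d\n", (int)overruns);
	return 0;
}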
1584                                               << 
1585 /*                                            << 
1586  * In the non-defer mode, the idle time is not accounted, as the << 
1587  * server provides a guarantee.               << 
1588  *                                            << 
1589  * If the dl_server is in defer mode, the idle time is also considered << 
1590  * as time available for the fair server, avoiding a penalty for the << 
1591  * rt scheduler that did not consume that time. << 
1592  */                                           << 
1593 void dl_server_update_idle_time(struct rq *rq << 
1594 {                                             << 
1595         s64 delta_exec, scaled_delta_exec;    << 
1596                                               << 
1597         if (!rq->fair_server.dl_defer)        << 
1598                 return;                       << 
1599                                               << 
1600         /* no need to discount more */        << 
1601         if (rq->fair_server.runtime < 0)      << 
1602                 return;                       << 
1603                                               << 
1604         delta_exec = rq_clock_task(rq) - p->s << 
1605         if (delta_exec < 0)                   << 
1606                 return;                       << 
1607                                               << 
1608         scaled_delta_exec = dl_scaled_delta_e << 
1609                                               << 
1610         rq->fair_server.runtime -= scaled_del << 
1611                                               << 
1612         if (rq->fair_server.runtime < 0) {    << 
1613                 rq->fair_server.dl_defer_runn << 
1614                 rq->fair_server.runtime = 0;  << 
1615         }                                     << 
1616                                               << 
1617         p->se.exec_start = rq_clock_task(rq); << 
1618 }                                             << 
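dl_server_update_idle_time() above discounts idle time from a deferred fair server's remaining runtime and clamps it at zero, the point at which the deferred boost is no longer owed for this period. A trivial standalone sketch of that clamping, with a hypothetical helper name (the real code also rescales the delta through dl_scaled_delta_exec() first):

#include <stdint.h>
#include <stdio.h>

/*
 * Discount (already scaled) idle time from a deferred server's remaining
 * runtime, clamping at zero. Returns 1 when the budget for this period is
 * gone, i.e. when the caller would clear dl_defer_running.
 */
static int discount_idle(int64_t *runtime, int64_t scaled_idle_ns)
{
	if (*runtime < 0 || scaled_idle_ns < 0)
		return *runtime < 0;	/* nothing more to discount */

	*runtime -= scaled_idle_ns;
	if (*runtime < 0) {
		*runtime = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	int64_t runtime = 3 * 1000 * 1000;	/* 3 ms left in the server budget */

	printf("exhausted=%d left=%lld\n",
	       discount_idle(&runtime, 1 * 1000 * 1000), (long long)runtime);
	printf("exhausted=%d left=%lld\n",
	       discount_idle(&runtime, 5 * 1000 * 1000), (long long)runtime);
	return 0;
}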
1619                                               << 
1620 void dl_server_update(struct sched_dl_entity  << 
1621 {                                             << 
1622         /* 0 runtime = fair server disabled */ << 
1623         if (dl_se->dl_runtime)                << 
1624                 update_curr_dl_se(dl_se->rq,  << 
1625 }                                             << 
1626                                               << 
1627 void dl_server_start(struct sched_dl_entity * << 
1628 {                                             << 
1629         struct rq *rq = dl_se->rq;            << 
1630                                               << 
1631         /*                                    << 
1632          * XXX: the apply does not work fine at the init phase for the << 
1633          * fair server because things are not yet set. We need to improve << 
1634          * this before getting generic.       << 
1635          */                                   << 
1636         if (!dl_server(dl_se)) {              << 
1637                 u64 runtime =  50 * NSEC_PER_ << 
1638                 u64 period = 1000 * NSEC_PER_ << 
1639                                               << 
1640                 dl_server_apply_params(dl_se, << 
1641                                               << 
1642                 dl_se->dl_server = 1;         << 
1643                 dl_se->dl_defer = 1;          << 
1644                 setup_new_dl_entity(dl_se);   << 
1645         }                                     << 
1646                                               << 
1647         if (!dl_se->dl_runtime)               << 
1648                 return;                       << 
1649                                               << 
1650         enqueue_dl_entity(dl_se, ENQUEUE_WAKE << 
1651         if (!dl_task(dl_se->rq->curr) || dl_e << 
1652                 resched_curr(dl_se->rq);      << 
1653 }                                             << 
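The truncated defaults in dl_server_start() suggest a 50 ms runtime over a 1000 ms period, i.e. a 5% reservation for the fair server, assuming both constants are in NSEC_PER_MSEC (the units are cut off in the listing). Bandwidths are stored as runtime/period in fixed point with BW_SHIFT bits of fraction, the same form to_ratio() produces; a quick standalone check of what that default amounts to:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define NSEC_PER_MSEC	1000000ULL

/* runtime/period as a fixed-point ratio with BW_SHIFT bits of fraction */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (!period)
		return (uint64_t)1 << BW_SHIFT;	/* treat as full bandwidth */

	return (runtime << BW_SHIFT) / period;
}

int main(void)
{
	uint64_t runtime = 50 * NSEC_PER_MSEC;		/* assumed fair-server default */
	uint64_t period  = 1000 * NSEC_PER_MSEC;
	uint64_t bw = to_ratio(period, runtime);

	printf("dl_bw = %llu (%.2f%% of a CPU)\n",
	       (unsigned long long)bw, 100.0 * bw / (1 << BW_SHIFT));
	return 0;
}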
1654                                               << 
1655 void dl_server_stop(struct sched_dl_entity *d << 
1656 {                                             << 
1657         if (!dl_se->dl_runtime)               << 
1658                 return;                       << 
1659                                               << 
1660         dequeue_dl_entity(dl_se, DEQUEUE_SLEE << 
1661         hrtimer_try_to_cancel(&dl_se->dl_time << 
1662         dl_se->dl_defer_armed = 0;            << 
1663         dl_se->dl_throttled = 0;              << 
1664 }                                             << 
1665                                               << 
1666 void dl_server_init(struct sched_dl_entity *d << 
1667                     dl_server_has_tasks_f has << 
1668                     dl_server_pick_f pick_tas << 
1669 {                                             << 
1670         dl_se->rq = rq;                       << 
1671         dl_se->server_has_tasks = has_tasks;  << 
1672         dl_se->server_pick_task = pick_task;  << 
1673 }                                             << 
1674                                               << 
1675 void __dl_server_attach_root(struct sched_dl_ << 
1676 {                                             << 
1677         u64 new_bw = dl_se->dl_bw;            << 
1678         int cpu = cpu_of(rq);                 << 
1679         struct dl_bw *dl_b;                   << 
1680                                               << 
1681         dl_b = dl_bw_of(cpu_of(rq));          << 
1682         guard(raw_spinlock)(&dl_b->lock);     << 
1683                                               << 
1684         if (!dl_bw_cpus(cpu))                 << 
1685                 return;                       << 
1686                                               << 
1687         __dl_add(dl_b, new_bw, dl_bw_cpus(cpu << 
1688 }                                             << 
1689                                               << 
1690 int dl_server_apply_params(struct sched_dl_en << 
1691 {                                             << 
1692         u64 old_bw = init ? 0 : to_ratio(dl_s << 
1693         u64 new_bw = to_ratio(period, runtime << 
1694         struct rq *rq = dl_se->rq;            << 
1695         int cpu = cpu_of(rq);                 << 
1696         struct dl_bw *dl_b;                   << 
1697         unsigned long cap;                    << 
1698         int retval = 0;                       << 
1699         int cpus;                             << 
1700                                               << 
1701         dl_b = dl_bw_of(cpu);                 << 
1702         guard(raw_spinlock)(&dl_b->lock);     << 
1703                                               << 
1704         cpus = dl_bw_cpus(cpu);               << 
1705         cap = dl_bw_capacity(cpu);            << 
1706                                               << 
1707         if (__dl_overflow(dl_b, cap, old_bw,  << 
1708                 return -EBUSY;                << 
1709                                               << 
1710         if (init) {                           << 
1711                 __add_rq_bw(new_bw, &rq->dl); << 
1712                 __dl_add(dl_b, new_bw, cpus); << 
1713         } else {                              << 
1714                 __dl_sub(dl_b, dl_se->dl_bw,  << 
1715                 __dl_add(dl_b, new_bw, cpus); << 
1716                                               << 
1717                 dl_rq_change_utilization(rq,  << 
1718         }                                     << 
1719                                               << 
1720         dl_se->dl_runtime = runtime;          << 
1721         dl_se->dl_deadline = period;          << 
1722         dl_se->dl_period = period;            << 
1723                                               << 
1724         dl_se->runtime = 0;                   << 
1725         dl_se->deadline = 0;                  << 
1726                                               << 
1727         dl_se->dl_bw = to_ratio(dl_se->dl_per << 
1728         dl_se->dl_density = to_ratio(dl_se->d << 
1729                                               << 
1730         return retval;                        << 
1731 }                                             << 
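dl_server_apply_params() passes the new server bandwidth through the same root-domain admission test as any other deadline reservation: the admitted total, with the old value swapped for the new one, must stay below the allowed fraction of the domain's capacity, otherwise -EBUSY is returned. A schematic version of that test (the real __dl_overflow() also handles the "no limit" case and scales by CPU capacity rather than a plain CPU count):

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20
#define BW_UNIT		(1 << BW_SHIFT)

/*
 * total_bw: bandwidth already admitted in the root domain (fixed point)
 * limit:    allowed fraction per CPU (e.g. 95% -> 0.95 * BW_UNIT)
 * cpus:     CPUs in the domain
 * Returns non-zero if replacing old_bw with new_bw would overflow the domain.
 */
static int dl_overflow_demo(uint64_t total_bw, uint64_t limit, int cpus,
			    uint64_t old_bw, uint64_t new_bw)
{
	return total_bw - old_bw + new_bw > limit * cpus;
}

int main(void)
{
	uint64_t limit = (uint64_t)(0.95 * BW_UNIT);	/* default 950000/1000000 sysctl   */
	uint64_t total = 3 * (BW_UNIT / 2);		/* 1.5 CPUs' worth already admitted */

	/* growing a 5% server to 60% on a 2-CPU domain: 1.45 + 0.60 = 2.05 > 1.90 */
	printf("overflow: %d\n",
	       dl_overflow_demo(total, limit, 2,
				BW_UNIT / 20, (uint64_t)(0.6 * BW_UNIT)));
	return 0;
}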
1732                                               << 
1733 /*                                            << 
1734  * Update the current task's runtime statisti << 
1735  * a -deadline task and has not been removed  << 
1736  */                                           << 
1737 static void update_curr_dl(struct rq *rq)     << 
1738 {                                             << 
1739         struct task_struct *curr = rq->curr;  << 
1740         struct sched_dl_entity *dl_se = &curr << 
1741         s64 delta_exec;                       << 
1742                                               << 
1743         if (!dl_task(curr) || !on_dl_rq(dl_se << 
1744                 return;                       << 
1745                                               << 
1746         /*                                    << 
1747          * Consumed budget is computed consid << 
1748          * observed by schedulable tasks (exc << 
1749          * in hardirq context, etc.). Deadlin << 
1750          * computed using hard walltime. This << 
1751          * natural solution, but the full ram << 
1752          * approach need further study.       << 
1753          */                                   << 
1754         delta_exec = update_curr_common(rq);  << 
1755         update_curr_dl_se(rq, dl_se, delta_ex << 
1756 }                                                1408 }
1757                                                  1409 
1758 static enum hrtimer_restart inactive_task_tim    1410 static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1759 {                                                1411 {
1760         struct sched_dl_entity *dl_se = conta    1412         struct sched_dl_entity *dl_se = container_of(timer,
1761                                                  1413                                                      struct sched_dl_entity,
1762                                                  1414                                                      inactive_timer);
1763         struct task_struct *p = NULL;         !! 1415         struct task_struct *p = dl_task_of(dl_se);
1764         struct rq_flags rf;                      1416         struct rq_flags rf;
1765         struct rq *rq;                           1417         struct rq *rq;
1766                                                  1418 
1767         if (!dl_server(dl_se)) {              !! 1419         rq = task_rq_lock(p, &rf);
1768                 p = dl_task_of(dl_se);        << 
1769                 rq = task_rq_lock(p, &rf);    << 
1770         } else {                              << 
1771                 rq = dl_se->rq;               << 
1772                 rq_lock(rq, &rf);             << 
1773         }                                     << 
1774                                                  1420 
1775         sched_clock_tick();                      1421         sched_clock_tick();
1776         update_rq_clock(rq);                     1422         update_rq_clock(rq);
1777                                                  1423 
1778         if (dl_server(dl_se))                 << 
1779                 goto no_task;                 << 
1780                                               << 
1781         if (!dl_task(p) || READ_ONCE(p->__sta    1424         if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1782                 struct dl_bw *dl_b = dl_bw_of    1425                 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1783                                                  1426 
1784                 if (READ_ONCE(p->__state) ==     1427                 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1785                         sub_running_bw(&p->dl    1428                         sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1786                         sub_rq_bw(&p->dl, dl_    1429                         sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1787                         dl_se->dl_non_contend    1430                         dl_se->dl_non_contending = 0;
1788                 }                                1431                 }
1789                                                  1432 
1790                 raw_spin_lock(&dl_b->lock);      1433                 raw_spin_lock(&dl_b->lock);
1791                 __dl_sub(dl_b, p->dl.dl_bw, d    1434                 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
1792                 raw_spin_unlock(&dl_b->lock);    1435                 raw_spin_unlock(&dl_b->lock);
1793                 __dl_clear_params(dl_se);     !! 1436                 __dl_clear_params(p);
1794                                                  1437 
1795                 goto unlock;                     1438                 goto unlock;
1796         }                                        1439         }
1797                                               << 
1798 no_task:                                      << 
1799         if (dl_se->dl_non_contending == 0)       1440         if (dl_se->dl_non_contending == 0)
1800                 goto unlock;                     1441                 goto unlock;
1801                                                  1442 
1802         sub_running_bw(dl_se, &rq->dl);          1443         sub_running_bw(dl_se, &rq->dl);
1803         dl_se->dl_non_contending = 0;            1444         dl_se->dl_non_contending = 0;
1804 unlock:                                          1445 unlock:
1805                                               !! 1446         task_rq_unlock(rq, p, &rf);
1806         if (!dl_server(dl_se)) {              !! 1447         put_task_struct(p);
1807                 task_rq_unlock(rq, p, &rf);   << 
1808                 put_task_struct(p);           << 
1809         } else {                              << 
1810                 rq_unlock(rq, &rf);           << 
1811         }                                     << 
1812                                                  1448 
1813         return HRTIMER_NORESTART;                1449         return HRTIMER_NORESTART;
1814 }                                                1450 }
1815                                                  1451 
1816 static void init_dl_inactive_task_timer(struc !! 1452 void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1817 {                                                1453 {
1818         struct hrtimer *timer = &dl_se->inact    1454         struct hrtimer *timer = &dl_se->inactive_timer;
1819                                                  1455 
1820         hrtimer_init(timer, CLOCK_MONOTONIC,     1456         hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1821         timer->function = inactive_task_timer    1457         timer->function = inactive_task_timer;
1822 }                                                1458 }
1823                                                  1459 
1824 #define __node_2_dle(node) \                     1460 #define __node_2_dle(node) \
1825         rb_entry((node), struct sched_dl_enti    1461         rb_entry((node), struct sched_dl_entity, rb_node)
1826                                                  1462 
1827 #ifdef CONFIG_SMP                                1463 #ifdef CONFIG_SMP
1828                                                  1464 
1829 static void inc_dl_deadline(struct dl_rq *dl_    1465 static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1830 {                                                1466 {
1831         struct rq *rq = rq_of_dl_rq(dl_rq);      1467         struct rq *rq = rq_of_dl_rq(dl_rq);
1832                                                  1468 
1833         if (dl_rq->earliest_dl.curr == 0 ||      1469         if (dl_rq->earliest_dl.curr == 0 ||
1834             dl_time_before(deadline, dl_rq->e    1470             dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1835                 if (dl_rq->earliest_dl.curr =    1471                 if (dl_rq->earliest_dl.curr == 0)
1836                         cpupri_set(&rq->rd->c    1472                         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1837                 dl_rq->earliest_dl.curr = dea    1473                 dl_rq->earliest_dl.curr = deadline;
1838                 cpudl_set(&rq->rd->cpudl, rq-    1474                 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1839         }                                        1475         }
1840 }                                                1476 }
1841                                                  1477 
1842 static void dec_dl_deadline(struct dl_rq *dl_    1478 static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1843 {                                                1479 {
1844         struct rq *rq = rq_of_dl_rq(dl_rq);      1480         struct rq *rq = rq_of_dl_rq(dl_rq);
1845                                                  1481 
1846         /*                                       1482         /*
1847          * Since we may have removed our earl    1483          * Since we may have removed our earliest (and/or next earliest)
1848          * task we must recompute them.          1484          * task we must recompute them.
1849          */                                      1485          */
1850         if (!dl_rq->dl_nr_running) {             1486         if (!dl_rq->dl_nr_running) {
1851                 dl_rq->earliest_dl.curr = 0;     1487                 dl_rq->earliest_dl.curr = 0;
1852                 dl_rq->earliest_dl.next = 0;     1488                 dl_rq->earliest_dl.next = 0;
1853                 cpudl_clear(&rq->rd->cpudl, r    1489                 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1854                 cpupri_set(&rq->rd->cpupri, r    1490                 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1855         } else {                                 1491         } else {
1856                 struct rb_node *leftmost = rb    1492                 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1857                 struct sched_dl_entity *entry    1493                 struct sched_dl_entity *entry = __node_2_dle(leftmost);
1858                                                  1494 
1859                 dl_rq->earliest_dl.curr = ent    1495                 dl_rq->earliest_dl.curr = entry->deadline;
1860                 cpudl_set(&rq->rd->cpudl, rq-    1496                 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1861         }                                        1497         }
1862 }                                                1498 }
1863                                                  1499 
1864 #else                                            1500 #else
1865                                                  1501 
1866 static inline void inc_dl_deadline(struct dl_    1502 static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1867 static inline void dec_dl_deadline(struct dl_    1503 static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1868                                                  1504 
1869 #endif /* CONFIG_SMP */                          1505 #endif /* CONFIG_SMP */
1870                                                  1506 
1871 static inline                                    1507 static inline
1872 void inc_dl_tasks(struct sched_dl_entity *dl_    1508 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1873 {                                                1509 {
                                                   >> 1510         int prio = dl_task_of(dl_se)->prio;
1874         u64 deadline = dl_se->deadline;          1511         u64 deadline = dl_se->deadline;
1875                                                  1512 
                                                   >> 1513         WARN_ON(!dl_prio(prio));
1876         dl_rq->dl_nr_running++;                  1514         dl_rq->dl_nr_running++;
1877         add_nr_running(rq_of_dl_rq(dl_rq), 1)    1515         add_nr_running(rq_of_dl_rq(dl_rq), 1);
1878                                                  1516 
1879         inc_dl_deadline(dl_rq, deadline);        1517         inc_dl_deadline(dl_rq, deadline);
                                                   >> 1518         inc_dl_migration(dl_se, dl_rq);
1880 }                                                1519 }
1881                                                  1520 
1882 static inline                                    1521 static inline
1883 void dec_dl_tasks(struct sched_dl_entity *dl_    1522 void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1884 {                                                1523 {
                                                   >> 1524         int prio = dl_task_of(dl_se)->prio;
                                                   >> 1525 
                                                   >> 1526         WARN_ON(!dl_prio(prio));
1885         WARN_ON(!dl_rq->dl_nr_running);          1527         WARN_ON(!dl_rq->dl_nr_running);
1886         dl_rq->dl_nr_running--;                  1528         dl_rq->dl_nr_running--;
1887         sub_nr_running(rq_of_dl_rq(dl_rq), 1)    1529         sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1888                                                  1530 
1889         dec_dl_deadline(dl_rq, dl_se->deadlin    1531         dec_dl_deadline(dl_rq, dl_se->deadline);
                                                   >> 1532         dec_dl_migration(dl_se, dl_rq);
1890 }                                                1533 }
1891                                                  1534 
1892 static inline bool __dl_less(struct rb_node *    1535 static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1893 {                                                1536 {
1894         return dl_time_before(__node_2_dle(a)    1537         return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1895 }                                                1538 }
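__dl_less() orders entities in the rb-tree by absolute deadline, and every such comparison goes through dl_time_before(), the wrap-safe signed-difference test (s64)(a - b) < 0 on free-running nanosecond clocks. A standalone illustration of why the subtraction form keeps giving the right answer across a clock wraparound, where a naive comparison would not:

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a is earlier than b" for free-running 64-bit ns clocks */
static int dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap  = UINT64_MAX - 1000;	/* deadline just before the clock wraps */
	uint64_t after_wrap = 5000;			/* deadline shortly after the wrap      */

	/* naive 'a < b' gets this wrong; the signed difference does not */
	printf("naive:          %d\n", near_wrap < after_wrap);			/* 0 */
	printf("dl_time_before: %d\n", dl_time_before(near_wrap, after_wrap));	/* 1 */
	return 0;
}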
1896                                                  1539 
1897 static __always_inline struct sched_statistic !! 1540 static inline struct sched_statistics *
1898 __schedstats_from_dl_se(struct sched_dl_entit    1541 __schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1899 {                                                1542 {
1900         if (!schedstat_enabled())             << 
1901                 return NULL;                  << 
1902                                               << 
1903         if (dl_server(dl_se))                 << 
1904                 return NULL;                  << 
1905                                               << 
1906         return &dl_task_of(dl_se)->stats;        1543         return &dl_task_of(dl_se)->stats;
1907 }                                                1544 }
1908                                                  1545 
1909 static inline void                               1546 static inline void
1910 update_stats_wait_start_dl(struct dl_rq *dl_r    1547 update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1911 {                                                1548 {
1912         struct sched_statistics *stats = __sc !! 1549         struct sched_statistics *stats;
1913         if (stats)                            !! 1550 
1914                 __update_stats_wait_start(rq_ !! 1551         if (!schedstat_enabled())
                                                   >> 1552                 return;
                                                   >> 1553 
                                                   >> 1554         stats = __schedstats_from_dl_se(dl_se);
                                                   >> 1555         __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1915 }                                                1556 }
1916                                                  1557 
1917 static inline void                               1558 static inline void
1918 update_stats_wait_end_dl(struct dl_rq *dl_rq,    1559 update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1919 {                                                1560 {
1920         struct sched_statistics *stats = __sc !! 1561         struct sched_statistics *stats;
1921         if (stats)                            !! 1562 
1922                 __update_stats_wait_end(rq_of !! 1563         if (!schedstat_enabled())
                                                   >> 1564                 return;
                                                   >> 1565 
                                                   >> 1566         stats = __schedstats_from_dl_se(dl_se);
                                                   >> 1567         __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1923 }                                                1568 }
1924                                                  1569 
1925 static inline void                               1570 static inline void
1926 update_stats_enqueue_sleeper_dl(struct dl_rq     1571 update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1927 {                                                1572 {
1928         struct sched_statistics *stats = __sc !! 1573         struct sched_statistics *stats;
1929         if (stats)                            !! 1574 
1930                 __update_stats_enqueue_sleepe !! 1575         if (!schedstat_enabled())
                                                   >> 1576                 return;
                                                   >> 1577 
                                                   >> 1578         stats = __schedstats_from_dl_se(dl_se);
                                                   >> 1579         __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1931 }                                                1580 }
1932                                                  1581 
1933 static inline void                               1582 static inline void
1934 update_stats_enqueue_dl(struct dl_rq *dl_rq,     1583 update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1935                         int flags)               1584                         int flags)
1936 {                                                1585 {
1937         if (!schedstat_enabled())                1586         if (!schedstat_enabled())
1938                 return;                          1587                 return;
1939                                                  1588 
1940         if (flags & ENQUEUE_WAKEUP)              1589         if (flags & ENQUEUE_WAKEUP)
1941                 update_stats_enqueue_sleeper_    1590                 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1942 }                                                1591 }
1943                                                  1592 
1944 static inline void                               1593 static inline void
1945 update_stats_dequeue_dl(struct dl_rq *dl_rq,     1594 update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1946                         int flags)               1595                         int flags)
1947 {                                                1596 {
1948         struct task_struct *p = dl_task_of(dl    1597         struct task_struct *p = dl_task_of(dl_se);
1949                                                  1598 
1950         if (!schedstat_enabled())                1599         if (!schedstat_enabled())
1951                 return;                          1600                 return;
1952                                                  1601 
1953         if ((flags & DEQUEUE_SLEEP)) {           1602         if ((flags & DEQUEUE_SLEEP)) {
1954                 unsigned int state;              1603                 unsigned int state;
1955                                                  1604 
1956                 state = READ_ONCE(p->__state)    1605                 state = READ_ONCE(p->__state);
1957                 if (state & TASK_INTERRUPTIBL    1606                 if (state & TASK_INTERRUPTIBLE)
1958                         __schedstat_set(p->st    1607                         __schedstat_set(p->stats.sleep_start,
1959                                         rq_cl    1608                                         rq_clock(rq_of_dl_rq(dl_rq)));
1960                                                  1609 
1961                 if (state & TASK_UNINTERRUPTI    1610                 if (state & TASK_UNINTERRUPTIBLE)
1962                         __schedstat_set(p->st    1611                         __schedstat_set(p->stats.block_start,
1963                                         rq_cl    1612                                         rq_clock(rq_of_dl_rq(dl_rq)));
1964         }                                        1613         }
1965 }                                                1614 }
1966                                                  1615 
1967 static void __enqueue_dl_entity(struct sched_    1616 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1968 {                                                1617 {
1969         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1618         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1970                                                  1619 
1971         WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->r    1620         WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
1972                                                  1621 
1973         rb_add_cached(&dl_se->rb_node, &dl_rq    1622         rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
1974                                                  1623 
1975         inc_dl_tasks(dl_se, dl_rq);              1624         inc_dl_tasks(dl_se, dl_rq);
1976 }                                                1625 }
1977                                                  1626 
1978 static void __dequeue_dl_entity(struct sched_    1627 static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1979 {                                                1628 {
1980         struct dl_rq *dl_rq = dl_rq_of_se(dl_    1629         struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1981                                                  1630 
1982         if (RB_EMPTY_NODE(&dl_se->rb_node))      1631         if (RB_EMPTY_NODE(&dl_se->rb_node))
1983                 return;                          1632                 return;
1984                                                  1633 
1985         rb_erase_cached(&dl_se->rb_node, &dl_    1634         rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
1986                                                  1635 
1987         RB_CLEAR_NODE(&dl_se->rb_node);          1636         RB_CLEAR_NODE(&dl_se->rb_node);
1988                                                  1637 
1989         dec_dl_tasks(dl_se, dl_rq);              1638         dec_dl_tasks(dl_se, dl_rq);
1990 }                                                1639 }
1991                                                  1640 
1992 static void                                      1641 static void
1993 enqueue_dl_entity(struct sched_dl_entity *dl_    1642 enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1994 {                                                1643 {
1995         WARN_ON_ONCE(on_dl_rq(dl_se));           1644         WARN_ON_ONCE(on_dl_rq(dl_se));
1996                                                  1645 
1997         update_stats_enqueue_dl(dl_rq_of_se(d    1646         update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1998                                                  1647 
1999         /*                                       1648         /*
2000          * Check if a constrained deadline ta << 
2001          * after the deadline but before the  << 
2002          * If that is the case, the task will << 
2003          * the replenishment timer will be se << 
2004          */                                   << 
2005         if (!dl_se->dl_throttled && !dl_is_im << 
2006                 dl_check_constrained_dl(dl_se << 
2007                                               << 
2008         if (flags & (ENQUEUE_RESTORE|ENQUEUE_ << 
2009                 struct dl_rq *dl_rq = dl_rq_o << 
2010                                               << 
2011                 add_rq_bw(dl_se, dl_rq);      << 
2012                 add_running_bw(dl_se, dl_rq); << 
2013         }                                     << 
2014                                               << 
2015         /*                                    << 
2016          * If p is throttled, we do not enque << 
2017          * its budget it needs a replenishmen << 
2018          * its rq, the bandwidth timer callba << 
2019          * run yet) will take care of this.   << 
2020          * However, the active utilization do << 
2021          * that the task is on the runqueue o << 
2022          * task's state - in GRUB parlance, " << 
2023          * In other words, even if a task is  << 
2024          * be counted in the active utilizati << 
2025          * add_running_bw().                  << 
2026          */                                   << 
2027         if (!dl_se->dl_defer && dl_se->dl_thr << 
2028                 if (flags & ENQUEUE_WAKEUP)   << 
2029                         task_contending(dl_se << 
2030                                               << 
2031                 return;                       << 
2032         }                                     << 
2033                                               << 
2034         /*                                    << 
2035          * If this is a wakeup or a new insta    1649          * If this is a wakeup or a new instance, the scheduling
2036          * parameters of the task might need     1650          * parameters of the task might need updating. Otherwise,
2037          * we want a replenishment of its run    1651          * we want a replenishment of its runtime.
2038          */                                      1652          */
2039         if (flags & ENQUEUE_WAKEUP) {            1653         if (flags & ENQUEUE_WAKEUP) {
2040                 task_contending(dl_se, flags)    1654                 task_contending(dl_se, flags);
2041                 update_dl_entity(dl_se);         1655                 update_dl_entity(dl_se);
2042         } else if (flags & ENQUEUE_REPLENISH)    1656         } else if (flags & ENQUEUE_REPLENISH) {
2043                 replenish_dl_entity(dl_se);      1657                 replenish_dl_entity(dl_se);
2044         } else if ((flags & ENQUEUE_RESTORE)     1658         } else if ((flags & ENQUEUE_RESTORE) &&
2045                    dl_time_before(dl_se->dead !! 1659                   dl_time_before(dl_se->deadline,
                                                   >> 1660                                  rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
2046                 setup_new_dl_entity(dl_se);      1661                 setup_new_dl_entity(dl_se);
2047         }                                        1662         }
2048                                                  1663 
2049         /*                                    << 
2050          * If the reservation is still thrott << 
2051          * deferred task and still got to wai << 
2052          */                                   << 
2053         if (dl_se->dl_throttled && start_dl_t << 
2054                 return;                       << 
2055                                               << 
2056         /*                                    << 
2057          * We're about to enqueue, make sure  << 
2058          * In case the timer was not started, << 
2059          * has passed, mark as not throttled  << 
2060          * Also cancel earlier timers, since  << 
2061          */                                   << 
2062         if (dl_se->dl_throttled) {            << 
2063                 hrtimer_try_to_cancel(&dl_se- << 
2064                 dl_se->dl_defer_armed = 0;    << 
2065                 dl_se->dl_throttled = 0;      << 
2066         }                                     << 
2067                                               << 
2068         __enqueue_dl_entity(dl_se);              1664         __enqueue_dl_entity(dl_se);
2069 }                                                1665 }
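When ENQUEUE_WAKEUP is set above, update_dl_entity() applies the CBS wakeup rule: the task keeps its residual (runtime, deadline) pair unless the deadline has already passed or reusing the pair would exceed the reserved bandwidth, in which case both are refreshed. The sketch below restates that rule in userspace arithmetic with assumed field names; the in-kernel check additionally scales the products to guard against 64-bit multiply overflow.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the relevant sched_dl_entity fields. */
struct cbs {
	uint64_t dl_runtime;	/* reserved runtime per period (ns)   */
	uint64_t dl_deadline;	/* relative deadline (ns)             */
	uint64_t runtime;	/* runtime left in this instance (ns) */
	uint64_t deadline;	/* current absolute deadline (ns)     */
};

/* True when runtime / (deadline - now) would exceed the reserved
 * dl_runtime / dl_deadline ratio (compared cross-multiplied). */
static bool cbs_overflow(const struct cbs *c, uint64_t now)
{
	if (c->deadline <= now)
		return true;
	return c->runtime * c->dl_deadline >
	       (c->deadline - now) * c->dl_runtime;
}

/* Wakeup rule: keep the old pair if it is still safe, otherwise start a
 * fresh instance with a full budget and a deadline one dl_deadline away. */
static void cbs_wakeup(struct cbs *c, uint64_t now)
{
	if (cbs_overflow(c, now)) {
		c->deadline = now + c->dl_deadline;
		c->runtime  = c->dl_runtime;
	}
}

int main(void)
{
	/* 10 ms reserved every 100 ms; 9 ms left with only 20 ms to go means
	 * 45% instantaneous bandwidth against a 10% reservation: refresh. */
	struct cbs c = {
		.dl_runtime  =  10000000ULL,
		.dl_deadline = 100000000ULL,
		.runtime     =   9000000ULL,
		.deadline    =  20000000ULL,
	};

	cbs_wakeup(&c, 0);
	printf("deadline=%" PRIu64 " runtime=%" PRIu64 "\n", c.deadline, c.runtime);
	return 0;
}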
2070                                                  1666 
2071 static void dequeue_dl_entity(struct sched_dl !! 1667 static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
2072 {                                                1668 {
2073         __dequeue_dl_entity(dl_se);              1669         __dequeue_dl_entity(dl_se);
2074                                               << 
2075         if (flags & (DEQUEUE_SAVE|DEQUEUE_MIG << 
2076                 struct dl_rq *dl_rq = dl_rq_o << 
2077                                               << 
2078                 sub_running_bw(dl_se, dl_rq); << 
2079                 sub_rq_bw(dl_se, dl_rq);      << 
2080         }                                     << 
2081                                               << 
2082         /*                                    << 
2083          * This check allows to start the ina << 
2084          * decrease the active utilization, i << 
2085          * when the task blocks and when it i << 
2086          * (p->state == TASK_DEAD). We can ha << 
2087          * way, because from GRUB's point of  << 
2088          * (the task moves from "active conte << 
2089          * or "inactive")                     << 
2090          */                                   << 
2091         if (flags & DEQUEUE_SLEEP)            << 
2092                 task_non_contending(dl_se);   << 
2093 }                                                1670 }
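The "active contending" / "active non contending" / "inactive" vocabulary in the comments above comes from GRUB: a blocking task's bandwidth is not dropped from the runqueue's running_bw right away, but only once its 0-lag inactive timer fires (task_non_contending() and inactive_task_timer() elsewhere in this file). A deliberately simplified toy model of that bookkeeping, with invented names, just to show the state transitions:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum grub_state { INACTIVE, ACTIVE_CONTENDING, ACTIVE_NON_CONTENDING };

struct grub_rq   { uint64_t running_bw; };	/* sum of "active" bandwidth */
struct grub_task { enum grub_state state; uint64_t bw; };

static void grub_wakeup(struct grub_rq *rq, struct grub_task *t)
{
	if (t->state == INACTIVE)
		rq->running_bw += t->bw;	/* starts contending again */
	/* ACTIVE_NON_CONTENDING -> contending: the bandwidth is still
	 * counted; the pending inactive timer would be cancelled here. */
	t->state = ACTIVE_CONTENDING;
}

static void grub_block(struct grub_task *t)
{
	/* DEQUEUE_SLEEP: do NOT subtract the bandwidth yet; wait for the
	 * 0-lag time so a quick wakeup keeps its utilization accounted. */
	t->state = ACTIVE_NON_CONTENDING;
}

static void grub_inactive_timer(struct grub_rq *rq, struct grub_task *t)
{
	if (t->state == ACTIVE_NON_CONTENDING) {
		rq->running_bw -= t->bw;
		t->state = INACTIVE;
	}
}

int main(void)
{
	struct grub_rq rq = { 0 };
	struct grub_task t = { INACTIVE, 100 };

	grub_wakeup(&rq, &t);
	grub_block(&t);
	printf("after block: %" PRIu64 "\n", rq.running_bw);	/* still 100 */
	grub_inactive_timer(&rq, &t);
	printf("after timer: %" PRIu64 "\n", rq.running_bw);	/* 0 */
	return 0;
}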
2094                                                  1671 
2095 static void enqueue_task_dl(struct rq *rq, st    1672 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2096 {                                                1673 {
2097         if (is_dl_boosted(&p->dl)) {             1674         if (is_dl_boosted(&p->dl)) {
2098                 /*                               1675                 /*
2099                  * Because of delays in the d    1676                  * Because of delays in the detection of the overrun of a
2100                  * thread's runtime, it might    1677                  * thread's runtime, it might be the case that a thread
2101                  * goes to sleep in a rt mute    1678                  * goes to sleep in a rt mutex with negative runtime. As
2102                  * a consequence, the thread     1679                  * a consequence, the thread will be throttled.
2103                  *                               1680                  *
2104                  * While waiting for the mute    1681                  * While waiting for the mutex, this thread can also be
2105                  * boosted via PI, resulting     1682                  * boosted via PI, resulting in a thread that is throttled
2106                  * and boosted at the same ti    1683                  * and boosted at the same time.
2107                  *                               1684                  *
2108                  * In this case, the boost ov    1685                  * In this case, the boost overrides the throttle.
2109                  */                              1686                  */
2110                 if (p->dl.dl_throttled) {        1687                 if (p->dl.dl_throttled) {
2111                         /*                       1688                         /*
2112                          * The replenish time    1689                          * The replenish timer needs to be canceled. No
2113                          * problem if it fire    1690                          * problem if it fires concurrently: boosted threads
2114                          * are ignored in dl_    1691                          * are ignored in dl_task_timer().
2115                          *                    << 
2116                          * If the timer callb << 
2117                          * it will eventually << 
2118                          */                      1692                          */
2119                         if (hrtimer_try_to_ca !! 1693                         hrtimer_try_to_cancel(&p->dl.dl_timer);
2120                             !dl_server(&p->dl << 
2121                                 put_task_stru << 
2122                         p->dl.dl_throttled =     1694                         p->dl.dl_throttled = 0;
2123                 }                                1695                 }
2124         } else if (!dl_prio(p->normal_prio))     1696         } else if (!dl_prio(p->normal_prio)) {
2125                 /*                               1697                 /*
2126                  * Special case in which we h    1698                  * Special case in which we have a !SCHED_DEADLINE task that is going
2127                  * to be deboosted, but excee    1699                  * to be deboosted, but exceeds its runtime while doing so. No point in
2128                  * replenishing it, as it's g    1700                  * replenishing it, as it's going to return back to its original
2129                  * scheduling class after thi    1701                  * scheduling class after this. If it has been throttled, we need to
2130                  * clear the flag, otherwise     1702                  * clear the flag, otherwise the task may wake up as throttled after
2131                  * being boosted again with n    1703                  * being boosted again with no means to replenish the runtime and clear
2132                  * the throttle.                 1704                  * the throttle.
2133                  */                              1705                  */
2134                 p->dl.dl_throttled = 0;          1706                 p->dl.dl_throttled = 0;
2135                 if (!(flags & ENQUEUE_REPLENI    1707                 if (!(flags & ENQUEUE_REPLENISH))
2136                         printk_deferred_once(    1708                         printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
2137                                                  1709                                              task_pid_nr(p));
2138                                                  1710 
2139                 return;                          1711                 return;
2140         }                                        1712         }
2141                                                  1713 
2142         check_schedstat_required();           !! 1714         /*
2143         update_stats_wait_start_dl(dl_rq_of_s !! 1715          * Check if a constrained deadline task was activated
                                                   >> 1716          * after the deadline but before the next period.
                                                   >> 1717          * If that is the case, the task will be throttled and
                                                   >> 1718          * the replenishment timer will be set to the next period.
                                                   >> 1719          */
                                                   >> 1720         if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
                                                   >> 1721                 dl_check_constrained_dl(&p->dl);
2144                                                  1722 
2145         if (p->on_rq == TASK_ON_RQ_MIGRATING) !! 1723         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
2146                 flags |= ENQUEUE_MIGRATING;   !! 1724                 add_rq_bw(&p->dl, &rq->dl);
                                                   >> 1725                 add_running_bw(&p->dl, &rq->dl);
                                                   >> 1726         }
2147                                                  1727 
2148         enqueue_dl_entity(&p->dl, flags);     !! 1728         /*
                                                   >> 1729          * If p is throttled, we do not enqueue it. In fact, if it exhausted
                                                   >> 1730          * its budget it needs a replenishment and, since it now is on
                                                   >> 1731          * its rq, the bandwidth timer callback (which clearly has not
                                                   >> 1732          * run yet) will take care of this.
                                                   >> 1733          * However, the active utilization does not depend on the fact
                                                   >> 1734          * that the task is on the runqueue or not (but depends on the
                                                   >> 1735          * task's state - in GRUB parlance, "inactive" vs "active contending").
                                                   >> 1736          * In other words, even if a task is throttled its utilization must
                                                   >> 1737          * be counted in the active utilization; hence, we need to call
                                                   >> 1738          * add_running_bw().
                                                   >> 1739          */
                                                   >> 1740         if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
                                                   >> 1741                 if (flags & ENQUEUE_WAKEUP)
                                                   >> 1742                         task_contending(&p->dl, flags);
2149                                                  1743 
2150         if (dl_server(&p->dl))                << 
2151                 return;                          1744                 return;
                                                   >> 1745         }
                                                   >> 1746 
                                                   >> 1747         check_schedstat_required();
                                                   >> 1748         update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
                                                   >> 1749 
                                                   >> 1750         enqueue_dl_entity(&p->dl, flags);
2152                                                  1751 
2153         if (!task_current(rq, p) && !p->dl.dl !! 1752         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
2154                 enqueue_pushable_dl_task(rq,     1753                 enqueue_pushable_dl_task(rq, p);
2155 }                                                1754 }
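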
2156                                                  1755 
2157 static bool dequeue_task_dl(struct rq *rq, st !! 1756 static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2158 {                                                1757 {
2159         update_curr_dl(rq);                   !! 1758         update_stats_dequeue_dl(&rq->dl, &p->dl, flags);
                                                   >> 1759         dequeue_dl_entity(&p->dl);
                                                   >> 1760         dequeue_pushable_dl_task(rq, p);
                                                   >> 1761 }
2160                                                  1762 
2161         if (p->on_rq == TASK_ON_RQ_MIGRATING) !! 1763 static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2162                 flags |= DEQUEUE_MIGRATING;   !! 1764 {
                                                   >> 1765         update_curr_dl(rq);
                                                   >> 1766         __dequeue_task_dl(rq, p, flags);
2163                                                  1767 
2164         dequeue_dl_entity(&p->dl, flags);     !! 1768         if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
2165         if (!p->dl.dl_throttled && !dl_server !! 1769                 sub_running_bw(&p->dl, &rq->dl);
2166                 dequeue_pushable_dl_task(rq,  !! 1770                 sub_rq_bw(&p->dl, &rq->dl);
                                                   >> 1771         }
2167                                                  1772 
2168         return true;                          !! 1773         /*
                                                   >> 1774          * This check allows us to start the inactive timer (or to immediately
                                                   >> 1775          * decrease the active utilization, if needed) in two cases:
                                                   >> 1776          * when the task blocks and when it is terminating
                                                   >> 1777          * (p->state == TASK_DEAD). We can handle the two cases in the same
                                                   >> 1778          * way, because from GRUB's point of view the same thing is happening
                                                   >> 1779          * (the task moves from "active contending" to "active non contending"
                                                   >> 1780          * or "inactive")
                                                   >> 1781          */
                                                   >> 1782         if (flags & DEQUEUE_SLEEP)
                                                   >> 1783                 task_non_contending(p);
2169 }                                                1784 }
2170                                                  1785 
2171 /*                                               1786 /*
2172  * Yield task semantic for -deadline tasks is    1787  * Yield task semantic for -deadline tasks is:
2173  *                                               1788  *
2174  *   get off the CPU until our next instance,    1789  *   get off the CPU until our next instance, with
2175  *   a new runtime. This is of little use now    1790  *   a new runtime. This is of little use now, since we
2176  *   don't have a bandwidth reclaiming mechan    1791  *   don't have a bandwidth reclaiming mechanism. Anyway,
2177  *   bandwidth reclaiming is planned for the     1792  *   bandwidth reclaiming is planned for the future, and
2178  *   yield_task_dl will indicate that some sp    1793  *   yield_task_dl will indicate that some spare budget
2179  *   is available for other task instances to    1794  *   is available for other task instances to use.
2180  */                                              1795  */
2181 static void yield_task_dl(struct rq *rq)         1796 static void yield_task_dl(struct rq *rq)
2182 {                                                1797 {
2183         /*                                       1798         /*
2184          * We make the task go to sleep until    1799          * We make the task go to sleep until its current deadline by
2185          * forcing its runtime to zero. This     1800          * forcing its runtime to zero. This way, update_curr_dl() stops
2186          * it and the bandwidth timer will wa    1801          * it and the bandwidth timer will wake it up and will give it
2187          * new scheduling parameters (thanks     1802          * new scheduling parameters (thanks to dl_yielded=1).
2188          */                                      1803          */
2189         rq->curr->dl.dl_yielded = 1;             1804         rq->curr->dl.dl_yielded = 1;
2190                                                  1805 
2191         update_rq_clock(rq);                     1806         update_rq_clock(rq);
2192         update_curr_dl(rq);                      1807         update_curr_dl(rq);
2193         /*                                       1808         /*
2194          * Tell update_rq_clock() that we've     1809          * Tell update_rq_clock() that we've just updated,
2195          * so we don't do microscopic update     1810          * so we don't do microscopic update in schedule()
2196          * and double the fastpath cost.         1811          * and double the fastpath cost.
2197          */                                      1812          */
2198         rq_clock_skip_update(rq);                1813         rq_clock_skip_update(rq);
2199 }                                                1814 }
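In practice these yield semantics are what a periodic SCHED_DEADLINE task relies on when it finishes an instance early: calling sched_yield() forfeits the remaining runtime, and the bandwidth timer provides a fresh budget at the next period. A minimal userspace sketch follows; it assumes root (or CAP_SYS_NICE), a libc without its own sched_setattr() wrapper or struct sched_attr (hence the raw syscall and the local mirror of the uapi layout), and purely illustrative parameter values.

#define _GNU_SOURCE
#include <linux/types.h>
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Local mirror of the first part of the uapi struct sched_attr. */
struct dl_attr {
	__u32 size;
	__u32 sched_policy;
	__u64 sched_flags;
	__s32 sched_nice;
	__u32 sched_priority;
	__u64 sched_runtime;	/* ns */
	__u64 sched_deadline;	/* ns */
	__u64 sched_period;	/* ns */
};

int main(void)
{
	struct dl_attr attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	=  2 * 1000 * 1000,	/*  2 ms of work */
		.sched_deadline	= 10 * 1000 * 1000,	/*  due every    */
		.sched_period	= 10 * 1000 * 1000,	/*  10 ms        */
	};

	if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	for (int i = 0; i < 100; i++) {
		/* ... up to ~2 ms of periodic work per instance ... */

		/* Done early: give up the rest of this instance's runtime.
		 * The replenishment timer wakes us with a fresh budget at
		 * the start of the next period (dl_yielded handling above). */
		sched_yield();
	}
	return 0;
}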
2200                                                  1815 
2201 #ifdef CONFIG_SMP                                1816 #ifdef CONFIG_SMP
2202                                                  1817 
2203 static inline bool dl_task_is_earliest_deadli    1818 static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
2204                                                  1819                                                  struct rq *rq)
2205 {                                                1820 {
2206         return (!rq->dl.dl_nr_running ||         1821         return (!rq->dl.dl_nr_running ||
2207                 dl_time_before(p->dl.deadline    1822                 dl_time_before(p->dl.deadline,
2208                                rq->dl.earlies    1823                                rq->dl.earliest_dl.curr));
2209 }                                                1824 }
2210                                                  1825 
2211 static int find_later_rq(struct task_struct *    1826 static int find_later_rq(struct task_struct *task);
2212                                                  1827 
2213 static int                                       1828 static int
2214 select_task_rq_dl(struct task_struct *p, int     1829 select_task_rq_dl(struct task_struct *p, int cpu, int flags)
2215 {                                                1830 {
2216         struct task_struct *curr;                1831         struct task_struct *curr;
2217         bool select_rq;                          1832         bool select_rq;
2218         struct rq *rq;                           1833         struct rq *rq;
2219                                                  1834 
2220         if (!(flags & WF_TTWU))                  1835         if (!(flags & WF_TTWU))
2221                 goto out;                        1836                 goto out;
2222                                                  1837 
2223         rq = cpu_rq(cpu);                        1838         rq = cpu_rq(cpu);
2224                                                  1839 
2225         rcu_read_lock();                         1840         rcu_read_lock();
2226         curr = READ_ONCE(rq->curr); /* unlock    1841         curr = READ_ONCE(rq->curr); /* unlocked access */
2227                                                  1842 
2228         /*                                       1843         /*
2229          * If we are dealing with a -deadline    1844          * If we are dealing with a -deadline task, we must
2230          * decide where to wake it up.           1845          * decide where to wake it up.
2231          * If it has a later deadline and the    1846          * If it has a later deadline and the current task
2232          * on this rq can't move (provided th    1847          * on this rq can't move (provided the waking task
2233          * can!) we prefer to send it somewhe    1848          * can!) we prefer to send it somewhere else. On the
2234          * other hand, if it has a shorter de    1849          * other hand, if it has a shorter deadline, we
2235          * try to make it stay here, it might    1850          * try to make it stay here, it might be important.
2236          */                                      1851          */
2237         select_rq = unlikely(dl_task(curr)) &    1852         select_rq = unlikely(dl_task(curr)) &&
2238                     (curr->nr_cpus_allowed <     1853                     (curr->nr_cpus_allowed < 2 ||
2239                      !dl_entity_preempt(&p->d    1854                      !dl_entity_preempt(&p->dl, &curr->dl)) &&
2240                     p->nr_cpus_allowed > 1;      1855                     p->nr_cpus_allowed > 1;
2241                                                  1856 
2242         /*                                       1857         /*
2243          * Take the capacity of the CPU into     1858          * Take the capacity of the CPU into account to
2244          * ensure it fits the requirement of     1859          * ensure it fits the requirement of the task.
2245          */                                      1860          */
2246         if (sched_asym_cpucap_active())          1861         if (sched_asym_cpucap_active())
2247                 select_rq |= !dl_task_fits_ca    1862                 select_rq |= !dl_task_fits_capacity(p, cpu);
2248                                                  1863 
2249         if (select_rq) {                         1864         if (select_rq) {
2250                 int target = find_later_rq(p)    1865                 int target = find_later_rq(p);
2251                                                  1866 
2252                 if (target != -1 &&              1867                 if (target != -1 &&
2253                     dl_task_is_earliest_deadl    1868                     dl_task_is_earliest_deadline(p, cpu_rq(target)))
2254                         cpu = target;            1869                         cpu = target;
2255         }                                        1870         }
2256         rcu_read_unlock();                       1871         rcu_read_unlock();
2257                                                  1872 
2258 out:                                             1873 out:
2259         return cpu;                              1874         return cpu;
2260 }                                                1875 }
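On asymmetric-capacity systems the select_rq condition above also pushes the search to another CPU when the reservation does not fit the one originally chosen. The userspace sketch below is my own back-of-the-envelope formulation of the idea behind dl_task_fits_capacity(), using 1024 as the reference (biggest-CPU) capacity: the reserved runtime, stretched by the CPU's relative capacity, must still complete before the relative deadline.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAPACITY_SCALE 1024ULL	/* capacity of the largest CPU */

/* deadline * (cap / 1024) >= runtime, i.e. the runtime measured at full
 * capacity still fits before the deadline once scaled to this CPU. */
static bool fits_capacity(uint64_t runtime_ns, uint64_t deadline_ns,
			  uint64_t cpu_capacity)
{
	return deadline_ns * cpu_capacity >= runtime_ns * CAPACITY_SCALE;
}

int main(void)
{
	uint64_t runtime = 5000000, deadline = 8000000;	/* 5 ms within 8 ms */

	printf("big    (1024): %d\n", fits_capacity(runtime, deadline, 1024));	/* 1 */
	printf("little (512) : %d\n", fits_capacity(runtime, deadline, 512));	/* 0 */
	return 0;
}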
2261                                                  1876 
2262 static void migrate_task_rq_dl(struct task_st    1877 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
2263 {                                                1878 {
2264         struct rq_flags rf;                      1879         struct rq_flags rf;
2265         struct rq *rq;                           1880         struct rq *rq;
2266                                                  1881 
2267         if (READ_ONCE(p->__state) != TASK_WAK    1882         if (READ_ONCE(p->__state) != TASK_WAKING)
2268                 return;                          1883                 return;
2269                                                  1884 
2270         rq = task_rq(p);                         1885         rq = task_rq(p);
2271         /*                                       1886         /*
2272          * Since p->state == TASK_WAKING, set    1887          * Since p->state == TASK_WAKING, set_task_cpu() has been called
2273          * from try_to_wake_up(). Hence, p->p    1888          * from try_to_wake_up(). Hence, p->pi_lock is locked, but
2274          * rq->lock is not... So, lock it        1889          * rq->lock is not... So, lock it
2275          */                                      1890          */
2276         rq_lock(rq, &rf);                        1891         rq_lock(rq, &rf);
2277         if (p->dl.dl_non_contending) {           1892         if (p->dl.dl_non_contending) {
2278                 update_rq_clock(rq);             1893                 update_rq_clock(rq);
2279                 sub_running_bw(&p->dl, &rq->d    1894                 sub_running_bw(&p->dl, &rq->dl);
2280                 p->dl.dl_non_contending = 0;     1895                 p->dl.dl_non_contending = 0;
2281                 /*                               1896                 /*
2282                  * If the timer handler is cu    1897                  * If the timer handler is currently running and the
2283                  * timer cannot be canceled,     1898                  * timer cannot be canceled, inactive_task_timer()
2284                  * will see that dl_not_conte    1899                  * will see that dl_not_contending is not set, and
2285                  * will not touch the rq's ac    1900                  * will not touch the rq's active utilization,
2286                  * so we are still safe.         1901                  * so we are still safe.
2287                  */                              1902                  */
2288                 if (hrtimer_try_to_cancel(&p-    1903                 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2289                         put_task_struct(p);      1904                         put_task_struct(p);
2290         }                                        1905         }
2291         sub_rq_bw(&p->dl, &rq->dl);              1906         sub_rq_bw(&p->dl, &rq->dl);
2292         rq_unlock(rq, &rf);                      1907         rq_unlock(rq, &rf);
2293 }                                                1908 }
2294                                                  1909 
2295 static void check_preempt_equal_dl(struct rq     1910 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
2296 {                                                1911 {
2297         /*                                       1912         /*
2298          * Current can't be migrated, useless    1913          * Current can't be migrated, useless to reschedule,
2299          * let's hope p can move out.            1914          * let's hope p can move out.
2300          */                                      1915          */
2301         if (rq->curr->nr_cpus_allowed == 1 ||    1916         if (rq->curr->nr_cpus_allowed == 1 ||
2302             !cpudl_find(&rq->rd->cpudl, rq->c    1917             !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
2303                 return;                          1918                 return;
2304                                                  1919 
2305         /*                                       1920         /*
2306          * p is migratable, so let's not sche    1921          * p is migratable, so let's not schedule it and
2307          * see if it is pushed or pulled some    1922          * see if it is pushed or pulled somewhere else.
2308          */                                      1923          */
2309         if (p->nr_cpus_allowed != 1 &&           1924         if (p->nr_cpus_allowed != 1 &&
2310             cpudl_find(&rq->rd->cpudl, p, NUL    1925             cpudl_find(&rq->rd->cpudl, p, NULL))
2311                 return;                          1926                 return;
2312                                                  1927 
2313         resched_curr(rq);                        1928         resched_curr(rq);
2314 }                                                1929 }
2315                                                  1930 
2316 static int balance_dl(struct rq *rq, struct t    1931 static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2317 {                                                1932 {
2318         if (!on_dl_rq(&p->dl) && need_pull_dl    1933         if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2319                 /*                               1934                 /*
2320                  * This is OK, because curren    1935                  * This is OK, because current is on_cpu, which avoids it being
2321                  * picked for load-balance an    1936                  * picked for load-balance and preemption/IRQs are still
2322                  * disabled avoiding further     1937                  * disabled avoiding further scheduler activity on it and we've
2323                  * not yet started the pickin    1938                  * not yet started the picking loop.
2324                  */                              1939                  */
2325                 rq_unpin_lock(rq, rf);           1940                 rq_unpin_lock(rq, rf);
2326                 pull_dl_task(rq);                1941                 pull_dl_task(rq);
2327                 rq_repin_lock(rq, rf);           1942                 rq_repin_lock(rq, rf);
2328         }                                        1943         }
2329                                                  1944 
2330         return sched_stop_runnable(rq) || sch    1945         return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2331 }                                                1946 }
2332 #endif /* CONFIG_SMP */                          1947 #endif /* CONFIG_SMP */
2333                                                  1948 
2334 /*                                               1949 /*
2335  * Only called when both the current and waki    1950  * Only called when both the current and waking task are -deadline
2336  * tasks.                                        1951  * tasks.
2337  */                                              1952  */
2338 static void wakeup_preempt_dl(struct rq *rq,  !! 1953 static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
2339                                   int flags)     1954                                   int flags)
2340 {                                                1955 {
2341         if (dl_entity_preempt(&p->dl, &rq->cu    1956         if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
2342                 resched_curr(rq);                1957                 resched_curr(rq);
2343                 return;                          1958                 return;
2344         }                                        1959         }
2345                                                  1960 
2346 #ifdef CONFIG_SMP                                1961 #ifdef CONFIG_SMP
2347         /*                                       1962         /*
2348          * In the unlikely case current and p    1963          * In the unlikely case current and p have the same deadline
2349          * let us try to decide what's the be    1964          * let us try to decide what's the best thing to do...
2350          */                                      1965          */
2351         if ((p->dl.deadline == rq->curr->dl.d    1966         if ((p->dl.deadline == rq->curr->dl.deadline) &&
2352             !test_tsk_need_resched(rq->curr))    1967             !test_tsk_need_resched(rq->curr))
2353                 check_preempt_equal_dl(rq, p)    1968                 check_preempt_equal_dl(rq, p);
2354 #endif /* CONFIG_SMP */                          1969 #endif /* CONFIG_SMP */
2355 }                                                1970 }
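Taken together with check_preempt_equal_dl() above, the wakeup-time decision is roughly: preempt on a strictly earlier deadline, and on a tie only reschedule when the running task can be pushed elsewhere while the waking one cannot; otherwise leave things to the push/pull machinery. A compressed sketch of that decision with hypothetical types, ignoring the special stop-class case and the cpudl feasibility lookups:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum wakeup_action { KEEP_RUNNING, PREEMPT, DEFER_TO_PUSH_PULL };

struct toy_dl_task {
	uint64_t deadline;	/* absolute deadline            */
	bool	 migratable;	/* allowed on more than one CPU */
};

static bool before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;	/* wrap-safe, as in dl_time_before() */
}

static enum wakeup_action
wakeup_decision(const struct toy_dl_task *waking, const struct toy_dl_task *curr)
{
	if (before(waking->deadline, curr->deadline))
		return PREEMPT;			/* strictly earlier wins */

	if (waking->deadline != curr->deadline)
		return KEEP_RUNNING;		/* later deadline: no preemption */

	/* Tie-break: rescheduling only helps when the running task can be
	 * pushed to another CPU while the waking one is stuck here. */
	if (curr->migratable && !waking->migratable)
		return PREEMPT;
	return DEFER_TO_PUSH_PULL;
}

int main(void)
{
	struct toy_dl_task curr   = { .deadline = 100, .migratable = true  };
	struct toy_dl_task waking = { .deadline = 100, .migratable = false };

	printf("%d\n", wakeup_decision(&waking, &curr));	/* 1 == PREEMPT */
	return 0;
}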
2356                                                  1971 
2357 #ifdef CONFIG_SCHED_HRTICK                       1972 #ifdef CONFIG_SCHED_HRTICK
2358 static void start_hrtick_dl(struct rq *rq, st !! 1973 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2359 {                                                1974 {
2360         hrtick_start(rq, dl_se->runtime);     !! 1975         hrtick_start(rq, p->dl.runtime);
2361 }                                                1976 }
2362 #else /* !CONFIG_SCHED_HRTICK */                 1977 #else /* !CONFIG_SCHED_HRTICK */
2363 static void start_hrtick_dl(struct rq *rq, st !! 1978 static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
2364 {                                                1979 {
2365 }                                                1980 }
2366 #endif                                           1981 #endif
2367                                                  1982 
2368 static void set_next_task_dl(struct rq *rq, s    1983 static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
2369 {                                                1984 {
2370         struct sched_dl_entity *dl_se = &p->d    1985         struct sched_dl_entity *dl_se = &p->dl;
2371         struct dl_rq *dl_rq = &rq->dl;           1986         struct dl_rq *dl_rq = &rq->dl;
2372                                                  1987 
2373         p->se.exec_start = rq_clock_task(rq);    1988         p->se.exec_start = rq_clock_task(rq);
2374         if (on_dl_rq(&p->dl))                    1989         if (on_dl_rq(&p->dl))
2375                 update_stats_wait_end_dl(dl_r    1990                 update_stats_wait_end_dl(dl_rq, dl_se);
2376                                                  1991 
2377         /* You can't push away the running ta    1992         /* You can't push away the running task */
2378         dequeue_pushable_dl_task(rq, p);         1993         dequeue_pushable_dl_task(rq, p);
2379                                                  1994 
2380         if (!first)                              1995         if (!first)
2381                 return;                          1996                 return;
2382                                                  1997 
                                                   >> 1998         if (hrtick_enabled_dl(rq))
                                                   >> 1999                 start_hrtick_dl(rq, p);
                                                   >> 2000 
2383         if (rq->curr->sched_class != &dl_sche    2001         if (rq->curr->sched_class != &dl_sched_class)
2384                 update_dl_rq_load_avg(rq_cloc    2002                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2385                                                  2003 
2386         deadline_queue_push_tasks(rq);           2004         deadline_queue_push_tasks(rq);
2387                                               << 
2388         if (hrtick_enabled_dl(rq))            << 
2389                 start_hrtick_dl(rq, &p->dl);  << 
2390 }                                                2005 }
2391                                                  2006 
2392 static struct sched_dl_entity *pick_next_dl_e    2007 static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
2393 {                                                2008 {
2394         struct rb_node *left = rb_first_cache    2009         struct rb_node *left = rb_first_cached(&dl_rq->root);
2395                                                  2010 
2396         if (!left)                               2011         if (!left)
2397                 return NULL;                     2012                 return NULL;
2398                                                  2013 
2399         return __node_2_dle(left);               2014         return __node_2_dle(left);
2400 }                                                2015 }
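Because __dl_less() keeps the tree sorted by absolute deadline, rb_first_cached() above is a constant-time "earliest deadline first" pick. The same policy on a handful of tasks, shown with a plain array scan in userspace (illustrative only):

#include <stdint.h>
#include <stdio.h>

struct toy_task { const char *name; uint64_t deadline; };

/* EDF pick: among runnable tasks, the one with the earliest absolute
 * deadline runs next. */
static const struct toy_task *edf_pick(const struct toy_task *t, int n)
{
	const struct toy_task *best = NULL;

	for (int i = 0; i < n; i++)
		if (!best || (int64_t)(t[i].deadline - best->deadline) < 0)
			best = &t[i];
	return best;
}

int main(void)
{
	const struct toy_task tasks[] = {
		{ "video",   30000000 },	/* due in 30 ms */
		{ "audio",   10000000 },	/* due in 10 ms */
		{ "logging", 90000000 },	/* due in 90 ms */
	};

	printf("next: %s\n", edf_pick(tasks, 3)->name);	/* audio */
	return 0;
}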
2401                                                  2016 
2402 /*                                            !! 2017 static struct task_struct *pick_task_dl(struct rq *rq)
2403  * __pick_next_task_dl - Helper to pick the n << 
2404  * @rq: The runqueue to pick the next task fr << 
2405  */                                           << 
2406 static struct task_struct *__pick_task_dl(str << 
2407 {                                                2018 {
2408         struct sched_dl_entity *dl_se;           2019         struct sched_dl_entity *dl_se;
2409         struct dl_rq *dl_rq = &rq->dl;           2020         struct dl_rq *dl_rq = &rq->dl;
2410         struct task_struct *p;                   2021         struct task_struct *p;
2411                                                  2022 
2412 again:                                        << 
2413         if (!sched_dl_runnable(rq))              2023         if (!sched_dl_runnable(rq))
2414                 return NULL;                     2024                 return NULL;
2415                                                  2025 
2416         dl_se = pick_next_dl_entity(dl_rq);      2026         dl_se = pick_next_dl_entity(dl_rq);
2417         WARN_ON_ONCE(!dl_se);                    2027         WARN_ON_ONCE(!dl_se);
2418                                               !! 2028         p = dl_task_of(dl_se);
2419         if (dl_server(dl_se)) {               << 
2420                 p = dl_se->server_pick_task(d << 
2421                 if (!p) {                     << 
2422                         dl_se->dl_yielded = 1 << 
2423                         update_curr_dl_se(rq, << 
2424                         goto again;           << 
2425                 }                             << 
2426                 rq->dl_server = dl_se;        << 
2427         } else {                              << 
2428                 p = dl_task_of(dl_se);        << 
2429         }                                     << 
2430                                                  2029 
2431         return p;                                2030         return p;
2432 }                                                2031 }
2433                                                  2032 
2434 static struct task_struct *pick_task_dl(struc !! 2033 static struct task_struct *pick_next_task_dl(struct rq *rq)
2435 {                                                2034 {
2436         return __pick_task_dl(rq);            !! 2035         struct task_struct *p;
                                                   >> 2036 
                                                   >> 2037         p = pick_task_dl(rq);
                                                   >> 2038         if (p)
                                                   >> 2039                 set_next_task_dl(rq, p, true);
                                                   >> 2040 
                                                   >> 2041         return p;
2437 }                                                2042 }
2438                                                  2043 
2439 static void put_prev_task_dl(struct rq *rq, s !! 2044 static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
2440 {                                                2045 {
2441         struct sched_dl_entity *dl_se = &p->d    2046         struct sched_dl_entity *dl_se = &p->dl;
2442         struct dl_rq *dl_rq = &rq->dl;           2047         struct dl_rq *dl_rq = &rq->dl;
2443                                                  2048 
2444         if (on_dl_rq(&p->dl))                    2049         if (on_dl_rq(&p->dl))
2445                 update_stats_wait_start_dl(dl    2050                 update_stats_wait_start_dl(dl_rq, dl_se);
2446                                                  2051 
2447         update_curr_dl(rq);                      2052         update_curr_dl(rq);
2448                                                  2053 
2449         update_dl_rq_load_avg(rq_clock_pelt(r    2054         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2450         if (on_dl_rq(&p->dl) && p->nr_cpus_al    2055         if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
2451                 enqueue_pushable_dl_task(rq,     2056                 enqueue_pushable_dl_task(rq, p);
2452 }                                                2057 }
2453                                                  2058 
2454 /*                                               2059 /*
2455  * scheduler tick hitting a task of our sched    2060  * scheduler tick hitting a task of our scheduling class.
2456  *                                               2061  *
2457  * NOTE: This function can be called remotely    2062  * NOTE: This function can be called remotely by the tick offload that
2458  * goes along full dynticks. Therefore no loc    2063  * goes along full dynticks. Therefore no local assumption can be made
2459  * and everything must be accessed through th    2064  * and everything must be accessed through the @rq and @curr passed in
2460  * parameters.                                   2065  * parameters.
2461  */                                              2066  */
2462 static void task_tick_dl(struct rq *rq, struc    2067 static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2463 {                                                2068 {
2464         update_curr_dl(rq);                      2069         update_curr_dl(rq);
2465                                                  2070 
2466         update_dl_rq_load_avg(rq_clock_pelt(r    2071         update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2467         /*                                       2072         /*
2468          * Even when we have runtime, update_    2073          * Even when we have runtime, update_curr_dl() might have resulted in us
2469          * not being the leftmost task anymor    2074          * not being the leftmost task anymore. In that case NEED_RESCHED will
2470          * be set and schedule() will start a    2075          * be set and schedule() will start a new hrtick for the next task.
2471          */                                      2076          */
2472         if (hrtick_enabled_dl(rq) && queued &    2077         if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
2473             is_leftmost(&p->dl, &rq->dl))     !! 2078             is_leftmost(p, &rq->dl))
2474                 start_hrtick_dl(rq, &p->dl);  !! 2079                 start_hrtick_dl(rq, p);
2475 }                                                2080 }
2476                                                  2081 
2477 static void task_fork_dl(struct task_struct *    2082 static void task_fork_dl(struct task_struct *p)
2478 {                                                2083 {
2479         /*                                       2084         /*
2480          * SCHED_DEADLINE tasks cannot fork a    2085          * SCHED_DEADLINE tasks cannot fork and this is achieved through
2481          * sched_fork()                          2086          * sched_fork()
2482          */                                      2087          */
2483 }                                                2088 }
2484                                                  2089 
2485 #ifdef CONFIG_SMP                                2090 #ifdef CONFIG_SMP
2486                                                  2091 
2487 /* Only try algorithms three times */            2092 /* Only try algorithms three times */
2488 #define DL_MAX_TRIES 3                           2093 #define DL_MAX_TRIES 3
2489                                                  2094 
2490 static int pick_dl_task(struct rq *rq, struct    2095 static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2491 {                                                2096 {
2492         if (!task_on_cpu(rq, p) &&               2097         if (!task_on_cpu(rq, p) &&
2493             cpumask_test_cpu(cpu, &p->cpus_ma    2098             cpumask_test_cpu(cpu, &p->cpus_mask))
2494                 return 1;                        2099                 return 1;
2495         return 0;                                2100         return 0;
2496 }                                                2101 }
2497                                                  2102 
2498 /*                                               2103 /*
2499  * Return the earliest pushable rq's task, wh    2104  * Return the earliest pushable rq's task, which is suitable to be executed
2500  * on the CPU, NULL otherwise:                   2105  * on the CPU, NULL otherwise:
2501  */                                              2106  */
2502 static struct task_struct *pick_earliest_push    2107 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2503 {                                                2108 {
2504         struct task_struct *p = NULL;            2109         struct task_struct *p = NULL;
2505         struct rb_node *next_node;               2110         struct rb_node *next_node;
2506                                                  2111 
2507         if (!has_pushable_dl_tasks(rq))          2112         if (!has_pushable_dl_tasks(rq))
2508                 return NULL;                     2113                 return NULL;
2509                                                  2114 
2510         next_node = rb_first_cached(&rq->dl.p    2115         next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2511                                                  2116 
2512 next_node:                                       2117 next_node:
2513         if (next_node) {                         2118         if (next_node) {
2514                 p = __node_2_pdl(next_node);     2119                 p = __node_2_pdl(next_node);
2515                                                  2120 
2516                 if (pick_dl_task(rq, p, cpu))    2121                 if (pick_dl_task(rq, p, cpu))
2517                         return p;                2122                         return p;
2518                                                  2123 
2519                 next_node = rb_next(next_node    2124                 next_node = rb_next(next_node);
2520                 goto next_node;                  2125                 goto next_node;
2521         }                                        2126         }
2522                                                  2127 
2523         return NULL;                             2128         return NULL;
2524 }                                                2129 }
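
The walk above starts at the cached leftmost node of the pushable-tasks rb-tree (earliest deadline) and advances with rb_next() until it finds a task allowed on the target CPU. A sketch of the same scan over a deadline-sorted array, which plays the role of the rb-tree here; all names are illustrative:

#include <stddef.h>
#include <stdint.h>

struct toy_dl_task {
        uint64_t deadline;      /* absolute deadline in ns              */
        uint64_t cpus_mask;     /* bit n set => allowed to run on CPU n */
};

/*
 * @sorted is ordered by increasing deadline, standing in for the
 * rb_first_cached()/rb_next() walk: return the earliest entry that may
 * run on @cpu, or NULL if none qualifies.
 */
struct toy_dl_task *toy_pick_earliest_pushable(struct toy_dl_task *sorted,
                                               size_t n, int cpu)
{
        for (size_t i = 0; i < n; i++) {
                if (sorted[i].cpus_mask & (1ULL << cpu))
                        return &sorted[i];
        }
        return NULL;
}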
2525                                                  2130 
2526 static DEFINE_PER_CPU(cpumask_var_t, local_cp    2131 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2527                                                  2132 
2528 static int find_later_rq(struct task_struct *    2133 static int find_later_rq(struct task_struct *task)
2529 {                                                2134 {
2530         struct sched_domain *sd;                 2135         struct sched_domain *sd;
2531         struct cpumask *later_mask = this_cpu    2136         struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
2532         int this_cpu = smp_processor_id();       2137         int this_cpu = smp_processor_id();
2533         int cpu = task_cpu(task);                2138         int cpu = task_cpu(task);
2534                                                  2139 
2535         /* Make sure the mask is initialized     2140         /* Make sure the mask is initialized first */
2536         if (unlikely(!later_mask))               2141         if (unlikely(!later_mask))
2537                 return -1;                       2142                 return -1;
2538                                                  2143 
2539         if (task->nr_cpus_allowed == 1)          2144         if (task->nr_cpus_allowed == 1)
2540                 return -1;                       2145                 return -1;
2541                                                  2146 
2542         /*                                       2147         /*
2543          * We have to consider system topolog    2148          * We have to consider system topology and task affinity
2544          * first, then we can look for a suit    2149          * first, then we can look for a suitable CPU.
2545          */                                      2150          */
2546         if (!cpudl_find(&task_rq(task)->rd->c    2151         if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
2547                 return -1;                       2152                 return -1;
2548                                                  2153 
2549         /*                                       2154         /*
2550          * If we are here, some targets have     2155          * If we are here, some targets have been found, including
2551          * the most suitable which is, among     2156          * the most suitable which is, among the runqueues where the
2552          * current tasks have later deadlines    2157          * current tasks have later deadlines than the task's one, the
2553          * rq with the latest possible one.      2158          * rq with the latest possible one.
2554          *                                       2159          *
2555          * Now we check how well this matches    2160          * Now we check how well this matches with task's
2556          * affinity and system topology.         2161          * affinity and system topology.
2557          *                                       2162          *
2558          * The last CPU where the task ran is    2163          * The last CPU where the task ran is our first
2559          * guess, since it is most likely cac    2164          * guess, since it is most likely cache-hot there.
2560          */                                      2165          */
2561         if (cpumask_test_cpu(cpu, later_mask)    2166         if (cpumask_test_cpu(cpu, later_mask))
2562                 return cpu;                      2167                 return cpu;
2563         /*                                       2168         /*
2564          * Check if this_cpu is to be skipped    2169          * Check if this_cpu is to be skipped (i.e., it is
2565          * not in the mask) or not.              2170          * not in the mask) or not.
2566          */                                      2171          */
2567         if (!cpumask_test_cpu(this_cpu, later    2172         if (!cpumask_test_cpu(this_cpu, later_mask))
2568                 this_cpu = -1;                   2173                 this_cpu = -1;
2569                                                  2174 
2570         rcu_read_lock();                         2175         rcu_read_lock();
2571         for_each_domain(cpu, sd) {               2176         for_each_domain(cpu, sd) {
2572                 if (sd->flags & SD_WAKE_AFFIN    2177                 if (sd->flags & SD_WAKE_AFFINE) {
2573                         int best_cpu;            2178                         int best_cpu;
2574                                                  2179 
2575                         /*                       2180                         /*
2576                          * If possible, preem    2181                          * If possible, preempting this_cpu is
2577                          * cheaper than migra    2182                          * cheaper than migrating.
2578                          */                      2183                          */
2579                         if (this_cpu != -1 &&    2184                         if (this_cpu != -1 &&
2580                             cpumask_test_cpu(    2185                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2581                                 rcu_read_unlo    2186                                 rcu_read_unlock();
2582                                 return this_c    2187                                 return this_cpu;
2583                         }                        2188                         }
2584                                                  2189 
2585                         best_cpu = cpumask_an    2190                         best_cpu = cpumask_any_and_distribute(later_mask,
2586                                                  2191                                                               sched_domain_span(sd));
2587                         /*                       2192                         /*
2588                          * Last chance: if a     2193                          * Last chance: if a CPU being in both later_mask
2589                          * and current sd spa    2194                          * and current sd span is valid, that becomes our
2590                          * choice. Of course,    2195                          * choice. Of course, the latest possible CPU is
2591                          * already under cons    2196                          * already under consideration through later_mask.
2592                          */                      2197                          */
2593                         if (best_cpu < nr_cpu    2198                         if (best_cpu < nr_cpu_ids) {
2594                                 rcu_read_unlo    2199                                 rcu_read_unlock();
2595                                 return best_c    2200                                 return best_cpu;
2596                         }                        2201                         }
2597                 }                                2202                 }
2598         }                                        2203         }
2599         rcu_read_unlock();                       2204         rcu_read_unlock();
2600                                                  2205 
2601         /*                                       2206         /*
2602          * At this point, all our guesses fai    2207          * At this point, all our guesses failed, we just return
2603          * 'something', and let the caller so    2208          * 'something', and let the caller sort the things out.
2604          */                                      2209          */
2605         if (this_cpu != -1)                      2210         if (this_cpu != -1)
2606                 return this_cpu;                 2211                 return this_cpu;
2607                                                  2212 
2608         cpu = cpumask_any_distribute(later_ma    2213         cpu = cpumask_any_distribute(later_mask);
2609         if (cpu < nr_cpu_ids)                    2214         if (cpu < nr_cpu_ids)
2610                 return cpu;                      2215                 return cpu;
2611                                                  2216 
2612         return -1;                               2217         return -1;
2613 }                                                2218 }
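
find_later_rq() layers several heuristics on top of the cpudl result: prefer the task's last CPU (likely cache-hot), then the local CPU when a wake-affine domain covers it (preemption is cheaper than migration), then any CPU sharing such a domain, and only then fall back to an arbitrary member of later_mask. A condensed, single-level sketch of that decision order with bitmasks in place of cpumasks and one flat "domain span" instead of the sched_domain hierarchy; all names are illustrative:

#include <stdint.h>

/* Index of the lowest set bit, or -1 for an empty mask. */
int toy_first_cpu(uint64_t mask)
{
        for (int i = 0; i < 64; i++)
                if (mask & (1ULL << i))
                        return i;
        return -1;
}

/*
 * Condensed decision order of find_later_rq().  @later_mask holds the
 * CPUs whose current deadline is later than the task's (what cpudl_find()
 * would report), @last_cpu is where the task last ran, @this_cpu is the
 * CPU doing the push, and @affine_span approximates the SD_WAKE_AFFINE
 * domain around @this_cpu.
 */
int toy_find_later_cpu(uint64_t later_mask, int last_cpu, int this_cpu,
                       uint64_t affine_span)
{
        if (!later_mask)
                return -1;                      /* no suitable target at all  */

        if (later_mask & (1ULL << last_cpu))
                return last_cpu;                /* most likely cache-hot      */

        if ((later_mask & (1ULL << this_cpu)) &&
            (affine_span & (1ULL << this_cpu)))
                return this_cpu;                /* preempting beats migrating */

        if (later_mask & affine_span)           /* topologically close target */
                return toy_first_cpu(later_mask & affine_span);

        if (later_mask & (1ULL << this_cpu))
                return this_cpu;                /* last resort: stay local    */

        return toy_first_cpu(later_mask);       /* otherwise anything valid   */
}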
2614                                                  2219 
2615 /* Locks the rq it finds */                      2220 /* Locks the rq it finds */
2616 static struct rq *find_lock_later_rq(struct t    2221 static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2617 {                                                2222 {
2618         struct rq *later_rq = NULL;              2223         struct rq *later_rq = NULL;
2619         int tries;                               2224         int tries;
2620         int cpu;                                 2225         int cpu;
2621                                                  2226 
2622         for (tries = 0; tries < DL_MAX_TRIES;    2227         for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2623                 cpu = find_later_rq(task);       2228                 cpu = find_later_rq(task);
2624                                                  2229 
2625                 if ((cpu == -1) || (cpu == rq    2230                 if ((cpu == -1) || (cpu == rq->cpu))
2626                         break;                   2231                         break;
2627                                                  2232 
2628                 later_rq = cpu_rq(cpu);          2233                 later_rq = cpu_rq(cpu);
2629                                                  2234 
2630                 if (!dl_task_is_earliest_dead    2235                 if (!dl_task_is_earliest_deadline(task, later_rq)) {
2631                         /*                       2236                         /*
2632                          * Target rq has task    2237                          * Target rq has tasks of equal or earlier deadline,
2633                          * retrying does not     2238                          * retrying does not release any lock and is unlikely
2634                          * to yield a differe    2239                          * to yield a different result.
2635                          */                      2240                          */
2636                         later_rq = NULL;         2241                         later_rq = NULL;
2637                         break;                   2242                         break;
2638                 }                                2243                 }
2639                                                  2244 
2640                 /* Retry if something changed    2245                 /* Retry if something changed. */
2641                 if (double_lock_balance(rq, l    2246                 if (double_lock_balance(rq, later_rq)) {
2642                         if (unlikely(task_rq(    2247                         if (unlikely(task_rq(task) != rq ||
2643                                      !cpumask    2248                                      !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
2644                                      task_on_    2249                                      task_on_cpu(rq, task) ||
2645                                      !dl_task    2250                                      !dl_task(task) ||
2646                                      is_migra    2251                                      is_migration_disabled(task) ||
2647                                      !task_on    2252                                      !task_on_rq_queued(task))) {
2648                                 double_unlock    2253                                 double_unlock_balance(rq, later_rq);
2649                                 later_rq = NU    2254                                 later_rq = NULL;
2650                                 break;           2255                                 break;
2651                         }                        2256                         }
2652                 }                                2257                 }
2653                                                  2258 
2654                 /*                               2259                 /*
2655                  * If the rq we found has no     2260                  * If the rq we found has no -deadline task, or
2656                  * its earliest one has a lat    2261                  * its earliest one has a later deadline than our
2657                  * task, the rq is a good one    2262                  * task, the rq is a good one.
2658                  */                              2263                  */
2659                 if (dl_task_is_earliest_deadl    2264                 if (dl_task_is_earliest_deadline(task, later_rq))
2660                         break;                   2265                         break;
2661                                                  2266 
2662                 /* Otherwise we try again. */    2267                 /* Otherwise we try again. */
2663                 double_unlock_balance(rq, lat    2268                 double_unlock_balance(rq, later_rq);
2664                 later_rq = NULL;                 2269                 later_rq = NULL;
2665         }                                        2270         }
2666                                                  2271 
2667         return later_rq;                         2272         return later_rq;
2668 }                                                2273 }
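
Because double_lock_balance() may drop rq->lock to take the two runqueue locks in a global order, the candidate chosen with only rq->lock held can go stale, so the function revalidates and retries a bounded number of times. A user-space sketch of that drop, relock, revalidate pattern with pthread mutexes ordered by address; the structures are illustrative, and unlike the kernel the destination is not recomputed on each retry:

#include <pthread.h>
#include <stddef.h>

#define TOY_MAX_TRIES 3

struct toy_rq {
        pthread_mutex_t lock;
};

struct toy_task {
        struct toy_rq *rq;      /* runqueue the task currently sits on */
};

/*
 * Caller holds src->lock and dst != src.  Lock @dst as well, respecting
 * a global "lower address first" order, which may force src->lock to be
 * dropped; in that window the task can migrate, so recheck and retry.
 * Returns the locked destination, or NULL with only src->lock held.
 */
struct toy_rq *toy_lock_dst(struct toy_task *p, struct toy_rq *src,
                            struct toy_rq *dst)
{
        for (int tries = 0; tries < TOY_MAX_TRIES; tries++) {
                if (dst > src) {
                        /* Lock order already respected: just take dst. */
                        pthread_mutex_lock(&dst->lock);
                } else {
                        /* Drop src, lock in order, re-take src. */
                        pthread_mutex_unlock(&src->lock);
                        pthread_mutex_lock(&dst->lock);
                        pthread_mutex_lock(&src->lock);
                }

                if (p->rq == src)
                        return dst;     /* still ours, both locks held */

                /* The task moved while src->lock was down: undo, retry. */
                pthread_mutex_unlock(&dst->lock);
        }
        return NULL;
}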
2669                                                  2274 
2670 static struct task_struct *pick_next_pushable    2275 static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2671 {                                                2276 {
2672         struct task_struct *p;                   2277         struct task_struct *p;
2673                                                  2278 
2674         if (!has_pushable_dl_tasks(rq))          2279         if (!has_pushable_dl_tasks(rq))
2675                 return NULL;                     2280                 return NULL;
2676                                                  2281 
2677         p = __node_2_pdl(rb_first_cached(&rq-    2282         p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
2678                                                  2283 
2679         WARN_ON_ONCE(rq->cpu != task_cpu(p));    2284         WARN_ON_ONCE(rq->cpu != task_cpu(p));
2680         WARN_ON_ONCE(task_current(rq, p));       2285         WARN_ON_ONCE(task_current(rq, p));
2681         WARN_ON_ONCE(p->nr_cpus_allowed <= 1)    2286         WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
2682                                                  2287 
2683         WARN_ON_ONCE(!task_on_rq_queued(p));     2288         WARN_ON_ONCE(!task_on_rq_queued(p));
2684         WARN_ON_ONCE(!dl_task(p));               2289         WARN_ON_ONCE(!dl_task(p));
2685                                                  2290 
2686         return p;                                2291         return p;
2687 }                                                2292 }
2688                                                  2293 
2689 /*                                               2294 /*
2690  * See if the non running -deadline tasks on     2295  * See if the non running -deadline tasks on this rq
2691  * can be sent to some other CPU where they c    2296  * can be sent to some other CPU where they can preempt
2692  * and start executing.                          2297  * and start executing.
2693  */                                              2298  */
2694 static int push_dl_task(struct rq *rq)           2299 static int push_dl_task(struct rq *rq)
2695 {                                                2300 {
2696         struct task_struct *next_task;           2301         struct task_struct *next_task;
2697         struct rq *later_rq;                     2302         struct rq *later_rq;
2698         int ret = 0;                             2303         int ret = 0;
2699                                                  2304 
                                                   >> 2305         if (!rq->dl.overloaded)
                                                   >> 2306                 return 0;
                                                   >> 2307 
2700         next_task = pick_next_pushable_dl_tas    2308         next_task = pick_next_pushable_dl_task(rq);
2701         if (!next_task)                          2309         if (!next_task)
2702                 return 0;                        2310                 return 0;
2703                                                  2311 
2704 retry:                                           2312 retry:
2705         /*                                       2313         /*
2706          * If next_task preempts rq->curr, an    2314          * If next_task preempts rq->curr, and rq->curr
2707          * can move away, it makes sense to j    2315          * can move away, it makes sense to just reschedule
2708          * without going further in pushing n    2316          * without going further in pushing next_task.
2709          */                                      2317          */
2710         if (dl_task(rq->curr) &&                 2318         if (dl_task(rq->curr) &&
2711             dl_time_before(next_task->dl.dead    2319             dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
2712             rq->curr->nr_cpus_allowed > 1) {     2320             rq->curr->nr_cpus_allowed > 1) {
2713                 resched_curr(rq);                2321                 resched_curr(rq);
2714                 return 0;                        2322                 return 0;
2715         }                                        2323         }
2716                                                  2324 
2717         if (is_migration_disabled(next_task))    2325         if (is_migration_disabled(next_task))
2718                 return 0;                        2326                 return 0;
2719                                                  2327 
2720         if (WARN_ON(next_task == rq->curr))      2328         if (WARN_ON(next_task == rq->curr))
2721                 return 0;                        2329                 return 0;
2722                                                  2330 
2723         /* We might release rq lock */           2331         /* We might release rq lock */
2724         get_task_struct(next_task);              2332         get_task_struct(next_task);
2725                                                  2333 
2726         /* Will lock the rq it'll find */        2334         /* Will lock the rq it'll find */
2727         later_rq = find_lock_later_rq(next_ta    2335         later_rq = find_lock_later_rq(next_task, rq);
2728         if (!later_rq) {                         2336         if (!later_rq) {
2729                 struct task_struct *task;        2337                 struct task_struct *task;
2730                                                  2338 
2731                 /*                               2339                 /*
2732                  * We must check all this aga    2340                  * We must check all this again, since
2733                  * find_lock_later_rq release    2341                  * find_lock_later_rq releases rq->lock and it is
2734                  * then possible that next_ta    2342                  * then possible that next_task has migrated.
2735                  */                              2343                  */
2736                 task = pick_next_pushable_dl_    2344                 task = pick_next_pushable_dl_task(rq);
2737                 if (task == next_task) {         2345                 if (task == next_task) {
2738                         /*                       2346                         /*
2739                          * The task is still     2347                          * The task is still there. We don't try
2740                          * again, some other     2348                          * again, some other CPU will pull it when ready.
2741                          */                      2349                          */
2742                         goto out;                2350                         goto out;
2743                 }                                2351                 }
2744                                                  2352 
2745                 if (!task)                       2353                 if (!task)
2746                         /* No more tasks */      2354                         /* No more tasks */
2747                         goto out;                2355                         goto out;
2748                                                  2356 
2749                 put_task_struct(next_task);      2357                 put_task_struct(next_task);
2750                 next_task = task;                2358                 next_task = task;
2751                 goto retry;                      2359                 goto retry;
2752         }                                        2360         }
2753                                                  2361 
2754         deactivate_task(rq, next_task, 0);       2362         deactivate_task(rq, next_task, 0);
2755         set_task_cpu(next_task, later_rq->cpu    2363         set_task_cpu(next_task, later_rq->cpu);
2756         activate_task(later_rq, next_task, 0)    2364         activate_task(later_rq, next_task, 0);
2757         ret = 1;                                 2365         ret = 1;
2758                                                  2366 
2759         resched_curr(later_rq);                  2367         resched_curr(later_rq);
2760                                                  2368 
2761         double_unlock_balance(rq, later_rq);     2369         double_unlock_balance(rq, later_rq);
2762                                                  2370 
2763 out:                                             2371 out:
2764         put_task_struct(next_task);              2372         put_task_struct(next_task);
2765                                                  2373 
2766         return ret;                              2374         return ret;
2767 }                                                2375 }
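
The early return at the top of the retry path encodes a cheap-first policy: if the candidate would preempt the local current task anyway, and current is itself free to migrate later, a local reschedule is preferred over a push. A small sketch of that predicate; struct and helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct toy_dl {
        uint64_t deadline;              /* absolute deadline in ns   */
        int      nr_cpus_allowed;       /* size of the affinity mask */
};

/* Wraparound-safe "a precedes b". */
bool toy_time_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

/*
 * Prefer resched_curr() over pushing: the candidate beats the local
 * current deadline task, and current can be pushed away later because
 * it is not pinned to this CPU.
 */
bool toy_prefer_local_preempt(const struct toy_dl *next,
                              const struct toy_dl *curr, bool curr_is_dl)
{
        return curr_is_dl &&
               toy_time_before(next->deadline, curr->deadline) &&
               curr->nr_cpus_allowed > 1;
}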
2768                                                  2376 
2769 static void push_dl_tasks(struct rq *rq)         2377 static void push_dl_tasks(struct rq *rq)
2770 {                                                2378 {
2771         /* push_dl_task() will return true if    2379         /* push_dl_task() will return true if it moved a -deadline task */
2772         while (push_dl_task(rq))                 2380         while (push_dl_task(rq))
2773                 ;                                2381                 ;
2774 }                                                2382 }
2775                                                  2383 
2776 static void pull_dl_task(struct rq *this_rq)     2384 static void pull_dl_task(struct rq *this_rq)
2777 {                                                2385 {
2778         int this_cpu = this_rq->cpu, cpu;        2386         int this_cpu = this_rq->cpu, cpu;
2779         struct task_struct *p, *push_task;       2387         struct task_struct *p, *push_task;
2780         bool resched = false;                    2388         bool resched = false;
2781         struct rq *src_rq;                       2389         struct rq *src_rq;
2782         u64 dmin = LONG_MAX;                     2390         u64 dmin = LONG_MAX;
2783                                                  2391 
2784         if (likely(!dl_overloaded(this_rq)))     2392         if (likely(!dl_overloaded(this_rq)))
2785                 return;                          2393                 return;
2786                                                  2394 
2787         /*                                       2395         /*
2788          * Match the barrier from dl_set_over    2396          * Match the barrier from dl_set_overloaded; this guarantees that if we
2789          * see overloaded we must also see th    2397          * see overloaded we must also see the dlo_mask bit.
2790          */                                      2398          */
2791         smp_rmb();                               2399         smp_rmb();
2792                                                  2400 
2793         for_each_cpu(cpu, this_rq->rd->dlo_ma    2401         for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2794                 if (this_cpu == cpu)             2402                 if (this_cpu == cpu)
2795                         continue;                2403                         continue;
2796                                                  2404 
2797                 src_rq = cpu_rq(cpu);            2405                 src_rq = cpu_rq(cpu);
2798                                                  2406 
2799                 /*                               2407                 /*
2800                  * It looks racy, and it is!  !! 2408                  * It looks racy, and it is! However, as in sched_rt.c,
2801                  * we are fine with this.        2409                  * we are fine with this.
2802                  */                              2410                  */
2803                 if (this_rq->dl.dl_nr_running    2411                 if (this_rq->dl.dl_nr_running &&
2804                     dl_time_before(this_rq->d    2412                     dl_time_before(this_rq->dl.earliest_dl.curr,
2805                                    src_rq->dl    2413                                    src_rq->dl.earliest_dl.next))
2806                         continue;                2414                         continue;
2807                                                  2415 
2808                 /* Might drop this_rq->lock *    2416                 /* Might drop this_rq->lock */
2809                 push_task = NULL;                2417                 push_task = NULL;
2810                 double_lock_balance(this_rq,     2418                 double_lock_balance(this_rq, src_rq);
2811                                                  2419 
2812                 /*                               2420                 /*
2813                  * If there are no more pulla    2421                  * If there are no more pullable tasks on the
2814                  * rq, we're done with it.       2422                  * rq, we're done with it.
2815                  */                              2423                  */
2816                 if (src_rq->dl.dl_nr_running     2424                 if (src_rq->dl.dl_nr_running <= 1)
2817                         goto skip;               2425                         goto skip;
2818                                                  2426 
2819                 p = pick_earliest_pushable_dl    2427                 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
2820                                                  2428 
2821                 /*                               2429                 /*
2822                  * We found a task to be pull    2430                  * We found a task to be pulled if:
2823                  *  - it preempts our current    2431                  *  - it preempts our current (if there's one),
2824                  *  - it will preempt the las    2432                  *  - it will preempt the last one we pulled (if any).
2825                  */                              2433                  */
2826                 if (p && dl_time_before(p->dl    2434                 if (p && dl_time_before(p->dl.deadline, dmin) &&
2827                     dl_task_is_earliest_deadl    2435                     dl_task_is_earliest_deadline(p, this_rq)) {
2828                         WARN_ON(p == src_rq->    2436                         WARN_ON(p == src_rq->curr);
2829                         WARN_ON(!task_on_rq_q    2437                         WARN_ON(!task_on_rq_queued(p));
2830                                                  2438 
2831                         /*                       2439                         /*
2832                          * Then we pull iff p    2440                          * Then we pull iff p has actually an earlier
2833                          * deadline than the     2441                          * deadline than the current task of its runqueue.
2834                          */                      2442                          */
2835                         if (dl_time_before(p-    2443                         if (dl_time_before(p->dl.deadline,
2836                                            sr    2444                                            src_rq->curr->dl.deadline))
2837                                 goto skip;       2445                                 goto skip;
2838                                                  2446 
2839                         if (is_migration_disa    2447                         if (is_migration_disabled(p)) {
2840                                 push_task = g    2448                                 push_task = get_push_task(src_rq);
2841                         } else {                 2449                         } else {
2842                                 deactivate_ta    2450                                 deactivate_task(src_rq, p, 0);
2843                                 set_task_cpu(    2451                                 set_task_cpu(p, this_cpu);
2844                                 activate_task    2452                                 activate_task(this_rq, p, 0);
2845                                 dmin = p->dl.    2453                                 dmin = p->dl.deadline;
2846                                 resched = tru    2454                                 resched = true;
2847                         }                        2455                         }
2848                                                  2456 
2849                         /* Is there any other    2457                         /* Is there any other task even earlier? */
2850                 }                                2458                 }
2851 skip:                                            2459 skip:
2852                 double_unlock_balance(this_rq    2460                 double_unlock_balance(this_rq, src_rq);
2853                                                  2461 
2854                 if (push_task) {                 2462                 if (push_task) {
2855                         preempt_disable();       2463                         preempt_disable();
2856                         raw_spin_rq_unlock(th    2464                         raw_spin_rq_unlock(this_rq);
2857                         stop_one_cpu_nowait(s    2465                         stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2858                                             p    2466                                             push_task, &src_rq->push_work);
2859                         preempt_enable();        2467                         preempt_enable();
2860                         raw_spin_rq_lock(this    2468                         raw_spin_rq_lock(this_rq);
2861                 }                                2469                 }
2862         }                                        2470         }
2863                                                  2471 
2864         if (resched)                             2472         if (resched)
2865                 resched_curr(this_rq);           2473                 resched_curr(this_rq);
2866 }                                                2474 }
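
The pull loop scans the CPUs flagged in dlo_mask and only cares about a remote candidate that both improves on the best deadline pulled so far (dmin) and would preempt whatever this runqueue currently considers earliest. A sketch of that outer filter; names are illustrative and the additional checks against the source runqueue's current task are omitted:

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "a precedes b". */
bool toy_before(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

/*
 * A remote, non-running deadline task is worth pulling only if its
 * deadline beats both the best candidate pulled so far (@dmin) and the
 * earliest deadline currently queued here (when there is one).
 */
bool toy_worth_pulling(uint64_t cand_deadline, uint64_t dmin,
                       bool this_rq_has_dl, uint64_t this_earliest)
{
        if (!toy_before(cand_deadline, dmin))
                return false;

        return !this_rq_has_dl || toy_before(cand_deadline, this_earliest);
}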
2867                                                  2475 
2868 /*                                               2476 /*
2869  * Since the task is not running and a resche    2477  * Since the task is not running and a reschedule is not going to happen
2870  * anytime soon on its runqueue, we try pushi    2478  * anytime soon on its runqueue, we try pushing it away now.
2871  */                                              2479  */
2872 static void task_woken_dl(struct rq *rq, stru    2480 static void task_woken_dl(struct rq *rq, struct task_struct *p)
2873 {                                                2481 {
2874         if (!task_on_cpu(rq, p) &&               2482         if (!task_on_cpu(rq, p) &&
2875             !test_tsk_need_resched(rq->curr)     2483             !test_tsk_need_resched(rq->curr) &&
2876             p->nr_cpus_allowed > 1 &&            2484             p->nr_cpus_allowed > 1 &&
2877             dl_task(rq->curr) &&                 2485             dl_task(rq->curr) &&
2878             (rq->curr->nr_cpus_allowed < 2 ||    2486             (rq->curr->nr_cpus_allowed < 2 ||
2879              !dl_entity_preempt(&p->dl, &rq->    2487              !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
2880                 push_dl_tasks(rq);               2488                 push_dl_tasks(rq);
2881         }                                        2489         }
2882 }                                                2490 }
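
task_woken_dl() only attempts a push when the woken task cannot run here soon: it is not on the CPU, current has no reschedule pending, the waker can migrate, and current is a deadline task that either cannot move out of the way or would not be preempted by the waker. A compact sketch of that predicate; struct and helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

struct toy_dl_task {
        uint64_t deadline;
        int      nr_cpus_allowed;
        bool     on_cpu;
        bool     is_dl;
        bool     need_resched;
};

/* Wraparound-safe "a precedes b". */
bool toy_precedes(uint64_t a, uint64_t b)
{
        return (int64_t)(a - b) < 0;
}

/*
 * Push on wakeup only when @p will not run locally any time soon:
 * @curr keeps the CPU (no resched pending) and either cannot move out
 * of the way or has an earlier deadline than @p.
 */
bool toy_should_push_on_wakeup(const struct toy_dl_task *p,
                               const struct toy_dl_task *curr)
{
        return !p->on_cpu &&
               !curr->need_resched &&
               p->nr_cpus_allowed > 1 &&
               curr->is_dl &&
               (curr->nr_cpus_allowed < 2 ||
                !toy_precedes(p->deadline, curr->deadline));
}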
2883                                                  2491 
2884 static void set_cpus_allowed_dl(struct task_s    2492 static void set_cpus_allowed_dl(struct task_struct *p,
2885                                 struct affini !! 2493                                 const struct cpumask *new_mask,
                                                   >> 2494                                 u32 flags)
2886 {                                                2495 {
2887         struct root_domain *src_rd;              2496         struct root_domain *src_rd;
2888         struct rq *rq;                           2497         struct rq *rq;
2889                                                  2498 
2890         WARN_ON_ONCE(!dl_task(p));               2499         WARN_ON_ONCE(!dl_task(p));
2891                                                  2500 
2892         rq = task_rq(p);                         2501         rq = task_rq(p);
2893         src_rd = rq->rd;                         2502         src_rd = rq->rd;
2894         /*                                       2503         /*
2895          * Migrating a SCHED_DEADLINE task be    2504          * Migrating a SCHED_DEADLINE task between exclusive
2896          * cpusets (different root_domains) e    2505          * cpusets (different root_domains) entails a bandwidth
2897          * update. We already made space for     2506          * update. We already made space for us in the destination
2898          * domain (see cpuset_can_attach()).     2507          * domain (see cpuset_can_attach()).
2899          */                                      2508          */
2900         if (!cpumask_intersects(src_rd->span, !! 2509         if (!cpumask_intersects(src_rd->span, new_mask)) {
2901                 struct dl_bw *src_dl_b;          2510                 struct dl_bw *src_dl_b;
2902                                                  2511 
2903                 src_dl_b = dl_bw_of(cpu_of(rq    2512                 src_dl_b = dl_bw_of(cpu_of(rq));
2904                 /*                               2513                 /*
2905                  * We now free resources of t    2514                  * We now free resources of the root_domain we are migrating
2906                  * off. In the worst case, sc    2515                  * off. In the worst case, sched_setattr() may temporarily fail
2907                  * until we complete the upda    2516                  * until we complete the update.
2908                  */                              2517                  */
2909                 raw_spin_lock(&src_dl_b->lock    2518                 raw_spin_lock(&src_dl_b->lock);
2910                 __dl_sub(src_dl_b, p->dl.dl_b    2519                 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
2911                 raw_spin_unlock(&src_dl_b->lo    2520                 raw_spin_unlock(&src_dl_b->lock);
2912         }                                        2521         }
2913                                                  2522 
2914         set_cpus_allowed_common(p, ctx);      !! 2523         set_cpus_allowed_common(p, new_mask, flags);
2915 }                                                2524 }
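
The bandwidth released here is the task's utilization dl_runtime/dl_period, stored in fixed point, and each root domain keeps a running sum of the utilizations it has admitted. A stand-alone sketch of that bookkeeping; the 20-bit shift matches the kernel's BW_SHIFT, while the names and the omitted per-CPU extra_bw adjustment are simplifications:

#include <stdint.h>
#include <stdio.h>

#define TOY_BW_SHIFT    20      /* fixed-point fraction bits */

/* Task utilization runtime/period as a 20-bit fixed-point fraction. */
uint64_t toy_to_ratio(uint64_t runtime_ns, uint64_t period_ns)
{
        return period_ns ? (runtime_ns << TOY_BW_SHIFT) / period_ns : 0;
}

struct toy_dl_bw {
        uint64_t total_bw;      /* sum of admitted task bandwidths */
};

/* Leaving a root domain releases the task's share there... */
void toy_dl_sub(struct toy_dl_bw *b, uint64_t task_bw)
{
        b->total_bw -= task_bw;
}

/* ...and the destination must have made room for it beforehand. */
void toy_dl_add(struct toy_dl_bw *b, uint64_t task_bw)
{
        b->total_bw += task_bw;
}

int main(void)
{
        struct toy_dl_bw src = { 0 }, dst = { 0 };
        uint64_t bw = toy_to_ratio(10000000ULL, 100000000ULL); /* 10 ms / 100 ms */

        toy_dl_add(&src, bw);   /* task originally admitted on the source       */
        toy_dl_add(&dst, bw);   /* destination made room (cpuset_can_attach())  */
        toy_dl_sub(&src, bw);   /* source releases the share on migration       */

        printf("bw = %llu / %u  src = %llu  dst = %llu\n",
               (unsigned long long)bw, 1u << TOY_BW_SHIFT,
               (unsigned long long)src.total_bw,
               (unsigned long long)dst.total_bw);
        return 0;
}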
2916                                                  2525 
2917 /* Assumes rq->lock is held */                   2526 /* Assumes rq->lock is held */
2918 static void rq_online_dl(struct rq *rq)          2527 static void rq_online_dl(struct rq *rq)
2919 {                                                2528 {
2920         if (rq->dl.overloaded)                   2529         if (rq->dl.overloaded)
2921                 dl_set_overload(rq);             2530                 dl_set_overload(rq);
2922                                                  2531 
2923         cpudl_set_freecpu(&rq->rd->cpudl, rq-    2532         cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
2924         if (rq->dl.dl_nr_running > 0)            2533         if (rq->dl.dl_nr_running > 0)
2925                 cpudl_set(&rq->rd->cpudl, rq-    2534                 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
2926 }                                                2535 }
2927                                                  2536 
2928 /* Assumes rq->lock is held */                   2537 /* Assumes rq->lock is held */
2929 static void rq_offline_dl(struct rq *rq)         2538 static void rq_offline_dl(struct rq *rq)
2930 {                                                2539 {
2931         if (rq->dl.overloaded)                   2540         if (rq->dl.overloaded)
2932                 dl_clear_overload(rq);           2541                 dl_clear_overload(rq);
2933                                                  2542 
2934         cpudl_clear(&rq->rd->cpudl, rq->cpu);    2543         cpudl_clear(&rq->rd->cpudl, rq->cpu);
2935         cpudl_clear_freecpu(&rq->rd->cpudl, r    2544         cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
2936 }                                                2545 }
2937                                                  2546 
2938 void __init init_sched_dl_class(void)            2547 void __init init_sched_dl_class(void)
2939 {                                                2548 {
2940         unsigned int i;                          2549         unsigned int i;
2941                                                  2550 
2942         for_each_possible_cpu(i)                 2551         for_each_possible_cpu(i)
2943                 zalloc_cpumask_var_node(&per_    2552                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2944                                         GFP_K    2553                                         GFP_KERNEL, cpu_to_node(i));
2945 }                                                2554 }
2946                                                  2555 
2947 void dl_add_task_root_domain(struct task_stru    2556 void dl_add_task_root_domain(struct task_struct *p)
2948 {                                                2557 {
2949         struct rq_flags rf;                      2558         struct rq_flags rf;
2950         struct rq *rq;                           2559         struct rq *rq;
2951         struct dl_bw *dl_b;                      2560         struct dl_bw *dl_b;
2952                                                  2561 
2953         raw_spin_lock_irqsave(&p->pi_lock, rf    2562         raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2954         if (!dl_task(p)) {                       2563         if (!dl_task(p)) {
2955                 raw_spin_unlock_irqrestore(&p    2564                 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2956                 return;                          2565                 return;
2957         }                                        2566         }
2958                                                  2567 
2959         rq = __task_rq_lock(p, &rf);             2568         rq = __task_rq_lock(p, &rf);
2960                                                  2569 
2961         dl_b = &rq->rd->dl_bw;                   2570         dl_b = &rq->rd->dl_bw;
2962         raw_spin_lock(&dl_b->lock);              2571         raw_spin_lock(&dl_b->lock);
2963                                                  2572 
2964         __dl_add(dl_b, p->dl.dl_bw, cpumask_w    2573         __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2965                                                  2574 
2966         raw_spin_unlock(&dl_b->lock);            2575         raw_spin_unlock(&dl_b->lock);
2967                                                  2576 
2968         task_rq_unlock(rq, p, &rf);              2577         task_rq_unlock(rq, p, &rf);
2969 }                                                2578 }
2970                                                  2579 
2971 void dl_clear_root_domain(struct root_domain     2580 void dl_clear_root_domain(struct root_domain *rd)
2972 {                                                2581 {
2973         unsigned long flags;                     2582         unsigned long flags;
2974                                                  2583 
2975         raw_spin_lock_irqsave(&rd->dl_bw.lock    2584         raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2976         rd->dl_bw.total_bw = 0;                  2585         rd->dl_bw.total_bw = 0;
2977         raw_spin_unlock_irqrestore(&rd->dl_bw    2586         raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2978 }                                                2587 }
2979                                                  2588 
2980 #endif /* CONFIG_SMP */                          2589 #endif /* CONFIG_SMP */
2981                                                  2590 
2982 static void switched_from_dl(struct rq *rq, s    2591 static void switched_from_dl(struct rq *rq, struct task_struct *p)
2983 {                                                2592 {
2984         /*                                       2593         /*
2985          * task_non_contending() can start th    2594          * task_non_contending() can start the "inactive timer" (if the 0-lag
2986          * time is in the future). If the tas    2595          * time is in the future). If the task switches back to dl before
2987          * the "inactive timer" fires, it can    2596          * the "inactive timer" fires, it can continue to consume its current
2988          * runtime using its current deadline    2597          * runtime using its current deadline. If it stays outside of
2989          * SCHED_DEADLINE until the 0-lag tim    2598          * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2990          * will reset the task parameters.       2599          * will reset the task parameters.
2991          */                                      2600          */
2992         if (task_on_rq_queued(p) && p->dl.dl_    2601         if (task_on_rq_queued(p) && p->dl.dl_runtime)
2993                 task_non_contending(&p->dl);  !! 2602                 task_non_contending(p);
2994                                                  2603 
2995         /*                                       2604         /*
2996          * In case a task is setscheduled out    2605          * In case a task is setscheduled out from SCHED_DEADLINE we need to
2997          * keep track of that on its cpuset (    2606          * keep track of that on its cpuset (for correct bandwidth tracking).
2998          */                                      2607          */
2999         dec_dl_tasks_cs(p);                      2608         dec_dl_tasks_cs(p);
3000                                                  2609 
3001         if (!task_on_rq_queued(p)) {             2610         if (!task_on_rq_queued(p)) {
3002                 /*                               2611                 /*
3003                  * Inactive timer is armed. H    2612                  * Inactive timer is armed. However, p is leaving DEADLINE and
3004                  * might migrate away from th    2613                  * might migrate away from this rq while continuing to run on
3005                  * some other class. We need     2614                  * some other class. We need to remove its contribution from
3006                  * this rq running_bw now, or    2615                  * this rq running_bw now, or sub_rq_bw (below) will complain.
3007                  */                              2616                  */
3008                 if (p->dl.dl_non_contending)     2617                 if (p->dl.dl_non_contending)
3009                         sub_running_bw(&p->dl    2618                         sub_running_bw(&p->dl, &rq->dl);
3010                 sub_rq_bw(&p->dl, &rq->dl);      2619                 sub_rq_bw(&p->dl, &rq->dl);
3011         }                                        2620         }
3012                                                  2621 
3013         /*                                       2622         /*
3014          * We cannot use inactive_task_timer(    2623          * We cannot use inactive_task_timer() to invoke sub_running_bw()
3015          * at the 0-lag time, because the tas    2624          * at the 0-lag time, because the task could have been migrated
3016          * while SCHED_OTHER in the meanwhile    2625          * while SCHED_OTHER in the meanwhile.
3017          */                                      2626          */
3018         if (p->dl.dl_non_contending)             2627         if (p->dl.dl_non_contending)
3019                 p->dl.dl_non_contending = 0;     2628                 p->dl.dl_non_contending = 0;
3020                                                  2629 
3021         /*                                       2630         /*
3022          * Since this might be the only -dead    2631          * Since this might be the only -deadline task on the rq,
3023          * this is the right place to try to     2632          * this is the right place to try to pull some other one
3024          * from an overloaded CPU, if any.       2633          * from an overloaded CPU, if any.
3025          */                                      2634          */
3026         if (!task_on_rq_queued(p) || rq->dl.d    2635         if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
3027                 return;                          2636                 return;
3028                                                  2637 
3029         deadline_queue_pull_task(rq);            2638         deadline_queue_pull_task(rq);
3030 }                                                2639 }
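
The "0-lag time" mentioned above is the instant at which the task's remaining runtime, consumed at its reserved rate dl_runtime/dl_period, would bring its lag to zero; only after that point may its bandwidth be released. A user-space sketch of that computation, mirroring the zerolag_time formula used by task_non_contending() (deadline minus runtime scaled by dl_period/dl_runtime); the helper name, the guard for non-positive runtime, and the sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

/*
 * Zero-lag instant: the current absolute deadline minus the time the
 * remaining runtime represents at the task's reserved rate
 * dl_runtime / dl_period.
 */
uint64_t toy_zero_lag_time(uint64_t deadline, int64_t runtime,
                           uint64_t dl_runtime, uint64_t dl_period)
{
        if (runtime <= 0)
                return deadline;        /* nothing left: lag is already <= 0 */
        return deadline - (uint64_t)runtime * dl_period / dl_runtime;
}

int main(void)
{
        /* 3 ms left of a 10 ms / 100 ms reservation, deadline at t = 250 ms. */
        uint64_t t0 = toy_zero_lag_time(250000000ULL, 3000000LL,
                                        10000000ULL, 100000000ULL);

        printf("zero-lag at %llu ns\n", (unsigned long long)t0); /* 220000000 */
        return 0;
}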
3031                                                  2640 
3032 /*                                               2641 /*
3033  * When switching to -deadline, we may overlo    2642  * When switching to -deadline, we may overload the rq, then
3034  * we try to push someone off, if possible.      2643  * we try to push someone off, if possible.
3035  */                                              2644  */
3036 static void switched_to_dl(struct rq *rq, str    2645 static void switched_to_dl(struct rq *rq, struct task_struct *p)
3037 {                                                2646 {
3038         if (hrtimer_try_to_cancel(&p->dl.inac    2647         if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
3039                 put_task_struct(p);              2648                 put_task_struct(p);
3040                                                  2649 
3041         /*                                       2650         /*
3042          * In case a task is setscheduled to     2651          * In case a task is setscheduled to SCHED_DEADLINE we need to keep
3043          * track of that on its cpuset (for c    2652          * track of that on its cpuset (for correct bandwidth tracking).
3044          */                                      2653          */
3045         inc_dl_tasks_cs(p);                      2654         inc_dl_tasks_cs(p);
3046                                                  2655 
3047         /* If p is not queued we will update     2656         /* If p is not queued we will update its parameters at next wakeup. */
3048         if (!task_on_rq_queued(p)) {             2657         if (!task_on_rq_queued(p)) {
3049                 add_rq_bw(&p->dl, &rq->dl);      2658                 add_rq_bw(&p->dl, &rq->dl);
3050                                                  2659 
3051                 return;                          2660                 return;
3052         }                                        2661         }
3053                                                  2662 
3054         if (rq->curr != p) {                     2663         if (rq->curr != p) {
3055 #ifdef CONFIG_SMP                                2664 #ifdef CONFIG_SMP
3056                 if (p->nr_cpus_allowed > 1 &&    2665                 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
3057                         deadline_queue_push_t    2666                         deadline_queue_push_tasks(rq);
3058 #endif                                           2667 #endif
3059                 if (dl_task(rq->curr))           2668                 if (dl_task(rq->curr))
3060                         wakeup_preempt_dl(rq, !! 2669                         check_preempt_curr_dl(rq, p, 0);
3061                 else                             2670                 else
3062                         resched_curr(rq);        2671                         resched_curr(rq);
3063         } else {                                 2672         } else {
3064                 update_dl_rq_load_avg(rq_cloc    2673                 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
3065         }                                        2674         }
3066 }                                                2675 }
3067                                                  2676 
3068 /*                                               2677 /*
3069  * If the scheduling parameters of a -deadlin    2678  * If the scheduling parameters of a -deadline task changed,
3070  * a push or pull operation might be needed.     2679  * a push or pull operation might be needed.
3071  */                                              2680  */
3072 static void prio_changed_dl(struct rq *rq, st    2681 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
3073                             int oldprio)         2682                             int oldprio)
3074 {                                                2683 {
3075         if (!task_on_rq_queued(p))            !! 2684         if (task_on_rq_queued(p) || task_current(rq, p)) {
3076                 return;                       << 
3077                                               << 
3078 #ifdef CONFIG_SMP                                2685 #ifdef CONFIG_SMP
3079         /*                                    !! 2686                 /*
3080          * This might be too much, but unfort !! 2687                  * This might be too much, but unfortunately
3081          * we don't have the old deadline val !! 2688                  * we don't have the old deadline value, and
3082          * we can't argue if the task is incr !! 2689                  * we can't argue if the task is increasing
3083          * or lowering its prio, so...        !! 2690                  * or lowering its prio, so...
3084          */                                   !! 2691                  */
3085         if (!rq->dl.overloaded)               !! 2692                 if (!rq->dl.overloaded)
3086                 deadline_queue_pull_task(rq); !! 2693                         deadline_queue_pull_task(rq);
3087                                                  2694 
3088         if (task_current(rq, p)) {            << 
3089                 /*                               2695                 /*
3090                  * If we now have an earlier d   2696                  * If we now have an earlier deadline task than p,
3091                  * then reschedule, provided     2697                  * then reschedule, provided p is still on this
3092                  * runqueue.                     2698                  * runqueue.
3093                  */                              2699                  */
3094                 if (dl_time_before(rq->dl.ear    2700                 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
3095                         resched_curr(rq);        2701                         resched_curr(rq);
3096         } else {                              !! 2702 #else
3097                 /*                               2703                 /*
3098                  * Current may not be deadlin !! 2704                  * Again, we don't know if p has an earlier
3099                  * have just replenished it ( !! 2705                  * or later deadline, so let's blindly set a
3100                  *                            !! 2706                  * (maybe not needed) rescheduling point.
3101                  * Otherwise, if p was given  << 
3102                  */                              2707                  */
3103                 if (!dl_task(rq->curr) ||     !! 2708                 resched_curr(rq);
3104                     dl_time_before(p->dl.dead !! 2709 #endif /* CONFIG_SMP */
3105                         resched_curr(rq);     << 
3106         }                                        2710         }
3107 #else                                         << 
3108         /*                                    << 
3109          * We don't know if p has an earlier o << 
3110          * set a (maybe not needed) reschedul << 
3111          */                                   << 
3112         resched_curr(rq);                     << 
3113 #endif                                        << 
3114 }                                             << 
3115                                               << 
3116 #ifdef CONFIG_SCHED_CORE                      << 
3117 static int task_is_throttled_dl(struct task_s << 
3118 {                                             << 
3119         return p->dl.dl_throttled;            << 
3120 }                                                2711 }
3121 #endif                                        << 
3122                                                  2712 
3123 DEFINE_SCHED_CLASS(dl) = {                       2713 DEFINE_SCHED_CLASS(dl) = {
3124                                                  2714 
3125         .enqueue_task           = enqueue_tas    2715         .enqueue_task           = enqueue_task_dl,
3126         .dequeue_task           = dequeue_tas    2716         .dequeue_task           = dequeue_task_dl,
3127         .yield_task             = yield_task_    2717         .yield_task             = yield_task_dl,
3128                                                  2718 
3129         .wakeup_preempt         = wakeup_pree !! 2719         .check_preempt_curr     = check_preempt_curr_dl,
3130                                                  2720 
3131         .pick_task              = pick_task_d !! 2721         .pick_next_task         = pick_next_task_dl,
3132         .put_prev_task          = put_prev_ta    2722         .put_prev_task          = put_prev_task_dl,
3133         .set_next_task          = set_next_ta    2723         .set_next_task          = set_next_task_dl,
3134                                                  2724 
3135 #ifdef CONFIG_SMP                                2725 #ifdef CONFIG_SMP
3136         .balance                = balance_dl,    2726         .balance                = balance_dl,
                                                   >> 2727         .pick_task              = pick_task_dl,
3137         .select_task_rq         = select_task    2728         .select_task_rq         = select_task_rq_dl,
3138         .migrate_task_rq        = migrate_tas    2729         .migrate_task_rq        = migrate_task_rq_dl,
3139         .set_cpus_allowed       = set_cpus_al    2730         .set_cpus_allowed       = set_cpus_allowed_dl,
3140         .rq_online              = rq_online_d    2731         .rq_online              = rq_online_dl,
3141         .rq_offline             = rq_offline_    2732         .rq_offline             = rq_offline_dl,
3142         .task_woken             = task_woken_    2733         .task_woken             = task_woken_dl,
3143         .find_lock_rq           = find_lock_l    2734         .find_lock_rq           = find_lock_later_rq,
3144 #endif                                           2735 #endif
3145                                                  2736 
3146         .task_tick              = task_tick_d    2737         .task_tick              = task_tick_dl,
3147         .task_fork              = task_fork_d    2738         .task_fork              = task_fork_dl,
3148                                                  2739 
3149         .prio_changed           = prio_change    2740         .prio_changed           = prio_changed_dl,
3150         .switched_from          = switched_fr    2741         .switched_from          = switched_from_dl,
3151         .switched_to            = switched_to    2742         .switched_to            = switched_to_dl,
3152                                                  2743 
3153         .update_curr            = update_curr    2744         .update_curr            = update_curr_dl,
3154 #ifdef CONFIG_SCHED_CORE                      << 
3155         .task_is_throttled      = task_is_thr << 
3156 #endif                                        << 
3157 };                                               2745 };
3158                                                  2746 
3159 /* Used for dl_bw check and update, used unde    2747 /* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
3160 static u64 dl_generation;                        2748 static u64 dl_generation;
3161                                                  2749 
3162 int sched_dl_global_validate(void)               2750 int sched_dl_global_validate(void)
3163 {                                                2751 {
3164         u64 runtime = global_rt_runtime();       2752         u64 runtime = global_rt_runtime();
3165         u64 period = global_rt_period();         2753         u64 period = global_rt_period();
3166         u64 new_bw = to_ratio(period, runtime    2754         u64 new_bw = to_ratio(period, runtime);
3167         u64 gen = ++dl_generation;               2755         u64 gen = ++dl_generation;
3168         struct dl_bw *dl_b;                      2756         struct dl_bw *dl_b;
3169         int cpu, cpus, ret = 0;                  2757         int cpu, cpus, ret = 0;
3170         unsigned long flags;                     2758         unsigned long flags;
3171                                                  2759 
3172         /*                                       2760         /*
3173          * Here we want to check the bandwidt    2761          * Here we want to check the bandwidth not being set to some
3174          * value smaller than the currently a    2762          * value smaller than the currently allocated bandwidth in
3175          * any of the root_domains.              2763          * any of the root_domains.
3176          */                                      2764          */
3177         for_each_possible_cpu(cpu) {             2765         for_each_possible_cpu(cpu) {
3178                 rcu_read_lock_sched();           2766                 rcu_read_lock_sched();
3179                                                  2767 
3180                 if (dl_bw_visited(cpu, gen))     2768                 if (dl_bw_visited(cpu, gen))
3181                         goto next;               2769                         goto next;
3182                                                  2770 
3183                 dl_b = dl_bw_of(cpu);            2771                 dl_b = dl_bw_of(cpu);
3184                 cpus = dl_bw_cpus(cpu);          2772                 cpus = dl_bw_cpus(cpu);
3185                                                  2773 
3186                 raw_spin_lock_irqsave(&dl_b->    2774                 raw_spin_lock_irqsave(&dl_b->lock, flags);
3187                 if (new_bw * cpus < dl_b->tot    2775                 if (new_bw * cpus < dl_b->total_bw)
3188                         ret = -EBUSY;            2776                         ret = -EBUSY;
3189                 raw_spin_unlock_irqrestore(&d    2777                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3190                                                  2778 
3191 next:                                            2779 next:
3192                 rcu_read_unlock_sched();         2780                 rcu_read_unlock_sched();
3193                                                  2781 
3194                 if (ret)                         2782                 if (ret)
3195                         break;                   2783                         break;
3196         }                                        2784         }
3197                                                  2785 
3198         return ret;                              2786         return ret;
3199 }                                                2787 }
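The -EBUSY check above is plain fixed-point arithmetic: the proposed global bandwidth, scaled by the number of CPUs in the root domain, must still cover what is already allocated there. A rough userspace sketch of the same computation, assuming the usual BW_SHIFT of 20 used by to_ratio(); the concrete numbers are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

/* Same idea as the kernel's to_ratio(): runtime/period in Q20 fixed point. */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        /* e.g. sched_rt_runtime_us = 950000 over sched_rt_period_us = 1000000 */
        uint64_t new_bw   = to_ratio(1000000, 950000);  /* ~0.95 in Q20 */
        uint64_t total_bw = 2ULL << BW_SHIFT;           /* two CPUs' worth already reserved */
        int      cpus     = 4;

        /* The validation refuses a limit that undercuts current reservations. */
        if (new_bw * cpus < total_bw)
                printf("-EBUSY: new limit below currently allocated DL bandwidth\n");
        else
                printf("ok: %llu * %d >= %llu\n", (unsigned long long)new_bw,
                       cpus, (unsigned long long)total_bw);
        return 0;
}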
3200                                                  2788 
3201 static void init_dl_rq_bw_ratio(struct dl_rq     2789 static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
3202 {                                                2790 {
3203         if (global_rt_runtime() == RUNTIME_IN    2791         if (global_rt_runtime() == RUNTIME_INF) {
3204                 dl_rq->bw_ratio = 1 << RATIO_    2792                 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
3205                 dl_rq->max_bw = dl_rq->extra_ !! 2793                 dl_rq->extra_bw = 1 << BW_SHIFT;
3206         } else {                                 2794         } else {
3207                 dl_rq->bw_ratio = to_ratio(gl    2795                 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
3208                           global_rt_period())    2796                           global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
3209                 dl_rq->max_bw = dl_rq->extra_ !! 2797                 dl_rq->extra_bw = to_ratio(global_rt_period(),
3210                         to_ratio(global_rt_pe !! 2798                                                     global_rt_runtime());
3211         }                                        2799         }
3212 }                                                2800 }
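With the default rt limits (950000us of runtime every 1000000us), the two fields come out to roughly 1/0.95 in Q8 and 0.95 in Q20. A small worked example, assuming BW_SHIFT = 20 and RATIO_SHIFT = 8 as in the scheduler headers:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT    20
#define RATIO_SHIFT 8

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        uint64_t rt_runtime = 950000, rt_period = 1000000;      /* microseconds */

        /* Inverse ratio (period/runtime) in Q8, used to inflate GRUB accounting. */
        uint64_t bw_ratio = to_ratio(rt_runtime, rt_period) >> (BW_SHIFT - RATIO_SHIFT);
        /* Direct ratio (runtime/period) in Q20, the per-rq bandwidth ceiling. */
        uint64_t max_bw   = to_ratio(rt_period, rt_runtime);

        printf("bw_ratio = %llu (~%.3f)\n", (unsigned long long)bw_ratio,
               bw_ratio / (double)(1 << RATIO_SHIFT));          /* ~1.051 */
        printf("max_bw   = %llu (~%.3f)\n", (unsigned long long)max_bw,
               max_bw / (double)(1 << BW_SHIFT));               /* ~0.950 */
        return 0;
}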
3213                                                  2801 
3214 void sched_dl_do_global(void)                    2802 void sched_dl_do_global(void)
3215 {                                                2803 {
3216         u64 new_bw = -1;                         2804         u64 new_bw = -1;
3217         u64 gen = ++dl_generation;               2805         u64 gen = ++dl_generation;
3218         struct dl_bw *dl_b;                      2806         struct dl_bw *dl_b;
3219         int cpu;                                 2807         int cpu;
3220         unsigned long flags;                     2808         unsigned long flags;
3221                                                  2809 
3222         if (global_rt_runtime() != RUNTIME_IN    2810         if (global_rt_runtime() != RUNTIME_INF)
3223                 new_bw = to_ratio(global_rt_p    2811                 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
3224                                                  2812 
3225         for_each_possible_cpu(cpu) {             2813         for_each_possible_cpu(cpu) {
3226                 rcu_read_lock_sched();           2814                 rcu_read_lock_sched();
3227                                                  2815 
3228                 if (dl_bw_visited(cpu, gen))     2816                 if (dl_bw_visited(cpu, gen)) {
3229                         rcu_read_unlock_sched    2817                         rcu_read_unlock_sched();
3230                         continue;                2818                         continue;
3231                 }                                2819                 }
3232                                                  2820 
3233                 dl_b = dl_bw_of(cpu);            2821                 dl_b = dl_bw_of(cpu);
3234                                                  2822 
3235                 raw_spin_lock_irqsave(&dl_b->    2823                 raw_spin_lock_irqsave(&dl_b->lock, flags);
3236                 dl_b->bw = new_bw;               2824                 dl_b->bw = new_bw;
3237                 raw_spin_unlock_irqrestore(&d    2825                 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3238                                                  2826 
3239                 rcu_read_unlock_sched();         2827                 rcu_read_unlock_sched();
3240                 init_dl_rq_bw_ratio(&cpu_rq(c    2828                 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
3241         }                                        2829         }
3242 }                                                2830 }
3243                                                  2831 
3244 /*                                               2832 /*
3245  * We must be sure that accepting a new task     2833  * We must be sure that accepting a new task (or allowing changing the
3246  * parameters of an existing one) is consiste    2834  * parameters of an existing one) is consistent with the bandwidth
3247  * constraints. If yes, this function also ac    2835  * constraints. If yes, this function also accordingly updates the currently
3248  * allocated bandwidth to reflect the new sit    2836  * allocated bandwidth to reflect the new situation.
3249  *                                               2837  *
3250  * This function is called while holding p's     2838  * This function is called while holding p's rq->lock.
3251  */                                              2839  */
3252 int sched_dl_overflow(struct task_struct *p,     2840 int sched_dl_overflow(struct task_struct *p, int policy,
3253                       const struct sched_attr    2841                       const struct sched_attr *attr)
3254 {                                                2842 {
3255         u64 period = attr->sched_period ?: at    2843         u64 period = attr->sched_period ?: attr->sched_deadline;
3256         u64 runtime = attr->sched_runtime;       2844         u64 runtime = attr->sched_runtime;
3257         u64 new_bw = dl_policy(policy) ? to_r    2845         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
3258         int cpus, err = -1, cpu = task_cpu(p)    2846         int cpus, err = -1, cpu = task_cpu(p);
3259         struct dl_bw *dl_b = dl_bw_of(cpu);      2847         struct dl_bw *dl_b = dl_bw_of(cpu);
3260         unsigned long cap;                       2848         unsigned long cap;
3261                                                  2849 
3262         if (attr->sched_flags & SCHED_FLAG_SU    2850         if (attr->sched_flags & SCHED_FLAG_SUGOV)
3263                 return 0;                        2851                 return 0;
3264                                                  2852 
3265         /* !deadline task may carry old deadl    2853         /* !deadline task may carry old deadline bandwidth */
3266         if (new_bw == p->dl.dl_bw && task_has    2854         if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
3267                 return 0;                        2855                 return 0;
3268                                                  2856 
3269         /*                                       2857         /*
3270          * Either if a task, enters, leave, o    2858          * Either if a task, enters, leave, or stays -deadline but changes
3271          * its parameters, we may need to upd    2859          * its parameters, we may need to update accordingly the total
3272          * allocated bandwidth of the contain    2860          * allocated bandwidth of the container.
3273          */                                      2861          */
3274         raw_spin_lock(&dl_b->lock);              2862         raw_spin_lock(&dl_b->lock);
3275         cpus = dl_bw_cpus(cpu);                  2863         cpus = dl_bw_cpus(cpu);
3276         cap = dl_bw_capacity(cpu);               2864         cap = dl_bw_capacity(cpu);
3277                                                  2865 
3278         if (dl_policy(policy) && !task_has_dl    2866         if (dl_policy(policy) && !task_has_dl_policy(p) &&
3279             !__dl_overflow(dl_b, cap, 0, new_    2867             !__dl_overflow(dl_b, cap, 0, new_bw)) {
3280                 if (hrtimer_active(&p->dl.ina    2868                 if (hrtimer_active(&p->dl.inactive_timer))
3281                         __dl_sub(dl_b, p->dl.    2869                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
3282                 __dl_add(dl_b, new_bw, cpus);    2870                 __dl_add(dl_b, new_bw, cpus);
3283                 err = 0;                         2871                 err = 0;
3284         } else if (dl_policy(policy) && task_    2872         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
3285                    !__dl_overflow(dl_b, cap,     2873                    !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
3286                 /*                               2874                 /*
3287                  * XXX this is slightly incor    2875                  * XXX this is slightly incorrect: when the task
3288                  * utilization decreases, we     2876                  * utilization decreases, we should delay the total
3289                  * utilization change until t    2877                  * utilization change until the task's 0-lag point.
3290                  * But this would require to     2878                  * But this would require to set the task's "inactive
3291                  * timer" when the task is no    2879                  * timer" when the task is not inactive.
3292                  */                              2880                  */
3293                 __dl_sub(dl_b, p->dl.dl_bw, c    2881                 __dl_sub(dl_b, p->dl.dl_bw, cpus);
3294                 __dl_add(dl_b, new_bw, cpus);    2882                 __dl_add(dl_b, new_bw, cpus);
3295                 dl_change_utilization(p, new_    2883                 dl_change_utilization(p, new_bw);
3296                 err = 0;                         2884                 err = 0;
3297         } else if (!dl_policy(policy) && task    2885         } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
3298                 /*                               2886                 /*
3299                  * Do not decrease the total     2887                  * Do not decrease the total deadline utilization here,
3300                  * switched_from_dl() will ta    2888                  * switched_from_dl() will take care to do it at the correct
3301                  * (0-lag) time.                 2889                  * (0-lag) time.
3302                  */                              2890                  */
3303                 err = 0;                         2891                 err = 0;
3304         }                                        2892         }
3305         raw_spin_unlock(&dl_b->lock);            2893         raw_spin_unlock(&dl_b->lock);
3306                                                  2894 
3307         return err;                              2895         return err;
3308 }                                                2896 }
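Seen from userspace, this admission control is what turns sched_setattr() into -EBUSY when the requested bandwidth does not fit the root domain. A hedged example of requesting a reservation through the raw syscall (glibc has no wrapper); it needs root or CAP_SYS_NICE, and the 10ms/30ms/100ms figures are arbitrary:

#define _GNU_SOURCE
#include <linux/sched.h>
#include <linux/sched/types.h>   /* struct sched_attr on recent kernels */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct sched_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.sched_policy   = SCHED_DEADLINE;
        attr.sched_runtime  =  10 * 1000 * 1000;        /* 10ms  */
        attr.sched_deadline =  30 * 1000 * 1000;        /* 30ms  */
        attr.sched_period   = 100 * 1000 * 1000;        /* 100ms */

        if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
                /* EBUSY here comes from the admission control above. */
                perror("sched_setattr");
                return 1;
        }
        printf("admitted: ~10%% of one CPU reserved\n");
        return 0;
}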
3309                                                  2897 
3310 /*                                               2898 /*
3311  * This function initializes the sched_dl_ent    2899  * This function initializes the sched_dl_entity of a newly becoming
3312  * SCHED_DEADLINE task.                          2900  * SCHED_DEADLINE task.
3313  *                                               2901  *
3314  * Only the static values are considered here    2902  * Only the static values are considered here, the actual runtime and the
3315  * absolute deadline will be properly calcula    2903  * absolute deadline will be properly calculated when the task is enqueued
3316  * for the first time with its new policy.       2904  * for the first time with its new policy.
3317  */                                              2905  */
3318 void __setparam_dl(struct task_struct *p, con    2906 void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3319 {                                                2907 {
3320         struct sched_dl_entity *dl_se = &p->d    2908         struct sched_dl_entity *dl_se = &p->dl;
3321                                                  2909 
3322         dl_se->dl_runtime = attr->sched_runti    2910         dl_se->dl_runtime = attr->sched_runtime;
3323         dl_se->dl_deadline = attr->sched_dead    2911         dl_se->dl_deadline = attr->sched_deadline;
3324         dl_se->dl_period = attr->sched_period    2912         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
3325         dl_se->flags = attr->sched_flags & SC    2913         dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
3326         dl_se->dl_bw = to_ratio(dl_se->dl_per    2914         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3327         dl_se->dl_density = to_ratio(dl_se->d    2915         dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3328 }                                                2916 }
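For hypothetical parameters of runtime = 10ms, deadline = 30ms and period = 100ms, the two Q20 fixed-point values computed above work out to (same to_ratio() convention as in the sketches above; numbers purely illustrative):

        dl_bw      = (10000000 << 20) / 100000000 ≈ 104857   (~0.10 of a CPU per period)
        dl_density = (10000000 << 20) /  30000000 ≈ 349525   (~0.33 of a CPU within the deadline)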
3329                                                  2917 
3330 void __getparam_dl(struct task_struct *p, str    2918 void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3331 {                                                2919 {
3332         struct sched_dl_entity *dl_se = &p->d    2920         struct sched_dl_entity *dl_se = &p->dl;
3333                                                  2921 
3334         attr->sched_priority = p->rt_priority    2922         attr->sched_priority = p->rt_priority;
3335         attr->sched_runtime = dl_se->dl_runti    2923         attr->sched_runtime = dl_se->dl_runtime;
3336         attr->sched_deadline = dl_se->dl_dead    2924         attr->sched_deadline = dl_se->dl_deadline;
3337         attr->sched_period = dl_se->dl_period    2925         attr->sched_period = dl_se->dl_period;
3338         attr->sched_flags &= ~SCHED_DL_FLAGS;    2926         attr->sched_flags &= ~SCHED_DL_FLAGS;
3339         attr->sched_flags |= dl_se->flags;       2927         attr->sched_flags |= dl_se->flags;
3340 }                                                2928 }
3341                                                  2929 
3342 /*                                               2930 /*
3343  * This function validates the new parameters    2931  * This function validates the new parameters of a -deadline task.
3344  * We ask for the deadline not being zero, an    2932  * We ask for the deadline not being zero, and greater or equal
3345  * than the runtime, as well as the period of    2933  * than the runtime, as well as the period of being zero or
3346  * greater than deadline. Furthermore, we hav    2934  * greater than deadline. Furthermore, we have to be sure that
3347  * user parameters are above the internal res    2935  * user parameters are above the internal resolution of 1us (we
3348  * check sched_runtime only since it is alway    2936  * check sched_runtime only since it is always the smaller one) and
3349  * below 2^63 ns (we have to check both sched    2937  * below 2^63 ns (we have to check both sched_deadline and
3350  * sched_period, as the latter can be zero).     2938  * sched_period, as the latter can be zero).
3351  */                                              2939  */
3352 bool __checkparam_dl(const struct sched_attr     2940 bool __checkparam_dl(const struct sched_attr *attr)
3353 {                                                2941 {
3354         u64 period, max, min;                    2942         u64 period, max, min;
3355                                                  2943 
3356         /* special dl tasks don't actually us    2944         /* special dl tasks don't actually use any parameter */
3357         if (attr->sched_flags & SCHED_FLAG_SU    2945         if (attr->sched_flags & SCHED_FLAG_SUGOV)
3358                 return true;                     2946                 return true;
3359                                                  2947 
3360         /* deadline != 0 */                      2948         /* deadline != 0 */
3361         if (attr->sched_deadline == 0)           2949         if (attr->sched_deadline == 0)
3362                 return false;                    2950                 return false;
3363                                                  2951 
3364         /*                                       2952         /*
3365          * Since we truncate DL_SCALE bits, m    2953          * Since we truncate DL_SCALE bits, make sure we're at least
3366          * that big.                             2954          * that big.
3367          */                                      2955          */
3368         if (attr->sched_runtime < (1ULL << DL    2956         if (attr->sched_runtime < (1ULL << DL_SCALE))
3369                 return false;                    2957                 return false;
3370                                                  2958 
3371         /*                                       2959         /*
3372          * Since we use the MSB for wrap-arou    2960          * Since we use the MSB for wrap-around and sign issues, make
3373          * sure it's not set (mind that perio    2961          * sure it's not set (mind that period can be equal to zero).
3374          */                                      2962          */
3375         if (attr->sched_deadline & (1ULL << 6    2963         if (attr->sched_deadline & (1ULL << 63) ||
3376             attr->sched_period & (1ULL << 63)    2964             attr->sched_period & (1ULL << 63))
3377                 return false;                    2965                 return false;
3378                                                  2966 
3379         period = attr->sched_period;             2967         period = attr->sched_period;
3380         if (!period)                             2968         if (!period)
3381                 period = attr->sched_deadline    2969                 period = attr->sched_deadline;
3382                                                  2970 
3383         /* runtime <= deadline <= period (if     2971         /* runtime <= deadline <= period (if period != 0) */
3384         if (period < attr->sched_deadline ||     2972         if (period < attr->sched_deadline ||
3385             attr->sched_deadline < attr->sche    2973             attr->sched_deadline < attr->sched_runtime)
3386                 return false;                    2974                 return false;
3387                                                  2975 
3388         max = (u64)READ_ONCE(sysctl_sched_dl_    2976         max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3389         min = (u64)READ_ONCE(sysctl_sched_dl_    2977         min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3390                                                  2978 
3391         if (period < min || period > max)        2979         if (period < min || period > max)
3392                 return false;                    2980                 return false;
3393                                                  2981 
3394         return true;                             2982         return true;
3395 }                                                2983 }
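Concretely, the rules above require a runtime of at least 1 << DL_SCALE (1024ns, i.e. about 1us), runtime <= deadline <= period, no value with bit 63 set, and a period inside the sysctl window (roughly 100us to ~4s with the usual defaults). A userspace mirror of those checks, with the sysctl defaults hard-coded as assumptions:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DL_SCALE 10
/* Assumed defaults for the period sysctls, in microseconds. */
#define PERIOD_MIN_US 100ULL
#define PERIOD_MAX_US (1ULL << 22)

/* Userspace mirror of the validation above; all arguments in nanoseconds. */
static bool checkparam_dl(uint64_t runtime, uint64_t deadline, uint64_t period)
{
        if (deadline == 0)
                return false;
        if (runtime < (1ULL << DL_SCALE))               /* below ~1us resolution */
                return false;
        if ((deadline | period) & (1ULL << 63))         /* MSB is reserved */
                return false;
        if (!period)
                period = deadline;
        if (period < deadline || deadline < runtime)    /* runtime <= deadline <= period */
                return false;
        return period >= PERIOD_MIN_US * 1000 && period <= PERIOD_MAX_US * 1000;
}

int main(void)
{
        printf("%d\n", checkparam_dl(2000, 200000, 0));         /* 1: ok                 */
        printf("%d\n", checkparam_dl(500, 200000, 400000));     /* 0: runtime < 1024ns   */
        printf("%d\n", checkparam_dl(5000000, 2000000, 0));     /* 0: deadline < runtime */
        return 0;
}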
3396                                                  2984 
3397 /*                                               2985 /*
3398  * This function clears the sched_dl_entity s    2986  * This function clears the sched_dl_entity static params.
3399  */                                              2987  */
3400 static void __dl_clear_params(struct sched_dl !! 2988 void __dl_clear_params(struct task_struct *p)
3401 {                                                2989 {
                                                   >> 2990         struct sched_dl_entity *dl_se = &p->dl;
                                                   >> 2991 
3402         dl_se->dl_runtime               = 0;     2992         dl_se->dl_runtime               = 0;
3403         dl_se->dl_deadline              = 0;     2993         dl_se->dl_deadline              = 0;
3404         dl_se->dl_period                = 0;     2994         dl_se->dl_period                = 0;
3405         dl_se->flags                    = 0;     2995         dl_se->flags                    = 0;
3406         dl_se->dl_bw                    = 0;     2996         dl_se->dl_bw                    = 0;
3407         dl_se->dl_density               = 0;     2997         dl_se->dl_density               = 0;
3408                                                  2998 
3409         dl_se->dl_throttled             = 0;     2999         dl_se->dl_throttled             = 0;
3410         dl_se->dl_yielded               = 0;     3000         dl_se->dl_yielded               = 0;
3411         dl_se->dl_non_contending        = 0;     3001         dl_se->dl_non_contending        = 0;
3412         dl_se->dl_overrun               = 0;     3002         dl_se->dl_overrun               = 0;
3413         dl_se->dl_server                = 0;  << 
3414                                                  3003 
3415 #ifdef CONFIG_RT_MUTEXES                         3004 #ifdef CONFIG_RT_MUTEXES
3416         dl_se->pi_se                    = dl_    3005         dl_se->pi_se                    = dl_se;
3417 #endif                                           3006 #endif
3418 }                                             << 
3419                                               << 
3420 void init_dl_entity(struct sched_dl_entity *d << 
3421 {                                             << 
3422         RB_CLEAR_NODE(&dl_se->rb_node);       << 
3423         init_dl_task_timer(dl_se);            << 
3424         init_dl_inactive_task_timer(dl_se);   << 
3425         __dl_clear_params(dl_se);             << 
3426 }                                                3007 }
3427                                                  3008 
3428 bool dl_param_changed(struct task_struct *p,     3009 bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3429 {                                                3010 {
3430         struct sched_dl_entity *dl_se = &p->d    3011         struct sched_dl_entity *dl_se = &p->dl;
3431                                                  3012 
3432         if (dl_se->dl_runtime != attr->sched_    3013         if (dl_se->dl_runtime != attr->sched_runtime ||
3433             dl_se->dl_deadline != attr->sched    3014             dl_se->dl_deadline != attr->sched_deadline ||
3434             dl_se->dl_period != attr->sched_p    3015             dl_se->dl_period != attr->sched_period ||
3435             dl_se->flags != (attr->sched_flag    3016             dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
3436                 return true;                     3017                 return true;
3437                                                  3018 
3438         return false;                            3019         return false;
3439 }                                                3020 }
3440                                                  3021 
3441 #ifdef CONFIG_SMP                                3022 #ifdef CONFIG_SMP
3442 int dl_cpuset_cpumask_can_shrink(const struct    3023 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3443                                  const struct    3024                                  const struct cpumask *trial)
3444 {                                                3025 {
3445         unsigned long flags, cap;                3026         unsigned long flags, cap;
3446         struct dl_bw *cur_dl_b;                  3027         struct dl_bw *cur_dl_b;
3447         int ret = 1;                             3028         int ret = 1;
3448                                                  3029 
3449         rcu_read_lock_sched();                   3030         rcu_read_lock_sched();
3450         cur_dl_b = dl_bw_of(cpumask_any(cur))    3031         cur_dl_b = dl_bw_of(cpumask_any(cur));
3451         cap = __dl_bw_capacity(trial);           3032         cap = __dl_bw_capacity(trial);
3452         raw_spin_lock_irqsave(&cur_dl_b->lock    3033         raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
3453         if (__dl_overflow(cur_dl_b, cap, 0, 0    3034         if (__dl_overflow(cur_dl_b, cap, 0, 0))
3454                 ret = 0;                         3035                 ret = 0;
3455         raw_spin_unlock_irqrestore(&cur_dl_b-    3036         raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3456         rcu_read_unlock_sched();                 3037         rcu_read_unlock_sched();
3457                                                  3038 
3458         return ret;                              3039         return ret;
3459 }                                                3040 }
3460                                                  3041 
3461 enum dl_bw_request {                             3042 enum dl_bw_request {
3462         dl_bw_req_check_overflow = 0,            3043         dl_bw_req_check_overflow = 0,
3463         dl_bw_req_alloc,                         3044         dl_bw_req_alloc,
3464         dl_bw_req_free                           3045         dl_bw_req_free
3465 };                                               3046 };
3466                                                  3047 
3467 static int dl_bw_manage(enum dl_bw_request re    3048 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
3468 {                                                3049 {
3469         unsigned long flags;                     3050         unsigned long flags;
3470         struct dl_bw *dl_b;                      3051         struct dl_bw *dl_b;
3471         bool overflow = 0;                       3052         bool overflow = 0;
3472                                                  3053 
3473         rcu_read_lock_sched();                   3054         rcu_read_lock_sched();
3474         dl_b = dl_bw_of(cpu);                    3055         dl_b = dl_bw_of(cpu);
3475         raw_spin_lock_irqsave(&dl_b->lock, fl    3056         raw_spin_lock_irqsave(&dl_b->lock, flags);
3476                                                  3057 
3477         if (req == dl_bw_req_free) {             3058         if (req == dl_bw_req_free) {
3478                 __dl_sub(dl_b, dl_bw, dl_bw_c    3059                 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3479         } else {                                 3060         } else {
3480                 unsigned long cap = dl_bw_cap    3061                 unsigned long cap = dl_bw_capacity(cpu);
3481                                                  3062 
3482                 overflow = __dl_overflow(dl_b    3063                 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3483                                                  3064 
3484                 if (req == dl_bw_req_alloc &&    3065                 if (req == dl_bw_req_alloc && !overflow) {
3485                         /*                       3066                         /*
3486                          * We reserve space i    3067                          * We reserve space in the destination
3487                          * root_domain, as we    3068                          * root_domain, as we can't fail after this point.
3488                          * We will free resou    3069                          * We will free resources in the source root_domain
3489                          * later on (see set_    3070                          * later on (see set_cpus_allowed_dl()).
3490                          */                      3071                          */
3491                         __dl_add(dl_b, dl_bw,    3072                         __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3492                 }                                3073                 }
3493         }                                        3074         }
3494                                                  3075 
3495         raw_spin_unlock_irqrestore(&dl_b->loc    3076         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3496         rcu_read_unlock_sched();                 3077         rcu_read_unlock_sched();
3497                                                  3078 
3498         return overflow ? -EBUSY : 0;            3079         return overflow ? -EBUSY : 0;
3499 }                                                3080 }
3500                                                  3081 
3501 int dl_bw_check_overflow(int cpu)                3082 int dl_bw_check_overflow(int cpu)
3502 {                                                3083 {
3503         return dl_bw_manage(dl_bw_req_check_o    3084         return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3504 }                                                3085 }
3505                                                  3086 
3506 int dl_bw_alloc(int cpu, u64 dl_bw)              3087 int dl_bw_alloc(int cpu, u64 dl_bw)
3507 {                                                3088 {
3508         return dl_bw_manage(dl_bw_req_alloc,     3089         return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3509 }                                                3090 }
3510                                                  3091 
3511 void dl_bw_free(int cpu, u64 dl_bw)              3092 void dl_bw_free(int cpu, u64 dl_bw)
3512 {                                                3093 {
3513         dl_bw_manage(dl_bw_req_free, cpu, dl_    3094         dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3514 }                                                3095 }
3515 #endif                                           3096 #endif
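dl_bw_alloc() and dl_bw_free() implement a reserve-first protocol: space is claimed in the destination root domain before a task is actually moved (so the move cannot fail afterwards), and the source side is only released later. A toy userspace model of that accounting, purely illustrative and unrelated to the real struct dl_bw internals:

#include <stdint.h>
#include <stdio.h>

struct toy_dl_bw { uint64_t bw, total_bw; };    /* per-root-domain accounting */

static int toy_alloc(struct toy_dl_bw *dst, uint64_t task_bw)
{
        if (dst->total_bw + task_bw > dst->bw)
                return -1;                      /* would overflow: refuse up front */
        dst->total_bw += task_bw;               /* reserve before the task moves   */
        return 0;
}

static void toy_free(struct toy_dl_bw *src, uint64_t task_bw)
{
        src->total_bw -= task_bw;               /* released only after the move    */
}

int main(void)
{
        struct toy_dl_bw src = { .bw = 1 << 20, .total_bw = 300000 };
        struct toy_dl_bw dst = { .bw = 1 << 20, .total_bw = 700000 };
        uint64_t task_bw = 200000;              /* ~0.19 of a CPU in Q20 */

        if (toy_alloc(&dst, task_bw)) {         /* step 1: claim in the destination */
                puts("-EBUSY: destination root domain is full");
                return 1;
        }
        /* step 2: migrate the task (elided) */
        toy_free(&src, task_bw);                /* step 3: give the source back its share */
        puts("reservation moved");
        return 0;
}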
3516                                                  3097 
3517 #ifdef CONFIG_SCHED_DEBUG                        3098 #ifdef CONFIG_SCHED_DEBUG
3518 void print_dl_stats(struct seq_file *m, int c    3099 void print_dl_stats(struct seq_file *m, int cpu)
3519 {                                                3100 {
3520         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl)    3101         print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3521 }                                                3102 }
3522 #endif /* CONFIG_SCHED_DEBUG */                  3103 #endif /* CONFIG_SCHED_DEBUG */
3523                                                  3104 
