
TOMOYO Linux Cross Reference
Linux/tools/sched_ext/scx_central.bpf.c

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward design; it would, e.g., be simpler to
 *    bounce tasks through per-CPU BPF queues. The current design was chosen to
 *    maximally utilize and verify various SCX mechanisms such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with an infinite slice, which allows stopping
 *    the ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/interrupts.
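 *
 *    For example, booting with "nohz_full=1-7" on an eight-CPU machine (with
 *    CONFIG_NO_HZ_FULL=y) lets CPUs 1-7 stop their ticks, while CPU 0, a
 *    natural choice for the central CPU, keeps its tick.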
 *
 *    Periodic switching is enforced by a periodic timer which checks all CPUs
 *    and preempts them as necessary. The timer is armed with
 *    BPF_F_TIMER_CPU_PIN so that it stays on the central CPU; on kernels
 *    predating that flag (< 6.7), pinning isn't available and the timer may
 *    run on any CPU (see central_init() and timer_pinned).
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is
 *    always prioritized over user threads, which is required for ensuring
 *    forward progress as e.g. the periodic timer may run on a ksoftirqd and
 *    if the ksoftirqd gets starved by a user thread, there may be nothing
 *    else that can preempt that user thread off the CPU.
 *
 *    SCX_KICK_PREEMPT is used to trigger rescheduling so that CPUs move on to
 *    their next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
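
/*
 * Example invocation (a sketch; the -c/-s options below are assumed to match
 * the accompanying scx_central userspace loader, see scx_central.c for the
 * authoritative interface):
 *
 *	$ scx_central -c 0 -s 20000	# central CPU 0, 20ms (20000us) slice
 */
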
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID		= 0,
	MS_TO_NS		= 1000LLU * 1000,
	TIMER_INTERVAL_NS	= 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns = SCX_SLICE_DFL;

bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);

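/*
 * The single global FIFO: enqueue() pushes runnable pids and the central CPU
 * pops them in dispatch_to_cpu().
 */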
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use a percpu map as the central CPU needs to access all CPUs' slots */
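/*
 * cpu_gimme_task[cpu]: @cpu has run out of work and wants the central CPU to
 * dispatch to it. cpu_started_at[cpu]: timestamp of when the current task
 * started running, 0 if the CPU is idle.
 */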
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);

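/* the periodic preemption timer, kept in a single-entry array map */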
struct central_timer {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");

static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central CPU
	 * as select_cpu() is a hint and if @p can't be on it, the kernel will
	 * automatically pick a fallback CPU.
	 */
	return central_cpu;
}

void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads, which is necessary for the forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				 enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

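	/*
	 * If @p is currently running, its CPU will go through dispatch() when
	 * the task stops; otherwise, poke the central CPU so that it notices
	 * the newly queued task.
	 */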
	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}

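/*
 * Pop pids off the global queue until a task that can run on @cpu is found
 * and dispatch it to @cpu's local dsq. Returns %true if a task was dispatched
 * to @cpu, %false otherwise.
 */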
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In that case,
			 * break out of the loop now, as the next dispatch
			 * operation would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}

void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

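		/* scan all CPUs and feed those that have asked for a task */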
		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_consume(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_consume(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}

void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = bpf_ktime_get_ns() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
	if (started_at)
		*started_at = 0;
}

static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = bpf_ktime_get_ns();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

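	/* rotate the starting CPU with each timer run to spread kicks evenly */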
	bpf_for(i, 0, nr_cpu_ids) {
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    vtime_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

	/*
	 * Rearm. Use the pinned flag only if pinning succeeded during init;
	 * otherwise bpf_timer_start() would fail with -EINVAL on kernels
	 * without BPF_F_TIMER_CPU_PIN and the timer would never fire again.
	 */
	bpf_timer_start(timer, TIMER_INTERVAL_NS,
			timer_pinned ? BPF_F_TIMER_CPU_PIN : 0);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}

int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>= 6.7). If we're running on a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the flag. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type is anonymous and thus
	 * can't be used with it. Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last runnable task on a given CPU doesn't
		* mean anything special. Enqueue such tasks like any others.
		*/
	       .flags			= SCX_OPS_ENQ_LAST,

	       .select_cpu		= (void *)central_select_cpu,
	       .enqueue			= (void *)central_enqueue,
	       .dispatch		= (void *)central_dispatch,
	       .running			= (void *)central_running,
	       .stopping		= (void *)central_stopping,
	       .init			= (void *)central_init,
	       .exit			= (void *)central_exit,
	       .name			= "central");
