
TOMOYO Linux Cross Reference
Linux/tools/sched_ext/scx_simple.bpf.c

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A simple scheduler.
 *
 * By default, it operates as a simple global weighted vtime scheduler and can
 * be switched to FIFO scheduling. It also demonstrates the following niceties.
 *
 * - Statistics tracking how many tasks are queued to local and global DSQs.
 * - Termination notification for userspace.
 *
 * While very simple, this scheduler should work reasonably well on CPUs with a
 * uniform L3 cache topology. While preemption is not implemented, the fact that
 * the scheduling queue is shared across all CPUs means that whatever is at the
 * front of the queue is likely to be executed fairly quickly given enough CPUs.
 * The FIFO scheduling mode may be beneficial to some workloads but comes with
 * the usual problems of FIFO scheduling, where saturating threads can easily
 * drown out interactive ones.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

const volatile bool fifo_sched;

static u64 vtime_now;
UEI_DEFINE(uei);

/*
 * Built-in DSQs such as SCX_DSQ_GLOBAL cannot be used as priority queues
 * (meaning, cannot be dispatched to with scx_bpf_dispatch_vtime()). We
 * therefore create a separate DSQ with ID 0 that we dispatch to and consume
 * from. If scx_simple only supported global FIFO scheduling, then we could
 * just use SCX_DSQ_GLOBAL.
 */
#define SHARED_DSQ 0

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
	__uint(max_entries, 2);			/* [local, global] */
} stats SEC(".maps");

static void stat_inc(u32 idx)
{
	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx);
	if (cnt_p)
		(*cnt_p)++;
}

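/*
 * Wraparound-safe comparison of two monotonically increasing u64 vtimes.
 * Casting the difference to s64 keeps the ordering correct across overflow:
 * e.g. for a = 2^64 - 2 and b = 3 (b has wrapped), (s64)(a - b) == -5, so a
 * is still considered to be before b.
 */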
static inline bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

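/*
 * Pick a target CPU using the default idle-CPU selection helper. When an idle
 * CPU is found, the task is dispatched straight to that CPU's local DSQ with
 * the default slice, so it can start running without a separate ops.enqueue()
 * invocation.
 */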
s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle) {
		stat_inc(0);	/* count local queueing */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

	return cpu;
}

void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
{
	stat_inc(1);	/* count global queueing */

	if (fifo_sched) {
		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
	} else {
		u64 vtime = p->scx.dsq_vtime;

		/*
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
		if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
			vtime = vtime_now - SCX_SLICE_DFL;

		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
				       enq_flags);
	}
}

void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
{
	scx_bpf_consume(SHARED_DSQ);
}

void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
{
	if (fifo_sched)
		return;

	/*
	 * Global vtime always progresses forward as tasks start executing. The
	 * test and update can be performed concurrently from multiple CPUs and
	 * are thus racy. Any error should be contained and temporary. Let's
	 * just live with it.
	 */
	if (vtime_before(vtime_now, p->scx.dsq_vtime))
		vtime_now = p->scx.dsq_vtime;
}

void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
{
	if (fifo_sched)
		return;

	/*
	 * Scale the execution time by the inverse of the weight and charge.
	 *
	 * Note that the default yield implementation yields by setting
	 * @p->scx.slice to zero and the following would treat the yielding task
	 * as if it has consumed all its slice. If this penalizes yielding tasks
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
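	/*
	 * For example, with the default weight of 100, a task that consumed its
	 * whole slice is charged exactly SCX_SLICE_DFL of vtime; a task with
	 * weight 200 is charged half as much for the same runtime and thus ends
	 * up with roughly twice the CPU share over time.
	 */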
	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
}

void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
{
	p->scx.dsq_vtime = vtime_now;
}

s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
{
	return scx_bpf_create_dsq(SHARED_DSQ, -1);
}

void BPF_STRUCT_OPS(simple_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(simple_ops,
	       .select_cpu		= (void *)simple_select_cpu,
	       .enqueue			= (void *)simple_enqueue,
	       .dispatch		= (void *)simple_dispatch,
	       .running			= (void *)simple_running,
	       .stopping		= (void *)simple_stopping,
	       .enable			= (void *)simple_enable,
	       .init			= (void *)simple_init,
	       .exit			= (void *)simple_exit,
	       .name			= "simple");
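
The BPF side above only defines the scheduling policy; the fifo_sched switch and the [local, global] counters are meant to be set and read from userspace (in tree this is done by scx_simple.c). The following is only a rough sketch of such a loader, not the in-tree implementation; it assumes a bpftool-generated skeleton ("scx_simple.bpf.skel.h", struct scx_simple, skel->rodata->fifo_sched, skel->maps.stats and skel->maps.simple_ops are the assumed names):

/*
 * Hypothetical loader sketch -- the in-tree scx_simple.c differs in detail.
 * Assumes a bpftool-generated skeleton and the member names it would normally
 * produce for this object.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "scx_simple.bpf.skel.h"

static void print_stats(int stats_fd)
{
	int nr_cpus = libbpf_num_possible_cpus();
	uint64_t sums[2] = {};

	if (nr_cpus <= 0)
		return;

	uint64_t vals[nr_cpus];

	/* A per-CPU array returns one value per possible CPU for each index. */
	for (uint32_t idx = 0; idx < 2; idx++) {
		if (bpf_map_lookup_elem(stats_fd, &idx, vals))
			continue;
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			sums[idx] += vals[cpu];
	}

	printf("local=%llu global=%llu\n",
	       (unsigned long long)sums[0], (unsigned long long)sums[1]);
}

int main(int argc, char **argv)
{
	struct scx_simple *skel;
	struct bpf_link *link = NULL;
	int ret = 1;

	skel = scx_simple__open();
	if (!skel)
		return 1;

	/* const volatile globals live in .rodata and must be set before load. */
	skel->rodata->fifo_sched = argc > 1 && !strcmp(argv[1], "-f");

	if (scx_simple__load(skel))
		goto out;

	/* Registering the struct_ops map is what enables the scheduler. */
	link = bpf_map__attach_struct_ops(skel->maps.simple_ops);
	if (!link)
		goto out;

	for (int i = 0; i < 10; i++) {
		print_stats(bpf_map__fd(skel->maps.stats));
		sleep(1);
	}
	ret = 0;
out:
	if (link)
		bpf_link__destroy(link);
	scx_simple__destroy(skel);
	return ret;
}

Summing across CPUs is needed because BPF_MAP_TYPE_PERCPU_ARRAY keeps an independent counter per CPU, which is what lets stat_inc() in the BPF program bump its slot without any atomics.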
