
TOMOYO Linux Cross Reference
Linux/kernel/sched/stats.h


Diff markup

Differences between /kernel/sched/stats.h (Version linux-6.12-rc7) and /kernel/sched/stats.h (Version linux-3.10.108)


  1 /* SPDX-License-Identifier: GPL-2.0 */         << 
  2 #ifndef _KERNEL_STATS_H                        << 
  3 #define _KERNEL_STATS_H                        << 
  4                                                     1 
  5 #ifdef CONFIG_SCHEDSTATS                            2 #ifdef CONFIG_SCHEDSTATS
  6                                                     3 
  7 extern struct static_key_false sched_schedstats;  << 
  8                                                << 
  9 /*                                                  4 /*
 10  * Expects runqueue lock to be held for atomicity of update      5  * Expects runqueue lock to be held for atomicity of update
 11  */                                                 6  */
 12 static inline void                                  7 static inline void
 13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)      8 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 14 {                                                   9 {
 15         if (rq) {                                  10         if (rq) {
 16                 rq->rq_sched_info.run_delay += delta;     11                 rq->rq_sched_info.run_delay += delta;
 17                 rq->rq_sched_info.pcount++;        12                 rq->rq_sched_info.pcount++;
 18         }                                          13         }
 19 }                                                  14 }
 20                                                    15 
 21 /*                                                 16 /*
 22  * Expects runqueue lock to be held for atomicity of update     17  * Expects runqueue lock to be held for atomicity of update
 23  */                                                18  */
 24 static inline void                                 19 static inline void
 25 rq_sched_info_depart(struct rq *rq, unsigned long long delta)     20 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 26 {                                                  21 {
 27         if (rq)                                    22         if (rq)
 28                 rq->rq_cpu_time += delta;          23                 rq->rq_cpu_time += delta;
 29 }                                                  24 }
 30                                                    25 
 31 static inline void                                 26 static inline void
 32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)  !!  27 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 33 {                                                  28 {
 34         if (rq)                                    29         if (rq)
 35                 rq->rq_sched_info.run_delay += delta;     30                 rq->rq_sched_info.run_delay += delta;
 36 }                                                  31 }
 37 #define   schedstat_enabled()           static_branch_unlikely(&sched_schedstats)  !!  32 # define schedstat_inc(rq, field)       do { (rq)->field++; } while (0)
 38 #define __schedstat_inc(var)            do { var++; } while (0)  !!  33 # define schedstat_add(rq, field, amt)  do { (rq)->field += (amt); } while (0)
 39 #define   schedstat_inc(var)            do { if (schedstat_enabled()) { var++; } } while (0)  !!  34 # define schedstat_set(var, val)        do { var = (val); } while (0)
 40 #define __schedstat_add(var, amt)       do { var += (amt); } while (0)  !!  35 #else /* !CONFIG_SCHEDSTATS */
 41 #define   schedstat_add(var, amt)       do { if (schedstat_enabled()) { var += (amt); } } while (0)  << 
 42 #define __schedstat_set(var, val)       do { var = (val); } while (0)  << 
 43 #define   schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)  << 
 44 #define   schedstat_val(var)            (var)  << 
 45 #define   schedstat_val_or_zero(var)    ((schedstat_enabled()) ? (var) : 0)  << 
 46                                                << 
 47 void __update_stats_wait_start(struct rq *rq, struct task_struct *p,  << 
 48                                struct sched_statistics *stats);  << 
 49                                                << 
 50 void __update_stats_wait_end(struct rq *rq, struct task_struct *p,  << 
 51                              struct sched_statistics *stats);  << 
 52 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,  << 
 53                                     struct sched_statistics *stats);  << 
 54                                                << 
 55 static inline void                                 36 static inline void
 56 check_schedstat_required(void)                 !!  37 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 57 {                                              !!  38 {}
 58         if (schedstat_enabled())               !!  39 static inline void
 59                 return;                        !!  40 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 60                                                !!  41 {}
 61         /* Force schedstat enabled if a dependent tracepoint is active */  !!  42 static inline void
 62         if (trace_sched_stat_wait_enabled()    ||  !!  43 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 63             trace_sched_stat_sleep_enabled()   ||  !!  44 {}
 64             trace_sched_stat_iowait_enabled()  ||  !!  45 # define schedstat_inc(rq, field)       do { } while (0)
 65             trace_sched_stat_blocked_enabled() ||  !!  46 # define schedstat_add(rq, field, amt)  do { } while (0)
 66             trace_sched_stat_runtime_enabled())  !!  47 # define schedstat_set(var, val)        do { } while (0)
 67                 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");  << 
 68 }                                              << 
 69                                                << 
 70 #else /* !CONFIG_SCHEDSTATS: */                << 
 71                                                << 
 72 static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }  << 
 73 static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }  << 
 74 static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }  << 
 75 # define   schedstat_enabled()          0      << 
 76 # define __schedstat_inc(var)           do { } while (0)  << 
 77 # define   schedstat_inc(var)           do { } while (0)  << 
 78 # define __schedstat_add(var, amt)      do { } while (0)  << 
 79 # define   schedstat_add(var, amt)      do { } while (0)  << 
 80 # define __schedstat_set(var, val)      do { } while (0)  << 
 81 # define   schedstat_set(var, val)      do { } while (0)  << 
 82 # define   schedstat_val(var)           0      << 
 83 # define   schedstat_val_or_zero(var)   0      << 
 84                                                << 
 85 # define __update_stats_wait_start(rq, p, stats)       do { } while (0)  << 
 86 # define __update_stats_wait_end(rq, p, stats)         do { } while (0)  << 
 87 # define __update_stats_enqueue_sleeper(rq, p, stats)  do { } while (0)  << 
 88 # define check_schedstat_required()                    do { } while (0)  << 
 89                                                << 
 90 #endif /* CONFIG_SCHEDSTATS */                 << 
 91                                                << 
 92 #ifdef CONFIG_FAIR_GROUP_SCHED                 << 
 93 struct sched_entity_stats {                    << 
 94         struct sched_entity     se;            << 
 95         struct sched_statistics stats;         << 
 96 } __no_randomize_layout;                       << 
 97 #endif                                         << 
 98                                                << 
 99 static inline struct sched_statistics *        << 
100 __schedstats_from_se(struct sched_entity *se)  << 
101 {                                              << 
102 #ifdef CONFIG_FAIR_GROUP_SCHED                 << 
103         if (!entity_is_task(se))               << 
104                 return &container_of(se, struct sched_entity_stats, se)->stats;  << 
105 #endif                                             48 #endif
106         return &task_of(se)->stats;            << 
107 }                                              << 
108                                                << 
109 #ifdef CONFIG_PSI                              << 
110 void psi_task_change(struct task_struct *task, int clear, int set);  << 
111 void psi_task_switch(struct task_struct *prev, struct task_struct *next,  << 
112                      bool sleep);              << 
113 #ifdef CONFIG_IRQ_TIME_ACCOUNTING              << 
114 void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);  << 
115 #else                                          << 
116 static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,  << 
117                                        struct task_struct *prev) {}  << 
118 #endif /*CONFIG_IRQ_TIME_ACCOUNTING */         << 
119 /*                                             << 
120  * PSI tracks state that persists across sleeps, such as iowaits and  << 
121  * memory stalls. As a result, it has to distinguish between sleeps,  << 
122  * where a task's runnable state changes, and migrations, where a task  << 
123  * and its runnable state are being moved between CPUs.  << 
124  *                                             << 
125  * A notable case is a task whose dequeue is delayed. PSI considers  << 
126  * those sleeping, but because they are still on the runqueue they can  << 
127  * go through migration requeues. In this case, sleeping states need  << 
128  * to be transferred.                          << 
129  */                                            << 
130 static inline void psi_enqueue(struct task_struct *p, bool migrate)  << 
131 {                                              << 
132         int clear = 0, set = 0;                << 
133                                                << 
134         if (static_branch_likely(&psi_disabled))  << 
135                 return;                        << 
136                                                << 
137         if (p->se.sched_delayed) {             << 
138                 /* CPU migration of "sleeping" task */  << 
139                 SCHED_WARN_ON(!migrate);       << 
140                 if (p->in_memstall)            << 
141                         set |= TSK_MEMSTALL;   << 
142                 if (p->in_iowait)              << 
143                         set |= TSK_IOWAIT;     << 
144         } else if (migrate) {                  << 
145                 /* CPU migration of runnable task */  << 
146                 set = TSK_RUNNING;             << 
147                 if (p->in_memstall)            << 
148                         set |= TSK_MEMSTALL | TSK_MEMSTALL_RUNNING;  << 
149         } else {                               << 
150                 /* Wakeup of new or sleeping task */  << 
151                 if (p->in_iowait)              << 
152                         clear |= TSK_IOWAIT;   << 
153                 set = TSK_RUNNING;             << 
154                 if (p->in_memstall)            << 
155                         set |= TSK_MEMSTALL_RUNNING;  << 
156         }                                      << 
157                                                << 
158         psi_task_change(p, clear, set);        << 
159 }                                              << 
160                                                << 
161 static inline void psi_dequeue(struct task_struct *p, bool migrate)  << 
162 {                                              << 
163         if (static_branch_likely(&psi_disabled))  << 
164                 return;                        << 
165                                                << 
166         /*                                     << 
167          * When migrating a task to another CPU, clear all psi  << 
168          * state. The enqueue callback above will work it out.  << 
169          */                                    << 
170         if (migrate)                           << 
171                 psi_task_change(p, p->psi_flags, 0);  << 
172                                                << 
173         /*                                     << 
174          * A voluntary sleep is a dequeue followed by a task switch. To  << 
175          * avoid walking all ancestors twice, psi_task_switch() handles  << 
176          * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.  << 
177          * Do nothing here.                    << 
178          */                                    << 
179 }                                              << 
180                                                    49 
181 static inline void psi_ttwu_dequeue(struct task_struct *p)  !!  50 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
                                                   >>  51 static inline void sched_info_reset_dequeued(struct task_struct *t)
182 {                                                  52 {
183         if (static_branch_likely(&psi_disabled))  !!  53         t->sched_info.last_queued = 0;
184                 return;                        << 
185         /*                                     << 
186          * Is the task being migrated during a wakeup? Make sure to  << 
187          * deregister its sleep-persistent psi states from the old  << 
188          * queue, and let psi_enqueue() know it has to requeue.  << 
189          */                                    << 
190         if (unlikely(p->psi_flags)) {          << 
191                 struct rq_flags rf;            << 
192                 struct rq *rq;                 << 
193                                                << 
194                 rq = __task_rq_lock(p, &rf);   << 
195                 psi_task_change(p, p->psi_flags, 0);  << 
196                 __task_rq_unlock(rq, &rf);     << 
197         }                                      << 
198 }                                              << 
199                                                << 
200 static inline void psi_sched_switch(struct task_struct *prev,  << 
201                                     struct task_struct *next,  << 
202                                     bool sleep)  << 
203 {                                              << 
204         if (static_branch_likely(&psi_disabled))  << 
205                 return;                        << 
206                                                << 
207         psi_task_switch(prev, next, sleep);    << 
208 }                                                  54 }
209                                                    55 
210 #else /* CONFIG_PSI */                         << 
211 static inline void psi_enqueue(struct task_struct *p, bool migrate) {}  << 
212 static inline void psi_dequeue(struct task_struct *p, bool migrate) {}  << 
213 static inline void psi_ttwu_dequeue(struct task_struct *p) {}  << 
214 static inline void psi_sched_switch(struct task_struct *prev,  << 
215                                     struct task_struct *next,  << 
216                                     bool sleep) {}  << 
217 static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,  << 
218                                        struct task_struct *prev) {}  << 
219 #endif /* CONFIG_PSI */                        << 
220                                                << 
221 #ifdef CONFIG_SCHED_INFO                       << 
222 /*                                                 56 /*
223  * We are interested in knowing how long it was from the *first* time a     57  * We are interested in knowing how long it was from the *first* time a
224  * task was queued to the time that it finally hit a CPU, we call this routine  !!  58  * task was queued to the time that it finally hit a cpu, we call this routine
225  * from dequeue_task() to account for possible rq->clock skew across CPUs. The  !!  59  * from dequeue_task() to account for possible rq->clock skew across cpus. The
226  * delta taken on each CPU would annul the skew.  !!  60  * delta taken on each cpu would annul the skew.
227  */                                            !!  61  */
228 static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)  !!  62 static inline void sched_info_dequeued(struct task_struct *t)
229 {                                              !!  63 {
230         unsigned long long delta = 0;          !!  64         unsigned long long now = task_rq(t)->clock, delta = 0;
231                                                !!  65 
232         if (!t->sched_info.last_queued)        !!  66         if (unlikely(sched_info_on()))
233                 return;                        !!  67                 if (t->sched_info.last_queued)
234                                                !!  68                         delta = now - t->sched_info.last_queued;
235         delta = rq_clock(rq) - t->sched_info.last_queued;  !!  69         sched_info_reset_dequeued(t);
236         t->sched_info.last_queued = 0;         << 
237         t->sched_info.run_delay += delta;          70         t->sched_info.run_delay += delta;
238                                                    71 
239         rq_sched_info_dequeue(rq, delta);      !!  72         rq_sched_info_dequeued(task_rq(t), delta);
240 }                                                  73 }
241                                                    74 
242 /*                                                 75 /*
243  * Called when a task finally hits the CPU.  We can now calculate how  !!  76  * Called when a task finally hits the cpu.  We can now calculate how
244  * long it was waiting to run.  We also note when it began so that we     77  * long it was waiting to run.  We also note when it began so that we
245  * can keep stats on how long its time-slice is.  !!  78  * can keep stats on how long its timeslice is.
246  */                                                79  */
247 static void sched_info_arrive(struct rq *rq, struct task_struct *t)  !!  80 static void sched_info_arrive(struct task_struct *t)
248 {                                                  81 {
249         unsigned long long now, delta = 0;     !!  82         unsigned long long now = task_rq(t)->clock, delta = 0;
250                                                    83 
251         if (!t->sched_info.last_queued)        !!  84         if (t->sched_info.last_queued)
252                 return;                        !!  85                 delta = now - t->sched_info.last_queued;
253                                                !!  86         sched_info_reset_dequeued(t);
254         now = rq_clock(rq);                    << 
255         delta = now - t->sched_info.last_queued;  << 
256         t->sched_info.last_queued = 0;         << 
257         t->sched_info.run_delay += delta;          87         t->sched_info.run_delay += delta;
258         t->sched_info.last_arrival = now;          88         t->sched_info.last_arrival = now;
259         t->sched_info.pcount++;                    89         t->sched_info.pcount++;
260                                                    90 
261         rq_sched_info_arrive(rq, delta);       !!  91         rq_sched_info_arrive(task_rq(t), delta);
262 }                                                  92 }
263                                                    93 
264 /*                                                 94 /*
265  * This function is only called from enqueue_task(), but also only updates     95  * This function is only called from enqueue_task(), but also only updates
266  * the timestamp if it is already not set.  It's assumed that     96  * the timestamp if it is already not set.  It's assumed that
267  * sched_info_dequeue() will clear that stamp when appropriate.  !!  97  * sched_info_dequeued() will clear that stamp when appropriate.
268  */                                                98  */
269 static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)  !!  99 static inline void sched_info_queued(struct task_struct *t)
270 {                                                 100 {
271         if (!t->sched_info.last_queued)        !! 101         if (unlikely(sched_info_on()))
272                 t->sched_info.last_queued = rq_clock(rq);  !! 102                 if (!t->sched_info.last_queued)
                                                   >> 103                         t->sched_info.last_queued = task_rq(t)->clock;
273 }                                                 104 }
274                                                   105 
275 /*                                                106 /*
276  * Called when a process ceases being the active-running process involuntarily  !! 107  * Called when a process ceases being the active-running process, either
277  * due, typically, to expiring its time slice (may also be called when  !! 108  * voluntarily or involuntarily.  Now we can calculate how long we ran.
278  * switching to the idle task).  Now we can calculate how long we ran.  << 
279  * Also, if the process is still in the TASK_RUNNING state, call    109  * Also, if the process is still in the TASK_RUNNING state, call
280  * sched_info_enqueue() to mark that it has now again started waiting on  !! 110  * sched_info_queued() to mark that it has now again started waiting on
281  * the runqueue.                                  111  * the runqueue.
282  */                                               112  */
283 static inline void sched_info_depart(struct rq *rq, struct task_struct *t)  !! 113 static inline void sched_info_depart(struct task_struct *t)
284 {                                                 114 {
285         unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;  !! 115         unsigned long long delta = task_rq(t)->clock -
                                                   >> 116                                         t->sched_info.last_arrival;
286                                                   117 
287         rq_sched_info_depart(rq, delta);       !! 118         rq_sched_info_depart(task_rq(t), delta);
288                                                   119 
289         if (task_is_running(t))                !! 120         if (t->state == TASK_RUNNING)
290                 sched_info_enqueue(rq, t);     !! 121                 sched_info_queued(t);
291 }                                                 122 }
292                                                   123 
293 /*                                                124 /*
294  * Called when tasks are switched involuntarily due, typically, to expiring    125  * Called when tasks are switched involuntarily due, typically, to expiring
295  * their time slice.  (This may also be called when switching to or from    126  * their time slice.  (This may also be called when switching to or from
296  * the idle task.)  We are only called when prev != next.    127  * the idle task.)  We are only called when prev != next.
297  */                                               128  */
298 static inline void                                129 static inline void
299 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)  !! 130 __sched_info_switch(struct task_struct *prev, struct task_struct *next)
300 {                                                 131 {
                                                   >> 132         struct rq *rq = task_rq(prev);
                                                   >> 133 
301         /*                                        134         /*
302          * prev now departs the CPU.  It's not interesting to record  !! 135          * prev now departs the cpu.  It's not interesting to record
303          * stats about how efficient we were at scheduling the idle    136          * stats about how efficient we were at scheduling the idle
304          * process, however.                      137          * process, however.
305          */                                       138          */
306         if (prev != rq->idle)                     139         if (prev != rq->idle)
307                 sched_info_depart(rq, prev);   !! 140                 sched_info_depart(prev);
308                                                   141 
309         if (next != rq->idle)                     142         if (next != rq->idle)
310                 sched_info_arrive(rq, next);   !! 143                 sched_info_arrive(next);
                                                   >> 144 }
                                                   >> 145 static inline void
                                                   >> 146 sched_info_switch(struct task_struct *prev, struct task_struct *next)
                                                   >> 147 {
                                                   >> 148         if (unlikely(sched_info_on()))
                                                   >> 149                 __sched_info_switch(prev, next);
                                                   >> 150 }
                                                   >> 151 #else
                                                   >> 152 #define sched_info_queued(t)                    do { } while (0)
                                                   >> 153 #define sched_info_reset_dequeued(t)    do { } while (0)
                                                   >> 154 #define sched_info_dequeued(t)                  do { } while (0)
                                                   >> 155 #define sched_info_switch(t, next)              do { } while (0)
                                                   >> 156 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
                                                   >> 157 
                                                   >> 158 /*
                                                   >> 159  * The following are functions that support scheduler-internal time accounting.
                                                   >> 160  * These functions are generally called at the timer tick.  None of this depends
                                                   >> 161  * on CONFIG_SCHEDSTATS.
                                                   >> 162  */
                                                   >> 163 
                                                   >> 164 /**
                                                   >> 165  * account_group_user_time - Maintain utime for a thread group.
                                                   >> 166  *
                                                   >> 167  * @tsk:        Pointer to task structure.
                                                   >> 168  * @cputime:    Time value by which to increment the utime field of the
                                                   >> 169  *              thread_group_cputime structure.
                                                   >> 170  *
                                                   >> 171  * If thread group time is being maintained, get the structure for the
                                                   >> 172  * running CPU and update the utime field there.
                                                   >> 173  */
                                                   >> 174 static inline void account_group_user_time(struct task_struct *tsk,
                                                   >> 175                                            cputime_t cputime)
                                                   >> 176 {
                                                   >> 177         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
                                                   >> 178 
                                                   >> 179         if (!cputimer->running)
                                                   >> 180                 return;
                                                   >> 181 
                                                   >> 182         raw_spin_lock(&cputimer->lock);
                                                   >> 183         cputimer->cputime.utime += cputime;
                                                   >> 184         raw_spin_unlock(&cputimer->lock);
                                                   >> 185 }
                                                   >> 186 
                                                   >> 187 /**
                                                   >> 188  * account_group_system_time - Maintain stime for a thread group.
                                                   >> 189  *
                                                   >> 190  * @tsk:        Pointer to task structure.
                                                   >> 191  * @cputime:    Time value by which to increment the stime field of the
                                                   >> 192  *              thread_group_cputime structure.
                                                   >> 193  *
                                                   >> 194  * If thread group time is being maintained, get the structure for the
                                                   >> 195  * running CPU and update the stime field there.
                                                   >> 196  */
                                                   >> 197 static inline void account_group_system_time(struct task_struct *tsk,
                                                   >> 198                                              cputime_t cputime)
                                                   >> 199 {
                                                   >> 200         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
                                                   >> 201 
                                                   >> 202         if (!cputimer->running)
                                                   >> 203                 return;
                                                   >> 204 
                                                   >> 205         raw_spin_lock(&cputimer->lock);
                                                   >> 206         cputimer->cputime.stime += cputime;
                                                   >> 207         raw_spin_unlock(&cputimer->lock);
311 }                                                 208 }
312                                                   209 
313 #else /* !CONFIG_SCHED_INFO: */                !! 210 /**
314 # define sched_info_enqueue(rq, t)      do { } while (0)  !! 211  * account_group_exec_runtime - Maintain exec runtime for a thread group.
315 # define sched_info_dequeue(rq, t)      do { } while (0)  !! 212  *
316 # define sched_info_switch(rq, t, next) do { } while (0)  !! 213  * @tsk:        Pointer to task structure.
317 #endif /* CONFIG_SCHED_INFO */                 !! 214  * @ns:         Time value by which to increment the sum_exec_runtime field
                                                   >> 215  *              of the thread_group_cputime structure.
                                                   >> 216  *
                                                   >> 217  * If thread group time is being maintained, get the structure for the
                                                   >> 218  * running CPU and update the sum_exec_runtime field there.
                                                   >> 219  */
                                                   >> 220 static inline void account_group_exec_runtime(struct task_struct *tsk,
                                                   >> 221                                               unsigned long long ns)
                                                   >> 222 {
                                                   >> 223         struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
318                                                   224 
319 #endif /* _KERNEL_STATS_H */                   !! 225         if (!cputimer->running)
                                                   >> 226                 return;
                                                   >> 227 
                                                   >> 228         raw_spin_lock(&cputimer->lock);
                                                   >> 229         cputimer->cputime.sum_exec_runtime += ns;
                                                   >> 230         raw_spin_unlock(&cputimer->lock);
                                                   >> 231 }
320                                                   232 
