/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
#ifdef CONFIG_SCHED_CORE
	CPUTIME_FORCEIDLE,
#endif
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu this_cpu_ptr(&kstat)
#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

extern unsigned long long nr_context_switches_cpu(int cpu);
extern unsigned long long nr_context_switches(void);

extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
extern void kstat_incr_irq_this_cpu(unsigned int irq);

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

static inline unsigned int kstat_cpu_softirqs_sum(int cpu)
{
	int i;
	unsigned int sum = 0;

	for (i = 0; i < NR_SOFTIRQS; i++)
		sum += kstat_softirqs_cpu(i, cpu);

	return sum;
}

#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT
extern void kstat_snapshot_irqs(void);
extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq);
#else
static inline void kstat_snapshot_irqs(void) { }
static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; }
#endif

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
extern unsigned int kstat_irqs_usr(unsigned int irq);

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned long kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
			  enum cpu_usage_stat usage, int cpu);
extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
#else
static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
				 enum cpu_usage_stat usage, int cpu)
{
	return kcpustat->cpustat[usage];
}

static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
{
	*dst = kcpustat_cpu(cpu);
}

#endif

extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
extern void account_system_index_time(struct task_struct *, u64,
				      enum cpu_usage_stat);
extern void account_steal_time(u64);
extern void account_idle_time(u64);
extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_flush(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif

extern void account_idle_ticks(unsigned long ticks);

#ifdef CONFIG_SCHED_CORE
extern void __account_forceidle_time(struct task_struct *tsk, u64 delta);
#endif

#endif /* _LINUX_KERNEL_STAT_H */
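/*
 * Illustrative sketch only, not part of the upstream header: one plausible
 * way a built-in consumer (in the spirit of fs/proc/stat.c) could read the
 * per-CPU cputime counters declared above. The function name below is made
 * up for the example; kcpustat_cpu_fetch() and the CPUTIME_ indices come
 * from this header.
 */
#if 0	/* example only, never compiled */
static u64 example_cpu_idle_ns(int cpu)
{
	struct kernel_cpustat snap;

	/*
	 * Snapshot the whole cputime array for @cpu; with
	 * CONFIG_VIRT_CPU_ACCOUNTING_GEN this variant also accounts for
	 * vtime deltas that have not been folded in yet.
	 */
	kcpustat_cpu_fetch(&snap, cpu);

	/* cpustat[] entries are u64 nanoseconds accumulated since boot */
	return snap.cpustat[CPUTIME_IDLE];
}
#endif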