TOMOYO Linux Cross Reference
Linux/include/kvm/arm_pmu.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

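/*
 * The cycle counter (PMCCNTR_EL0) sits one past the last programmable
 * event counter, i.e. at the highest counter index.
 */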
#define ARMV8_PMU_CYCLE_IDX             (ARMV8_PMU_MAX_COUNTERS - 1)

#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
        struct perf_event *perf_event; /* backing perf event, NULL when inactive */
};

struct kvm_pmu_events {
        u32 events_host;  /* events counted only while running the host */
        u32 events_guest; /* events counted only while running the guest */
};

struct kvm_pmu {
        struct irq_work overflow_work; /* defers overflow handling out of the perf callback */
        struct kvm_pmu_events events;
        struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
        int irq_num;    /* PPI or SPI number of the PMU overflow interrupt */
        bool created;   /* PMU initialization for this vcpu has completed */
        bool irq_level; /* current level of the overflow interrupt line */
};

/* List node tracking a host PMU instance that vcpus may be bound to */
struct arm_pmu_entry {
        struct list_head entry;
        struct arm_pmu *arm_pmu;
};

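/*
 * Static key advertising host PMU support; it is expected to be flipped
 * on by the host arm_pmu driver once a hardware PMU has been probed.
 */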
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
        return static_branch_likely(&kvm_arm_pmu_available);
}

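/*
 * Interrupt numbers below VGIC_NR_SGIS fall in the SGI range and are
 * never valid for a PMU, so they double as the "not initialized" marker.
 */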
#define kvm_arm_pmu_irq_initialized(v)  ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
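/*
 * flush_hwstate runs before entering the guest and sync_hwstate after
 * exiting it, keeping the emulated overflow interrupt level in step
 * with the vgic.
 */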
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu);
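/*
 * Backing for the KVM_ARM_VCPU_PMU_V3_CTRL attribute group of the
 * KVM_{SET,GET,HAS}_DEVICE_ATTR vcpu ioctls.
 */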
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);

#define kvm_vcpu_has_pmu(vcpu)                                  \
        (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)                                \
        do {                                                            \
                if (!has_vhe() && kvm_arm_support_pmu_v3())             \
                        vcpu->arch.pmu.events = *kvm_get_pmu_events();  \
        } while (0)
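/*
 * Illustrative calling pattern only, following the comment above (the
 * real caller is the vcpu run loop, see arch/arm64/kvm/arm.c):
 *
 *      local_irq_disable();
 *      kvm_pmu_update_vcpu_events(vcpu);  (snapshot this CPU's events)
 *      ... enter the guest ...
 *      local_irq_enable();
 */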

u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm);

u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu);
#else
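/*
 * Stubs for kernels built without CONFIG_HW_PERF_EVENTS or CONFIG_KVM:
 * the PMU is reported as absent and all operations are no-ops.
 */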
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
        return false;
}

#define kvm_arm_pmu_irq_initialized(v)  (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
                                            u64 select_idx)
{
        return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
                                             u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                                          struct kvm_device_attr *attr)
{
        return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        return 0;
}

#define kvm_vcpu_has_pmu(vcpu)          ({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
        return 0;
}
static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
        return 0;
}
static inline void kvm_vcpu_pmu_resync_el0(void) {}

static inline int kvm_arm_set_default_pmu(struct kvm *kvm)
{
        return -ENODEV;
}

static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm)
{
        return 0;
}

static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
{
        return 0;
}

#endif  /* CONFIG_HW_PERF_EVENTS && CONFIG_KVM */

#endif  /* __ASM_ARM_KVM_PMU_H */
