---
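Reviewer note, not part of the patch: CPUID.0xA.EAX[31:24] enumerates the length of the CPUID.0xA.EBX bit vector, and that vector describes the availability of the architectural performance events, not of the GP counters, which is what motivates the rename below. A minimal standalone sketch of the Intel-side decode, assuming the GCC/Clang __get_cpuid() helper from <cpuid.h>:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EAX[31:24]: number of valid bits in the EBX arch event vector. */
	printf("arch event mask length: %u\n", (eax >> 24) & 0xff);

	/* An EBX bit set to '1' means that arch event is NOT available,
	 * so the availability mask is the complement.
	 */
	printf("arch events available:  0x%08x\n", ~ebx);
	return 0;
}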
lib/x86/pmu.c | 10 +++++-----
lib/x86/pmu.h | 8 ++++----
x86/pmu.c | 8 ++++----
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/lib/x86/pmu.c b/lib/x86/pmu.c
index d06e9455..d37c874c 100644
--- a/lib/x86/pmu.c
+++ b/lib/x86/pmu.c
@@ -18,10 +18,10 @@ void pmu_init(void)

pmu.nr_gp_counters = (cpuid_10.a >> 8) & 0xff;
pmu.gp_counter_width = (cpuid_10.a >> 16) & 0xff;
- pmu.gp_counter_mask_length = (cpuid_10.a >> 24) & 0xff;
+ pmu.arch_event_mask_length = (cpuid_10.a >> 24) & 0xff;

- /* CPUID.0xA.EBX bit is '1' if a counter is NOT available. */
- pmu.gp_counter_available = ~cpuid_10.b;
+ /* CPUID.0xA.EBX bit is '1' if an arch event is NOT available. */
+ pmu.arch_event_available = ~cpuid_10.b;

if (this_cpu_has(X86_FEATURE_PDCM))
pmu.perf_cap = rdmsr(MSR_IA32_PERF_CAPABILITIES);
@@ -50,8 +50,8 @@ void pmu_init(void)
pmu.msr_gp_event_select_base = MSR_K7_EVNTSEL0;
}
pmu.gp_counter_width = PMC_DEFAULT_WIDTH;
- pmu.gp_counter_mask_length = pmu.nr_gp_counters;
- pmu.gp_counter_available = (1u << pmu.nr_gp_counters) - 1;
+ pmu.arch_event_mask_length = 32;
+ pmu.arch_event_available = -1u;

if (this_cpu_has_perf_global_status()) {
pmu.msr_global_status = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
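Illustrative aside, not part of the patch: AMD CPUs do not enumerate architectural events through CPUID.0xA, so rather than deriving the mask from the GP counter count as before, the hunk above reports a 32-bit mask with every event available. A hypothetical helper mirroring those defaults:

#include <stdbool.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* Mirrors the AMD defaults set above: arch_event_mask_length = 32 and
 * arch_event_available = -1u, so every index 0..31 tests as available.
 */
static bool amd_arch_event_is_available(int i)
{
	const uint32_t arch_event_available = -1u;

	return arch_event_available & BIT(i);
}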
diff --git a/lib/x86/pmu.h b/lib/x86/pmu.h
index f07fbd93..c7dc68c1 100644
--- a/lib/x86/pmu.h
+++ b/lib/x86/pmu.h
@@ -63,8 +63,8 @@ struct pmu_caps {
u8 fixed_counter_width;
u8 nr_gp_counters;
u8 gp_counter_width;
- u8 gp_counter_mask_length;
- u32 gp_counter_available;
+ u8 arch_event_mask_length;
+ u32 arch_event_available;
u32 msr_gp_counter_base;
u32 msr_gp_event_select_base;
@@ -110,9 +110,9 @@ static inline bool this_cpu_has_perf_global_status(void)
return pmu.version > 1;
}

-static inline bool pmu_gp_counter_is_available(int i)
+static inline bool pmu_arch_event_is_available(int i)
{
- return pmu.gp_counter_available & BIT(i);
+ return pmu.arch_event_available & BIT(i);
}

static inline u64 pmu_lbr_version(void)
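Usage sketch for the renamed accessor, illustrative only: it assumes kvm-unit-tests' libcflat.h plus the declarations above, and the event names follow the Intel SDM ordering of CPUID.0xA.EBX bits 0-6.

#include "libcflat.h"
#include "x86/pmu.h"

/* Report which architectural events this CPU enumerates, indexed by
 * their CPUID.0xA.EBX bit positions.
 */
static void report_arch_events(void)
{
	static const char * const names[] = {
		"core cycles", "instructions retired", "reference cycles",
		"llc references", "llc misses", "branches retired",
		"branch misses retired",
	};
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(names) &&
		    i < pmu.arch_event_mask_length; i++)
		printf("arch event %d (%s): %savailable\n", i, names[i],
		       pmu_arch_event_is_available(i) ? "" : "not ");
}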
diff --git a/x86/pmu.c b/x86/pmu.c
index 45c6db3c..e79122ed 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -436,7 +436,7 @@ static void check_gp_counters(void)
int i;

for (i = 0; i < gp_events_size; i++)
- if (pmu_gp_counter_is_available(i))
+ if (pmu_arch_event_is_available(i))
check_gp_counter(&gp_events[i]);
else
printf("GP event '%s' is disabled\n",
@@ -463,7 +463,7 @@ static void check_counters_many(void)
int i, n;

for (i = 0, n = 0; n < pmu.nr_gp_counters; i++) {
- if (!pmu_gp_counter_is_available(i))
+ if (!pmu_arch_event_is_available(i))
continue;

cnt[n].ctr = MSR_GP_COUNTERx(n);
@@ -902,7 +902,7 @@ static void set_ref_cycle_expectations(void)
uint64_t t0, t1, t2, t3;

/* Bit 2 enumerates the availability of reference cycles events. */
- if (!pmu.nr_gp_counters || !pmu_gp_counter_is_available(2))
+ if (!pmu.nr_gp_counters || !pmu_arch_event_is_available(2))
return;

t0 = fenced_rdtsc();
@@ -992,7 +992,7 @@ int main(int ac, char **av)
printf("PMU version: %d\n", pmu.version);
printf("GP counters: %d\n", pmu.nr_gp_counters);
printf("GP counter width: %d\n", pmu.gp_counter_width);
- printf("Mask length: %d\n", pmu.gp_counter_mask_length);
+ printf("Event Mask length: %d\n", pmu.arch_event_mask_length);
printf("Fixed counters: %d\n", pmu.nr_fixed_counters);
printf("Fixed counter width: %d\n", pmu.fixed_counter_width);