perf_counter, x86: rename cpuc->active_mask
Rename cpuc->active_mask to cpuc->active, for a consistent naming scheme with cpuc->used.
[ Impact: cleanup ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-19-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3f3ae47..9ec51a6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -29,9 +29,9 @@
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	unsigned long		interrupts;
 	u64			throttle_ctrl;
-	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 	int			enabled;
 };
@@ -334,7 +334,7 @@
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
@@ -376,7 +376,7 @@
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
 
-		if (!test_bit(idx, cpuc->active_mask))
+		if (!test_bit(idx, cpuc->active))
 			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
 		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
@@ -424,7 +424,7 @@
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
-	set_bit(idx, cpuc->active_mask);
+	set_bit(idx, cpuc->active);
 	if (cpuc->enabled)
 		config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -448,7 +448,7 @@
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 
-	clear_bit(idx, cpuc->active_mask);
+	clear_bit(idx, cpuc->active);
 	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
 }
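
For context, used and active are per-CPU bitmaps with one bit per hardware counter, sized via BITS_TO_LONGS() and driven by the set_bit()/clear_bit()/test_bit() primitives from <linux/bitops.h>. The standalone userspace sketch below mirrors that pattern with simplified, non-atomic helpers; the sketch_* names and the 64-counter size are illustrative stand-ins, not taken from the kernel sources.

/*
 * Standalone sketch of the cpuc->used/cpuc->active bitmap pattern.
 * Not kernel code: BITS_TO_LONGS and the sketch_*_bit() helpers are
 * simplified, non-atomic stand-ins for the <linux/bitops.h>
 * primitives, and SKETCH_IDX_MAX stands in for X86_PMC_IDX_MAX.
 */
#include <stdio.h>
#include <limits.h>

#define SKETCH_IDX_MAX		64
#define BITS_PER_LONG		(sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct sketch_cpu_hw_counters {
	unsigned long	used[BITS_TO_LONGS(SKETCH_IDX_MAX)];
	unsigned long	active[BITS_TO_LONGS(SKETCH_IDX_MAX)];
};

static void sketch_set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static void sketch_clear_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

static int sketch_test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	struct sketch_cpu_hw_counters cpuc = { { 0 } };
	int idx;

	/* enabling a counter marks its bit, as in the enable path above */
	sketch_set_bit(2, cpuc.active);

	/* walks skip counters whose active bit is not set, as in the
	 * MSR_K7_EVNTSEL0 loops above */
	for (idx = 0; idx < SKETCH_IDX_MAX; idx++) {
		if (!sketch_test_bit(idx, cpuc.active))
			continue;
		printf("counter %d is active\n", idx);
	}

	/* disabling a counter clears its bit again */
	sketch_clear_bit(2, cpuc.active);
	return 0;
}

In the kernel these helpers are the real bitops, and the MSR_K7_EVNTSEL0 loops in the hunks above use the same test_bit() check to skip counters whose active bit is not set.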