/*
 * NOTE(review): this chunk arrived as one collapsed line with a code
 * viewer's line numbers fused into the text (1006..1042, 1302..1312) and a
 * stray trailing '|'.  Reformatted to kernel style and the viewer residue
 * dropped; all code tokens are otherwise unchanged.
 *
 * The span below (orig. lines 1006-1042) is the TAIL of an enclosing
 * function that is not fully visible here -- presumably the counter-slot
 * assignment path of x86_pmu_enable (TODO: confirm against the full file).
 * It is left as a fragment on purpose; do not treat it as self-contained.
 */
		goto try_generic;

	/*
	 * Fixed-purpose counter path: all fixed counters share one control
	 * MSR (FIXED_CTR_CTRL); counter_base is biased so that
	 * counter_base + idx addresses MSR_ARCH_PERFMON_FIXED_CTR0..CTR2.
	 */
	hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
	/*
	 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
	 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
	 */
	hwc->counter_base =
		MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	hwc->idx = idx;
	} else {
		/* Generic-counter path. */
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			/*
			 * Previous slot is taken (or we were sent here from
			 * the fixed path): claim the first free generic
			 * counter, or give up with -EAGAIN if all are busy.
			 */
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	/* Make sure the local APIC's PMU vector is set up (NMI delivery). */
	perf_counters_lapic_init();

	/*
	 * Quiesce the counter before installing it, then program its
	 * period and enable it in its new slot.
	 */
	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

/*
 * perf_counters_lapic_init - route PMU interrupts through the local APIC.
 *
 * Writes NMI delivery mode into the APIC's performance-counter LVT entry,
 * so counter overflow interrupts are delivered as NMIs (and thus fire even
 * in IRQ-disabled regions).  No-op when no PMU has been initialized.
 */
void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}