Robert,
I have applied this patch to my tree.
Thanks.
On Mon, Jul 02, 2007 at 05:58:06PM +0200, Robert Richter wrote:
> Signed-off-by: Robert Richter <[EMAIL PROTECTED]>
>
> ---
> arch/x86_64/perfmon/perfmon_k8.c | 68 +++++++++++++++++++--------------------
> 1 file changed, 34 insertions(+), 34 deletions(-)
>
> Index: linux-2.6.22-rc4/arch/x86_64/perfmon/perfmon_k8.c
> ===================================================================
> --- linux-2.6.22-rc4.orig/arch/x86_64/perfmon/perfmon_k8.c
> +++ linux-2.6.22-rc4/arch/x86_64/perfmon/perfmon_k8.c
> @@ -32,7 +32,7 @@ static int force_nmi;
> MODULE_PARM_DESC(force_nmi, "bool: force use of NMI for PMU interrupt");
> module_param(force_nmi, bool, 0600);
>
> -static struct pfm_arch_pmu_info pfm_k8_pmu_info = {
> +static struct pfm_arch_pmu_info pfm_amd64_pmu_info = {
> .pmc_addrs = {
> /* pmc0 */ {{MSR_K7_EVNTSEL0, 0}, 0, PFM_REGT_EN},
> /* pmc1 */ {{MSR_K7_EVNTSEL1, 0}, 1, PFM_REGT_EN},
> @@ -65,26 +65,26 @@ static struct pfm_arch_pmu_info pfm_k8_p
> | (1ULL<<20) \
> | (1ULL<<21))
>
> -static struct pfm_regmap_desc pfm_k8_pmc_desc[] = {
> +static struct pfm_regmap_desc pfm_amd64_pmc_desc[] = {
> /* pmc0 */ PMC_D(PFM_REG_I64, "PERFSEL0", PFM_K8_VAL, PFM_K8_RSVD,
> PFM_K8_NO64, MSR_K7_EVNTSEL0),
> /* pmc1 */ PMC_D(PFM_REG_I64, "PERFSEL1", PFM_K8_VAL, PFM_K8_RSVD,
> PFM_K8_NO64, MSR_K7_EVNTSEL1),
> /* pmc2 */ PMC_D(PFM_REG_I64, "PERFSEL2", PFM_K8_VAL, PFM_K8_RSVD,
> PFM_K8_NO64, MSR_K7_EVNTSEL2),
> /* pmc3 */ PMC_D(PFM_REG_I64, "PERFSEL3", PFM_K8_VAL, PFM_K8_RSVD,
> PFM_K8_NO64, MSR_K7_EVNTSEL3),
> };
> -#define PFM_AMD_NUM_PMCS ARRAY_SIZE(pfm_k8_pmc_desc)
> +#define PFM_AMD_NUM_PMCS ARRAY_SIZE(pfm_amd64_pmc_desc)
>
> -static struct pfm_regmap_desc pfm_k8_pmd_desc[] = {
> +static struct pfm_regmap_desc pfm_amd64_pmd_desc[] = {
> /* pmd0 */ PMD_D(PFM_REG_C, "PERFCTR0", MSR_K7_PERFCTR0),
> /* pmd1 */ PMD_D(PFM_REG_C, "PERFCTR1", MSR_K7_PERFCTR1),
> /* pmd2 */ PMD_D(PFM_REG_C, "PERFCTR2", MSR_K7_PERFCTR2),
> /* pmd3 */ PMD_D(PFM_REG_C, "PERFCTR3", MSR_K7_PERFCTR3)
> };
> -#define PFM_AMD_NUM_PMDS ARRAY_SIZE(pfm_k8_pmd_desc)
> +#define PFM_AMD_NUM_PMDS ARRAY_SIZE(pfm_amd64_pmd_desc)
>
> static struct pfm_context **pfm_nb_sys_owners;
> static struct pfm_context *pfm_nb_task_owner;
>
> -static struct pfm_pmu_config pfm_k8_pmu_conf;
> +static struct pfm_pmu_config pfm_amd64_pmu_conf;
>
> /*
> * There can only be one user per socket for the Northbridge (NB) events,
> @@ -103,7 +103,7 @@ static struct pfm_pmu_config pfm_k8_pmu_
> * 0 : successfully acquire NB access
> * < 0: errno, failed to acquire NB access
> */
> -static int pfm_k8_acquire_nb(struct pfm_context *ctx)
> +static int pfm_amd64_acquire_nb(struct pfm_context *ctx)
> {
> struct pfm_context **entry, *old;
> int proc_id;
> @@ -120,7 +120,7 @@ static int pfm_k8_acquire_nb(struct pfm_
> entry = &pfm_nb_task_owner;
>
> old = cmpxchg(entry, NULL, ctx);
> - if (!old) {
> + if (!old) {
> if (ctx->flags.system)
> PFM_DBG("acquired Northbridge event access on socket
> %u", proc_id);
> else
> @@ -141,7 +141,7 @@ static int pfm_k8_acquire_nb(struct pfm_
> *
> * context is locked, interrupts are masked
> */
> -static int pfm_k8_pmc_write_check(struct pfm_context *ctx,
> +static int pfm_amd64_pmc_write_check(struct pfm_context *ctx,
> struct pfm_event_set *set,
> struct pfarg_pmc *req)
> {
> @@ -159,14 +159,14 @@ static int pfm_k8_pmc_write_check(struct
> if (event < 0xee)
> return 0;
>
> - return pfm_k8_acquire_nb(ctx);
> + return pfm_amd64_acquire_nb(ctx);
> }
>
> /*
> * invoked on pfm_load_context().
> * context is locked, interrupts are masked
> */
> -static int pfm_k8_load_context(struct pfm_context *ctx)
> +static int pfm_amd64_load_context(struct pfm_context *ctx)
> {
> struct pfm_event_set *set;
> unsigned int i, n;
> @@ -176,7 +176,7 @@ static int pfm_k8_load_context(struct pf
> */
> list_for_each_entry(set, &ctx->list, list) {
> n = set->nused_pmcs;
> - for(i=0; n; i++) {
> + for (i = 0; n; i++) {
> if (!test_bit(i, cast_ulp(set->used_pmcs)))
> continue;
> if ((set->pmcs[i] & 0xff) >= 0xee)
> @@ -186,13 +186,13 @@ static int pfm_k8_load_context(struct pf
> }
> return 0;
> found:
> - return pfm_k8_acquire_nb(ctx);
> + return pfm_amd64_acquire_nb(ctx);
> }
>
> /*
> * invoked on pfm_unload_context()
> */
> -static int pfm_k8_unload_context(struct pfm_context *ctx)
> +static int pfm_amd64_unload_context(struct pfm_context *ctx)
> {
> struct pfm_context **entry, *old;
> int proc_id;
> @@ -224,7 +224,7 @@ static int pfm_k8_unload_context(struct
> /*
> * detect if we need to active NorthBridge event access control
> */
> -static int pfm_k8_setup_nb_event_control(void)
> +static int pfm_amd64_setup_nb_event_control(void)
> {
> unsigned int c, n = 0;
> unsigned int max_phys = 0;
> @@ -256,21 +256,21 @@ static int pfm_k8_setup_nb_event_control
> /*
> * activate write-checker for PMC registers
> */
> - for(c=0; c < PFM_AMD_NUM_PMCS; c++) {
> - pfm_k8_pmc_desc[c].type |= PFM_REG_WC;
> + for (c = 0; c < PFM_AMD_NUM_PMCS; c++) {
> + pfm_amd64_pmc_desc[c].type |= PFM_REG_WC;
> }
>
> - pfm_k8_pmu_info.load_context = pfm_k8_load_context;
> - pfm_k8_pmu_info.unload_context = pfm_k8_unload_context;
> + pfm_amd64_pmu_info.load_context = pfm_amd64_load_context;
> + pfm_amd64_pmu_info.unload_context = pfm_amd64_unload_context;
>
> - pfm_k8_pmu_conf.pmc_write_check = pfm_k8_pmc_write_check;
> + pfm_amd64_pmu_conf.pmc_write_check = pfm_amd64_pmc_write_check;
>
> PFM_INFO("NorthBridge event access control enabled");
>
> return 0;
> }
>
> -static int pfm_k8_probe_pmu(void)
> +static int pfm_amd64_probe_pmu(void)
> {
> if (current_cpu_data.x86_vendor != X86_VENDOR_AMD) {
> PFM_INFO("not an AMD processor");
> @@ -296,7 +296,7 @@ static int pfm_k8_probe_pmu(void)
> }
>
> if (current_cpu_data.x86_max_cores > 1)
> - return pfm_k8_setup_nb_event_control();
> + return pfm_amd64_setup_nb_event_control();
>
> PFM_INFO("nmi_watchdog=%d nmi_active=%d force_nmi=%d",
> nmi_watchdog, atomic_read(&nmi_active), force_nmi);
> @@ -305,37 +305,37 @@ static int pfm_k8_probe_pmu(void)
> * Actual removal of NMI counter is done by pfm_pmu_acquire()
> */
> if (nmi_watchdog == NMI_LOCAL_APIC || force_nmi)
> - pfm_k8_pmu_info.flags |= PFM_X86_FL_USE_NMI;
> + pfm_amd64_pmu_info.flags |= PFM_X86_FL_USE_NMI;
>
> return 0;
> }
>
> -static struct pfm_pmu_config pfm_k8_pmu_conf = {
> +static struct pfm_pmu_config pfm_amd64_pmu_conf = {
> .pmu_name = "AMD64",
> .counter_width = 47,
> - .pmd_desc = pfm_k8_pmd_desc,
> - .pmc_desc = pfm_k8_pmc_desc,
> + .pmd_desc = pfm_amd64_pmd_desc,
> + .pmc_desc = pfm_amd64_pmc_desc,
> .num_pmc_entries = PFM_AMD_NUM_PMCS,
> .num_pmd_entries = PFM_AMD_NUM_PMDS,
> - .probe_pmu = pfm_k8_probe_pmu,
> + .probe_pmu = pfm_amd64_probe_pmu,
> .version = "1.1",
> - .arch_info = &pfm_k8_pmu_info,
> + .arch_info = &pfm_amd64_pmu_info,
> .flags = PFM_PMU_BUILTIN_FLAG,
> .owner = THIS_MODULE
> };
>
> -static int __init pfm_k8_pmu_init_module(void)
> +static int __init pfm_amd64_pmu_init_module(void)
> {
> - return pfm_pmu_register(&pfm_k8_pmu_conf);
> + return pfm_pmu_register(&pfm_amd64_pmu_conf);
> }
>
> -static void __exit pfm_k8_pmu_cleanup_module(void)
> +static void __exit pfm_amd64_pmu_cleanup_module(void)
> {
> if (pfm_nb_sys_owners)
> vfree(pfm_nb_sys_owners);
>
> - pfm_pmu_unregister(&pfm_k8_pmu_conf);
> + pfm_pmu_unregister(&pfm_amd64_pmu_conf);
> }
>
> -module_init(pfm_k8_pmu_init_module);
> -module_exit(pfm_k8_pmu_cleanup_module);
> +module_init(pfm_amd64_pmu_init_module);
> +module_exit(pfm_amd64_pmu_cleanup_module);
>
> --
> AMD Saxony, Dresden, Germany
> Operating System Research Center
> email: [EMAIL PROTECTED]
>
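A note for the archives, now that the file is consistently named: the NorthBridge access control in this module boils down to a lock-free ownership claim. An event select with (value & 0xff) >= 0xee touches the NB counters, which are shared by all cores on a socket, so only one context per socket may program them; pfm_amd64_acquire_nb() claims a per-socket owner slot with cmpxchg(entry, NULL, ctx), and pfm_amd64_unload_context() returns it with cmpxchg(entry, ctx, NULL). Below is a minimal user-space sketch of that protocol (hypothetical names, GCC's __sync_val_compare_and_swap() standing in for the kernel's cmpxchg(); the real code also keeps a separate global slot, pfm_nb_task_owner, for per-thread contexts):

#include <stdio.h>
#include <stddef.h>

#define NR_SOCKETS 4

/* One owner slot per socket; NULL means the NB events are free. */
static void *nb_owner[NR_SOCKETS];

/* NB events occupy the top of the event select range. */
static int is_nb_event(unsigned long long evtsel)
{
	return (evtsel & 0xff) >= 0xee;
}

/*
 * Claim NB event access for 'ctx' on 'socket'.
 * Returns 0 on success (slot was free, or we already own it),
 * -1 if another context owns the NB events.
 */
static int nb_acquire(void *ctx, int socket)
{
	void *old = __sync_val_compare_and_swap(&nb_owner[socket], NULL, ctx);
	if (old == NULL || old == ctx)
		return 0;
	return -1;
}

/* Release the slot, but only if 'ctx' is the current owner. */
static void nb_release(void *ctx, int socket)
{
	__sync_val_compare_and_swap(&nb_owner[socket], ctx, NULL);
}

int main(void)
{
	int a, b;

	if (is_nb_event(0xee))
		printf("0xee is an NB event\n");

	printf("ctx a claims socket 0: %d\n", nb_acquire(&a, 0)); /* 0  */
	printf("ctx b claims socket 0: %d\n", nb_acquire(&b, 0)); /* -1 */
	nb_release(&a, 0);
	printf("ctx b retries:         %d\n", nb_acquire(&b, 0)); /* 0  */
	return 0;
}

Since both claim and release are a single atomic operation, no lock is needed around the owner table.
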
--
-Stephane
_______________________________________________
perfmon mailing list
[email protected]
http://www.hpl.hp.com/hosted/linux/mail-archives/perfmon/