Robert,

I have applied this patch to my tree.

On Mon, Jul 02, 2007 at 05:58:05PM +0200, Robert Richter wrote:
> Signed-off-by: Robert Richter <[EMAIL PROTECTED]>
>
> ---
> arch/i386/perfmon/perfmon.c | 23 ++++++++++++-----------
> arch/x86_64/perfmon/perfmon_k8.c | 14 ++++----------
> include/asm-i386/msr-index.h | 2 +-
> include/asm-i386/perfmon.h | 17 ++++++++---------
> perfmon/perfmon_intr.c | 22 +++++++++++-----------
> perfmon/perfmon_syscalls.c | 4 ++--
> 6 files changed, 38 insertions(+), 44 deletions(-)
>
> Index: linux-2.6.22-rc4/arch/i386/perfmon/perfmon.c
> ===================================================================
> --- linux-2.6.22-rc4.orig/arch/i386/perfmon/perfmon.c
> +++ linux-2.6.22-rc4/arch/i386/perfmon/perfmon.c
> @@ -119,8 +119,9 @@ void __pfm_read_reg_p4(const struct pfm_
> */
> *val &= PFM_ESCR_RSVD;
> }
> - } else
> - *val = 0;
> + } else {
> + *val = 0;
> + }
> }
>
> /*
> @@ -177,7 +178,7 @@ void pfm_arch_intr_unfreeze_pmu(struct p
> * must reactivate monitoring
> */
> void pfm_arch_ctxswin_thread(struct task_struct *task, struct pfm_context *ctx,
> - struct pfm_event_set *set)
> + struct pfm_event_set *set)
> {
> struct pfm_arch_context *ctx_arch;
>
> @@ -266,11 +267,11 @@ static int pfm_stop_save_p6(struct pfm_c
> static int pfm_stop_save_amd64(struct pfm_context *ctx,
> struct pfm_event_set *set)
> {
> - return pfm_stop_save_p6(ctx, set);
> + return pfm_stop_save_p6(ctx, set);
> }
>
> static int pfm_stop_save_intel_core(struct pfm_context *ctx,
> - struct pfm_event_set *set)
> + struct pfm_event_set *set)
> {
> struct pfm_arch_pmu_info *arch_info = pfm_pmu_conf->arch_info;
> struct pfm_arch_context *ctx_arch;
> @@ -363,7 +364,7 @@ static int pfm_stop_save_intel_core(stru
>
> if (ds->pebs_index >= ds->pebs_intr_thres
> && test_bit(arch_info->pebs_ctr_idx,
> - cast_ulp(set->used_pmds))) {
> + cast_ulp(set->used_pmds))) {
> __set_bit(arch_info->pebs_ctr_idx,
> cast_ulp(set->povfl_pmds));
> set->npend_ovfls++;
> @@ -529,7 +530,7 @@ void pfm_arch_stop(struct task_struct *t
> * nothing to do here
> */
> if (task == current)
> - pfm_stop_save(ctx, set);
> + pfm_stop_save(ctx, set);
> }
>
> /*
> @@ -595,7 +596,7 @@ void pfm_arch_start(struct task_struct *
> * cannot restore PMC if no access to PMU. Will be done
> * when the thread is switched back in
> */
> - if (task != current)
> + if (task != current)
> return;
>
> ctx_arch = pfm_ctx_arch(ctx);
> @@ -1179,7 +1180,7 @@ static void pfm_arch_pmu_release_percpu(
>
>
> /*
> - * called from pfm_acquire_pmu() with
> + * called from pfm_pmu_acquire() with
> * pfm_pmu_conf.regs copied from pfm_pmu_conf.full_regs
> * needs to adjust regs to match current PMU availabilityy
> *
> @@ -1228,8 +1229,8 @@ int pfm_arch_pmu_acquire(void)
> __clear_bit(i, cast_ulp(pfm_pmu_conf->regs.pmcs));
> } else {
> if (pc[i].reg_type & PFM_REGT_EN) {
> - __set_bit(i, cast_ulp(arch_info->enable_mask));
> - ena++;
> + __set_bit(i, cast_ulp(arch_info->enable_mask));
> + ena++;
> arch_info->max_ena = i + 1;
> }
> }
> Index: linux-2.6.22-rc4/arch/x86_64/perfmon/perfmon_k8.c
> ===================================================================
> --- linux-2.6.22-rc4.orig/arch/x86_64/perfmon/perfmon_k8.c
> +++ linux-2.6.22-rc4/arch/x86_64/perfmon/perfmon_k8.c
> @@ -65,13 +65,7 @@ static struct pfm_arch_pmu_info pfm_k8_p
> | (1ULL<<20) \
> | (1ULL<<21))
>
> -/*
> - * force Local APIC interrupt on overflow
> - */
> -#define PFM_K8_VAL (1ULL<<20)
> -#define PFM_K8_NO64 (1ULL<<20)
> -
> -static struct pfm_regmap_desc pfm_k8_pmc_desc[]={
> +static struct pfm_regmap_desc pfm_k8_pmc_desc[] = {
> /* pmc0 */ PMC_D(PFM_REG_I64, "PERFSEL0", PFM_K8_VAL, PFM_K8_RSVD, PFM_K8_NO64, MSR_K7_EVNTSEL0),
> /* pmc1 */ PMC_D(PFM_REG_I64, "PERFSEL1", PFM_K8_VAL, PFM_K8_RSVD, PFM_K8_NO64, MSR_K7_EVNTSEL1),
> /* pmc2 */ PMC_D(PFM_REG_I64, "PERFSEL2", PFM_K8_VAL, PFM_K8_RSVD, PFM_K8_NO64, MSR_K7_EVNTSEL2),
> @@ -125,7 +119,7 @@ static int pfm_k8_acquire_nb(struct pfm_
> else
> entry = &pfm_nb_task_owner;
>
> - old = cmpxchg(entry, NULL, ctx);
> + old = cmpxchg(entry, NULL, ctx);
> if (!old) {
> if (ctx->flags.system)
> PFM_DBG("acquired Northbridge event access on socket
> %u", proc_id);
> @@ -217,7 +211,7 @@ static int pfm_k8_unload_context(struct
> else
> entry = &pfm_nb_task_owner;
>
> - old = cmpxchg(entry, ctx, NULL);
> + old = cmpxchg(entry, ctx, NULL);
> if (old == ctx) {
> if (ctx->flags.system)
> PFM_DBG("released NorthBridge on socket %u", proc_id);
> @@ -329,7 +323,7 @@ static struct pfm_pmu_config pfm_k8_pmu_
> .flags = PFM_PMU_BUILTIN_FLAG,
> .owner = THIS_MODULE
> };
> -
> +
> static int __init pfm_k8_pmu_init_module(void)
> {
> return pfm_pmu_register(&pfm_k8_pmu_conf);
> Index: linux-2.6.22-rc4/include/asm-i386/msr-index.h
> ===================================================================
> --- linux-2.6.22-rc4.orig/include/asm-i386/msr-index.h
> +++ linux-2.6.22-rc4/include/asm-i386/msr-index.h
> @@ -86,7 +86,7 @@
> #define MSR_AMD64_IBSOPDATA3 0xc0011037
> #define MSR_AMD64_IBSDCLINAD 0xc0011038
> #define MSR_AMD64_IBSDCPHYSAD 0xc0011039
> -#define MSR_AMD64_IBSCTL 0xc001103A
> +#define MSR_AMD64_IBSCTL 0xc001103a
>
> /* K8 MSRs */
> #define MSR_K8_TOP_MEM1 0xc001001a
> Index: linux-2.6.22-rc4/include/asm-i386/perfmon.h
> ===================================================================
> --- linux-2.6.22-rc4.orig/include/asm-i386/perfmon.h
> +++ linux-2.6.22-rc4/include/asm-i386/perfmon.h
> @@ -178,7 +178,6 @@ static inline u64 pfm_arch_read_pmd(stru
> {
> struct pfm_arch_pmu_info *arch_info = pfm_pmu_conf->arch_info;
> u64 tmp;
> -
> if (arch_info->pmu_style == PFM_X86_PMU_P4)
> __pfm_read_reg_p4(&arch_info->pmd_addrs[cnum], &tmp);
> else
> @@ -197,7 +196,7 @@ static inline u64 pfm_arch_read_pmc(stru
> __pfm_read_reg_p4(&arch_info->pmc_addrs[cnum], &tmp);
> else
> rdmsrl(pfm_pmu_conf->pmc_desc[cnum].hw_addr, tmp);
> - PFM_DBG_ovfl("pfm_arch_read_pmc(0x%016Lx) = 0x%016Lx",
> + PFM_DBG_ovfl("pfm_arch_read_pmc(0x%016Lx) = 0x%016Lx",
> (unsigned long long) pfm_pmu_conf->pmc_desc[cnum].hw_addr,
> (unsigned long long) tmp);
> return tmp;
> @@ -218,12 +217,12 @@ static inline int pfm_arch_is_active(str
> }
>
> static inline void pfm_arch_ctxswout_sys(struct task_struct *task,
> - struct pfm_context *ctx,
> + struct pfm_context *ctx,
> struct pfm_event_set *set)
> {}
>
> static inline void pfm_arch_ctxswin_sys(struct task_struct *task,
> - struct pfm_context *ctx,
> + struct pfm_context *ctx,
> struct pfm_event_set *set)
> {}
>
> @@ -236,11 +235,11 @@ static inline void pfm_arch_init_percpu(
> {}
>
> int pfm_arch_ctxswout_thread(struct task_struct *task,
> - struct pfm_context *ctx,
> + struct pfm_context *ctx,
> struct pfm_event_set *set);
>
> void pfm_arch_ctxswin_thread(struct task_struct *task,
> - struct pfm_context *ctx,
> + struct pfm_context *ctx,
> struct pfm_event_set *set);
>
> void pfm_arch_stop(struct task_struct *task,
> @@ -256,7 +255,7 @@ void pfm_arch_pmu_config_remove(void);
> char *pfm_arch_get_pmu_module_name(void);
>
> static inline int pfm_arch_unload_context(struct pfm_context *ctx,
> - struct task_struct *task)
> + struct task_struct *task)
> {
> struct pfm_arch_pmu_info *arch_info;
> int ret = 0;
> @@ -269,8 +268,8 @@ static inline int pfm_arch_unload_contex
> }
>
> static inline int pfm_arch_load_context(struct pfm_context *ctx,
> - struct pfm_event_set *set,
> - struct task_struct *task)
> + struct pfm_event_set *set,
> + struct task_struct *task)
> {
> struct pfm_arch_pmu_info *arch_info;
> int ret = 0;
> Index: linux-2.6.22-rc4/perfmon/perfmon_intr.c
> ===================================================================
> --- linux-2.6.22-rc4.orig/perfmon/perfmon_intr.c
> +++ linux-2.6.22-rc4/perfmon/perfmon_intr.c
> @@ -184,14 +184,14 @@ static void pfm_overflow_handler(struct
> max_pmd);
>
> } else {
> - PFM_DBG_ovfl("Hardware counter overflow of
> PMD%d=0x%04Lx",
> - i,
> - (unsigned long long)new_val);
> + PFM_DBG_ovfl("Hardware counter overflow of
> PMD%d=0x%04Lx",
> + i,
> + (unsigned long long)new_val);
> /* only keep track of 64-bit overflows */
> __clear_bit(i, cast_ulp(pend_ovfls));
> /*
> - * on some PMU, it may be necessary to re-arm the PMD
> - */
> + * on some PMU, it may be necessary to re-arm the PMD
> + */
> pfm_arch_ovfl_reset_pmd(ctx, i);
> }
>
> @@ -278,10 +278,10 @@ static void pfm_overflow_handler(struct
> ovfl_arg->pmd_eventid = set->pmds[i].eventid;
>
> /*
> - * copy values of pmds of interest.
> + * copy values of pmds of interest.
> * Sampling format may use them
> * We do not initialize the unused smpl_pmds_values
> - */
> + */
> k = 0;
> smpl_pmds = set->pmds[i].smpl_pmds;
> if (!bitmap_empty(cast_ulp(smpl_pmds), max_pmd)) {
> @@ -313,8 +313,8 @@ static void pfm_overflow_handler(struct
> start_cycles = sched_clock();
>
> /*
> - * call custom buffer format record (handler) routine
> - */
> + * call custom buffer format record (handler) routine
> + */
> ret = (*ctx->smpl_fmt->fmt_handler)(hdr,
> ovfl_arg,
> ip,
> @@ -386,8 +386,8 @@ static void pfm_overflow_handler(struct
> pfm_switch_sets_from_intr(ctx);
>
> /*
> - * update our view of the active set
> - */
> + * update our view of the active set
> + */
> set = ctx->active_set;
>
> must_switch = 0;
> Index: linux-2.6.22-rc4/perfmon/perfmon_syscalls.c
> ===================================================================
> --- linux-2.6.22-rc4.orig/perfmon/perfmon_syscalls.c
> +++ linux-2.6.22-rc4/perfmon/perfmon_syscalls.c
> @@ -34,7 +34,7 @@
> * along with this program; if not, write to the Free Software
> * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
> * 02111-1307 USA
> - */
> + */
> #include <linux/kernel.h>
> #include <linux/perfmon.h>
> #include <linux/fs.h>
> @@ -448,7 +448,7 @@ asmlinkage long sys_pfm_write_pmcs(int f
> int ret, fput_needed;
>
> if (count < 0 || count >= PFM_MAX_ARG_COUNT(ureq)) {
> - PFM_DBG("invalid arg count %d", count);
> + PFM_DBG("invalid arg count %d", count);
> return -EINVAL;
> }
>
>
> --
> AMD Saxony, Dresden, Germany
> Operating System Research Center
> email: [EMAIL PROTECTED]
>
--
-Stephane
_______________________________________________
perfmon mailing list
[email protected]
http://www.hpl.hp.com/hosted/linux/mail-archives/perfmon/