Hi Stephane,
Attached is a patch for PPC32. You should apply this to an already
patched kernel.
This fixes the 32-bit build of PPC32. I have successfully booted the
patched 2.6.19.1 kernel on an iBook (however I am unable to get past
mounting of a root fs due to LVM2/MD/initrd issues), but everything does
work up until that point.
BTW, if you've ever used yaboot and the build process on PPC32, it
sucks. ;-)
Phil
diff -rbc linux-2.6.19.1.orig/arch/powerpc/perfmon/Kconfig linux-2.6.19.1/arch/powerpc/perfmon/Kconfig
*** linux-2.6.19.1.orig/arch/powerpc/perfmon/Kconfig 2006-12-23 11:12:48.000000000 -0500
--- linux-2.6.19.1/arch/powerpc/perfmon/Kconfig 2006-12-23 11:11:13.000000000 -0500
***************
*** 16,22 ****
config PERFMON_POWER5
tristate "Support for Power5 hardware performance counters"
! depends on PERFMON
default n
help
Enables support for the Power 5 hardware performance counters
--- 16,22 ----
config PERFMON_POWER5
tristate "Support for Power5 hardware performance counters"
! depends on PERFMON && PPC64
default n
help
Enables support for the Power 5 hardware performance counters
***************
*** 24,30 ****
config PERFMON_PPC32
tristate "Support for PPC32 hardware performance counters"
! depends on PERFMON
default n
help
Enables support for the PPC32 hardware performance counters
--- 24,30 ----
config PERFMON_PPC32
tristate "Support for PPC32 hardware performance counters"
! depends on PERFMON && PPC32
default n
help
Enables support for the PPC32 hardware performance counters
diff -rbc linux-2.6.19.1.orig/arch/powerpc/perfmon/perfmon.c linux-2.6.19.1/arch/powerpc/perfmon/perfmon.c
*** linux-2.6.19.1.orig/arch/powerpc/perfmon/perfmon.c 2006-12-23 11:12:48.000000000 -0500
--- linux-2.6.19.1/arch/powerpc/perfmon/perfmon.c 2006-12-23 00:07:08.000000000 -0500
***************
*** 34,55 ****
struct pfm_event_set *set)
{
u64 new_val, wmask;
! unsigned long *used_mask;
unsigned int i, max;
max = pfm_pmu_conf->max_cnt_pmd;
used_mask = set->used_pmds;
wmask = 1ULL << pfm_pmu_conf->counter_width;
for (i = 0; i < max; i++) {
/* assume all PMD are counters */
! if (pfm_bv_isset(used_mask, i)) {
new_val = pfm_arch_read_pmd(ctx, i);
! PFM_DBG_ovfl("pmd%u new_val=0x%lx bit=%d",
! i, new_val, (new_val&wmask) ? 1 : 0);
! if (!(new_val & wmask)) {
pfm_bv_set(set->povfl_pmds, i);
set->npend_ovfls++;
}
--- 34,59 ----
struct pfm_event_set *set)
{
u64 new_val, wmask;
! u64 *used_mask, *cnt_pmds;
! u64 mask[PFM_PMD_BV];
unsigned int i, max;
max = pfm_pmu_conf->max_cnt_pmd;
+ cnt_pmds = pfm_pmu_conf->cnt_pmds;
used_mask = set->used_pmds;
wmask = 1ULL << pfm_pmu_conf->counter_width;
+ bitmap_and((unsigned long *)mask,(unsigned long *)cnt_pmds,(unsigned long *)used_mask,max);
for (i = 0; i < max; i++) {
/* assume all PMD are counters */
! if (pfm_bv_isset(mask, i)) {
new_val = pfm_arch_read_pmd(ctx, i);
! PFM_DBG_ovfl("pmd%u new_val=0x%llx bit=%d\n",
! i, (unsigned long long)new_val,
! (new_val&wmask) ? 1 : 0);
! if (new_val & wmask) {
pfm_bv_set(set->povfl_pmds, i);
set->npend_ovfls++;
}
***************
*** 101,122 ****
return 1;
}
- /*
- * Called from pfm_stop() and idle notifier
- *
- * Interrupts are masked. Context is locked. Set is the active set.
- *
- * For per-thread:
- * task is not necessarily current. If not current task, then
- * task is guaranteed stopped and off any cpu. Access to PMU
- * is not guaranteed. Interrupts are masked. Context is locked.
- * Set is the active set.
- *
- * For system-wide:
- * task is current
- *
- * must disable active monitoring.
- */
void pfm_arch_stop(struct task_struct *task, struct pfm_context *ctx,
struct pfm_event_set *set)
{
--- 105,110 ----
***************
*** 127,132 ****
--- 115,149 ----
pfm_stop_active(task, ctx, set);
}
+ /*
+ * function called from pfm_unload_context_*(). Context is locked.
+ * interrupts are masked. task is not guaranteed to be current task.
+ * Access to PMU is not guaranteed.
+ *
+ * function must do whatever arch-specific action is required on unload
+ * of a context.
+ *
+ * called for both system-wide and per-thread. task is NULL for system-wide
+ */
+ void pfm_arch_unload_context(struct pfm_context *ctx, struct task_struct *task)
+ {
+ }
+
+ /*
+ * called from pfm_start() or pfm_ctxsw() when idle task and
+ * EXCL_IDLE is on.
+ *
+ * Interrupts are masked. Context is locked. Set is the active set.
+ *
+ * For per-thread:
+ * Task is not necessarily current. If not current task, then task
+ * is guaranteed stopped and off any cpu. Access to PMU is not guaranteed.
+ *
+ * For system-wide:
+ * task is always current
+ *
+ * must enable active monitoring.
+ */
static void __pfm_arch_start(struct task_struct *task, struct pfm_context *ctx,
struct pfm_event_set *set)
{
***************
*** 143,163 ****
}
}
- /*
- * called from pfm_start() and idle notifier
- *
- * Interrupts are masked. Context is locked. Set is the active set.
- *
- * For per-thread:
- * Task is not necessarily current. If not current task, then task
- * is guaranteed stopped and off any cpu. No access to PMU is task
- * is not current.
- *
- * For system-wide:
- * task is always current
- *
- * must enable active monitoring.
- */
void pfm_arch_start(struct task_struct *task, struct pfm_context *ctx,
struct pfm_event_set *set)
{
--- 160,165 ----
***************
*** 245,274 ****
pfm_arch_write_pmc(ctx, i, set->pmcs[i]);
}
- #if 0
- asmlinkage void pfm_intr_handler(struct pt_regs *regs)
- {
- pfm_interrupt_handler(instruction_pointer(regs), regs);
- }
- #endif
-
- extern void ppc64_enable_pmcs(void);
-
- void pfm_arch_init_percpu(void)
- {
- #ifdef CONFIG_PPC64
- ppc64_enable_pmcs();
- #endif
- }
- /*
- * called from __pfm_interrupt_handler(). ctx is not NULL.
- * ctx is locked. PMU interrupt is masked.
- *
- * must stop all monitoring to ensure handler has consistent view.
- * must collect overflowed PMDs bitmask into povfls_pmds and
- * npend_ovfls. If no interrupt detected then npend_ovfls
- * must be set to zero.
- */
void pfm_arch_intr_freeze_pmu(struct pfm_context *ctx)
{
/*
--- 247,252 ----
***************
*** 287,293 ****
*/
void pfm_arch_intr_unfreeze_pmu(struct pfm_context *ctx)
{
! if (!ctx)
return;
pfm_arch_restore_pmcs(ctx, ctx->active_set);
}
--- 265,271 ----
*/
void pfm_arch_intr_unfreeze_pmu(struct pfm_context *ctx)
{
! if (ctx == NULL)
return;
pfm_arch_restore_pmcs(ctx, ctx->active_set);
}
***************
*** 295,301 ****
void pfm_arch_mask_monitoring(struct pfm_context *ctx)
{
/*
! * on ppc64 masking/unmasking uses start/stop
* mechanism
*/
pfm_arch_stop(current, ctx, ctx->active_set);
--- 273,279 ----
void pfm_arch_mask_monitoring(struct pfm_context *ctx)
{
/*
! * on powerpc masking/unmasking uses start/stop
* mechanism
*/
pfm_arch_stop(current, ctx, ctx->active_set);
***************
*** 304,324 ****
void pfm_arch_unmask_monitoring(struct pfm_context *ctx)
{
/*
! * on ppc64 masking/unmasking uses start/stop
* mechanism
*/
__pfm_arch_start(current, ctx, ctx->active_set);
}
! #if 0
! /*
! * invoked from arch/ppc64/kernel.entry.S
! */
! void ppc64_pfm_handle_work(void)
{
! pfm_handle_work();
! }
#endif
char *pfm_arch_get_pmu_module_name(void)
{
--- 282,301 ----
void pfm_arch_unmask_monitoring(struct pfm_context *ctx)
{
/*
! * on powerpc masking/unmasking uses start/stop
* mechanism
*/
__pfm_arch_start(current, ctx, ctx->active_set);
}
! void pfm_arch_init_percpu(void)
{
! #ifdef CONFIG_PPC64
! extern void ppc64_enable_pmcs(void);
!
! ppc64_enable_pmcs();
#endif
+ }
char *pfm_arch_get_pmu_module_name(void)
{
Only in linux-2.6.19.1/arch/powerpc/perfmon/: perfmon.c~
diff -rbc linux-2.6.19.1.orig/arch/powerpc/perfmon/perfmon_ppc32.c linux-2.6.19.1/arch/powerpc/perfmon/perfmon_ppc32.c
*** linux-2.6.19.1.orig/arch/powerpc/perfmon/perfmon_ppc32.c 2006-12-23 11:12:48.000000000 -0500
--- linux-2.6.19.1/arch/powerpc/perfmon/perfmon_ppc32.c 2006-12-23 00:13:38.000000000 -0500
***************
*** 27,32 ****
--- 27,33 ----
*/
#include <linux/module.h>
#include <linux/perfmon.h>
+ #include <asm/reg.h>
MODULE_AUTHOR("Philip Mucci <[EMAIL PROTECTED]>");
MODULE_DESCRIPTION("PPC32 PMU description table");
***************
*** 41,47 ****
static struct pfm_reg_desc pfm_ppc32_pmc_desc[]={
/* mmcr0 */ PMC_D(PFM_REG_I, "MMCR0", 0x0, 0, 0, SPRN_MMCR0),
/* mmcr1 */ PMC_D(PFM_REG_I, "MMCR1", 0x0, 0, 0, SPRN_MMCR1),
! /* mmcra */ PMC_D(PFM_REG_I, "MMCRA", 0x0, 0, 0, SPRN_MMCRA)
};
#define PFM_PM_NUM_PMCS (sizeof(pfm_ppc32_pmc_desc)/sizeof(struct pfm_reg_desc))
--- 42,48 ----
static struct pfm_reg_desc pfm_ppc32_pmc_desc[]={
/* mmcr0 */ PMC_D(PFM_REG_I, "MMCR0", 0x0, 0, 0, SPRN_MMCR0),
/* mmcr1 */ PMC_D(PFM_REG_I, "MMCR1", 0x0, 0, 0, SPRN_MMCR1),
! /* mmcr2 */ PMC_D(PFM_REG_I, "MMCR2", 0x0, 0, 0, SPRN_MMCR2)
};
#define PFM_PM_NUM_PMCS (sizeof(pfm_ppc32_pmc_desc)/sizeof(struct pfm_reg_desc))
***************
*** 78,84 ****
u32 mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
! mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCjCE | MMCR0_PMXE);
mtspr(SPRN_MMCR0, mmcr0);
}
--- 79,85 ----
u32 mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
! mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
mtspr(SPRN_MMCR0, mmcr0);
}
Only in linux-2.6.19.1/arch/powerpc/perfmon/: perfmon_ppc32.c~
diff -rbc linux-2.6.19.1.orig/include/asm-powerpc/perfmon.h linux-2.6.19.1/include/asm-powerpc/perfmon.h
*** linux-2.6.19.1.orig/include/asm-powerpc/perfmon.h 2006-12-23 11:12:48.000000000 -0500
--- linux-2.6.19.1/include/asm-powerpc/perfmon.h 2006-12-23 00:14:50.000000000 -0500
***************
*** 80,88 ****
--- 80,94 ----
case SPRN_MMCR1:
mtspr(SPRN_MMCR1, value);
break;
+ #ifdef CONFIG_PPC64
case SPRN_MMCRA:
mtspr(SPRN_MMCRA, value);
break;
+ #else
+ case SPRN_MMCR2:
+ mtspr(SPRN_MMCR2, value);
+ break;
+ #endif
default:
BUG();
}
***************
*** 95,102 ****
--- 101,113 ----
return mfspr(SPRN_MMCR0);
case SPRN_MMCR1:
return mfspr(SPRN_MMCR1);
+ #ifdef CONFIG_PPC64
case SPRN_MMCRA:
return mfspr(SPRN_MMCRA);
+ #else
+ case SPRN_MMCR2:
+ return mfspr(SPRN_MMCR2);
+ #endif
default:
BUG();
}
***************
*** 127,138 ****
--- 138,151 ----
case SPRN_PMC6:
mtspr(SPRN_PMC6, value);
break;
+ #ifdef CONFIG_PPC64
case SPRN_PMC7:
mtspr(SPRN_PMC7, value);
break;
case SPRN_PMC8:
mtspr(SPRN_PMC8, value);
break;
+ #endif
default:
BUG();
}
***************
*** 153,164 ****
--- 166,180 ----
return mfspr(SPRN_PMC5);
case SPRN_PMC6:
return mfspr(SPRN_PMC6);
+ #ifdef CONFIG_PPC64
case SPRN_PMC7:
return mfspr(SPRN_PMC7);
case SPRN_PMC8:
return mfspr(SPRN_PMC8);
+ #endif
default:
BUG();
+ return 0ULL;
}
}
***************
*** 171,179 ****
unsigned int cnum)
{
u64 val;
-
val = pfm_arch_read_pmd(ctx, cnum);
-
/* This masks out overflow bit 31 */
pfm_arch_write_pmd(ctx, cnum, val);
}
--- 187,193 ----
***************
*** 197,205 ****
static inline void pfm_arch_ctxswout_sys(struct task_struct *task,
struct pfm_context *ctx,
struct pfm_event_set *set)
! {
! mtspr(SPRN_MMCR0, MMCR0_FC);
! }
static inline void pfm_arch_ctxswin_sys(struct task_struct *task,
struct pfm_context *ctx, struct pfm_event_set *set)
--- 211,217 ----
static inline void pfm_arch_ctxswout_sys(struct task_struct *task,
struct pfm_context *ctx,
struct pfm_event_set *set)
! {}
static inline void pfm_arch_ctxswin_sys(struct task_struct *task,
struct pfm_context *ctx, struct pfm_event_set *set)
***************
*** 209,215 ****
struct pfm_context *ctx, struct pfm_event_set *set)
{}
- void pfm_arch_init_percpu(void);
int pfm_arch_is_monitoring_active(struct pfm_context *ctx);
int pfm_arch_ctxswout_thread(struct task_struct *task, struct pfm_context *ctx,
struct pfm_event_set *set);
--- 221,226 ----
***************
*** 219,226 ****
struct pfm_event_set *set);
void pfm_arch_restore_pmds(struct pfm_context *ctx, struct pfm_event_set *set);
void pfm_arch_restore_pmcs(struct pfm_context *ctx, struct pfm_event_set *set);
- int pfm_arch_get_ovfl_pmds(struct pfm_context *ctx,
- struct pfm_event_set *set);
void pfm_arch_intr_freeze_pmu(struct pfm_context *ctx);
void pfm_arch_intr_unfreeze_pmu(struct pfm_context *ctx);
char *pfm_arch_get_pmu_module_name(void);
--- 230,235 ----
***************
*** 241,248 ****
return 0;
}
! static inline void pfm_arch_unload_context(struct pfm_context *ctx, struct task_struct *task)
! {}
/*
* function called from pfm_setfl_sane(). Context is locked
--- 250,256 ----
return 0;
}
! void pfm_arch_unload_context(struct pfm_context *ctx, struct task_struct *task);
/*
* function called from pfm_setfl_sane(). Context is locked
***************
*** 260,270 ****
--- 268,308 ----
return 0;
}
+ static inline void pfm_arch_stop_thread(struct task_struct *task,
+ struct pfm_context *ctx,
+ struct pfm_event_set *set)
+ {
+ pfm_arch_stop(task, ctx, set);
+ }
+
+ static inline void pfm_arch_start_thread(struct task_struct *task,
+ struct pfm_context *ctx,
+ struct pfm_event_set *set)
+ {
+ pfm_arch_start(task, ctx, set);
+ }
+
+ static inline void pfm_arch_stop_sys(struct task_struct *task,
+ struct pfm_context *ctx,
+ struct pfm_event_set *set)
+ {
+ pfm_arch_stop(task, ctx, set);
+ }
+
+ static inline void pfm_arch_start_sys(struct task_struct *task,
+ struct pfm_context *ctx,
+ struct pfm_event_set *set)
+ {
+ pfm_arch_start(task, ctx, set);
+ }
+
static inline int pfm_arch_init(void)
{
return 0;
}
+ void pfm_arch_init_percpu(void);
+
static inline int pfm_arch_load_context(struct pfm_context *ctx,
struct task_struct *task)
{
Only in linux-2.6.19.1/include/asm-powerpc/: perfmon.h~
_______________________________________________
perfmon mailing list
[email protected]
http://www.hpl.hp.com/hosted/linux/mail-archives/perfmon/