Add optional model-specific restore_pmds() and restore_pmcs() entries to the
function table in the pfm_arch_pmu_info structure.

Cell benefits quite a bit from this, since it can combine the writes to all of
the per-counter "event" registers into a single call to the firmware. The other
powerpc models should be fine with the "default" method of restoring PMCs and
PMDs one at a time.
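
For reviewers skimming the description, the arch-independent side of this
reduces to a simple dispatch. A simplified sketch of the perfmon.c change
below (the default per-register fallback is elided):

    void pfm_arch_restore_pmcs(struct pfm_context *ctx, struct pfm_event_set *set)
    {
            struct pfm_arch_pmu_info *arch_info = pfm_pmu_conf->arch_info;

            /* Optional model-specific override, e.g. Cell's batched RTAS call. */
            if (arch_info->restore_pmcs)
                    return arch_info->restore_pmcs(set);

            /* ...otherwise fall through to the default per-PMC restore loop. */
    }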

Signed-off-by: Kevin Corry <[EMAIL PROTECTED]>
Signed-off-by: Carl Love <[EMAIL PROTECTED]>

Index: linux-2.6.21-perfmon1/arch/powerpc/perfmon/perfmon.c
===================================================================
--- linux-2.6.21-perfmon1.orig/arch/powerpc/perfmon/perfmon.c
+++ linux-2.6.21-perfmon1/arch/powerpc/perfmon/perfmon.c
@@ -182,35 +182,25 @@ void pfm_arch_start(struct task_struct *
  */
 void pfm_arch_restore_pmds(struct pfm_context *ctx, struct pfm_event_set *set)
 {
-       u64 ovfl_mask, val, *pmds;
-       u64 *impl_pmds;
-       unsigned int i;
-       unsigned int max_pmd;
+       struct pfm_arch_pmu_info *arch_info = pfm_pmu_conf->arch_info;
+       u64 *pmds, *used_pmds;
+       unsigned int i, max_pmd;
 
-       max_pmd = pfm_pmu_conf->max_pmd;
-       ovfl_mask = pfm_pmu_conf->ovfl_mask;
-       impl_pmds = pfm_pmu_conf->impl_pmds;
-       pmds = set->view->set_pmds;
-
-       /*
-        * must restore all pmds to avoid leaking
-        * information to user.
+       /* The model-specific module can override the default
+        * restore-PMD method.
         */
-       for (i = 0; i < max_pmd; i++) {
-
-               if (!test_bit(i, impl_pmds))
-                       continue;
-
-               val = pmds[i];
+       if (arch_info->restore_pmds) {
+               return arch_info->restore_pmds(set);
+       }
 
-               /*
-                * set upper bits for counter to ensure
-                * overflow will trigger
-                */
-               val &= ovfl_mask;
+       max_pmd = pfm_pmu_conf->max_pmd;
+       used_pmds = set->used_pmds;
+       pmds = set->view->set_pmds;
 
-               pfm_arch_write_pmd(ctx, i, val);
-       }
+       for (i = 0; i < max_pmd; i++)
+               if (test_bit(i, used_pmds) &&
+                   !(pfm_pmu_conf->pmd_desc[i].type & PFM_REG_RO))
+                       pfm_arch_write_pmd(ctx, i, pmds[i]);
 }
 
 /*
@@ -223,20 +213,28 @@ void pfm_arch_restore_pmds(struct pfm_co
  */
 void pfm_arch_restore_pmcs(struct pfm_context *ctx, struct pfm_event_set *set)
 {
+       struct pfm_arch_pmu_info *arch_info;
        u64 *impl_pmcs;
        unsigned int i, max_pmc;
 
-       max_pmc = pfm_pmu_conf->max_pmc;
-       impl_pmcs = pfm_pmu_conf->impl_pmcs;
+       /* The model-specific module can override the default
+        * restore-PMC method.
+        */
+       arch_info = pfm_pmu_conf->arch_info;
+       if (arch_info->restore_pmcs) {
+               return arch_info->restore_pmcs(set);
+       }
 
-       /*
-        * - by default no PMCS measures anything
-        * - on ctxswout, all used PMCs are disabled (cccr enable bit cleared)
-        * hence when masked we do not need to restore anything
+       /* The "common" powerpc models enable the counters simply by writing
+        * all the control registers. Therefore, if we're masked or stopped we
+        * don't need to bother restoring the PMCs now.
         */
        if (ctx->state == PFM_CTX_MASKED || ctx->flags.started == 0)
                return;
 
+       max_pmc = pfm_pmu_conf->max_pmc;
+       impl_pmcs = pfm_pmu_conf->impl_pmcs;
+
        /*
         * restore all pmcs
         */
Index: linux-2.6.21-perfmon1/arch/powerpc/perfmon/perfmon_cell.c
===================================================================
--- linux-2.6.21-perfmon1.orig/arch/powerpc/perfmon/perfmon_cell.c
+++ linux-2.6.21-perfmon1/arch/powerpc/perfmon/perfmon_cell.c
@@ -289,6 +289,64 @@ static void pfm_cell_disable_counters(st
        cbe_disable_pm(smp_processor_id());
 }
 
+/**
+ * pfm_cell_restore_pmcs
+ *
+ * Write all control register values that are saved in the specified event
+ * set. We could use the pfm_arch_write_pmc() function to restore each PMC
+ * individually (as is done in other architectures), but that results in
+ * multiple RTAS calls. As an optimization, we will set up the RTAS argument
+ * array so we can do all event-control registers in one RTAS call.
+ **/
+void pfm_cell_restore_pmcs(struct pfm_event_set *set)
+{
+       struct cell_rtas_arg signals[NR_CTRS];
+       u64 value, *used_pmcs = set->used_pmcs;
+       int i, rc, num_used = 0, cpu = smp_processor_id();
+
+       memset(signals, 0, sizeof(signals));
+
+       for (i = 0; i < NR_CTRS; i++) {
+               /* Write the per-counter control register. If the PMC is not
+                * in use, then it will simply clear the register, which will
+                * disable the associated counter.
+                */
+               cbe_write_pm07_control(cpu, i, set->pmcs[i]);
+
+               if (test_bit(i + NR_CTRS, used_pmcs)) {
+                       /* Set up the next RTAS array entry for this counter.
+                        * Only include pm07_event registers that are in use
+                        * by this set so the RTAS call doesn't have to
+                        * process blank array entries.
+                        */
+                       value = set->pmcs[i + NR_CTRS];
+                       signals[num_used].cpu = RTAS_CPU(cpu);
+                       signals[num_used].sub_unit = RTAS_SUB_UNIT(value);
+                       signals[num_used].bus_word = 1 << RTAS_BUS_WORD(value);
+                       signals[num_used].bit = RTAS_SIGNAL_NUMBER(value) % 100;
+                       signals[num_used].signal_group =
+                                               RTAS_SIGNAL_NUMBER(value) / 100;
+                       num_used++;
+               }
+       }
+
+       rc = rtas_activate_signals(signals, num_used);
+       if (rc) {
+               PFM_WARN("Error calling rtas_activate_signals(): %d\n", rc);
+               /* FIX: We will also need this routine to be able to return
+                * an error if Stephane agrees to change pfm_arch_write_pmc
+                * to return an error.
+                */
+       }
+
+       /* Write all the global PMCs. Need to call pfm_cell_write_pmc()
+        * instead of cbe_write_pm() due to special handling for the
+        * pm_status register.
+        */
+       for (i *= 2; i < PFM_PM_NUM_PMCS; i++)
+               pfm_cell_write_pmc(i, set->pmcs[i]);
+}
+
 static struct pfm_arch_pmu_info pfm_cell_pmu_info = {
        .pmu_style        = PFM_POWERPC_PMU_CELL,
        .write_pmc        = pfm_cell_write_pmc,
@@ -296,6 +354,7 @@ static struct pfm_arch_pmu_info pfm_cell
        .read_pmd         = pfm_cell_read_pmd,
        .enable_counters  = pfm_cell_enable_counters,
        .disable_counters = pfm_cell_disable_counters,
+       .restore_pmcs     = pfm_cell_restore_pmcs,
 };
 
 static struct pfm_pmu_config pfm_cell_pmu_conf = {
Index: linux-2.6.21-perfmon1/include/asm-powerpc/perfmon.h
===================================================================
--- linux-2.6.21-perfmon1.orig/include/asm-powerpc/perfmon.h
+++ linux-2.6.21-perfmon1/include/asm-powerpc/perfmon.h
@@ -52,6 +52,10 @@ struct pfm_arch_pmu_info {
 
        void (*enable_counters)(struct pfm_event_set *set);
        void (*disable_counters)(struct pfm_event_set *set);
+
+       /* These two are optional. */
+       void (*restore_pmcs)(struct pfm_event_set *set);
+       void (*restore_pmds)(struct pfm_event_set *set);
 };
 
 #ifdef CONFIG_PPC32