Stephane Eranian wrote:
Will,

I got OProfile to work on perfmon v2.3 on both IA-64 and X86. I need to
clean up the user-level code a little bit. I would appreciate it if you
could take a look at it.

There is one thing I am not so sure about at this point: how does OProfile
handle unavailable counters when it assigns events to counters? Take, for
instance, a configuration where the NMI watchdog has reserved a counter.

Do you know anything about this?

Yes, I developed a patch so that OProfile works when some registers are
reserved for the watchdog. However, I seem to have forgotten to post it.
I have attached the patch to this email.


The way I have it set up right now does not use libpfm for the event ->
counter assignment; I just use it for the interface definitions and the
perfmon syscalls. So I rely on OProfile to map events to OProfile counters,
and then I map those onto perfmon PMC/PMD registers.
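
For illustration, here is a minimal sketch of that last mapping step, assuming
a hypothetical CPU on which logical OProfile counter i corresponds to perfmon
registers PMC[i + 4] and PMD[i + 4]; the offsets and the struct/helper names
below are invented for the example and are not the actual OProfile/perfmon
glue code.

#include <stddef.h>

/* hypothetical layout: the counting PMC/PMD pairs start at register 4 */
#define FIRST_COUNTING_PMC	4
#define FIRST_COUNTING_PMD	4

struct perfmon_regs {
	unsigned int pmc;	/* configuration register number */
	unsigned int pmd;	/* data (count) register number */
};

/* translate the logical counter index chosen by map_event_to_counter()
 * into the perfmon PMC/PMD register pair that will be programmed */
static struct perfmon_regs counter_to_perfmon_regs(size_t counter)
{
	struct perfmon_regs regs;

	regs.pmc = FIRST_COUNTING_PMC + (unsigned int) counter;
	regs.pmd = FIRST_COUNTING_PMD + (unsigned int) counter;
	return regs;
}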

The other thing I noticed is that Montecito events are not available.

I guess no one has taken the time to enter the Montecito events into OProfile.

-Will
Index: libop/op_alloc_counter.c
===================================================================
RCS file: /cvsroot/oprofile/oprofile/libop/op_alloc_counter.c,v
retrieving revision 1.7
diff -u -r1.7 op_alloc_counter.c
--- libop/op_alloc_counter.c	17 Nov 2006 18:20:38 -0000	1.7
+++ libop/op_alloc_counter.c	18 Jan 2007 20:41:50 -0000
@@ -12,6 +12,8 @@
  */
 
 #include <stdlib.h>
+#include <ctype.h>
+#include <dirent.h>
 
 #include "op_events.h"
 #include "op_libiberty.h"
@@ -143,6 +145,42 @@
 	return 0;
 }
 
+/* determine which directories are counter directories
+ */
+static int perfcounterdir(const struct dirent * entry)
+{
+	return (isdigit(entry->d_name[0]));
+}
+
+
+/**
+ * @param mask pointer where to place bit mask of unavailable counters
+ *
+ * return >= 0 number of counters that are available
+ *        < 0  could not determine number of counters
+ *
+ */
+static int op_get_counter_mask(u32 * mask)
+{
+	struct dirent **counterlist;
+	int count, i;
+	/* assume nothing is available */
+	u32 available=0;
+
+	count = scandir("/dev/oprofile", &counterlist, perfcounterdir,
+			alphasort);
+	if (count < 0)
+		/* unable to determine bit mask */
+		return -1;
+	/* convert to bit map (0 where counter exists) */
+	for (i=0; i<count; ++i) {
+		available |= 1 << atoi(counterlist[i]->d_name);
+		free(counterlist[i]);
+	}
+	*mask=~available;
+	free(counterlist);
+	return count;
+}
 
 size_t * map_event_to_counter(struct op_event const * pev[], int nr_events,
                               op_cpu cpu_type)
@@ -150,8 +188,11 @@
 	counter_arc_head * ctr_arc;
 	size_t * counter_map;
 	int nr_counters;
+	u32 unavailable_counters = 0;
 
-	nr_counters = op_get_nr_counters(cpu_type);
+	nr_counters = op_get_counter_mask(&unavailable_counters);
+	if (nr_counters < 0) 
+		nr_counters = op_get_nr_counters(cpu_type);
 	if (nr_counters < nr_events)
 		return 0;
 
@@ -159,7 +200,8 @@
 
 	counter_map = xmalloc(nr_counters * sizeof(size_t));
 
-	if (!allocate_counter(ctr_arc, nr_events, 0, 0, counter_map)) {
+	if (!allocate_counter(ctr_arc, nr_events, 0, unavailable_counters,
+			      counter_map)) {
 		free(counter_map);
 		counter_map = 0;
 	}
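
To see what the new op_get_counter_mask() computes, here is a small standalone
sketch of the same bit-mask logic, assuming a hypothetical x86 box on which the
NMI watchdog has claimed counter 0, so /dev/oprofile contains only the
directories 1, 2 and 3; the hard-coded list simply stands in for the scandir()
result.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int u32;

int main(void)
{
	/* stand-in for the scandir() result: counter 0 is missing because
	 * the NMI watchdog reserved it (hypothetical configuration) */
	const char * counterdirs[] = { "1", "2", "3" };
	const int count = sizeof(counterdirs) / sizeof(counterdirs[0]);
	u32 available = 0;
	int i;

	/* set a bit for every counter directory that exists */
	for (i = 0; i < count; ++i)
		available |= 1 << atoi(counterdirs[i]);

	/* the complement is the mask handed to allocate_counter() as the
	 * set of counters it must not use */
	printf("available   = 0x%x\n", available);   /* prints 0xe */
	printf("unavailable = 0x%x\n", ~available);   /* bit 0 is set */
	return 0;
}

With that mask in place, allocate_counter() never assigns an event to
counter 0, which is how the patch answers the NMI-watchdog question above.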