On Tue, May 12, 2020 at 11:19:18AM +0200, Peter Zijlstra wrote:

> Now, your arm64 counter is 56 bits, so wrapping is rare, but still, we
> should probably fix that. And that probably needs an ABI extension
> *sigh*.

Something like so, on top of the other (2).

---

--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1174,6 +1174,7 @@ void arch_perf_update_userpage(struct pe
 
        userpg->cap_user_time = 0;
        userpg->cap_user_time_zero = 0;
+       userpg->cap_user_time_short = 0;
 
        do {
                rd = sched_clock_read_begin(&seq);
@@ -1184,13 +1185,16 @@ void arch_perf_update_userpage(struct pe
                userpg->time_mult = rd->mult;
                userpg->time_shift = rd->shift;
                userpg->time_zero = rd->epoch_ns;
+               userpg->time_cycle = rd->epoch_cyc;
+               userpg->time_mask = rd->sched_clock_mask;
 
                /*
                 * This isn't strictly correct, the ARM64 counter can be
-                * 'short' and then we get funnies when it wraps. The correct
-                * thing would be to extend the perf ABI with a cycle and mask
-                * value, but because wrapping on ARM64 is very rare in
-                * practise this 'works'.
+                * 'short' and then we get funnies when it wraps. But this
+                * 'works' with the cap_user_time ABI.
+                *
+                * When userspace knows about cap_user_time_short, it
+                * can do the correct thing.
                 */
                userpg->time_zero -= (rd->epoch_cyc * rd->mult) >> rd->shift;
 
@@ -1215,4 +1219,5 @@ void arch_perf_update_userpage(struct pe
         */
        userpg->cap_user_time = 1;
        userpg->cap_user_time_zero = 1;
+       userpg->cap_user_time_short = 1;
 }
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -532,9 +532,10 @@ struct perf_event_mmap_page {
                                cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
 
                                cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
-                               cap_user_time           : 1, /* The time_* fields are used */
+                               cap_user_time           : 1, /* The time_{shift,mult,offset} fields are used */
                                cap_user_time_zero      : 1, /* The time_zero field is used */
-                               cap_____res             : 59;
+                               cap_user_time_short     : 1, /* the time_{cycle,mask} fields are used */
+                               cap_____res             : 58;
                };
        };
 
@@ -593,13 +594,29 @@ struct perf_event_mmap_page {
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;
+
        __u32   size;                   /* Header size up to __reserved[] fields. */
+       __u32   __reserved_1;
+
+       /*
+        * If cap_usr_time_short, the hardware clock is less than 64bit wide
+        * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+        *
+        *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+        *
+        * NOTE: this form is explicitly chosen such that cap_usr_time_short
+        *       is an extension on top of cap_usr_time, and code that doesn't
+        *       know about cap_usr_time_short still works under the assumption
+        *       the counter doesn't wrap.
+        */
+       __u64   time_cycles;
+       __u64   time_mask;
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u8    __reserved[118*8+4];    /* align to 1k. */
+       __u8    __reserved[116*8];      /* align to 1k. */
 
        /*
         * Control data for the mmap() data buffer.

Reply via email to