Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-02-08 Thread Liang, Kan




On 2/8/2019 5:39 AM, Thomas Gleixner wrote:

On Thu, 31 Jan 2019, Liang, Kan wrote:

+u64 perf_get_page_size(u64 virt)
+{
+   unsigned long flags;
+   unsigned int level;
+   pte_t *pte;
+
+   if (!virt)
+   return 0;
+
+   /*
+* Interrupts are disabled, so it prevents any tear down
+* of the page tables.
+* See the comment near struct mmu_table_batch.
+*/
+   local_irq_save(flags);
+   if (virt >= TASK_SIZE)
+   pte = lookup_address(virt, &level);
+   else {
+   if (current->mm)
+   pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
+   virt, &level);


Aside from all the missing {}, I'm fairly sure this is broken since this
happens from NMI context. This can interrupt switch_mm() and things like
use_temporary_mm().

Also; why does this live in the x86 code and not in the generic code?



This is the x86 implementation. In generic code, there is a __weak
function. I'll make it clear in the changelog in v4.


No, instead of hiding it in the changelog, split the patch into two:

  #1 Adding the core stuff including the weak function

  #2 Adding the x86 implementation.



Thanks for the comments. I will do it in V5.

Thanks,
Kan


Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-02-08 Thread Thomas Gleixner
On Thu, 31 Jan 2019, Liang, Kan wrote:
> > > +u64 perf_get_page_size(u64 virt)
> > > +{
> > > + unsigned long flags;
> > > + unsigned int level;
> > > + pte_t *pte;
> > > +
> > > + if (!virt)
> > > + return 0;
> > > +
> > > + /*
> > > +  * Interrupts are disabled, so it prevents any tear down
> > > +  * of the page tables.
> > > +  * See the comment near struct mmu_table_batch.
> > > +  */
> > > + local_irq_save(flags);
> > > + if (virt >= TASK_SIZE)
> > > + pte = lookup_address(virt, &level);
> > > + else {
> > > + if (current->mm)
> > > + pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
> > > + virt, &level);
> > 
> > Aside from all the missing {}, I'm fairly sure this is broken since this
> > happens from NMI context. This can interrupt switch_mm() and things like
> > use_temporary_mm().
> > 
> > Also; why does this live in the x86 code and not in the generic code?
> > 
> 
> This is the x86 implementation. In generic code, there is a __weak
> function. I'll make it clear in the changelog in v4.

No, instead of hiding it in the changelog, split the patch into two:

 #1 Adding the core stuff including the weak function

 #2 Adding the x86 implementation.

Thanks,

tglx
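
[For reference, a minimal sketch of that split, assuming the usual
weak-symbol pattern: the generic stub in kernel/events/core.c and the
arch override in arch/x86/events/core.c (the file placement is an
assumption, not taken from the thread).]

/*
 * Patch #1 -- kernel/events/core.c: the generic weak fallback.
 * Returns the size of the page backing @virt, 0 if unknown.
 * Must be IRQ/NMI safe.
 */
u64 __weak perf_get_page_size(u64 virt)
{
	return 0;
}

/*
 * Patch #2 -- arch/x86/events/core.c: defining the same symbol
 * without __weak makes the linker pick it over the fallback, so
 * no registration or Kconfig glue is needed.
 */
u64 perf_get_page_size(u64 virt)
{
	/* the x86 page-table walk from the hunk quoted above goes here */
	return 0;
}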


Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-01-31 Thread Will Deacon
On Thu, Jan 31, 2019 at 01:37:25PM +0100, Peter Zijlstra wrote:
> On Wed, Jan 30, 2019 at 06:23:42AM -0800, kan.li...@linux.intel.com wrote:
> > +enum perf_page_size {
> > +   PERF_PAGE_SIZE_NONE,
> > +   PERF_PAGE_SIZE_4K,
> > +   PERF_PAGE_SIZE_8K,
> > +   PERF_PAGE_SIZE_16K,
> > +   PERF_PAGE_SIZE_64K,
> > +   PERF_PAGE_SIZE_2M,
> > +   PERF_PAGE_SIZE_1G,
> > +   PERF_PAGE_SIZE_512G,
> > +};
> 
> Since you have a u64 to store this in, WTH do you use this limited enum?
> Are you very sure this covers all the possible page sizes for all
> architectures?

FWIW, this covers the basic page sizes on arm64, but it doesn't cover all of
the hugepage sizes.

Will


Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-01-31 Thread Liang, Kan




On 1/31/2019 7:37 AM, Peter Zijlstra wrote:

On Wed, Jan 30, 2019 at 06:23:42AM -0800, kan.li...@linux.intel.com wrote:

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 374a197..03bf45d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2578,3 +2578,45 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
cap->events_mask_len = x86_pmu.events_mask_len;
  }
  EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
+
+/*
+ * map x86 page levels to perf page sizes
+ */
+static const enum perf_page_size perf_page_size_map[PG_LEVEL_NUM] = {
+   [PG_LEVEL_NONE] = PERF_PAGE_SIZE_NONE,
+   [PG_LEVEL_4K]   = PERF_PAGE_SIZE_4K,
+   [PG_LEVEL_2M]   = PERF_PAGE_SIZE_2M,
+   [PG_LEVEL_1G]   = PERF_PAGE_SIZE_1G,
+   [PG_LEVEL_512G] = PERF_PAGE_SIZE_512G,
+};
+
+u64 perf_get_page_size(u64 virt)
+{
+   unsigned long flags;
+   unsigned int level;
+   pte_t *pte;
+
+   if (!virt)
+   return 0;
+
+   /*
+* Interrupts are disabled, so it prevents any tear down
+* of the page tables.
+* See the comment near struct mmu_table_batch.
+*/
+   local_irq_save(flags);
+   if (virt >= TASK_SIZE)
+   pte = lookup_address(virt, &level);
+   else {
+   if (current->mm)
+   pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
+   virt, &level);


Aside from all the missing {}, I'm fairly sure this is broken since this
happens from NMI context. This can interrupt switch_mm() and things like
use_temporary_mm().

Also; why does this live in the x86 code and not in the generic code?



This is the x86 implementation. In generic code, there is a __weak
function. I'll make it clear in the changelog in v4.


+/* Return page size of given virtual address. IRQ-safe required. */
+u64 __weak perf_get_page_size(u64 virt)
+{
+   return PERF_PAGE_SIZE_NONE;
+}



+   else
+   level = PG_LEVEL_NUM;
+   }
+   local_irq_restore(flags);
+   if (level >= PG_LEVEL_NUM)
+   return PERF_PAGE_SIZE_NONE;
+
+   return (u64)perf_page_size_map[level];
+}



diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 7198ddd..79daacd 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -141,8 +141,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR   = 1U << 18,
PERF_SAMPLE_PHYS_ADDR   = 1U << 19,
+   PERF_SAMPLE_DATA_PAGE_SIZE  = 1U << 20,
  
-	PERF_SAMPLE_MAX				= 1U << 20,		/* non-ABI */
+	PERF_SAMPLE_MAX				= 1U << 21,		/* non-ABI */
  
  	__PERF_SAMPLE_CALLCHAIN_EARLY		= 1ULL << 63, /* non-ABI; internal use */

  };
@@ -863,6 +864,7 @@ enum perf_event_type {
 *  { u64   abi; # enum perf_sample_regs_abi
*u64   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
 *  { u64   phys_addr;} && PERF_SAMPLE_PHYS_ADDR
+*  { u64   data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
 * };
 */
PERF_RECORD_SAMPLE  = 9,
@@ -1150,6 +1152,18 @@ union perf_mem_data_src {
  #define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
  
+

+enum perf_page_size {
+   PERF_PAGE_SIZE_NONE,
+   PERF_PAGE_SIZE_4K,
+   PERF_PAGE_SIZE_8K,
+   PERF_PAGE_SIZE_16K,
+   PERF_PAGE_SIZE_64K,
+   PERF_PAGE_SIZE_2M,
+   PERF_PAGE_SIZE_1G,
+   PERF_PAGE_SIZE_512G,
+};


Since you have a u64 to store this in, WTH do you use this limited enum?
Are you very sure this covers all the possible page sizes for all
architectures?

Why not simply report the page size in bytes?



I will report the page size in bytes instead in V4.

Thanks,
Kan
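
[For illustration, a minimal sketch of what reporting bytes could look
like on x86, assuming the existing page_level_size() helper is used to
translate the walk level into a size; the enum and the map table then
disappear entirely. This is a sketch only, and it inherits the NMI
concerns discussed elsewhere in this thread.]

u64 perf_get_page_size(u64 virt)
{
	unsigned long flags;
	unsigned int level;
	pte_t *pte = NULL;

	if (!virt)
		return 0;

	/* IRQs off so the page tables cannot be freed under us. */
	local_irq_save(flags);
	if (virt >= TASK_SIZE)
		pte = lookup_address(virt, &level);
	else if (current->mm)
		pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
					    virt, &level);
	local_irq_restore(flags);

	/* Report the mapping size in bytes instead of an enum. */
	return pte ? (u64)page_level_size(level) : 0;
}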


Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-01-31 Thread Andi Kleen
> 
> Aside from all the missin {}, I'm fairly sure this is broken since this
> happens from NMI context. This can interrupt switch_mm() and things like
> use_temporary_mm().

So the concern is that the sample is from before the switch, and we then
look it up in the wrong page tables if the PMI happens after the switch
due to sampling skid?

First, this can happen only with PEBS, which doesn't have that bad a
worst-case skid (perhaps tens of cycles).

I doubt it is very likely, because this problem can only happen for user
addresses; kernel page tables don't change.

But we would be in the middle of the context switch (or
use_temporary_mm()) here, and there should be no user-space accesses
within such a tens-of-cycles window (except perhaps for the rseq address,
but that's not a very interesting case).

I assume the use_temporary_mm() cases are similar.

I suppose we could enforce flushing the PMU on such context switches, but
I suspect that while it's a valid theoretical problem, it's unlikely to
be a real problem in practice.

It likely means that large-buffer PEBS can never be used with this
option, but I guess that's ok.

-Andi



Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-01-31 Thread Peter Zijlstra
On Thu, Jan 31, 2019 at 01:37:25PM +0100, Peter Zijlstra wrote:
> On Wed, Jan 30, 2019 at 06:23:42AM -0800, kan.li...@linux.intel.com wrote:
> > diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> > index 374a197..03bf45d 100644
> > --- a/arch/x86/events/core.c
> > +++ b/arch/x86/events/core.c
> > @@ -2578,3 +2578,45 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
> > cap->events_mask_len= x86_pmu.events_mask_len;
> >  }
> >  EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
> > +
> > +/*
> > + * map x86 page levels to perf page sizes
> > + */
> > +static const enum perf_page_size perf_page_size_map[PG_LEVEL_NUM] = {
> > +   [PG_LEVEL_NONE] = PERF_PAGE_SIZE_NONE,
> > +   [PG_LEVEL_4K]   = PERF_PAGE_SIZE_4K,
> > +   [PG_LEVEL_2M]   = PERF_PAGE_SIZE_2M,
> > +   [PG_LEVEL_1G]   = PERF_PAGE_SIZE_1G,
> > +   [PG_LEVEL_512G] = PERF_PAGE_SIZE_512G,
> > +};
> > +
> > +u64 perf_get_page_size(u64 virt)
> > +{
> > +   unsigned long flags;
> > +   unsigned int level;
> > +   pte_t *pte;
> > +
> > +   if (!virt)
> > +   return 0;
> > +
> > +   /*
> > +* Interrupts are disabled, so it prevents any tear down
> > +* of the page tables.
> > +* See the comment near struct mmu_table_batch.
> > +*/
> > +   local_irq_save(flags);
> > +   if (virt >= TASK_SIZE)
> > +   pte = lookup_address(virt, &level);
> > +   else {
> > +   if (current->mm)
> > +   pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
> > +   virt, &level);
> 
> > Aside from all the missing {}, I'm fairly sure this is broken since this
> happens from NMI context. This can interrupt switch_mm() and things like
> use_temporary_mm().

Ah, I'm confused again. This is a software page-table walk and is not
affected by the current CR3 state, which is much safer.

The rest of the comment still applies, of course.
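
[To make the distinction concrete: a software walk starts from the
mm_struct's pgd pointer in memory rather than from the CR3 register, so
an NMI landing in the middle of switch_mm() still sees a consistent
tree. A rough sketch with the generic page-table accessors, huge pages
detected at the pud/pmd levels; the caller must keep IRQs disabled so
the tables are not freed mid-walk. Helper and function names below are
illustrative, not from the patch.]

static u64 sw_walk_page_size(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* from mm->pgd, not CR3 */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return 0;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return 0;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return 0;
	if (pud_large(*pud))			/* 1G mapping on x86 */
		return 1ULL << PUD_SHIFT;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))			/* 2M mapping on x86 */
		return 1ULL << PMD_SHIFT;
	pte = pte_offset_kernel(pmd, addr);
	return pte_present(*pte) ? PAGE_SIZE : 0;	/* 4K base page */
}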


Re: [PATCH V3 01/13] perf/core, x86: Add PERF_SAMPLE_DATA_PAGE_SIZE

2019-01-31 Thread Peter Zijlstra
On Wed, Jan 30, 2019 at 06:23:42AM -0800, kan.li...@linux.intel.com wrote:
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 374a197..03bf45d 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -2578,3 +2578,45 @@ void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
>   cap->events_mask_len= x86_pmu.events_mask_len;
>  }
>  EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);
> +
> +/*
> + * map x86 page levels to perf page sizes
> + */
> +static const enum perf_page_size perf_page_size_map[PG_LEVEL_NUM] = {
> + [PG_LEVEL_NONE] = PERF_PAGE_SIZE_NONE,
> + [PG_LEVEL_4K]   = PERF_PAGE_SIZE_4K,
> + [PG_LEVEL_2M]   = PERF_PAGE_SIZE_2M,
> + [PG_LEVEL_1G]   = PERF_PAGE_SIZE_1G,
> + [PG_LEVEL_512G] = PERF_PAGE_SIZE_512G,
> +};
> +
> +u64 perf_get_page_size(u64 virt)
> +{
> + unsigned long flags;
> + unsigned int level;
> + pte_t *pte;
> +
> + if (!virt)
> + return 0;
> +
> + /*
> +  * Interrupts are disabled, so it prevents any tear down
> +  * of the page tables.
> +  * See the comment near struct mmu_table_batch.
> +  */
> + local_irq_save(flags);
> + if (virt >= TASK_SIZE)
> + pte = lookup_address(virt, &level);
> + else {
> + if (current->mm)
> + pte = lookup_address_in_pgd(pgd_offset(current->mm, virt),
> + virt, &level);

Aside from all the missing {}, I'm fairly sure this is broken since this
happens from NMI context. This can interrupt switch_mm() and things like
use_temporary_mm().

Also; why does this live in the x86 code and not in the generic code?

> + else
> + level = PG_LEVEL_NUM;
> + }
> + local_irq_restore(flags);
> + if (level >= PG_LEVEL_NUM)
> + return PERF_PAGE_SIZE_NONE;
> +
> + return (u64)perf_page_size_map[level];
> +}

> diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
> index 7198ddd..79daacd 100644
> --- a/include/uapi/linux/perf_event.h
> +++ b/include/uapi/linux/perf_event.h
> @@ -141,8 +141,9 @@ enum perf_event_sample_format {
>   PERF_SAMPLE_TRANSACTION = 1U << 17,
>   PERF_SAMPLE_REGS_INTR   = 1U << 18,
>   PERF_SAMPLE_PHYS_ADDR   = 1U << 19,
> + PERF_SAMPLE_DATA_PAGE_SIZE  = 1U << 20,
>  
> - PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
> + PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
>  
>   __PERF_SAMPLE_CALLCHAIN_EARLY   = 1ULL << 63, /* non-ABI; internal use */
>  };
> @@ -863,6 +864,7 @@ enum perf_event_type {
>*  { u64   abi; # enum perf_sample_regs_abi
>*u64   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
>*  { u64   phys_addr;} && PERF_SAMPLE_PHYS_ADDR
> +  *  { u64   data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
>* };
>*/
>   PERF_RECORD_SAMPLE  = 9,
> @@ -1150,6 +1152,18 @@ union perf_mem_data_src {
>  #define PERF_MEM_S(a, s) \
>   (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
>  
> +
> +enum perf_page_size {
> + PERF_PAGE_SIZE_NONE,
> + PERF_PAGE_SIZE_4K,
> + PERF_PAGE_SIZE_8K,
> + PERF_PAGE_SIZE_16K,
> + PERF_PAGE_SIZE_64K,
> + PERF_PAGE_SIZE_2M,
> + PERF_PAGE_SIZE_1G,
> + PERF_PAGE_SIZE_512G,
> +};

Since you have a u64 to store this in, WTH do you use this limited enum?
Are you very sure this covers all the possible page sizes for all
architectures?

Why not simply report the page size in bytes?