Re: [PATCH v3 2/9] powerpc/watchpoint: Fix DAWR exception constraint

2020-07-14 Thread Ravi Bangoria

Hi Jordan,


@@ -536,7 +538,12 @@ static bool check_dawrx_constraints(struct pt_regs *regs, 
int type,
 if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
 return false;

-   if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
+   /*
+* The Cache Management instructions other than dcbz never
+* cause a match. i.e. if type is CACHEOP, the instruction
+* is dcbz, and dcbz is treated as Store.
+*/
+   if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
 return false;

This change seems separate from this commit?


I also thought about it but was not sure. See below ...



 if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
@@ -553,7 +560,8 @@ static bool check_dawrx_constraints(struct pt_regs *regs, 
int type,
   * including extraneous exception. Otherwise return false.
   */
  static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
- int type, int size, struct arch_hw_breakpoint *info)
+ unsigned long ea, int type, int size,
+ struct arch_hw_breakpoint *info)
  {
 bool in_user_range = dar_in_user_range(regs->dar, info);
 bool dawrx_constraints;
@@ -569,11 +577,10 @@ static bool check_constraints(struct pt_regs *regs, 
struct ppc_inst instr,
 }

 if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
-   if (in_user_range)
-   return true;
-
-   if (dar_in_hw_range(regs->dar, info)) {
-   info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+   if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+   if (dar_in_hw_range(regs->dar, info))
+   return true;
+   } else {
 return true;

I think this would be clearer as:
	if (cpu_has_feature(CPU_FTR_ARCH_31) &&
	    !(dar_in_hw_range(regs->dar, info)))
		return false;
	else
		return true;


ok




 }
 return false;
@@ -581,10 +588,20 @@ static bool check_constraints(struct pt_regs *regs, 
struct ppc_inst instr,

 dawrx_constraints = check_dawrx_constraints(regs, type, info);

-   if (dar_user_range_overlaps(regs->dar, size, info))
+   if (type == UNKNOWN) {
+   if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+   if (dar_in_hw_range(regs->dar, info))
+   return dawrx_constraints;
+   } else {
+   return dawrx_constraints;
+   }
+   return false;
+   }

Similar thing here, it could be:
	if ((cpu_has_feature(CPU_FTR_ARCH_31)) &&
	    !(dar_in_hw_range(regs->dar, info)))
		return false;
	else
		return dawrx_constraints;


ok
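
So with that folded in, the UNKNOWN case would end up roughly like this
(just a sketch of the suggested shape, not the final code):

	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;
		return dawrx_constraints;
	}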


+
+   if (ea_user_range_overlaps(ea, size, info))
 return dawrx_constraints;

-   if (dar_hw_range_overlaps(regs->dar, size, info)) {
+   if (ea_hw_range_overlaps(ea, size, info)) {
 if (dawrx_constraints) {
 info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
 return true;
@@ -593,8 +610,17 @@ static bool check_constraints(struct pt_regs *regs, struct 
ppc_inst instr,
 return false;
  }

+static int cache_op_size(void)
+{
+#ifdef __powerpc64__
+   return ppc64_caches.l1d.block_size;
+#else
+   return L1_CACHE_BYTES;
+#endif
+}
+
  static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
-int *type, int *size, bool *larx_stcx)
+int *type, int *size, unsigned long *ea)
  {
 struct instruction_op op;

@@ -602,16 +628,23 @@ static void get_instr_detail(struct pt_regs *regs, struct 
ppc_inst *instr,
 return;

 analyse_instr(&op, regs, *instr);
-
-   /*
-* Set size = 8 if analyse_instr() fails. If it's a userspace
-* watchpoint(valid or extraneous), we can notify user about it.
-* If it's a kernel watchpoint, instruction  emulation will fail
-* in stepping_handler() and watchpoint will be disabled.
-*/
 *type = GETTYPE(op.type);
-   *size = !(*type == UNKNOWN) ? GETSIZE(op.type) : 8;
-   *larx_stcx = (*type == LARX || *type == STCX);
+   *ea = op.ea;
+#ifdef __powerpc64__
+   if (!(regs->msr & MSR_64BIT))
+   *ea &= 0xffffffffUL;
+#endif
+
+   *size = GETSIZE(op.type);
+   if (*type == CACHEOP) {
+   *size = cache_op_size();
+   *ea &= ~(*size - 1);
+   }

Again related to CACHEOP, should these changes be mentioned in the
commit message?


For CACHEOP, ea returned by analyse_instr() needs to be aligned down to cache
block size manually. Also, for CACHEOP, size returned by analyse_instr() is 0
and thus it also needs to be calculated manually.
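
For example (hypothetical numbers, assuming a 128-byte L1 block size), a
dcbz with ea = 0x1234 would be reported as:

	size = cache_op_size();   /* 128 */
	ea  &= ~(size - 1);       /* 0x1234 -> 0x1200; dcbz zeroes the whole block */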

Re: [PATCH v3 2/9] powerpc/watchpoint: Fix DAWR exception constraint

2020-07-14 Thread Jordan Niethe
On Wed, Jul 8, 2020 at 2:52 PM Ravi Bangoria wrote:
>
> Pedro Miraglia Franco de Carvalho noticed that on p8, the DAR value is
> inconsistent across different types of load/store. For byte, word,
> etc. load/stores, DAR is set to the address of the first byte of
> overlap between the watch range and the real access. But for quadword
> load/store it's set to the address of the first byte of the real
> access. This issue has been fixed in p10. In p10 (ISA 3.1), DAR is
> always set to the address of the first byte of overlap. Commit 27985b2a640e
> ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly")
> wrongly assumes that DAR is set to the address of the first byte of
> overlap for all load/stores on p8 as well. Fix that. With the fix,
> we now rely on 'ea' provided by analyse_instr(). If analyse_instr()
> fails, generate event unconditionally on p8, and on p10 generate
> event only if DAR is within a DAWR range.
>
> Note: 8xx is not affected.
>
> Fixes: 27985b2a640e ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly")
> Fixes: 74c6881019b7 ("powerpc/watchpoint: Prepare handler to handle more than one watchpoint")
> Reported-by: Pedro Miraglia Franco de Carvalho 
> Signed-off-by: Ravi Bangoria 
> ---
>  arch/powerpc/kernel/hw_breakpoint.c | 93 +++--
>  1 file changed, 63 insertions(+), 30 deletions(-)
>
> diff --git a/arch/powerpc/kernel/hw_breakpoint.c 
> b/arch/powerpc/kernel/hw_breakpoint.c
> index 031e6defc08e..7a66c370a105 100644
> --- a/arch/powerpc/kernel/hw_breakpoint.c
> +++ b/arch/powerpc/kernel/hw_breakpoint.c
> @@ -498,11 +498,11 @@ static bool dar_in_user_range(unsigned long dar, struct 
> arch_hw_breakpoint *info
> return ((info->address <= dar) && (dar - info->address < info->len));
>  }
>
> -static bool dar_user_range_overlaps(unsigned long dar, int size,
> -   struct arch_hw_breakpoint *info)
> +static bool ea_user_range_overlaps(unsigned long ea, int size,
> +  struct arch_hw_breakpoint *info)
>  {
> -   return ((dar < info->address + info->len) &&
> -   (dar + size > info->address));
> +   return ((ea < info->address + info->len) &&
> +   (ea + size > info->address));
>  }
>
>  static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint 
> *info)
> @@ -515,20 +515,22 @@ static bool dar_in_hw_range(unsigned long dar, struct 
> arch_hw_breakpoint *info)
> return ((hw_start_addr <= dar) && (hw_end_addr > dar));
>  }
>
> -static bool dar_hw_range_overlaps(unsigned long dar, int size,
> - struct arch_hw_breakpoint *info)
> +static bool ea_hw_range_overlaps(unsigned long ea, int size,
> +struct arch_hw_breakpoint *info)
>  {
> unsigned long hw_start_addr, hw_end_addr;
>
> hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
> hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
>
> -   return ((dar < hw_end_addr) && (dar + size > hw_start_addr));
> +   return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
>  }
>
>  /*
>   * If hw has multiple DAWR registers, we also need to check all
>   * dawrx constraint bits to confirm this is _really_ a valid event.
> + * If type is UNKNOWN, but privilege level matches, consider it as
> + * a positive match.
>   */
>  static bool check_dawrx_constraints(struct pt_regs *regs, int type,
> struct arch_hw_breakpoint *info)
> @@ -536,7 +538,12 @@ static bool check_dawrx_constraints(struct pt_regs 
> *regs, int type,
> if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
> return false;
>
> -   if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
> +   /*
> +* The Cache Management instructions other than dcbz never
> +* cause a match. i.e. if type is CACHEOP, the instruction
> +* is dcbz, and dcbz is treated as Store.
> +*/
> +   if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
> return false;
This change seems separate from this commit?
>
> if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
> @@ -553,7 +560,8 @@ static bool check_dawrx_constraints(struct pt_regs *regs, 
> int type,
>   * including extraneous exception. Otherwise return false.
>   */
>  static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
> - int type, int size, struct arch_hw_breakpoint *info)
> + unsigned long ea, int type, int size,
> + struct arch_hw_breakpoint *info)
>  {
> bool in_user_range = dar_in_user_range(regs->dar, info);
> bool dawrx_constraints;
> @@ -569,11 +577,10 @@ static bool check_constraints(struct pt_regs *regs, 
> struct ppc_inst instr,
> }
>
> if 

[PATCH v3 2/9] powerpc/watchpoint: Fix DAWR exception constraint

2020-07-07 Thread Ravi Bangoria
Pedro Miraglia Franco de Carvalho noticed that on p8, the DAR value is
inconsistent across different types of load/store. For byte, word,
etc. load/stores, DAR is set to the address of the first byte of
overlap between the watch range and the real access. But for quadword
load/store it's set to the address of the first byte of the real
access. This issue has been fixed in p10. In p10 (ISA 3.1), DAR is
always set to the address of the first byte of overlap. Commit 27985b2a640e
("powerpc/watchpoint: Don't ignore extraneous exceptions blindly")
wrongly assumes that DAR is set to the address of the first byte of
overlap for all load/stores on p8 as well. Fix that. With the fix,
we now rely on 'ea' provided by analyse_instr(). If analyse_instr()
fails, generate event unconditionally on p8, and on p10 generate
event only if DAR is within a DAWR range.
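
For illustration (hypothetical addresses, not from the patch): with a
watchpoint covering 0x1000-0x1007 and a quadword store touching
0x0ff8-0x1007, the values involved would be:

  watch range : 0x1000 - 0x1007
  real access : 0x0ff8 - 0x1007  (quadword store)
  DAR on p8   : 0x0ff8  (first byte of the real access)
  DAR on p10  : 0x1000  (first byte of the overlap)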

Note: 8xx is not affected.

Fixes: 27985b2a640e ("powerpc/watchpoint: Don't ignore extraneous exceptions blindly")
Fixes: 74c6881019b7 ("powerpc/watchpoint: Prepare handler to handle more than one watchpoint")
Reported-by: Pedro Miraglia Franco de Carvalho 
Signed-off-by: Ravi Bangoria 
---
 arch/powerpc/kernel/hw_breakpoint.c | 93 +++--
 1 file changed, 63 insertions(+), 30 deletions(-)

diff --git a/arch/powerpc/kernel/hw_breakpoint.c 
b/arch/powerpc/kernel/hw_breakpoint.c
index 031e6defc08e..7a66c370a105 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -498,11 +498,11 @@ static bool dar_in_user_range(unsigned long dar, struct 
arch_hw_breakpoint *info
return ((info->address <= dar) && (dar - info->address < info->len));
 }
 
-static bool dar_user_range_overlaps(unsigned long dar, int size,
-   struct arch_hw_breakpoint *info)
+static bool ea_user_range_overlaps(unsigned long ea, int size,
+  struct arch_hw_breakpoint *info)
 {
-   return ((dar < info->address + info->len) &&
-   (dar + size > info->address));
+   return ((ea < info->address + info->len) &&
+   (ea + size > info->address));
 }
 
 static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
@@ -515,20 +515,22 @@ static bool dar_in_hw_range(unsigned long dar, struct 
arch_hw_breakpoint *info)
return ((hw_start_addr <= dar) && (hw_end_addr > dar));
 }
 
-static bool dar_hw_range_overlaps(unsigned long dar, int size,
- struct arch_hw_breakpoint *info)
+static bool ea_hw_range_overlaps(unsigned long ea, int size,
+struct arch_hw_breakpoint *info)
 {
unsigned long hw_start_addr, hw_end_addr;
 
hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
 
-   return ((dar < hw_end_addr) && (dar + size > hw_start_addr));
+   return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
 }
 
 /*
  * If hw has multiple DAWR registers, we also need to check all
  * dawrx constraint bits to confirm this is _really_ a valid event.
+ * If type is UNKNOWN, but privilege level matches, consider it as
+ * a positive match.
  */
 static bool check_dawrx_constraints(struct pt_regs *regs, int type,
struct arch_hw_breakpoint *info)
@@ -536,7 +538,12 @@ static bool check_dawrx_constraints(struct pt_regs *regs, 
int type,
if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
return false;
 
-   if (OP_IS_STORE(type) && !(info->type & HW_BRK_TYPE_WRITE))
+   /*
+* The Cache Management instructions other than dcbz never
+* cause a match. i.e. if type is CACHEOP, the instruction
+* is dcbz, and dcbz is treated as Store.
+*/
+   if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
return false;
 
if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
@@ -553,7 +560,8 @@ static bool check_dawrx_constraints(struct pt_regs *regs, 
int type,
  * including extraneous exception. Otherwise return false.
  */
 static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
- int type, int size, struct arch_hw_breakpoint *info)
+ unsigned long ea, int type, int size,
+ struct arch_hw_breakpoint *info)
 {
bool in_user_range = dar_in_user_range(regs->dar, info);
bool dawrx_constraints;
@@ -569,11 +577,10 @@ static bool check_constraints(struct pt_regs *regs, 
struct ppc_inst instr,
}
 
 if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
-   if (in_user_range)
-   return true;
-
-   if (dar_in_hw_range(regs->dar, info)) {
-   info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+   if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+