Re: [PATCH v2 06/13] powerpc: Support prefixed instructions in alignment handler

2020-02-11 Thread Jordan Niethe
On Tue, Feb 11, 2020 at 5:14 PM Christophe Leroy
 wrote:
>
>
>
> Le 11/02/2020 à 06:33, Jordan Niethe a écrit :
> > Alignment interrupts can be caused by prefixed instructions accessing
> > memory. In the alignment handler the instruction that caused the
> > exception is loaded and an attempt is made to emulate it. If the
> > instruction is a prefixed instruction, load the prefix and suffix to
> > emulate. After emulating, increment the NIP by 8.
> >
> > Prefixed instructions are not permitted to cross 64-byte boundaries. If
> > they do the alignment interrupt is invoked with SRR1 BOUNDARY bit set.
> > If this occurs send a SIGBUS to the offending process if in user mode.
> > If in kernel mode call bad_page_fault().
> >
> > Signed-off-by: Jordan Niethe 
> > ---
> > v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
> > commit (previously in "powerpc sstep: Prepare to support prefixed
> > instructions").
> >  - Rename sufx to suffix
> >  - Use a macro for calculating instruction length
> > ---
> >   arch/powerpc/include/asm/uaccess.h | 30 ++
> >   arch/powerpc/kernel/align.c|  8 +---
> >   arch/powerpc/kernel/traps.c| 21 -
> >   3 files changed, 55 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/powerpc/include/asm/uaccess.h 
> > b/arch/powerpc/include/asm/uaccess.h
> > index 2f500debae21..30f63a81c8d8 100644
> > --- a/arch/powerpc/include/asm/uaccess.h
> > +++ b/arch/powerpc/include/asm/uaccess.h
> > @@ -474,4 +474,34 @@ static __must_check inline bool 
> > user_access_begin(const void __user *ptr, size_t
> >   #define unsafe_copy_to_user(d, s, l, e) \
> >   unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
> >
>
> Could it go close to other __get_user() and friends instead of being at
> the end of the file ?
Will do.
>
> > +/*
> > + * When reading an instruction iff it is a prefix, the suffix needs to be 
> > also
> > + * loaded.
> > + */
> > +#define __get_user_instr(x, y, ptr)  \
> > +({   \
> > + long __gui_ret = 0; \
> > + y = 0;  \
> > + __gui_ret = __get_user(x, ptr); \
> > + if (!__gui_ret) {   \
> > + if (IS_PREFIX(x))   \
>
> Does this apply to PPC32 ?
No, for now (and the foreseeable future) it will just affect 64s.
> If not, can we make sure IS_PREFIX is constant 0 on PPC32 so that the
> second read gets dropped at compile time ?
>
> Can we instead do :
>
> if (!__gui_ret && IS_PREFIX(x))
Will do.
>
> > + __gui_ret = __get_user(y, ptr + 1); \
> > + }   \
> > + \
> > + __gui_ret;  \
> > +})
> > +
> > +#define __get_user_instr_inatomic(x, y, ptr) \
> > +({   \
> > + long __gui_ret = 0; \
> > + y = 0;  \
> > + __gui_ret = __get_user_inatomic(x, ptr);\
> > + if (!__gui_ret) {   \
> > + if (IS_PREFIX(x))   \
>
> Same comments as above
>
> > + __gui_ret = __get_user_inatomic(y, ptr + 1);\
> > + }   \
> > + \
> > + __gui_ret;  \
> > +})
> > +
> >   #endif  /* _ARCH_POWERPC_UACCESS_H */
> > diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
> > index ba3bf5c3ab62..e42cfaa616d3 100644
> > --- a/arch/powerpc/kernel/align.c
> > +++ b/arch/powerpc/kernel/align.c
> > @@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned 
> > int reg,
> >
> >   int fix_alignment(struct pt_regs *regs)
> >   {
> > - unsigned int instr;
> > + unsigned int instr, suffix;
> >   struct instruction_op op;
> >   int r, type;
> >
> > @@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)
> >*/
> >   CHECK_FULL_REGS(regs);
> >
> > - if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
> > + if (unlikely(__get_user_instr(instr, suffix,
> > +  (unsigned int __user *)regs->nip)))
> >   return -EFAULT;
> >   if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
> >   /* We don't handle PPC little-endian any more... */
> >   if (cpu_has_feature(CPU_FTR_PPC_LE))
> >   return -EIO;
> >   instr = swab32(instr);
> > + suffix = swab32(suffix);
> >   }
> >
> >   #ifdef CONFIG_SPE
> > @@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
> >   if ((instr & 0xfc0006fe) == 

Re: [PATCH v2 06/13] powerpc: Support prefixed instructions in alignment handler

2020-02-10 Thread Christophe Leroy




Le 11/02/2020 à 06:33, Jordan Niethe a écrit :

Alignment interrupts can be caused by prefixed instructions accessing
memory. In the alignment handler the instruction that caused the
exception is loaded and an attempt is made to emulate it. If the
instruction is a prefixed instruction, load the prefix and suffix to
emulate. After emulating, increment the NIP by 8.

Prefixed instructions are not permitted to cross 64-byte boundaries. If
they do the alignment interrupt is invoked with SRR1 BOUNDARY bit set.
If this occurs send a SIGBUS to the offending process if in user mode.
If in kernel mode call bad_page_fault().

Signed-off-by: Jordan Niethe 
---
v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
commit (previously in "powerpc sstep: Prepare to support prefixed
instructions").
 - Rename sufx to suffix
 - Use a macro for calculating instruction length
---
  arch/powerpc/include/asm/uaccess.h | 30 ++
  arch/powerpc/kernel/align.c|  8 +---
  arch/powerpc/kernel/traps.c| 21 -
  3 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h 
b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..30f63a81c8d8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -474,4 +474,34 @@ static __must_check inline bool user_access_begin(const 
void __user *ptr, size_t
  #define unsafe_copy_to_user(d, s, l, e) \
unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
  


Could it go close to other __get_user() and friends instead of being at 
the end of the file ?



+/*
+ * When reading an instruction iff it is a prefix, the suffix needs to be also
+ * loaded.
+ */
+#define __get_user_instr(x, y, ptr)\
+({ \
+   long __gui_ret = 0; \
+   y = 0;  \
+   __gui_ret = __get_user(x, ptr); \
+   if (!__gui_ret) {   \
+   if (IS_PREFIX(x))   \


Does this apply to PPC32 ?
If not, can we make sure IS_PREFIX is constant 0 on PPC32 so that the 
second read gets dropped at compile time ?


Can we instead do :

if (!__gui_ret && IS_PREFIX(x))


+   __gui_ret = __get_user(y, ptr + 1); \
+   }   \
+   \
+   __gui_ret;  \
+})
+
+#define __get_user_instr_inatomic(x, y, ptr)   \
+({ \
+   long __gui_ret = 0; \
+   y = 0;  \
+   __gui_ret = __get_user_inatomic(x, ptr);\
+   if (!__gui_ret) {   \
+   if (IS_PREFIX(x))   \


Same comments as above


+   __gui_ret = __get_user_inatomic(y, ptr + 1);\
+   }   \
+   \
+   __gui_ret;  \
+})
+
  #endif/* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ba3bf5c3ab62..e42cfaa616d3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int 
reg,
  
  int fix_alignment(struct pt_regs *regs)

  {
-   unsigned int instr;
+   unsigned int instr, suffix;
struct instruction_op op;
int r, type;
  
@@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)

 */
CHECK_FULL_REGS(regs);
  
-	if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))

+   if (unlikely(__get_user_instr(instr, suffix,
+(unsigned int __user *)regs->nip)))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
if (cpu_has_feature(CPU_FTR_PPC_LE))
return -EIO;
instr = swab32(instr);
+   suffix = swab32(suffix);
}
  
  #ifdef CONFIG_SPE

@@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
return -EIO;
  
-	r = analyse_instr(, regs, instr, PPC_NO_SUFFIX);

+   r = analyse_instr(, regs, instr, suffix);
if (r < 0)
return -EINVAL;
  
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c

index 82a3438300fd..d80b82fc1ae3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -583,6 +583,10 @@ static inline int check_io_access(struct 

[PATCH v2 06/13] powerpc: Support prefixed instructions in alignment handler

2020-02-10 Thread Jordan Niethe
Alignment interrupts can be caused by prefixed instructions accessing
memory. In the alignment handler the instruction that caused the
exception is loaded and an attempt is made to emulate it. If the
instruction is a prefixed instruction, load the prefix and suffix to
emulate. After emulating, increment the NIP by 8.

Prefixed instructions are not permitted to cross 64-byte boundaries. If
they do the alignment interrupt is invoked with SRR1 BOUNDARY bit set.
If this occurs send a SIGBUS to the offending process if in user mode.
If in kernel mode call bad_page_fault().

Signed-off-by: Jordan Niethe 
---
v2: - Move __get_user_instr() and __get_user_instr_inatomic() to this
commit (previously in "powerpc sstep: Prepare to support prefixed
instructions").
- Rename sufx to suffix
- Use a macro for calculating instruction length
---
 arch/powerpc/include/asm/uaccess.h | 30 ++
 arch/powerpc/kernel/align.c|  8 +---
 arch/powerpc/kernel/traps.c| 21 -
 3 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h 
b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..30f63a81c8d8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -474,4 +474,34 @@ static __must_check inline bool user_access_begin(const 
void __user *ptr, size_t
 #define unsafe_copy_to_user(d, s, l, e) \
unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
 
+/*
+ * When reading an instruction iff it is a prefix, the suffix needs to be also
+ * loaded.
+ */
+#define __get_user_instr(x, y, ptr)\
+({ \
+   long __gui_ret = 0; \
+   y = 0;  \
+   __gui_ret = __get_user(x, ptr); \
+   if (!__gui_ret) {   \
+   if (IS_PREFIX(x))   \
+   __gui_ret = __get_user(y, ptr + 1); \
+   }   \
+   \
+   __gui_ret;  \
+})
+
+#define __get_user_instr_inatomic(x, y, ptr)   \
+({ \
+   long __gui_ret = 0; \
+   y = 0;  \
+   __gui_ret = __get_user_inatomic(x, ptr);\
+   if (!__gui_ret) {   \
+   if (IS_PREFIX(x))   \
+   __gui_ret = __get_user_inatomic(y, ptr + 1);\
+   }   \
+   \
+   __gui_ret;  \
+})
+
 #endif /* _ARCH_POWERPC_UACCESS_H */
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ba3bf5c3ab62..e42cfaa616d3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -293,7 +293,7 @@ static int emulate_spe(struct pt_regs *regs, unsigned int 
reg,
 
 int fix_alignment(struct pt_regs *regs)
 {
-   unsigned int instr;
+   unsigned int instr, suffix;
struct instruction_op op;
int r, type;
 
@@ -303,13 +303,15 @@ int fix_alignment(struct pt_regs *regs)
 */
CHECK_FULL_REGS(regs);
 
-   if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+   if (unlikely(__get_user_instr(instr, suffix,
+(unsigned int __user *)regs->nip)))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
if (cpu_has_feature(CPU_FTR_PPC_LE))
return -EIO;
instr = swab32(instr);
+   suffix = swab32(suffix);
}
 
 #ifdef CONFIG_SPE
@@ -334,7 +336,7 @@ int fix_alignment(struct pt_regs *regs)
if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
return -EIO;
 
-   r = analyse_instr(, regs, instr, PPC_NO_SUFFIX);
+   r = analyse_instr(, regs, instr, suffix);
if (r < 0)
return -EINVAL;
 
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 82a3438300fd..d80b82fc1ae3 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -583,6 +583,10 @@ static inline int check_io_access(struct pt_regs *regs)
 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
 #define REASON_PRIVILEGED  ESR_PPR
 #define REASON_TRAPESR_PTR
+#define REASON_PREFIXED0
+#define REASON_BOUNDARY0
+
+#define inst_length(reason)4
 
 /* single-step stuff */
 #define single_stepping(regs)  (current->thread.debug.dbcr0 & DBCR0_IC)
@@ -597,6 +601,10 @@ static inline int