This patch converts emulate_spe() to user_access_begin logic.
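
For reference, the pattern being adopted looks like this (a minimal
sketch, not code from the patch; uaddr and val are placeholder names):

	if (!user_read_access_begin(uaddr, sizeof(*uaddr)))
		return -EFAULT;
	/* A fault jumps to the label instead of returning an error code */
	unsafe_get_user(val, uaddr, Efault);
	/* The access window must be closed on the success path... */
	user_read_access_end();
	return 0;

Efault:
	/* ... and on the failure path */
	user_read_access_end();
	return -EFAULT;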

Since commit 662bbcb2747c ("mm, sched: Allow uaccess in atomic with
pagefault_disable()"), might_fault() doesn't fire when called from
sections where pagefaults are disabled, which must be the case when
using the _inatomic variants of __get_user() and __put_user(). So the
might_fault() in user_access_begin() is not a problem.
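
For context, the check in mm/memory.c essentially reads (a simplified
sketch, details omitted):

	void __might_fault(const char *file, int line)
	{
		/* Since 662bbcb2747c: bail out instead of warning when
		 * pagefaults are disabled.
		 */
		if (pagefault_disabled())
			return;
		might_sleep();
	}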

The removed code verified user_mode() together with access_ok(), but
the function returns immediately after that check when !user_mode(),
so dropping the user_mode() part of the test has no functional effect.
The access_ok() verification itself is now done by
user_read_access_begin()/user_write_access_begin().

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/kernel/align.c | 61 ++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index c7797eb958c7..c4d7b445b459 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = {
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                       struct ppc_inst ppc_instr)
 {
-       int ret;
        union {
                u64 ll;
                u32 w[2];
@@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
        nb = spe_aligninfo[instr].len;
        flags = spe_aligninfo[instr].flags;
 
-       /* Verify the address of the operand */
-       if (unlikely(user_mode(regs) &&
-                    !access_ok(addr, nb)))
-               return -EFAULT;
-
        /* userland only */
        if (unlikely(!user_mode(regs)))
                return 0;
@@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                }
        } else {
                temp.ll = data.ll = 0;
-               ret = 0;
                p = addr;
 
+               if (!user_read_access_begin(addr, nb))
+                       return -EFAULT;
+
                switch (nb) {
                case 8:
-                       ret |= __get_user_inatomic(temp.v[0], p++);
-                       ret |= __get_user_inatomic(temp.v[1], p++);
-                       ret |= __get_user_inatomic(temp.v[2], p++);
-                       ret |= __get_user_inatomic(temp.v[3], p++);
+                       unsafe_get_user(temp.v[0], p++, Efault_read);
+                       unsafe_get_user(temp.v[1], p++, Efault_read);
+                       unsafe_get_user(temp.v[2], p++, Efault_read);
+                       unsafe_get_user(temp.v[3], p++, Efault_read);
                        fallthrough;
                case 4:
-                       ret |= __get_user_inatomic(temp.v[4], p++);
-                       ret |= __get_user_inatomic(temp.v[5], p++);
+                       unsafe_get_user(temp.v[4], p++, Efault_read);
+                       unsafe_get_user(temp.v[5], p++, Efault_read);
                        fallthrough;
                case 2:
-                       ret |= __get_user_inatomic(temp.v[6], p++);
-                       ret |= __get_user_inatomic(temp.v[7], p++);
-                       if (unlikely(ret))
-                               return -EFAULT;
+                       unsafe_get_user(temp.v[6], p++, Efault_read);
+                       unsafe_get_user(temp.v[7], p++, Efault_read);
                }
+               user_read_access_end();
 
                switch (instr) {
                case EVLDD:
@@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
 
        /* Store result to memory or update registers */
        if (flags & ST) {
-               ret = 0;
                p = addr;
+
+               if (!user_write_access_begin(addr, nb))
+                       return -EFAULT;
+
                switch (nb) {
                case 8:
-                       ret |= __put_user_inatomic(data.v[0], p++);
-                       ret |= __put_user_inatomic(data.v[1], p++);
-                       ret |= __put_user_inatomic(data.v[2], p++);
-                       ret |= __put_user_inatomic(data.v[3], p++);
+                       unsafe_put_user(data.v[0], p++, Efault_write);
+                       unsafe_put_user(data.v[1], p++, Efault_write);
+                       unsafe_put_user(data.v[2], p++, Efault_write);
+                       unsafe_put_user(data.v[3], p++, Efault_write);
                        fallthrough;
                case 4:
-                       ret |= __put_user_inatomic(data.v[4], p++);
-                       ret |= __put_user_inatomic(data.v[5], p++);
+                       unsafe_put_user(data.v[4], p++, Efault_write);
+                       unsafe_put_user(data.v[5], p++, Efault_write);
                        fallthrough;
                case 2:
-                       ret |= __put_user_inatomic(data.v[6], p++);
-                       ret |= __put_user_inatomic(data.v[7], p++);
+                       unsafe_put_user(data.v[6], p++, Efault_write);
+                       unsafe_put_user(data.v[7], p++, Efault_write);
                }
-               if (unlikely(ret))
-                       return -EFAULT;
+               user_write_access_end();
        } else {
                *evr = data.w[0];
                regs->gpr[reg] = data.w[1];
        }
 
        return 1;
+
+Efault_read:
+       user_read_access_end();
+       return -EFAULT;
+
+Efault_write:
+       user_write_access_end();
+       return -EFAULT;
 }
 #endif /* CONFIG_SPE */
 
-- 
2.25.0
