commit ddf35cf3764b5a182b178105f57515b42e2634f8 upstream.

Based on the x86 commits doing the same.

See commit 304ec1b05031 ("x86/uaccess: Use __uaccess_begin_nospec()
and uaccess_try_nospec") and b3bbfb3fb5d2 ("x86: Introduce
__uaccess_begin_nospec() and uaccess_try_nospec") for more detail.

In all cases we are ordering the load from the potentially
user-controlled pointer vs a previous branch based on an access_ok()
check or similar.
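
For illustration, the pattern being closed looks roughly like the
sketch below (a hypothetical caller, not code from this patch; the
function name and types are made up, barrier_nospec() is the powerpc
speculation barrier used throughout the diff):

	/*
	 * Speculative execution may run the __get_user() load before
	 * the access_ok() branch resolves. barrier_nospec() orders the
	 * load against the branch so a mispredicted "ok" path cannot
	 * pull in attacker-chosen data (Spectre variant 1).
	 */
	static long example_read_user(const u32 __user *uptr, u32 *val)
	{
		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
			return -EFAULT;
		barrier_nospec();	/* no speculative load past the check */
		return __get_user(*val, uptr);
	}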

Based on a patch from Michal Suchanek.

Signed-off-by: Michal Suchanek <msucha...@suse.de>
Signed-off-by: Michael Ellerman <m...@ellerman.id.au>
---
 arch/powerpc/include/asm/uaccess.h | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 05f1389228d2..e51ce5a0e221 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -269,6 +269,7 @@ do {                                                   \
        __chk_user_ptr(ptr);                                    \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -283,6 +284,7 @@ do {                                                   \
        __chk_user_ptr(ptr);                                    \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -295,8 +297,10 @@ do {                                                  \
        unsigned long  __gu_val = 0;                                    \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        might_fault();                                                  \
-       if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
+       if (access_ok(VERIFY_READ, __gu_addr, (size))) {                \
+               barrier_nospec();                                       \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+       }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                                       \
 })
@@ -307,6 +311,7 @@ do {                                                   \
        unsigned long __gu_val;                                 \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
        __chk_user_ptr(ptr);                                    \
+       barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
@@ -323,8 +328,10 @@ extern unsigned long __copy_tofrom_user(void __user *to,
 static inline unsigned long copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
-       if (likely(access_ok(VERIFY_READ, from, n)))
+       if (likely(access_ok(VERIFY_READ, from, n))) {
+               barrier_nospec();
                return __copy_tofrom_user((__force void __user *)to, from, n);
+       }
        memset(to, 0, n);
        return n;
 }
@@ -359,21 +366,27 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
 
                switch (n) {
                case 1:
+                       barrier_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
+                       barrier_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
+                       barrier_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
+                       barrier_nospec();
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
+
+       barrier_nospec();
        return __copy_tofrom_user((__force void __user *)to, from, n);
 }
 
@@ -400,6 +413,7 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
                if (ret == 0)
                        return 0;
        }
+
        return __copy_tofrom_user(to, (__force const void __user *)from, n);
 }
 
-- 
2.20.1
