Implement raw_copy_from_user_allowed(), which assumes that userspace read
access is already open, and use it to implement raw_copy_from_user().
Finally, wrap the new function as unsafe_copy_from_user(), following the
usual "unsafe_" convention of taking a label argument. The new
raw_copy_from_user_allowed() calls __copy_tofrom_user() internally; this
is still safe to call inside user access blocks formed with
user_*_access_begin()/user_*_access_end() since asm functions are not
instrumented for tracing.
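
For example, a caller could use the new unsafe_copy_from_user() roughly
as in the sketch below (illustrative only; copy_len_from_user(), kbuf,
uptr and len are made-up names and not part of this patch):

  static int copy_len_from_user(void *kbuf, const void __user *uptr,
                                size_t len)
  {
          /* Open a user read access window; fail on a bad range. */
          if (!user_read_access_begin(uptr, len))
                  return -EFAULT;

          /* The trailing label argument is taken on a faulting access. */
          unsafe_copy_from_user(kbuf, uptr, len, Efault);
          user_read_access_end();
          return 0;

  Efault:
          user_read_access_end();
          return -EFAULT;
  }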

Signed-off-by: Christopher M. Riedl <c...@codefail.de>
---
 arch/powerpc/include/asm/uaccess.h | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index ef5bbb705c08..96b4abab4f5a 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -403,38 +403,45 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 }
 #endif /* __powerpc64__ */
 
-static inline unsigned long raw_copy_from_user(void *to,
-               const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user_allowed(void *to, const void __user *from, unsigned long n)
 {
-       unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
-               ret = 1;
+               unsigned long ret = 1;
 
                switch (n) {
                case 1:
                        barrier_nospec();
-                       __get_user_size(*(u8 *)to, from, 1, ret);
+                       __get_user_size_allowed(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        barrier_nospec();
-                       __get_user_size(*(u16 *)to, from, 2, ret);
+                       __get_user_size_allowed(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        barrier_nospec();
-                       __get_user_size(*(u32 *)to, from, 4, ret);
+                       __get_user_size_allowed(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        barrier_nospec();
-                       __get_user_size(*(u64 *)to, from, 8, ret);
+                       __get_user_size_allowed(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }
 
+       return __copy_tofrom_user((__force void __user *)to, from, n);
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long ret;
+
        barrier_nospec();
        allow_read_from_user(from, n);
-       ret = __copy_tofrom_user((__force void __user *)to, from, n);
+       ret = raw_copy_from_user_allowed(to, from, n);
        prevent_read_from_user(from, n);
        return ret;
 }
@@ -542,6 +549,9 @@ user_write_access_begin(const void __user *ptr, size_t len)
 #define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
 #define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
 
+#define unsafe_copy_from_user(d, s, l, e) \
+       unsafe_op_wrap(raw_copy_from_user_allowed(d, s, l), e)
+
 #define unsafe_copy_to_user(d, s, l, e) \
 do {                                                                   \
        u8 __user *_dst = (u8 __user *)(d);                             \
-- 
2.29.0
