Hi,

Le 03/03/2026 à 10:19, Sayali Patil a écrit :

On 02/03/26 16:42, Christophe Leroy (CS GROUP) wrote:
Hi Sayali,

Le 28/02/2026 à 14:53, Sayali Patil a écrit :
On powerpc with PREEMPT_FULL or PREEMPT_LAZY and function tracing enabled,
KUAP warnings can be triggered from the VMX usercopy path under memory
stress workloads.

KUAP requires that no subfunctions are called once userspace access has
been enabled. The existing VMX copy implementation violates this
requirement by invoking enter_vmx_usercopy() from the assembly path after
userspace access has already been enabled. If preemption occurs
in this window, the AMR state may not be preserved correctly,
leading to unexpected userspace access state and resulting in
KUAP warnings.

Fix this by restructuring the VMX usercopy flow so that VMX selection
and VMX state management are centralized in raw_copy_tofrom_user(),
which is invoked by the raw_copy_{to,from,in}_user() wrappers.

Introduce a usercopy_mode enum to describe the copy direction
(IN, FROM, TO) and use it to derive the required KUAP permissions.
Userspace access is now enabled and disabled through common helpers
based on the selected mode, ensuring that the correct read/write
permissions are applied consistently.

  The new flow is:

   - raw_copy_{to,from,in}_user() calls raw_copy_tofrom_user()
   - raw_copy_tofrom_user() decides whether to use the VMX path
     based on size and CPU capability
   - Call enter_vmx_usercopy() before enabling userspace access
   - Enable userspace access as per the usercopy mode
     and perform the VMX copy
   - Disable userspace access as per the usercopy mode
   - Call exit_vmx_usercopy()
   - Fall back to the base copy routine if the VMX copy faults

With this change, the VMX assembly routines no longer perform VMX state
management or call helper functions; they only implement the
copy operations.
The previous feature-section based VMX selection inside
__copy_tofrom_user_power7() is removed, and a dedicated
__copy_tofrom_user_power7_vmx() entry point is introduced.

This ensures correct KUAP ordering, avoids subfunction calls
while KUAP is unlocked, and eliminates the warnings while preserving
the VMX fast path.

Fixes: de78a9c42a79 ("powerpc: Add a framework for Kernel Userspace Access Protection")
Reported-by: Shrikanth Hegde <[email protected]>
Closes: https://lore.kernel.org/all/[email protected]/
Suggested-by: Christophe Leroy <[email protected]>
Co-developed-by: Aboorva Devarajan <[email protected]>
Signed-off-by: Aboorva Devarajan <[email protected]>
Signed-off-by: Sayali Patil <[email protected]>
---

v1->v2
   - Updated as per the review comments.
   - Centralized VMX usercopy handling in __copy_tofrom_user_vmx() in
     arch/powerpc/lib/vmx-helper.c.
   - Introduced a usercopy_mode enum to describe the copy direction
     (IN, FROM, TO) and derive the required KUAP permissions, avoiding
     duplication across the different usercopy paths.

I like the reduction of duplication you propose but I can't see the added value of that enum, what about:

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 63d6eb8b004e..14a3219db838 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -329,12 +329,6 @@ do {                                \
 extern unsigned long __copy_tofrom_user(void __user *to,
         const void __user *from, unsigned long size);

-enum usercopy_mode {
-    USERCOPY_IN,
-    USERCOPY_FROM,
-    USERCOPY_TO,
-};
-
 unsigned long __copy_tofrom_user_vmx(void __user *to, const void __user *from,
                 unsigned long size, enum usercopy_mode mode);

@@ -352,48 +346,18 @@ static inline bool will_use_vmx(unsigned long n)
         n > VMX_COPY_THRESHOLD;
 }

-static inline void raw_copy_allow(void __user *to, enum usercopy_mode mode)
-{
-    switch (mode) {
-    case USERCOPY_IN:
-        allow_user_access(to, KUAP_READ_WRITE);
-        break;
-    case USERCOPY_FROM:
-        allow_user_access(NULL, KUAP_READ);
-        break;
-    case USERCOPY_TO:
-        allow_user_access(to, KUAP_WRITE);
-        break;
-    }
-}
-
-static inline void raw_copy_prevent(enum usercopy_mode mode)
-{
-    switch (mode) {
-    case USERCOPY_IN:
-        prevent_user_access(KUAP_READ_WRITE);
-        break;
-    case USERCOPY_FROM:
-        prevent_user_access(KUAP_READ);
-        break;
-    case USERCOPY_TO:
-        prevent_user_access(KUAP_WRITE);
-        break;
-    }
-}
-
 static inline unsigned long raw_copy_tofrom_user(void __user *to,
         const void __user *from, unsigned long n,
-        enum usercopy_mode mode)
+        unsigned long dir)
 {
     unsigned long ret;

     if (will_use_vmx(n))
         return __copy_tofrom_user_vmx(to, from, n, dir);

-    raw_copy_allow(to, mode);
+    allow_user_access(to, dir);
     ret = __copy_tofrom_user(to, from, n);
-    raw_copy_prevent(mode);
+    prevent_user_access(dir);
     return ret;

 }
@@ -403,22 +367,20 @@ static inline unsigned long
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
     barrier_nospec();
-    return raw_copy_tofrom_user(to, from, n, USERCOPY_IN);
+    return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE);
 }
 #endif /* __powerpc64__ */

 static inline unsigned long raw_copy_from_user(void *to,
         const void __user *from, unsigned long n)
 {
-    return raw_copy_tofrom_user((__force void __user *)to, from,
-                    n, USERCOPY_FROM);
+    return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ);
 }

 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-    return raw_copy_tofrom_user(to, (__force const void __user *)from,
-                    n, USERCOPY_TO);
+    return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE);
 }

 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 35080885204b..4610f7153fd9 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -11,25 +11,25 @@
 #include <asm/switch_to.h>

 unsigned long __copy_tofrom_user_vmx(void __user *to, const void __user *from,
-            unsigned long size, enum usercopy_mode mode)
+            unsigned long size, unsigned long dir)
 {
     unsigned long ret;

     if (!enter_vmx_usercopy()) {
-        raw_copy_allow(to, mode);
+        allow_user_access(to, dir);
         ret = __copy_tofrom_user(to, from, size);
-        raw_copy_prevent(mode);
+        prevent_user_access(dir);
         return ret;
     }

-    raw_copy_allow(to, mode);
+    allow_user_access(to, dir);
     ret = __copy_tofrom_user_power7_vmx(to, from, size);
-    raw_copy_prevent(mode);
+    prevent_user_access(dir);
     exit_vmx_usercopy();
     if (unlikely(ret)) {
-        raw_copy_allow(to, mode);
+        allow_user_access(to, dir);
         ret = __copy_tofrom_user_base(to, from, size);
-        raw_copy_prevent(mode);
+        prevent_user_access(dir);
     }

     return ret;



Christophe


Hi Christophe,
Thanks for the review.
With the suggested change, we are hitting a compilation error.

The issue is related to how KUAP enforces the access direction.
allow_user_access() contains:

BUILD_BUG_ON(!__builtin_constant_p(dir));

which requires that the access direction is a compile-time constant.
If we pass a runtime value (for example, an unsigned long), the
__builtin_constant_p() check fails and triggers the following build error.

Error:
In function 'allow_user_access', inlined from '__copy_tofrom_user_vmx' at arch/powerpc/lib/vmx-helper.c:19:3:
BUILD_BUG_ON failed: !__builtin_constant_p(dir) 706


The previous implementation worked because allow_user_access() was invoked with compile-time constants (KUAP_READ, KUAP_WRITE, KUAP_READ_WRITE), which satisfied the __builtin_constant_p() requirement. So in this case, the function must be called with a compile-time constant to satisfy KUAP.

Please let me know if you would prefer a different approach.


Ah, right, I missed that. The problem should only be in vmx-helper.c

What about:

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 63d6eb8b004e..1e05f66fa647 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -329,14 +329,8 @@ do {                                                       
        \
 extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

-enum usercopy_mode {
-       USERCOPY_IN,
-       USERCOPY_FROM,
-       USERCOPY_TO,
-};
-
unsigned long __copy_tofrom_user_vmx(void __user *to, const void __user *from,
-                               unsigned long size, enum usercopy_mode mode);
+                               unsigned long size, unsigned long dir);

 unsigned long __copy_tofrom_user_base(void __user *to,
                const void __user *from, unsigned long size);
@@ -345,55 +339,25 @@ unsigned long __copy_tofrom_user_power7_vmx(void __user *to,
                const void __user *from, unsigned long size);


-static inline bool will_use_vmx(unsigned long n)
+static __always_inline bool will_use_vmx(unsigned long n)
 {
        return IS_ENABLED(CONFIG_ALTIVEC) &&
                cpu_has_feature(CPU_FTR_VMX_COPY) &&
                n > VMX_COPY_THRESHOLD;
 }

-static inline void raw_copy_allow(void __user *to, enum usercopy_mode mode)
-{
-       switch (mode) {
-       case USERCOPY_IN:
-               allow_user_access(to, KUAP_READ_WRITE);
-               break;
-       case USERCOPY_FROM:
-               allow_user_access(NULL, KUAP_READ);
-               break;
-       case USERCOPY_TO:
-               allow_user_access(to, KUAP_WRITE);
-               break;
-       }
-}
-
-static inline void raw_copy_prevent(enum usercopy_mode mode)
-{
-       switch (mode) {
-       case USERCOPY_IN:
-               prevent_user_access(KUAP_READ_WRITE);
-               break;
-       case USERCOPY_FROM:
-               prevent_user_access(KUAP_READ);
-               break;
-       case USERCOPY_TO:
-               prevent_user_access(KUAP_WRITE);
-               break;
-       }
-}
-
-static inline unsigned long raw_copy_tofrom_user(void __user *to,
+static __always_inline unsigned long raw_copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long n,
-               enum usercopy_mode mode)
+               unsigned long dir)
 {
        unsigned long ret;

        if (will_use_vmx(n))
-               return __copy_tofrom_user_vmx(to, from, n, mode);
+               return __copy_tofrom_user_vmx(to, from, n, dir);

-       raw_copy_allow(to, mode);
+       allow_user_access(to, dir);
        ret = __copy_tofrom_user(to, from, n);
-       raw_copy_prevent(mode);
+       prevent_user_access(dir);
        return ret;

 }
@@ -403,22 +367,20 @@ static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
        barrier_nospec();
-       return raw_copy_tofrom_user(to, from, n, USERCOPY_IN);
+       return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE);
 }
 #endif /* __powerpc64__ */

 static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
 {
-       return raw_copy_tofrom_user((__force void __user *)to, from,
-                                       n, USERCOPY_FROM);
+ return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ);
 }

 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       return raw_copy_tofrom_user(to, (__force const void __user *)from,
-                                       n, USERCOPY_TO);
+ return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE);
 }

 unsigned long __arch_clear_user(void __user *addr, unsigned long size);
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 35080885204b..5fc6b2997158 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -10,26 +10,49 @@
 #include <linux/hardirq.h>
 #include <asm/switch_to.h>

+static void __allow_user_access(void __user *to, unsigned long dir)
+{
+       if (dir == KUAP_READ)
+               allow_user_access(to, KUAP_READ);
+       else if (dir == KUAP_WRITE)
+               allow_user_access(to, KUAP_WRITE);
+       else if (dir == KUAP_READ_WRITE)
+               allow_user_access(to, KUAP_READ_WRITE);
+}
+
+static void __prevent_user_access(unsigned long dir)
+{
+       if (dir == KUAP_READ)
+               prevent_user_access(KUAP_READ);
+       else if (dir == KUAP_WRITE)
+               prevent_user_access(KUAP_WRITE);
+       else if (dir == KUAP_READ_WRITE)
+               prevent_user_access(KUAP_READ_WRITE);
+}
+
unsigned long __copy_tofrom_user_vmx(void __user *to, const void __user *from,
-                       unsigned long size, enum usercopy_mode mode)
+                       unsigned long size, unsigned long dir)
 {
        unsigned long ret;

+       if (WARN_ON_ONCE(!dir || dir > KUAP_READ_WRITE))
+               return size;
+
        if (!enter_vmx_usercopy()) {
-               raw_copy_allow(to, mode);
+               __allow_user_access(to, dir);
                ret = __copy_tofrom_user(to, from, size);
-               raw_copy_prevent(mode);
+               __prevent_user_access(dir);
                return ret;
        }

-       raw_copy_allow(to, mode);
+       __allow_user_access(to, dir);
        ret = __copy_tofrom_user_power7_vmx(to, from, size);
-       raw_copy_prevent(mode);
+       __prevent_user_access(dir);
        exit_vmx_usercopy();
        if (unlikely(ret)) {
-               raw_copy_allow(to, mode);
+               __allow_user_access(to, dir);
                ret = __copy_tofrom_user_base(to, from, size);
-               raw_copy_prevent(mode);
+               __prevent_user_access(dir);
        }

        return ret;


Christophe

Reply via email to