Hi Jan

On 18.11.25 15:45, Jan Beulich wrote:
On 18.11.2025 14:08, Grygorii Strashko wrote:
On 17.11.25 18:43, Jan Beulich wrote:
On 14.11.2025 15:01, Grygorii Strashko wrote:
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -14,6 +14,10 @@ obj-y += ro-page-fault.o
   obj-$(CONFIG_PV_SHIM) += shim.o
   obj-$(CONFIG_TRACEBUFFER) += trace.o
   obj-y += traps.o
+obj-$(CONFIG_PV) += usercopy.o

Just obj-y with the movement.

However, is the movement (and was the adding of $(CONFIG_PV) in the earlier
version) actually correct? The file also produces copy_{from,to}_unsafe_ll(),
which aren't PV-specific. This may be only a latent issue right now, as we
have only a single use site of copy_from_unsafe(), but those functions need
to remain available. (We may want to arrange for them to be removed when
linking, as long as they're not referenced. But that's a separate topic.)

It is confusing that none of the build config combinations have failed
(HVM=y PV=n, HVM=n PV=n) :(

copy_to_unsafe_ll()
- called from copy_to_unsafe()
- copy_to_unsafe() has no users (unreachable, MISRA 2.1?)

copy_from_unsafe_ll()
- called from copy_from_unsafe()
- copy_from_unsafe() called from one place do_invalid_op() with
    copy_from_unsafe(,, n = sizeof(bug_insn)).
    Due to __builtin_constant_p(n) check the copy_from_unsafe() call
    optimized by compiler to
    get_unsafe_size(*(uint16_t *)to, from, 2, UA_DROP, ret, 2);

As a result, copy_from_unsafe_ll() is also unreachable (?).

Yes, these likely all want to become library-like, so they are linked in only
when actually referenced.

If those functions are not supposed to be removed, then
   usercopy.c can't be moved into "x86/pv", right?

That's my take, yes.

Making copy_{from,to}_unsafe_ll() available for !PV means
rewriting usercopy.c in some way, right?

"Re-writing" is probably too much, but some adjustments would be needed if
you want to keep the "unsafe" functions but compile out the "guest" ones.
It may be possible to compile the file twice, once from x86/pv/ and once
from x86/, replacing the self-#include near the bottom of the file. The
former would then produce the "guest" functions, the latter the "unsafe"
ones.

Below is the difference I came up with, will it work?

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 6e2b17471719..a2017b4600b3 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -71,6 +71,7 @@ obj-y += time.o
 obj-y += traps-setup.o
 obj-y += traps.o
 obj-$(CONFIG_INTEL) += tsx.o
+obj-y += usercopy.o
 obj-y += x86_emulate.o
 obj-$(CONFIG_TBOOT) += tboot.o
 obj-y += hpet.o
diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index 59489cd75af6..1fddfac8303e 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -14,10 +14,7 @@ obj-y += ro-page-fault.o
 obj-$(CONFIG_PV_SHIM) += shim.o
 obj-$(CONFIG_TRACEBUFFER) += trace.o
 obj-y += traps.o
-obj-$(CONFIG_PV) += usercopy.o
+obj-y += usercopy.o
obj-bin-y += dom0_build.init.o
 obj-bin-y += gpr_switch.o
-
-# Allows usercopy.c to include itself
-$(obj)/usercopy.o: CFLAGS-y += -iquote .
diff --git a/xen/arch/x86/pv/usercopy.c b/xen/arch/x86/pv/usercopy.c
index a24b52cc66c1..6ca6eca5d818 100644
--- a/xen/arch/x86/pv/usercopy.c
+++ b/xen/arch/x86/pv/usercopy.c
@@ -64,8 +64,6 @@ unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int
     return n;
 }
-#if GUARD(1) + 0
-
 /**
  * copy_to_guest_pv: - Copy a block of data into PV guest space.
  * @to:   Destination address, in PV guest space.
@@ -139,16 +137,6 @@ unsigned int copy_from_guest_pv(void *to, const void __user *from,
     return n;
 }
-# undef GUARD
-# define GUARD UA_DROP
-# define copy_to_guest_ll copy_to_unsafe_ll
-# define copy_from_guest_ll copy_from_unsafe_ll
-# undef __user
-# define __user
-# include __FILE__
-
-#endif /* GUARD(1) */
-
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
new file mode 100644
index 000000000000..9c12eda64181
--- /dev/null
+++ b/xen/arch/x86/usercopy.c
@@ -0,0 +1,77 @@
+/*
+ * User address space access functions.
+ *
+ * Copyright 1997 Andi Kleen <[email protected]>
+ * Copyright 1997 Linus Torvalds
+ * Copyright 2002 Andi Kleen <[email protected]>
+ */
+
+#include <xen/lib.h>
+#include <xen/sched.h>
+#include <asm/uaccess.h>
+
+# define GUARD UA_DROP
+# define copy_to_guest_ll copy_to_unsafe_ll
+# define copy_from_guest_ll copy_from_unsafe_ll
+# undef __user
+# define __user
+
+unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
+{
+    GUARD(unsigned dummy);
+
+    stac();
+    asm_inline volatile (
+        GUARD(
+        "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
+        )
+        "1:  rep movsb\n"
+        "2:\n"
+        _ASM_EXTABLE(1b, 2b)
+        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from)
+          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+        :: "memory" );
+    clac();
+
+    return n;
+}
+
+unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int n)
+{
+    unsigned dummy;
+
+    stac();
+    asm_inline volatile (
+        GUARD(
+        "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
+        )
+        "1:  rep movsb\n"
+        "2:\n"
+        ".section .fixup,\"ax\"\n"
+        "6:  mov  %[cnt], %k[from]\n"
+        "    xchg %%eax, %[aux]\n"
+        "    xor  %%eax, %%eax\n"
+        "    rep stosb\n"
+        "    xchg %[aux], %%eax\n"
+        "    mov  %k[from], %[cnt]\n"
+        "    jmp 2b\n"
+        ".previous\n"
+        _ASM_EXTABLE(1b, 6b)
+        : [cnt] "+c" (n), [to] "+D" (to), [from] "+S" (from),
+          [aux] "=&r" (dummy)
+          GUARD(, [scratch1] "=&r" (dummy), [scratch2] "=&r" (dummy))
+        :: "memory" );
+    clac();
+
+    return n;
+}


Reply via email to