On Sun, Oct 13, 2024 at 11:50:55PM -0700, Linus Torvalds wrote:
> Anyway, the attached patch
> 
> diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
> index d066aecf8aeb..7d5730aa18b8 100644
> --- a/arch/x86/lib/getuser.S
> +++ b/arch/x86/lib/getuser.S
> @@ -37,11 +37,17 @@
> 
>  #define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC
> 
> +#define X86_CANONICAL_MASK ALTERNATIVE \
> +	"movq $0x80007fffffffffff,%rdx", \
> +	"movq $0x80ffffffffffffff,%rdx", X86_FEATURE_LA57
> +
>  .macro check_range size:req
>  .if IS_ENABLED(CONFIG_X86_64)
>  	mov %rax, %rdx
>  	sar $63, %rdx
>  	or %rdx, %rax
> +	X86_CANONICAL_MASK
> +	and %rdx,%rax
>  .else
>  	cmp $TASK_SIZE_MAX-\size+1, %eax
>  	jae .Lbad_get_user
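
For reference, the masking variant quoted above computes something like
the following in C (an illustrative sketch, not kernel code: clamp_mask
is a made-up name and the non-LA57 constant is hard-coded; vaddr plays
the role of %rax):

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_mask(uint64_t vaddr)
{
	int64_t sign = (int64_t)vaddr >> 63;	/* sar $63, %rdx */
	vaddr |= (uint64_t)sign;		/* or %rdx, %rax */
	return vaddr & 0x80007fffffffffffULL;	/* and with X86_CANONICAL_MASK */
}

int main(void)
{
	/* Canonical user pointer: passes through unchanged. */
	printf("%016llx\n", (unsigned long long)clamp_mask(0x00007fffdeadbeefULL));
	/* LAM-style tagged user pointer: the metadata bits are cleared. */
	printf("%016llx\n", (unsigned long long)clamp_mask(0x01230000deadbeefULL));
	/* Kernel pointer: OR with the sign mask gives all-ones, and the
	 * AND turns that into a non-canonical constant, so any access
	 * through it faults. */
	printf("%016llx\n", (unsigned long long)clamp_mask(0xffff8000deadbeefULL));
	return 0;
}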
Given that LAM enforces bit 47/56 to be equal to bit 63, I think we can
do this unconditionally instead of masking:

diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index d066aecf8aeb..86d4511520b1 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -37,9 +37,14 @@

 #define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RDTSC

+#define SHIFT_LEFT_TO_MSB ALTERNATIVE \
+	"shl $(64 - 48), %rdx", \
+	"shl $(64 - 57), %rdx", X86_FEATURE_LA57
+
 .macro check_range size:req
 .if IS_ENABLED(CONFIG_X86_64)
 	mov %rax, %rdx
+	SHIFT_LEFT_TO_MSB
 	sar $63, %rdx
 	or %rdx, %rax
 .else
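
To illustrate the difference, here is the shift variant modeled the same
way (again a sketch with the non-LA57 width of 48 address bits
hard-coded; clamp_shift is a made-up name, not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_shift(uint64_t vaddr)
{
	/* shl $(64 - 48), %rdx: bit 47 moves up to bit 63,
	 * sar $63, %rdx: sign-extend it over the whole register. */
	int64_t mask = (int64_t)(vaddr << (64 - 48)) >> 63;
	return vaddr | (uint64_t)mask;			/* or %rdx, %rax */
}

int main(void)
{
	/* Canonical user pointer: unchanged. */
	printf("%016llx\n", (unsigned long long)clamp_shift(0x00007fffdeadbeefULL));
	/* LAM-tagged user pointer (bit 47 clear): unchanged, the
	 * metadata survives and the hardware strips it on access. */
	printf("%016llx\n", (unsigned long long)clamp_shift(0x01230000deadbeefULL));
	/* Anything with bit 47 set becomes all-ones and faults. */
	printf("%016llx\n", (unsigned long long)clamp_shift(0xffff8000deadbeefULL));
	return 0;
}

Because LAM guarantees bit 47/56 equals bit 63, sign-extending from
bit 47/56 catches every kernel pointer even when metadata bits are set,
while legitimate tagged user pointers pass through with their metadata
intact: no masking constant and no extra and needed.

-- 
  Kiryl Shutsemau / Kirill A. Shutemov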