Re: [PATCH v1 3/3] powerpc/code-patching: Optimise patch_memcpy() to 4 byte chunks

2024-03-17 Thread Benjamin Gray
On Fri, 2024-03-15 at 06:39 +, Christophe Leroy wrote:
> 
> 
> Le 15/03/2024 à 03:57, Benjamin Gray a écrit :
> > As we are patching instructions, we can assume the length is a
> > multiple
> > of 4 and the destination address is aligned.
> > 
> > Atomicity of patching a prefixed instruction is not a concern, as
> > the
> > original implementation doesn't provide it anyway.
> 
> This patch looks unnecessary.
> 
> copy_to_kernel_nofault() is what you want to use instead.

Yeah, I would drop this patch when using copy_to_kernel_nofault()
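
For reference, a rough sketch of how the copy branch of
__patch_instructions() could look with copy_to_kernel_nofault()
(untested; the error value would be whatever copy_to_kernel_nofault()
returns, i.e. -EFAULT rather than the -EPERM the current helpers use):

	} else {
		/* copy the whole range; a faulting store makes this return -EFAULT */
		err = copy_to_kernel_nofault(patch_addr, code, len);
	}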

> 
> > 
> > Signed-off-by: Benjamin Gray 
> > ---
> >   arch/powerpc/lib/code-patching.c | 8 ++++----
> >   1 file changed, 4 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> > index c6633759b509..ed450a32918c 100644
> > --- a/arch/powerpc/lib/code-patching.c
> > +++ b/arch/powerpc/lib/code-patching.c
> > @@ -394,10 +394,10 @@ static int patch_memset32(u32 *addr, u32 val, size_t count)
> >     return -EPERM;
> >   }
> >   
> > -static int patch_memcpy(void *dst, void *src, size_t len)
> > +static int patch_memcpy32(u32 *dst, u32 *src, size_t count)
> >   {
> > -   for (void *end = src + len; src < end; dst++, src++)
> > -   __put_kernel_nofault(dst, src, u8, failed);
> > +   for (u32 *end = src + count; src < end; dst++, src++)
> > +   __put_kernel_nofault(dst, src, u32, failed);
> >   
> >     return 0;
> >   
> > @@ -424,7 +424,7 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
> >     err = patch_memset32(patch_addr, val, len / 4);
> >     }
> >     } else {
> > -   err = patch_memcpy(patch_addr, code, len);
> > +   err = patch_memcpy32(patch_addr, code, len / 4);
> >     }
> >   
> >     smp_wmb();  /* smp write barrier */



Re: [PATCH v1 3/3] powerpc/code-patching: Optimise patch_memcpy() to 4 byte chunks

2024-03-15 Thread Christophe Leroy


Le 15/03/2024 à 03:57, Benjamin Gray a écrit :
> As we are patching instructions, we can assume the length is a multiple
> of 4 and the destination address is aligned.
> 
> Atomicity of patching a prefixed instruction is not a concern, as the
> original implementation doesn't provide it anyway.

This patch looks unnecessary.

copy_to_kernel_nofault() is what you want to use instead.

> 
> Signed-off-by: Benjamin Gray 
> ---
>   arch/powerpc/lib/code-patching.c | 8 ++++----
>   1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
> index c6633759b509..ed450a32918c 100644
> --- a/arch/powerpc/lib/code-patching.c
> +++ b/arch/powerpc/lib/code-patching.c
> @@ -394,10 +394,10 @@ static int patch_memset32(u32 *addr, u32 val, size_t count)
>   return -EPERM;
>   }
>   
> -static int patch_memcpy(void *dst, void *src, size_t len)
> +static int patch_memcpy32(u32 *dst, u32 *src, size_t count)
>   {
> - for (void *end = src + len; src < end; dst++, src++)
> - __put_kernel_nofault(dst, src, u8, failed);
> + for (u32 *end = src + count; src < end; dst++, src++)
> + __put_kernel_nofault(dst, src, u32, failed);
>   
>   return 0;
>   
> @@ -424,7 +424,7 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
>   err = patch_memset32(patch_addr, val, len / 4);
>   }
>   } else {
> - err = patch_memcpy(patch_addr, code, len);
> + err = patch_memcpy32(patch_addr, code, len / 4);
>   }
>   
>   smp_wmb();  /* smp write barrier */


[PATCH v1 3/3] powerpc/code-patching: Optimise patch_memcpy() to 4 byte chunks

2024-03-14 Thread Benjamin Gray
As we are patching instructions, we can assume the length is a multiple
of 4 and the destination address is aligned.

Atomicity of patching a prefixed instruction is not a concern, as the
original implementation doesn't provide it anyway.

Signed-off-by: Benjamin Gray 
---
 arch/powerpc/lib/code-patching.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c6633759b509..ed450a32918c 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -394,10 +394,10 @@ static int patch_memset32(u32 *addr, u32 val, size_t count)
return -EPERM;
 }
 
-static int patch_memcpy(void *dst, void *src, size_t len)
+static int patch_memcpy32(u32 *dst, u32 *src, size_t count)
 {
-   for (void *end = src + len; src < end; dst++, src++)
-   __put_kernel_nofault(dst, src, u8, failed);
+   for (u32 *end = src + count; src < end; dst++, src++)
+   __put_kernel_nofault(dst, src, u32, failed);
 
return 0;
 
@@ -424,7 +424,7 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
err = patch_memset32(patch_addr, val, len / 4);
}
} else {
-   err = patch_memcpy(patch_addr, code, len);
+   err = patch_memcpy32(patch_addr, code, len / 4);
}
 
smp_wmb();  /* smp write barrier */
-- 
2.44.0
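
For readers skimming the hunk above: the "failed" label sits just below
the shown context. A minimal sketch of the resulting helper as a whole,
with the -EPERM return mirrored from patch_memset32() in the same file
(illustrative only, not necessarily the exact upstream code):

static int patch_memcpy32(u32 *dst, u32 *src, size_t count)
{
	/* dst/src are u32-aligned; count is a number of 32-bit words */
	for (u32 *end = src + count; src < end; dst++, src++)
		/* jumps to 'failed' if the store to the patch mapping faults */
		__put_kernel_nofault(dst, src, u32, failed);

	return 0;

failed:
	return -EPERM;
}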