Re: [PATCH v5 1/6] kexec: move locking into do_kexec_load

2021-07-28 Thread Eric W. Biederman
Arnd Bergmann  writes:

> From: Arnd Bergmann 
>
> The locking is the same between the native and compat version of
> sys_kexec_load(), so it can be done in the common implementation
> to reduce duplication.

Acked-by: "Eric W. Biederman" 

>
> Co-developed-by: Eric Biederman 
> Co-developed-by: Christoph Hellwig 
> Signed-off-by: Arnd Bergmann 
> ---
>  kernel/kexec.c | 44 
>  1 file changed, 16 insertions(+), 28 deletions(-)
>
> diff --git a/kernel/kexec.c b/kernel/kexec.c
> index c82c6c06f051..9c7aef8f4bb6 100644
> --- a/kernel/kexec.c
> +++ b/kernel/kexec.c
> @@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
>   unsigned long i;
>   int ret;
>  
> + /*
> +  * Because we write directly to the reserved memory region when loading
> +  * crash kernels we need a mutex here to prevent multiple crash kernels
> +  * from attempting to load simultaneously, and to prevent a crash kernel
> +  * from loading over the top of a in use crash kernel.
> +  *
> +  * KISS: always take the mutex.
> +  */
> + if (!mutex_trylock(&kexec_mutex))
> + return -EBUSY;
> +
>   if (flags & KEXEC_ON_CRASH) {
>   dest_image = &kexec_crash_image;
>   if (kexec_crash_image)
> @@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
>   if (nr_segments == 0) {
>   /* Uninstall image */
>   kimage_free(xchg(dest_image, NULL));
> - return 0;
> + ret = 0;
> + goto out_unlock;
>   }
>   if (flags & KEXEC_ON_CRASH) {
>   /*
> @@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
>  
>   ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
>   if (ret)
> - return ret;
> + goto out_unlock;
>  
>   if (flags & KEXEC_PRESERVE_CONTEXT)
>   image->preserve_context = 1;
> @@ -171,6 +183,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
>   arch_kexec_protect_crashkres();
>  
>   kimage_free(image);
> +out_unlock:
> + mutex_unlock(&kexec_mutex);
>   return ret;
>  }
>  
> @@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
>   ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
>   return -EINVAL;
>  
> - /* Because we write directly to the reserved memory
> -  * region when loading crash kernels we need a mutex here to
> -  * prevent multiple crash  kernels from attempting to load
> -  * simultaneously, and to prevent a crash kernel from loading
> -  * over the top of a in use crash kernel.
> -  *
> -  * KISS: always take the mutex.
> -  */
> - if (!mutex_trylock(&kexec_mutex))
> - return -EBUSY;
> -
>   result = do_kexec_load(entry, nr_segments, segments, flags);
>  
> - mutex_unlock(&kexec_mutex);
> -
>   return result;
>  }
>  
> @@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
>   return -EFAULT;
>   }
>  
> - /* Because we write directly to the reserved memory
> -  * region when loading crash kernels we need a mutex here to
> -  * prevent multiple crash  kernels from attempting to load
> -  * simultaneously, and to prevent a crash kernel from loading
> -  * over the top of a in use crash kernel.
> -  *
> -  * KISS: always take the mutex.
> -  */
> - if (!mutex_trylock(&kexec_mutex))
> - return -EBUSY;
> -
>   result = do_kexec_load(entry, nr_segments, ksegments, flags);
>  
> - mutex_unlock(&kexec_mutex);
> -
>   return result;
>  }
>  #endif
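
Put another way: with this applied both syscall entry points are thin
wrappers and all of the locking lives in do_kexec_load() itself. A rough
sketch of the resulting flow (simplified; it omits the crash-kernel,
uninstall and image-swap paths shown in the diff, and abbreviates the
signature):

	static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
				 struct kexec_segment *segments, unsigned long flags)
	{
		struct kimage *image;
		int ret;

		/* Serialize loaders; fail fast rather than sleep on the lock. */
		if (!mutex_trylock(&kexec_mutex))
			return -EBUSY;

		ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
		if (ret)
			goto out_unlock;

		/* ... load the segments, install the image, free the old one ... */

		ret = 0;
	out_unlock:
		mutex_unlock(&kexec_mutex);
		return ret;
	}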


[PATCH v5 1/6] kexec: move locking into do_kexec_load

2021-07-27 Thread Arnd Bergmann
From: Arnd Bergmann 

The locking is the same between the native and compat version of
sys_kexec_load(), so it can be done in the common implementation
to reduce duplication.

Co-developed-by: Eric Biederman 
Co-developed-by: Christoph Hellwig 
Signed-off-by: Arnd Bergmann 
---
 kernel/kexec.c | 44 
 1 file changed, 16 insertions(+), 28 deletions(-)

diff --git a/kernel/kexec.c b/kernel/kexec.c
index c82c6c06f051..9c7aef8f4bb6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -110,6 +110,17 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
unsigned long i;
int ret;
 
+   /*
+* Because we write directly to the reserved memory region when loading
+* crash kernels we need a mutex here to prevent multiple crash kernels
+* from attempting to load simultaneously, and to prevent a crash kernel
+* from loading over the top of a in use crash kernel.
+*
+* KISS: always take the mutex.
+*/
+   if (!mutex_trylock(&kexec_mutex))
+   return -EBUSY;
+
if (flags & KEXEC_ON_CRASH) {
dest_image = &kexec_crash_image;
if (kexec_crash_image)
@@ -121,7 +132,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
if (nr_segments == 0) {
/* Uninstall image */
kimage_free(xchg(dest_image, NULL));
-   return 0;
+   ret = 0;
+   goto out_unlock;
}
if (flags & KEXEC_ON_CRASH) {
/*
@@ -134,7 +146,7 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
if (ret)
-   return ret;
+   goto out_unlock;
 
if (flags & KEXEC_PRESERVE_CONTEXT)
image->preserve_context = 1;
@@ -171,6 +183,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
arch_kexec_protect_crashkres();
 
kimage_free(image);
+out_unlock:
+   mutex_unlock(&kexec_mutex);
return ret;
 }
 
@@ -247,21 +261,8 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
return -EINVAL;
 
-   /* Because we write directly to the reserved memory
-* region when loading crash kernels we need a mutex here to
-* prevent multiple crash  kernels from attempting to load
-* simultaneously, and to prevent a crash kernel from loading
-* over the top of a in use crash kernel.
-*
-* KISS: always take the mutex.
-*/
-   if (!mutex_trylock(&kexec_mutex))
-   return -EBUSY;
-
result = do_kexec_load(entry, nr_segments, segments, flags);
 
-   mutex_unlock(&kexec_mutex);
-
return result;
 }
 
@@ -301,21 +302,8 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
return -EFAULT;
}
 
-   /* Because we write directly to the reserved memory
-* region when loading crash kernels we need a mutex here to
-* prevent multiple crash  kernels from attempting to load
-* simultaneously, and to prevent a crash kernel from loading
-* over the top of a in use crash kernel.
-*
-* KISS: always take the mutex.
-*/
-   if (!mutex_trylock(&kexec_mutex))
-   return -EBUSY;
-
result = do_kexec_load(entry, nr_segments, ksegments, flags);
 
-   mutex_unlock(&kexec_mutex);
-
return result;
 }
 #endif
-- 
2.29.2