During kexec_segment loading, when copying the content of the segment
(i.e. kexec_segment::kbuf or kexec_segment::buf) to its associated
pages, kimage_load_{cma,normal,crash}_segment handle the case where the
physical address of the segment is not page-aligned, e.g. in
kimage_load_normal_segment:
```
        page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
        // ...
        ptr = kmap_local_page(page);
        // ...
        ptr += maddr & ~PAGE_MASK;
        mchunk = min_t(size_t, mbytes,
                PAGE_SIZE - (maddr & ~PAGE_MASK));
        // ^^^ non-page-aligned segments handled here ^^^
        // ...
        if (image->file_mode)
                memcpy(ptr, kbuf, uchunk);
        else
                result = copy_from_user(ptr, buf, uchunk);
```
(similar logic is present in kimage_load_{cma,crash}_segment).

This handling is actually not needed because, prior to being loaded, all
kexec_segments first go through a vetting step in
`sanity_check_segment_list`, which rejects any segment that is not
page-aligned:
```
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                mstart = image->segment[i].mem;
                mend   = mstart + image->segment[i].memsz;
                // ...
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        return -EADDRNOTAVAIL;
                // ...
        }
```
If `sanity_check_segment_list` finds a non-page-aligned segment, the
whole kexec load is aborted and no segment is loaded.
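For illustration, here is a simplified sketch of that ordering in the
`kexec_load` path (based on `do_kexec_load()` in kernel/kexec.c; error
handling and details elided, not part of this patch):
```
        ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
        if (ret)
                /*
                 * sanity_check_segment_list() rejected a segment (e.g.
                 * with -EADDRNOTAVAIL): no image exists, nothing loads.
                 */
                return ret;
        // ...
        for (i = 0; i < nr_segments; i++) {
                ret = kimage_load_segment(image, i);
                if (ret)
                        goto out;
        }
```
The vetting therefore always runs to completion before the first call to
any of the kimage_load_*_segment helpers.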

This means that `kimage_load_{cma,normal,crash}_segment` never actually
have to handle non-page-aligned segments and `(maddr & ~PAGE_MASK) == 0`
always holds, whether the segment comes from a file (i.e. the
`kexec_file_load` syscall), from a user-space buffer (i.e. the
`kexec_load` syscall) or was created by the kernel through
`kexec_add_buffer`. In the last case, `kexec_add_buffer` explicitly
enforces the page alignment:
```
        /* Ensure minimum alignment needed for segments. */
        kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
        kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
```

Signed-off-by: Justinien Bouron <[email protected]>
Reviewed-by: Gunnar Kudrjavets <[email protected]>
---
Changes since v1:
        - Reworked commit message as requested by Baoquan He
          <[email protected]>
        - Removed accidental whitespace change
        - v1 Link: https://lore.kernel.org/lkml/[email protected]/

Changes since v2:
        - Removed unused variable in kimage_load_cma_segment() which was
          causing a warning and failing build with `make W=1`. Thanks
          Andy Shevchenko for finding this issue
        - v2 Link: https://lore.kernel.org/lkml/[email protected]/
---
 kernel/kexec_core.c | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index fa00b239c5d9..5ed7a2383d5d 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -742,7 +742,6 @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
        struct kexec_segment *segment = &image->segment[idx];
        struct page *cma = image->segment_cma[idx];
        char *ptr = page_address(cma);
-       unsigned long maddr;
        size_t ubytes, mbytes;
        int result = 0;
        unsigned char __user *buf = NULL;
@@ -754,15 +753,12 @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
-       maddr = segment->mem;
 
        /* Then copy from source buffer to the CMA one */
        while (mbytes) {
                size_t uchunk, mchunk;
 
-               ptr += maddr & ~PAGE_MASK;
-               mchunk = min_t(size_t, mbytes,
-                               PAGE_SIZE - (maddr & ~PAGE_MASK));
+               mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);
 
                if (uchunk) {
@@ -784,7 +780,6 @@ static int kimage_load_cma_segment(struct kimage *image, int idx)
                }
 
                ptr    += mchunk;
-               maddr  += mchunk;
                mbytes -= mchunk;
 
                cond_resched();
@@ -839,9 +834,7 @@ static int kimage_load_normal_segment(struct kimage *image, int idx)
                ptr = kmap_local_page(page);
                /* Start with a clear page */
                clear_page(ptr);
-               ptr += maddr & ~PAGE_MASK;
-               mchunk = min_t(size_t, mbytes,
-                               PAGE_SIZE - (maddr & ~PAGE_MASK));
+               mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);
 
                if (uchunk) {
@@ -904,9 +897,7 @@ static int kimage_load_crash_segment(struct kimage *image, int idx)
                }
                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
                ptr = kmap_local_page(page);
-               ptr += maddr & ~PAGE_MASK;
-               mchunk = min_t(size_t, mbytes,
-                               PAGE_SIZE - (maddr & ~PAGE_MASK));
+               mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
-- 
2.43.0

