On 04/15/25 at 07:10pm, steven chen wrote:
> From: Steven Chen <[email protected]>
 ^^^^^^
> 
> Implement kimage_map_segment() to enable IMA to map the measurement log
> list into the kimage structure during the kexec 'load' stage. This
> function gathers the source pages that fall within the specified address
> range and maps them into a contiguous virtual address range.
> 
> This is in preparation for later use by IMA.
> 
> Implement kimage_unmap_segment() for unmapping segments using vunmap().
> 
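To make sure I understand the intended usage from the IMA side, it
would be something like the below, right? (Sketch only; 'ima_seg_addr'
and 'ima_seg_size' are made-up names, not part of this patch.)

	void *buf;

	buf = kimage_map_segment(image, ima_seg_addr, ima_seg_size);
	if (!buf)
		return -ENOMEM;

	/* read or update the measurement list through 'buf' */

	kimage_unmap_segment(buf);
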
> From: Tushar Sugandhi <[email protected]>
  ^^^^^^
> Signed-off-by: Tushar Sugandhi <[email protected]>
  ^^^^^^^
> Cc: Eric Biederman <[email protected]>
> Cc: Baoquan He <[email protected]> 
> Cc: Vivek Goyal <[email protected]>
> Cc: Dave Young <[email protected]>
> Signed-off-by: steven chen <[email protected]>
  ^^^^^

The authorship tags on this patch are a little confusing: I can't tell
who the real author is and who the co-author is, between you and Tushar.
You may need to refer to Documentation/process/5.Posting.rst to make
that clear.

> Acked-by: Baoquan He <[email protected]>
> ---
>  include/linux/kexec.h |  6 +++++
>  kernel/kexec_core.c   | 54 +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 60 insertions(+)
> 
> diff --git a/include/linux/kexec.h b/include/linux/kexec.h
> index f0e9f8eda7a3..7d6b12f8b8d0 100644
> --- a/include/linux/kexec.h
> +++ b/include/linux/kexec.h
> @@ -467,13 +467,19 @@ extern bool kexec_file_dbg_print;
>  #define kexec_dprintk(fmt, arg...) \
>          do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
>  
> +extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
> +extern void kimage_unmap_segment(void *buffer);
>  #else /* !CONFIG_KEXEC_CORE */
>  struct pt_regs;
>  struct task_struct;
> +struct kimage;
>  static inline void __crash_kexec(struct pt_regs *regs) { }
>  static inline void crash_kexec(struct pt_regs *regs) { }
>  static inline int kexec_should_crash(struct task_struct *p) { return 0; }
>  static inline int kexec_crash_loaded(void) { return 0; }
> +static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
> +{ return NULL; }
> +static inline void kimage_unmap_segment(void *buffer) { }
>  #define kexec_in_progress false
>  #endif /* CONFIG_KEXEC_CORE */
>  
> diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
> index c0bdc1686154..a5e378e1dc7f 100644
> --- a/kernel/kexec_core.c
> +++ b/kernel/kexec_core.c
> @@ -867,6 +867,60 @@ int kimage_load_segment(struct kimage *image,
>       return result;
>  }
>  
> +void *kimage_map_segment(struct kimage *image,
> +                      unsigned long addr, unsigned long size)
> +{
> +     unsigned long src_page_addr, dest_page_addr = 0;
> +     unsigned long eaddr = addr + size;
> +     kimage_entry_t *ptr, entry;
> +     struct page **src_pages;
> +     unsigned int npages;
> +     void *vaddr = NULL;
> +     int i;
> +
> +     /*
> +      * Collect the source pages and map them in a contiguous VA range.
> +      */
> +     npages = PFN_UP(eaddr) - PFN_DOWN(addr);
> +     src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
> +     if (!src_pages) {
> +             pr_err("Could not allocate ima pages array.\n");
> +             return NULL;
> +     }
> +
> +     i = 0;
> +     for_each_kimage_entry(image, ptr, entry) {
> +             if (entry & IND_DESTINATION) {
> +                     dest_page_addr = entry & PAGE_MASK;
> +             } else if (entry & IND_SOURCE) {
> +                     if (dest_page_addr >= addr && dest_page_addr < eaddr) {
> +                             src_page_addr = entry & PAGE_MASK;
> +                             src_pages[i++] =
> +                                     virt_to_page(__va(src_page_addr));
> +                             if (i == npages)
> +                                     break;
> +                             dest_page_addr += PAGE_SIZE;
> +                     }
> +             }
> +     }
> +
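For anyone else reading along: the kimage entry stream is a list of
tagged physical addresses, where an IND_DESTINATION entry sets the
current destination page and each subsequent IND_SOURCE entry supplies
the source page for the next consecutive destination page. Conceptually,
for a two-page segment destined at 0x1000 (addresses made up):

	IND_DESTINATION | 0x1000   /* destination run starts at 0x1000 */
	IND_SOURCE      | 0x8000   /* source page, will land at 0x1000 */
	IND_SOURCE      | 0x9000   /* source page, will land at 0x2000 */
	IND_DONE                   /* end of the stream */

That is why the loop above can advance dest_page_addr by PAGE_SIZE for
each source entry it consumes.
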
> +     /* Sanity check. */
> +     WARN_ON(i < npages);
> +
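One nit: if the walk ends with i < npages, the WARN fires but the
vmap() below still runs over the uninitialized tail of src_pages[].
A stricter variant could bail out instead, e.g. (sketch only):

	if (WARN_ON(i < npages)) {
		kfree(src_pages);
		return NULL;
	}
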
> +     vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
> +     kfree(src_pages);
> +
> +     if (!vaddr)
> +             pr_err("Could not map ima buffer.\n");
> +
> +     return vaddr;
> +}
> +
> +void kimage_unmap_segment(void *segment_buffer)
> +{
> +     vunmap(segment_buffer);
> +}
> +
>  struct kexec_load_limit {
>       /* Mutex protects the limit count. */
>       struct mutex mutex;
> -- 
> 2.43.0
> 

