kexec: define functions to map and unmap segments

Implement kimage_map_segment() to enable IMA to map the measurement log
list into the kimage structure during the kexec 'load' stage. The function
gathers the source pages within the specified address range and maps them
into a single contiguous virtual address range.

This is in preparation for its later use by IMA.

Implement kimage_unmap_segment() for unmapping segments using vunmap().
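
A rough usage sketch (illustrative only, not part of this patch; the
caller name and error handling are assumptions) of how IMA-side code
could use the pair of helpers at 'load' time:

	/* Hypothetical caller: copy the IMA measurement list into a
	 * kexec segment reserved during the kexec 'load' stage. */
	static int example_copy_ima_log(struct kimage *image,
					unsigned long segment_addr,
					unsigned long segment_size,
					const void *log, size_t log_size)
	{
		void *dst = kimage_map_segment(image, segment_addr,
					       segment_size);

		if (!dst)
			return -ENOMEM;

		memcpy(dst, log, log_size);	/* log_size <= segment_size */
		kimage_unmap_segment(dst);
		return 0;
	}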

Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Dave Young <dyoung@redhat.com>
Co-developed-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
Signed-off-by: Steven Chen <chenste@linux.microsoft.com>
Acked-by: Baoquan He <bhe@redhat.com>
Tested-by: Stefan Berger <stefanb@linux.ibm.com> # ppc64/kvm
Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
Author: Steven Chen
Date: 2025-04-21 15:25:09 -07:00
Committed-by: Mimi Zohar
Commit: 0091d9241e (parent: c95e1acb6d)

2 changed files with 60 additions and 0 deletions

include/linux/kexec.h
@@ -474,13 +474,19 @@ extern bool kexec_file_dbg_print;
 #define kexec_dprintk(fmt, arg...) \
 	do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
+extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
+struct kimage;
 static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false
 #endif /* CONFIG_KEXEC_CORE */
 

kernel/kexec_core.c
@@ -877,6 +877,60 @@ int kimage_load_segment(struct kimage *image,
 	return result;
 }
 
+void *kimage_map_segment(struct kimage *image,
+			 unsigned long addr, unsigned long size)
+{
+	unsigned long src_page_addr, dest_page_addr = 0;
+	unsigned long eaddr = addr + size;
+	kimage_entry_t *ptr, entry;
+	struct page **src_pages;
+	unsigned int npages;
+	void *vaddr = NULL;
+	int i;
+
+	/*
+	 * Collect the source pages and map them in a contiguous VA range.
+	 */
+	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+	if (!src_pages) {
+		pr_err("Could not allocate ima pages array.\n");
+		return NULL;
+	}
+
+	i = 0;
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION) {
+			dest_page_addr = entry & PAGE_MASK;
+		} else if (entry & IND_SOURCE) {
+			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+				src_page_addr = entry & PAGE_MASK;
+				src_pages[i++] =
+					virt_to_page(__va(src_page_addr));
+				if (i == npages)
+					break;
+				dest_page_addr += PAGE_SIZE;
+			}
+		}
+	}
+
+	/* Sanity check. */
+	WARN_ON(i < npages);
+
+	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+	kfree(src_pages);
+
+	if (!vaddr)
+		pr_err("Could not map ima buffer.\n");
+
+	return vaddr;
+}
+
+void kimage_unmap_segment(void *segment_buffer)
+{
+	vunmap(segment_buffer);
+}
+
 struct kexec_load_limit {
 	/* Mutex protects the limit count. */
 	struct mutex mutex;
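
Note on the entry walk in kimage_map_segment() above: in the kimage
entry list, an IND_DESTINATION entry records where the pages named by
the following IND_SOURCE entries will be placed at kexec time, and
consecutive IND_SOURCE entries fill consecutive destination pages,
which is why dest_page_addr advances by PAGE_SIZE per matched source
page. A minimal decoding sketch (same kexec constants; the pr_info()
is added purely for illustration):

	kimage_entry_t *ptr, entry;
	unsigned long dest = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION) {
			dest = entry & PAGE_MASK;
		} else if (entry & IND_SOURCE) {
			pr_info("src page %#lx -> dest page %#lx\n",
				entry & PAGE_MASK, dest);
			dest += PAGE_SIZE;
		}
	}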