Re: [PATCH 1/2] x86/mm/ident_map: Add PUD level 1GB page support

2017-04-25 Thread Xunlei Pang
On 04/26/2017 at 03:49 AM, Yinghai Lu wrote:
> On Tue, Apr 25, 2017 at 2:13 AM, Xunlei Pang  wrote:
>> The current kernel_ident_mapping_init() creates the identity
>> mapping using 2MB pages (PMD level); this patch adds 1GB
>> page (PUD level) support.
>>
>> This is useful on large machines to save some reserved memory
>> (as paging structures) in the kdump case when kexec sets up
>> identity mappings before booting into the new kernel.
>>
>> We will utilize this new support in the following patch.
>>
>> Signed-off-by: Xunlei Pang 
>> ---
>>  arch/x86/boot/compressed/pagetable.c |  2 +-
>>  arch/x86/include/asm/init.h  |  3 ++-
>>  arch/x86/kernel/machine_kexec_64.c   |  2 +-
>>  arch/x86/mm/ident_map.c  | 13 -
>>  arch/x86/power/hibernate_64.c|  2 +-
>>  5 files changed, 17 insertions(+), 5 deletions(-)
>>
>> diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
>> index 56589d0..1d78f17 100644
>> --- a/arch/x86/boot/compressed/pagetable.c
>> +++ b/arch/x86/boot/compressed/pagetable.c
>> @@ -70,7 +70,7 @@ static void *alloc_pgt_page(void *context)
>>   * Due to relocation, pointers must be assigned at run time not build time.
>>   */
>>  static struct x86_mapping_info mapping_info = {
>> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
>> +   .page_flag   = __PAGE_KERNEL_LARGE_EXEC,
>>  };
>>
>>  /* Locates and clears a region for a new top level page table. */
>> diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
>> index 737da62..46eab1a 100644
>> --- a/arch/x86/include/asm/init.h
>> +++ b/arch/x86/include/asm/init.h
>> @@ -4,8 +4,9 @@
>>  struct x86_mapping_info {
>> void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
>> void *context;   /* context for alloc_pgt_page */
>> -   unsigned long pmd_flag;  /* page flag for PMD entry */
>> +   unsigned long page_flag; /* page flag for PMD or PUD entry */
>> unsigned long offset;/* ident mapping offset */
>> +   bool use_pud_page;  /* PUD level 1GB page support */
> how about using direct_gbpages instead?
> use_pud_page is confusing.

ok

>
>>  };
>>
>>  int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
>> diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
>> index 085c3b3..1d4f2b0 100644
>> --- a/arch/x86/kernel/machine_kexec_64.c
>> +++ b/arch/x86/kernel/machine_kexec_64.c
>> @@ -113,7 +113,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
>> struct x86_mapping_info info = {
>> .alloc_pgt_page = alloc_pgt_page,
>> .context= image,
>> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
>> +   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
>> };
>> unsigned long mstart, mend;
>> pgd_t *level4p;
>> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
>> index 04210a2..0ad0280 100644
>> --- a/arch/x86/mm/ident_map.c
>> +++ b/arch/x86/mm/ident_map.c
>> @@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
>> if (pmd_present(*pmd))
>> continue;
>>
>> -   set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
>> +   set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
>> }
>>  }
>>
>> @@ -30,6 +30,17 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
>> if (next > end)
>> next = end;
>>
>> +   if (info->use_pud_page) {
>> +   pud_t pudval;
>> +
>> +   if (pud_present(*pud))
>> +   continue;
>> +
>> +   pudval = __pud((addr - info->offset) | info->page_flag);
>> +   set_pud(pud, pudval);
> should mask addr with PUD_MASK.
>    addr &= PUD_MASK;
>    set_pud(pud, __pud((addr - info->offset) | info->page_flag));

Yes, will update, thanks for the catch.
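
For reference, the corrected branch would then look roughly like this (a
sketch folding in both review comments, i.e. the PUD_MASK masking and the
direct_gbpages rename; not the final patch):

	if (info->direct_gbpages) {
		pud_t pudval;

		if (pud_present(*pud))
			continue;

		/* 1GB entries must be 1GB-aligned, so round addr down first */
		addr &= PUD_MASK;
		pudval = __pud((addr - info->offset) | info->page_flag);
		set_pud(pud, pudval);
		continue;
	}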

Regards,
Xunlei

>
>
>> +   continue;
>> +   }
>> +
>> if (pud_present(*pud)) {
>> pmd = pmd_offset(pud, 0);
>> ident_pmd_init(info, pmd, addr, next);
>> diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
>> index 6a61194..a6e21fe 100644
>> --- a/arch/x86/power/hibernate_64.c
>> +++ b/arch/x86/power/hibernate_64.c
>> @@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
>>  {
>> struct x86_mapping_info info = {
>> .alloc_pgt_page = alloc_pgt_page,
>> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
>> +   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
>> .offset = __PAGE_OFFSET,

[PATCH] memmap: Parse "Reserved" together with "reserved"

2017-04-25 Thread Yinghai Lu
Recent x86 kernels, after
 commit 640e1b38b0 ("x86/boot/e820: Basic cleanup of e820.c"),
changed "reserved" to "Reserved" in /sys/firmware/memmap and /proc/iomem.

So add handling for the capitalized spelling here too.

Signed-off-by: Yinghai Lu 

---
 kexec/arch/i386/crashdump-x86.c |2 ++
 kexec/arch/ia64/kexec-ia64.c|2 ++
 kexec/arch/mips/kexec-mips.c|2 ++
 kexec/firmware_memmap.c |2 ++
 4 files changed, 8 insertions(+)

Index: kexec-tools/kexec/arch/i386/crashdump-x86.c
===================================================================
--- kexec-tools.orig/kexec/arch/i386/crashdump-x86.c
+++ kexec-tools/kexec/arch/i386/crashdump-x86.c
@@ -323,6 +323,8 @@ static int get_crash_memory_ranges(struc
type = RANGE_PMEM;
} else if(memcmp(str,"reserved\n",9) == 0 ) {
type = RANGE_RESERVED;
+   } else if(memcmp(str,"Reserved\n",9) == 0 ) {
+   type = RANGE_RESERVED;
} else if (memcmp(str, "GART\n", 5) == 0) {
gart_start = start;
gart_end = end;
Index: kexec-tools/kexec/arch/ia64/kexec-ia64.c
===================================================================
--- kexec-tools.orig/kexec/arch/ia64/kexec-ia64.c
+++ kexec-tools/kexec/arch/ia64/kexec-ia64.c
@@ -117,6 +117,8 @@ int get_memory_ranges(struct memory_rang
}
else if (memcmp(str, "reserved\n", 9) == 0) {
type = RANGE_RESERVED;
+   } else if (memcmp(str, "Reserved\n", 9) == 0) {
+   type = RANGE_RESERVED;
}
else if (memcmp(str, "Crash kernel\n", 13) == 0) {
/* Redefine the memory region boundaries if kernel
Index: kexec-tools/kexec/arch/mips/kexec-mips.c
===================================================================
--- kexec-tools.orig/kexec/arch/mips/kexec-mips.c
+++ kexec-tools/kexec/arch/mips/kexec-mips.c
@@ -57,6 +57,8 @@ int get_memory_ranges(struct memory_rang
type = RANGE_RAM;
} else if (memcmp(str, "reserved\n", 9) == 0) {
type = RANGE_RESERVED;
+   } else if (memcmp(str, "Reserved\n", 9) == 0) {
+   type = RANGE_RESERVED;
} else {
continue;
}
Index: kexec-tools/kexec/firmware_memmap.c
===================================================================
--- kexec-tools.orig/kexec/firmware_memmap.c
+++ kexec-tools/kexec/firmware_memmap.c
@@ -164,6 +164,8 @@ static int parse_memmap_entry(const char
range->type = RANGE_RESERVED;
else if (strcmp(type, "reserved") == 0)
range->type = RANGE_RESERVED;
+   else if (strcmp(type, "Reserved") == 0)
+   range->type = RANGE_RESERVED;
else if (strcmp(type, "ACPI Non-volatile Storage") == 0)
range->type = RANGE_ACPI_NVS;
else if (strcmp(type, "Uncached RAM") == 0)



Re: [tip:x86/boot] x86/boot/e820: Basic cleanup of e820.c

2017-04-25 Thread Yinghai Lu
On Tue, Apr 11, 2017 at 12:37 AM, tip-bot for Ingo Molnar wrote:
> Commit-ID:  640e1b38b00550990cecd809021cd37716e45922
> Gitweb: http://git.kernel.org/tip/640e1b38b00550990cecd809021cd37716e45922
> Author: Ingo Molnar 
> AuthorDate: Sat, 28 Jan 2017 11:13:08 +0100
> Committer:  Ingo Molnar 
> CommitDate: Sat, 28 Jan 2017 14:42:27 +0100
>

> x86/boot/e820: Basic cleanup of e820.c

> @@ -951,49 +924,42 @@ void __init finish_e820_parsing(void)
>  static const char *__init e820_type_to_string(int e820_type)
>  {
> switch (e820_type) {
> -   case E820_RESERVED_KERN:
> -   case E820_RAM:  return "System RAM";
> -   case E820_ACPI: return "ACPI Tables";
> -   case E820_NVS:  return "ACPI Non-volatile Storage";
> -   case E820_UNUSABLE: return "Unusable memory";
> -   case E820_PRAM: return "Persistent Memory (legacy)";
> -   case E820_PMEM: return "Persistent Memory";
> -   default:return "reserved";
> +   case E820_RESERVED_KERN: /* Fall-through: */
> +   case E820_RAM:   return "System RAM";
> +   case E820_ACPI:  return "ACPI Tables";
> +   case E820_NVS:   return "ACPI Non-volatile Storage";
> +   case E820_UNUSABLE:  return "Unusable memory";
> +   case E820_PRAM:  return "Persistent Memory (legacy)";
> +   case E820_PMEM:  return "Persistent Memory";
> +   default: return "Reserved";
> }
>  }
>
...

Hi Ingo,

The reserved ==> Reserved change causes kexec warnings.

Unknown type (Reserved) while parsing /sys/firmware/memmap/18/type.
Please report this as bug. Using RANGE_RESERVED now.
Unknown type (Reserved) while parsing /sys/firmware/memmap/16/type.
Please report this as bug. Using RANGE_RESERVED now.
Unknown type (Reserved) while parsing /sys/firmware/memmap/14/type.
Please report this as bug. Using RANGE_RESERVED now.
Unknown type (Reserved) while parsing /sys/firmware/memmap/22/type.
Please report this as bug. Using RANGE_RESERVED now.
Unknown type (Reserved) while parsing /sys/firmware/memmap/9/type.
Please report this as bug. Using RANGE_RESERVED now.
add_buffer: base:43fff6000 bufsz:80e0 memsz:a000
add_buffer: base:43fff1000 bufsz:44ce memsz:44ce
add_buffer: base:43c00 bufsz:f4c5c0 memsz:3581000
add_buffer: base:439d0d000 bufsz:22f2060 memsz:22f2060
add_buffer: base:43fff bufsz:70 memsz:70
add_buffer: base:43ffef000 bufsz:230 memsz:230
10:~/k # cat /sys/firmware/memmap/14/type
Reserved
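
The warnings come from exact string matching; a minimal standalone sketch
of the pattern (the path and the RANGE_RESERVED name follow kexec-tools,
the entry index is arbitrary):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char type[64];
		FILE *f = fopen("/sys/firmware/memmap/14/type", "r");

		if (!f)
			return 1;
		if (!fgets(type, sizeof(type), f)) {
			fclose(f);
			return 1;
		}
		fclose(f);
		type[strcspn(type, "\n")] = '\0';

		if (strcmp(type, "reserved") == 0)
			puts("RANGE_RESERVED");	/* matches old kernels only */
		else
			printf("Unknown type (%s)\n", type); /* new kernels land here */
		return 0;
	}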

Also, /proc/iomem has that change.

Yinghai



Re: [PATCH 1/2] x86/mm/ident_map: Add PUD level 1GB page support

2017-04-25 Thread Yinghai Lu
On Tue, Apr 25, 2017 at 2:13 AM, Xunlei Pang  wrote:
> The current kernel_ident_mapping_init() creates the identity
> mapping using 2MB pages (PMD level); this patch adds 1GB
> page (PUD level) support.
>
> This is useful on large machines to save some reserved memory
> (as paging structures) in the kdump case when kexec sets up
> identity mappings before booting into the new kernel.
>
> We will utilize this new support in the following patch.
>
> Signed-off-by: Xunlei Pang 
> ---
>  arch/x86/boot/compressed/pagetable.c |  2 +-
>  arch/x86/include/asm/init.h  |  3 ++-
>  arch/x86/kernel/machine_kexec_64.c   |  2 +-
>  arch/x86/mm/ident_map.c  | 13 -
>  arch/x86/power/hibernate_64.c|  2 +-
>  5 files changed, 17 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
> index 56589d0..1d78f17 100644
> --- a/arch/x86/boot/compressed/pagetable.c
> +++ b/arch/x86/boot/compressed/pagetable.c
> @@ -70,7 +70,7 @@ static void *alloc_pgt_page(void *context)
>   * Due to relocation, pointers must be assigned at run time not build time.
>   */
>  static struct x86_mapping_info mapping_info = {
> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
> +   .page_flag   = __PAGE_KERNEL_LARGE_EXEC,
>  };
>
>  /* Locates and clears a region for a new top level page table. */
> diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
> index 737da62..46eab1a 100644
> --- a/arch/x86/include/asm/init.h
> +++ b/arch/x86/include/asm/init.h
> @@ -4,8 +4,9 @@
>  struct x86_mapping_info {
> void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
> void *context;   /* context for alloc_pgt_page */
> -   unsigned long pmd_flag;  /* page flag for PMD entry */
> +   unsigned long page_flag; /* page flag for PMD or PUD entry */
> unsigned long offset;/* ident mapping offset */
> +   bool use_pud_page;  /* PUD level 1GB page support */

how about using direct_gbpages instead?
use_pud_page is confusing.

>  };
>
>  int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
> diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
> index 085c3b3..1d4f2b0 100644
> --- a/arch/x86/kernel/machine_kexec_64.c
> +++ b/arch/x86/kernel/machine_kexec_64.c
> @@ -113,7 +113,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
> struct x86_mapping_info info = {
> .alloc_pgt_page = alloc_pgt_page,
> .context= image,
> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
> +   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
> };
> unsigned long mstart, mend;
> pgd_t *level4p;
> diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
> index 04210a2..0ad0280 100644
> --- a/arch/x86/mm/ident_map.c
> +++ b/arch/x86/mm/ident_map.c
> @@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
> if (pmd_present(*pmd))
> continue;
>
> -   set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
> +   set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
> }
>  }
>
> @@ -30,6 +30,17 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
> if (next > end)
> next = end;
>
> +   if (info->use_pud_page) {
> +   pud_t pudval;
> +
> +   if (pud_present(*pud))
> +   continue;
> +
> +   pudval = __pud((addr - info->offset) | info->page_flag);
> +   set_pud(pud, pudval);

should mask addr with PUD_MASK.
   addr &= PUD_MASK;
   set_pud(pud, __pud((addr - info->offset) | info->page_flag));



> +   continue;
> +   }
> +
> if (pud_present(*pud)) {
> pmd = pmd_offset(pud, 0);
> ident_pmd_init(info, pmd, addr, next);
> diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
> index 6a61194..a6e21fe 100644
> --- a/arch/x86/power/hibernate_64.c
> +++ b/arch/x86/power/hibernate_64.c
> @@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
>  {
> struct x86_mapping_info info = {
> .alloc_pgt_page = alloc_pgt_page,
> -   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
> +   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
> .offset = __PAGE_OFFSET,
> };
> unsigned long mstart, mend;
> --
> 1.8.3.1
>



[PATCH 2/2] x86_64/kexec: Use PUD level 1GB page for identity mapping if available

2017-04-25 Thread Xunlei Pang
Kexec sets up all identity mappings before booting into the new
kernel, and this causes extra memory consumption for paging
structures, which is quite considerable on modern machines with huge
memory.

E.g. on one 32TB machine, in the kdump case, around 128MB (about
4MB/TB) of the reserved memory was consumed by paging structures after
kexec set up all the identity mappings using the current 2MB pages;
together with the loaded kdump kernel, initramfs, etc., this caused
the kexec syscall to fail with -ENOMEM. As a result, we had to enlarge
the reserved memory via "crashkernel=X".

This causes some trouble for distributions that use policies
to evaluate the proper "crashkernel=X" value for users.

Given that machines with large amounts of memory very likely support
the 1GB page feature, and that kernel_ident_mapping_init() now supports
PUD level 1GB pages, solve this problem by using 1GB pages to create
the identity mapping page tables for kdump whenever the feature is
available.

Signed-off-by: Xunlei Pang 
---
 arch/x86/kernel/machine_kexec_64.c | 5 +
 1 file changed, 5 insertions(+)

diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 1d4f2b0..41f1ae7 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -122,6 +122,11 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 
level4p = (pgd_t *)__va(start_pgtable);
clear_page(level4p);
+
+   /* Use PUD level page if available, to save crash memory for kdump */
+   if (direct_gbpages)
+   info.use_pud_page = true;
+
for (i = 0; i < nr_pfn_mapped; i++) {
mstart = pfn_mapped[i].start << PAGE_SHIFT;
mend   = pfn_mapped[i].end << PAGE_SHIFT;
-- 
1.8.3.1
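
For context, direct_gbpages is the kernel's existing 1GB-page knob and is
only set when the CPU actually advertises the feature, along the lines of
this simplified sketch of the check in the x86 init code:

	/* 1GB pages are used only if the CPU supports them */
	if (boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		direct_gbpages = 1;
	} else {
		direct_gbpages = 0;
	}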




[PATCH 1/2] x86/mm/ident_map: Add PUD level 1GB page support

2017-04-25 Thread Xunlei Pang
The current kernel_ident_mapping_init() creates the identity
mapping using 2MB pages (PMD level); this patch adds 1GB
page (PUD level) support.

This is useful on large machines to save some reserved memory
(as paging structures) in the kdump case when kexec sets up
identity mappings before booting into the new kernel.

We will utilize this new support in the following patch.
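
A caller opts in roughly as follows (a sketch modeled on the existing
kexec user; alloc_pgt_page, image, level4p, mstart and mend are as in
init_pgtable()):

	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.use_pud_page	= true,	/* request 1GB (PUD level) mappings */
	};
	int result;

	result = kernel_ident_mapping_init(&info, level4p, mstart, mend);
	if (result)
		return result;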

Signed-off-by: Xunlei Pang 
---
 arch/x86/boot/compressed/pagetable.c |  2 +-
 arch/x86/include/asm/init.h  |  3 ++-
 arch/x86/kernel/machine_kexec_64.c   |  2 +-
 arch/x86/mm/ident_map.c  | 13 -
 arch/x86/power/hibernate_64.c|  2 +-
 5 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 56589d0..1d78f17 100644
--- a/arch/x86/boot/compressed/pagetable.c
+++ b/arch/x86/boot/compressed/pagetable.c
@@ -70,7 +70,7 @@ static void *alloc_pgt_page(void *context)
  * Due to relocation, pointers must be assigned at run time not build time.
  */
 static struct x86_mapping_info mapping_info = {
-   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
+   .page_flag   = __PAGE_KERNEL_LARGE_EXEC,
 };
 
 /* Locates and clears a region for a new top level page table. */
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 737da62..46eab1a 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -4,8 +4,9 @@
 struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
void *context;   /* context for alloc_pgt_page */
-   unsigned long pmd_flag;  /* page flag for PMD entry */
+   unsigned long page_flag; /* page flag for PMD or PUD entry */
unsigned long offset;/* ident mapping offset */
+   bool use_pud_page;  /* PUD level 1GB page support */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 085c3b3..1d4f2b0 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -113,7 +113,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
.context= image,
-   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
+   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
};
unsigned long mstart, mend;
pgd_t *level4p;
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 04210a2..0ad0280 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -13,7 +13,7 @@ static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
if (pmd_present(*pmd))
continue;
 
-   set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
+   set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
}
 }
 
@@ -30,6 +30,17 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
if (next > end)
next = end;
 
+   if (info->use_pud_page) {
+   pud_t pudval;
+
+   if (pud_present(*pud))
+   continue;
+
+   pudval = __pud((addr - info->offset) | info->page_flag);
+   set_pud(pud, pudval);
+   continue;
+   }
+
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
ident_pmd_init(info, pmd, addr, next);
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 6a61194..a6e21fe 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -104,7 +104,7 @@ static int set_up_temporary_mappings(void)
 {
struct x86_mapping_info info = {
.alloc_pgt_page = alloc_pgt_page,
-   .pmd_flag   = __PAGE_KERNEL_LARGE_EXEC,
+   .page_flag  = __PAGE_KERNEL_LARGE_EXEC,
.offset = __PAGE_OFFSET,
};
unsigned long mstart, mend;
-- 
1.8.3.1

