Re: [Xen-devel] [RFC XEN PATCH v3 06/39] acpi: probe valid PMEM regions via NFIT

2017-11-03 Thread Haozhong Zhang
On 11/03/17 14:15 +0800, Chao Peng wrote:
> 
> > +static void __init acpi_nfit_register_pmem(struct acpi_nfit_desc *desc)
> > +{
> > +    struct nfit_spa_desc *spa_desc;
> > +    struct nfit_memdev_desc *memdev_desc;
> > +    struct acpi_nfit_system_address *spa;
> > +    unsigned long smfn, emfn;
> > +
> > +    list_for_each_entry(memdev_desc, &desc->memdev_list, link)
> > +    {
> > +        spa_desc = memdev_desc->spa_desc;
> > +
> > +        if ( !spa_desc ||
> > +             (memdev_desc->acpi_table->flags &
> > +              (ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED |
> > +               ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_NOT_ARMED |
> > +               ACPI_NFIT_MEM_MAP_FAILED)) )
> > +            continue;
> 
> If a failure is detected, is it reasonable to silently continue? I think
> we should at least print some messages.

I got something wrong here. I should iterate over the SPA structures
instead, and check all memdevs in each SPA range. If any memdev has
failure flags set, skip the whole SPA range and print an error message.
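
Roughly like the following sketch (hypothetical: it assumes each
nfit_spa_desc is extended with a memdev_list of the memdevs that map
into it, which this version of the patch does not have yet):

    static void __init acpi_nfit_register_pmem(struct acpi_nfit_desc *desc)
    {
        struct nfit_spa_desc *spa_desc;
        struct nfit_memdev_desc *memdev_desc;
        struct acpi_nfit_system_address *spa;
        unsigned long smfn, emfn;

        list_for_each_entry(spa_desc, &desc->spa_list, link)
        {
            bool failed = false;

            spa = spa_desc->acpi_table;
            if ( memcmp(spa->range_guid, nfit_spa_pmem_guid, 16) )
                continue;

            /* Skip the whole SPA range if any of its memdevs reports failure. */
            list_for_each_entry(memdev_desc, &spa_desc->memdev_list, link)
                if ( memdev_desc->acpi_table->flags &
                     (ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED |
                      ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_NOT_ARMED |
                      ACPI_NFIT_MEM_MAP_FAILED) )
                {
                    failed = true;
                    break;
                }

            if ( failed )
            {
                printk(XENLOG_ERR
                       "NFIT: failures detected in SPA range %u, skipping\n",
                       spa->range_index);
                continue;
            }

            smfn = paddr_to_pfn(spa->address);
            emfn = paddr_to_pfn(spa->address + spa->length);
            printk(XENLOG_INFO "NFIT: PMEM MFNs 0x%lx - 0x%lx\n", smfn, emfn);
        }
    }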

Haozhong

> 
> Chao
> > +
> > +        spa = spa_desc->acpi_table;
> > +        if ( memcmp(spa->range_guid, nfit_spa_pmem_guid, 16) )
> > +            continue;
> > +        smfn = paddr_to_pfn(spa->address);
> > +        emfn = paddr_to_pfn(spa->address + spa->length);
> > +        printk(XENLOG_INFO "NFIT: PMEM MFNs 0x%lx - 0x%lx\n", smfn, emfn);
> > +    }
> > +}



Re: [Xen-devel] [RFC XEN PATCH v3 06/39] acpi: probe valid PMEM regions via NFIT

2017-11-02 Thread Chao Peng

> +static void __init acpi_nfit_register_pmem(struct acpi_nfit_desc *desc)
> +{
> +    struct nfit_spa_desc *spa_desc;
> +    struct nfit_memdev_desc *memdev_desc;
> +    struct acpi_nfit_system_address *spa;
> +    unsigned long smfn, emfn;
> +
> +    list_for_each_entry(memdev_desc, &desc->memdev_list, link)
> +    {
> +        spa_desc = memdev_desc->spa_desc;
> +
> +        if ( !spa_desc ||
> +             (memdev_desc->acpi_table->flags &
> +              (ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED |
> +               ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_NOT_ARMED |
> +               ACPI_NFIT_MEM_MAP_FAILED)) )
> +            continue;

If a failure is detected, is it reasonable to silently continue? I think
we should at least print some messages.
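
For example (just a sketch; NFIT_MEM_FAILURE_MASK here is a hypothetical
shorthand for the failed-state flags tested above):

    if ( memdev_desc->acpi_table->flags & NFIT_MEM_FAILURE_MASK )
    {
        printk(XENLOG_WARNING
               "NFIT: skipping memory device %#x with failure flags %#x\n",
               memdev_desc->acpi_table->device_handle,
               memdev_desc->acpi_table->flags);
        continue;
    }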

Chao
> +
> +        spa = spa_desc->acpi_table;
> +        if ( memcmp(spa->range_guid, nfit_spa_pmem_guid, 16) )
> +            continue;
> +        smfn = paddr_to_pfn(spa->address);
> +        emfn = paddr_to_pfn(spa->address + spa->length);
> +        printk(XENLOG_INFO "NFIT: PMEM MFNs 0x%lx - 0x%lx\n", smfn, emfn);
> +    }
> +}



[Xen-devel] [RFC XEN PATCH v3 06/39] acpi: probe valid PMEM regions via NFIT

2017-09-10 Thread Haozhong Zhang
A PMEM region with failures (e.g., one that was not properly flushed in
the last power cycle, or that contains broken blocks) cannot be safely
used by Xen and guests. Scan the state flags of the NVDIMM region
mapping structures in NFIT to check whether any failures have happened
to a PMEM region. The recovery from such failures is left out of Xen
(e.g., to the firmware or other management utilities on the bare metal).

Signed-off-by: Haozhong Zhang 
---
Cc: Jan Beulich 
Cc: Andrew Cooper 
---
 xen/arch/x86/acpi/boot.c  |   4 ++
 xen/drivers/acpi/nfit.c   | 153 +-
 xen/include/acpi/actbl1.h |  26 
 xen/include/xen/acpi.h|   1 +
 4 files changed, 183 insertions(+), 1 deletion(-)
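
For reference, the xen/include/acpi/actbl1.h hunk (not shown below)
presumably adds the NFIT structure and flag definitions. In ACPICA's
actbl1.h, the memory-map state flags tested in this patch are defined
along these lines (a sketch from memory, not verbatim from this hunk):

    /* Flags in struct acpi_nfit_memory_map::flags */
    #define ACPI_NFIT_MEM_SAVE_FAILED     (1)     /* Last SAVE to the memory device failed */
    #define ACPI_NFIT_MEM_RESTORE_FAILED  (1<<1)  /* Last RESTORE from the memory device failed */
    #define ACPI_NFIT_MEM_FLUSH_FAILED    (1<<2)  /* Platform flush of data to persistence failed */
    #define ACPI_NFIT_MEM_NOT_ARMED       (1<<3)  /* Memory device is not armed */
    #define ACPI_NFIT_MEM_HEALTH_OBSERVED (1<<4)  /* SMART/health events have been observed */
    #define ACPI_NFIT_MEM_HEALTH_ENABLED  (1<<5)  /* SMART/health events are enabled */
    #define ACPI_NFIT_MEM_MAP_FAILED      (1<<6)  /* Mapping to SPA failed */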

diff --git a/xen/arch/x86/acpi/boot.c b/xen/arch/x86/acpi/boot.c
index 8e6c96dcf6..f52a2c6dc5 100644
--- a/xen/arch/x86/acpi/boot.c
+++ b/xen/arch/x86/acpi/boot.c
@@ -732,5 +732,9 @@ int __init acpi_boot_init(void)
 
 	acpi_table_parse(ACPI_SIG_BGRT, acpi_invalidate_bgrt);
 
+#ifdef CONFIG_NVDIMM_PMEM
+	acpi_nfit_init();
+#endif
+
 	return 0;
 }
diff --git a/xen/drivers/acpi/nfit.c b/xen/drivers/acpi/nfit.c
index e099378ee0..b88a587b8d 100644
--- a/xen/drivers/acpi/nfit.c
+++ b/xen/drivers/acpi/nfit.c
@@ -31,11 +31,143 @@ static const uint8_t nfit_spa_pmem_guid[] =
     0xac, 0x43, 0x0d, 0x33, 0x18, 0xb7, 0x8c, 0xdb,
 };
 
+struct nfit_spa_desc {
+    struct list_head link;
+    struct acpi_nfit_system_address *acpi_table;
+};
+
+struct nfit_memdev_desc {
+    struct list_head link;
+    struct acpi_nfit_memory_map *acpi_table;
+    struct nfit_spa_desc *spa_desc;
+};
+
 struct acpi_nfit_desc {
     struct acpi_table_nfit *acpi_table;
+    struct list_head spa_list;
+    struct list_head memdev_list;
 };
 
-static struct acpi_nfit_desc nfit_desc;
+static struct acpi_nfit_desc nfit_desc = {
+    .spa_list = LIST_HEAD_INIT(nfit_desc.spa_list),
+    .memdev_list = LIST_HEAD_INIT(nfit_desc.memdev_list),
+};
+
+static void __init acpi_nfit_del_subtables(struct acpi_nfit_desc *desc)
+{
+    struct nfit_spa_desc *spa, *spa_next;
+    struct nfit_memdev_desc *memdev, *memdev_next;
+
+    list_for_each_entry_safe(spa, spa_next, &desc->spa_list, link)
+    {
+        list_del(&spa->link);
+        xfree(spa);
+    }
+    list_for_each_entry_safe(memdev, memdev_next, &desc->memdev_list, link)
+    {
+        list_del(&memdev->link);
+        xfree(memdev);
+    }
+}
+
+static int __init acpi_nfit_add_subtables(struct acpi_nfit_desc *desc)
+{
+    struct acpi_table_nfit *nfit_table = desc->acpi_table;
+    uint32_t hdr_offset = sizeof(*nfit_table);
+    uint32_t nfit_length = nfit_table->header.length;
+    struct acpi_nfit_header *hdr;
+    struct nfit_spa_desc *spa_desc;
+    struct nfit_memdev_desc *memdev_desc;
+    int ret = 0;
+
+#define INIT_DESC(desc, acpi_hdr, acpi_type, desc_list) \
+    do {                                                \
+        (desc) = xzalloc(typeof(*(desc)));              \
+        if ( unlikely(!(desc)) ) {                      \
+            ret = -ENOMEM;                              \
+            goto nomem;                                 \
+        }                                               \
+        (desc)->acpi_table = (acpi_type *)(acpi_hdr);   \
+        INIT_LIST_HEAD(&(desc)->link);                  \
+        list_add_tail(&(desc)->link, (desc_list));      \
+    } while ( 0 )
+
+    while ( hdr_offset < nfit_length )
+    {
+        hdr = (void *)nfit_table + hdr_offset;
+        hdr_offset += hdr->length;
+
+        switch ( hdr->type )
+        {
+        case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
+            INIT_DESC(spa_desc, hdr, struct acpi_nfit_system_address,
+                      &desc->spa_list);
+            break;
+
+        case ACPI_NFIT_TYPE_MEMORY_MAP:
+            INIT_DESC(memdev_desc, hdr, struct acpi_nfit_memory_map,
+                      &desc->memdev_list);
+            break;
+
+        default:
+            continue;
+        }
+    }
+
+#undef INIT_DESC
+
+    return 0;
+
+ nomem:
+    acpi_nfit_del_subtables(desc);
+
+    return ret;
+}
+
+static void __init acpi_nfit_link_subtables(struct acpi_nfit_desc *desc)
+{
+    struct nfit_spa_desc *spa_desc;
+    struct nfit_memdev_desc *memdev_desc;
+    uint16_t spa_idx;
+
+    list_for_each_entry(memdev_desc, &desc->memdev_list, link)
+    {
+        spa_idx = memdev_desc->acpi_table->range_index;
+        list_for_each_entry(spa_desc, &desc->spa_list, link)
+        {
+            if ( spa_desc->acpi_table->range_index == spa_idx )
+                break;
+        }
+        memdev_desc->spa_desc = spa_desc;
+    }
+}
+
+static void __init acpi_nfit_register_pmem(struct acpi_nfit_desc *desc)
+{
+    struct nfit_spa_desc *spa_desc;
+    struct nfit_memdev_desc *memdev_desc;
+    struct acpi_nfit_system_address *spa;
+    unsigned long smfn, emfn;
+
+    list_for_each_entry(memdev_desc, &desc->memdev_list, link)
+    {
+        spa_desc = memdev_desc->spa_desc;
+