Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing

2019-07-24 Thread Keith Busch
On Thu, Jun 13, 2019 at 01:27:05PM -0700, Rafael J. Wysocki wrote:
> On Wednesday, May 15, 2019 11:54:43 PM CEST Keith Busch wrote:
> > Instead of registering the hmat cache attributes in line with parsing
> > the table, save the attributes in the memory target and register them
> > after parsing completes. This will make it easier to register the
> > attributes later when hot add is supported.
> > 
> > Signed-off-by: Keith Busch 
> > ---
> > v1 -> v2:
> > 
> >   Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
> >   1 always existed (Brice).
> > 

Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing

2019-07-01 Thread Brice Goglin
On 15/05/2019 23:54, Keith Busch wrote:
> Instead of registering the hmat cache attributes in line with parsing
> the table, save the attributes in the memory target and register them
> after parsing completes. This will make it easier to register the
> attributes later when hot add is supported.
>
> Signed-off-by: Keith Busch 


Sorry for the delay; I finally managed to test these two patches, and they
work fine (tested with a fake HMAT saying that a kmem-hotplug NVDIMM node
is local to 2 initiators among 4 total, and that there are 2 memory-side
caches in front of that NVDIMM node).

Tested-by: Brice Goglin 
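
For reference, once node_add_cache() has run for an online node, the
registered attributes are expected to show up under
/sys/devices/system/node/nodeX/memory_side_cache/indexY/ (see
Documentation/admin-guide/mm/numaperf.rst). A minimal userspace sketch for
dumping them, assuming that layout; the node number, cache index, and
attribute file names are illustrative and should be checked against the
running kernel:

#include <stdio.h>

/*
 * Reads one memory-side cache attribute file for a given node/index.
 * The sysfs path layout is an assumption based on numaperf.rst.
 */
static void show(const char *node, const char *index, const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/node/%s/memory_side_cache/%s/%s",
		 node, index, attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-12s <not present>\n", attr);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-12s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	/* node1/index1 are placeholders for the tested node and cache level */
	const char *attrs[] = { "size", "line_size", "indexing", "write_policy" };
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		show("node1", "index1", attrs[i]);
	return 0;
}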



Re: [PATCHv2 1/2] hmat: Register memory-side cache after parsing

2019-06-13 Thread Rafael J. Wysocki
On Wednesday, May 15, 2019 11:54:43 PM CEST Keith Busch wrote:
> Instead of registering the hmat cache attributes in line with parsing
> the table, save the attributes in the memory target and register them
> after parsing completes. This will make it easier to register the
> attributes later when hot add is supported.
> 
> Signed-off-by: Keith Busch 
> ---
> v1 -> v2:
> 
>   Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
>   1 always existed (Brice).
> 

[PATCHv2 1/2] hmat: Register memory-side cache after parsing

2019-05-15 Thread Keith Busch
Instead of registering the hmat cache attributes in line with parsing
the table, save the attributes in the memory target and register them
after parsing completes. This will make it easier to register the
attributes later when hot add is supported.

Signed-off-by: Keith Busch 
---
v1 -> v2:

  Fixed multi-level caches, and no caches. v1 incorrectly assumed only a level
  1 always existed (Brice).

 drivers/acpi/hmat/hmat.c | 70 +---
 1 file changed, 55 insertions(+), 15 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index 96b7d39a97c6..bf23c9a27958 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -36,11 +36,17 @@ enum locality_types {
 
 static struct memory_locality *localities_types[4];
 
+struct target_cache {
+   struct list_head node;
+   struct node_cache_attrs cache_attrs;
+};
+
 struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct node_hmem_attrs hmem_attrs;
+   struct list_head caches;
 };
 
 struct memory_initiator {
@@ -110,6 +116,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm)
target->memory_pxm = mem_pxm;
target->processor_pxm = PXM_INVAL;
list_add_tail(&target->node, &targets);
+   INIT_LIST_HEAD(&target->caches);
 }
 
 static __init const char *hmat_data_type(u8 type)
@@ -314,7 +321,8 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
   const unsigned long end)
 {
struct acpi_hmat_cache *cache = (void *)header;
-   struct node_cache_attrs cache_attrs;
+   struct memory_target *target;
+   struct target_cache *tcache;
u32 attrs;
 
if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +336,47 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
 
-   cache_attrs.size = cache->cache_size;
-   cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
-   cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+   target = find_mem_target(cache->memory_PD);
+   if (!target)
+   return 0;
+
+   tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
+   if (!tcache) {
+   pr_notice_once("Failed to allocate HMAT cache info\n");
+   return 0;
+   }
+
+   tcache->cache_attrs.size = cache->cache_size;
+   tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+   tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
 
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
-   cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+   tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
-   cache_attrs.indexing = NODE_CACHE_INDEXED;
+   tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
break;
case ACPI_HMAT_CA_NONE:
default:
-   cache_attrs.indexing = NODE_CACHE_OTHER;
+   tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
break;
}
 
switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
case ACPI_HMAT_CP_WB:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+   tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
break;
case ACPI_HMAT_CP_WT:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+   tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
break;
case ACPI_HMAT_CP_NONE:
default:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+   tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
break;
}
+   list_add_tail(&tcache->node, &target->caches);
 
-   node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
return 0;
 }
 
@@ -577,20 +595,37 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
}
 }
 
+static __init void hmat_register_target_cache(struct memory_target *target)
+{
+   unsigned mem_nid = pxm_to_node(target->memory_pxm);
+   struct target_cache *tcache;
+
+   list_for_each_entry(tcache, &target->caches, node)
+   node_add_cache(mem_nid, &tcache->cache_attrs);
+}
+
 static __init void hmat_register_target_perf(struct memory_target *target)
 {
unsigned mem_nid = pxm_to_node(target->memory_pxm);
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
+static __init void hmat_register_target(struct memory_target *target)
+{
+   if (!node_online(pxm_to_node(target->memory_pxm)))

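The attrs decoding in hmat_parse_cache() above follows the bit layout of the
HMAT memory-side cache attributes word: cache level in bits 7:4,
associativity in bits 11:8, write policy in bits 15:12, and cache line size
in bits 31:16. A self-contained sketch of that decomposition; the mask names
and values are local to the sketch (derived from the shifts in the patch),
not the ACPI_HMAT_* definitions the driver includes:

#include <stdio.h>

/* Local masks matching the shift positions used in hmat_parse_cache() */
#define HMAT_CACHE_LEVEL	0x000000F0u	/* bits 7:4   */
#define HMAT_CACHE_ASSOC	0x00000F00u	/* bits 11:8  */
#define HMAT_WRITE_POLICY	0x0000F000u	/* bits 15:12 */
#define HMAT_CACHE_LINE_SIZE	0xFFFF0000u	/* bits 31:16 */

int main(void)
{
	/* level 1, direct mapped (1), write-back (1), 64-byte cache lines */
	unsigned int attrs = (64u << 16) | (1u << 12) | (1u << 8) | (1u << 4);

	printf("level         %u\n", (attrs & HMAT_CACHE_LEVEL) >> 4);
	printf("associativity %u\n", (attrs & HMAT_CACHE_ASSOC) >> 8);
	printf("write policy  %u\n", (attrs & HMAT_WRITE_POLICY) >> 12);
	printf("line size     %u\n", (attrs & HMAT_CACHE_LINE_SIZE) >> 16);
	return 0;
}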
[PATCHv2 1/2] hmat: Register memory-side cache after parsing

2019-04-15 Thread Keith Busch
Instead of registering the hmat cache attributes in line with parsing
the table, save the attributes in the memory target and register them
after parsing completes. This will make it easier to register the
attributes later when hot add is supported.

Signed-off-by: Keith Busch 
---
 drivers/acpi/hmat/hmat.c | 48 +---
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/drivers/acpi/hmat/hmat.c b/drivers/acpi/hmat/hmat.c
index b7824a0309f7..bdb167c026ff 100644
--- a/drivers/acpi/hmat/hmat.c
+++ b/drivers/acpi/hmat/hmat.c
@@ -41,6 +41,7 @@ struct memory_target {
unsigned int memory_pxm;
unsigned int processor_pxm;
struct node_hmem_attrs hmem_attrs;
+   struct node_cache_attrs cache_attrs;
 };
 
 struct memory_initiator {
@@ -314,7 +315,7 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
   const unsigned long end)
 {
struct acpi_hmat_cache *cache = (void *)header;
-   struct node_cache_attrs cache_attrs;
+   struct memory_target *target;
u32 attrs;
 
if (cache->header.length < sizeof(*cache)) {
@@ -328,37 +329,40 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles);
 
-   cache_attrs.size = cache->cache_size;
-   cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
-   cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
+   target = find_mem_target(cache->memory_PD);
+   if (!target)
+   return 0;
+
+   target->cache_attrs.size = cache->cache_size;
+   target->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
+   target->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;
 
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
case ACPI_HMAT_CA_DIRECT_MAPPED:
-   cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
+   target->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
break;
case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
-   cache_attrs.indexing = NODE_CACHE_INDEXED;
+   target->cache_attrs.indexing = NODE_CACHE_INDEXED;
break;
case ACPI_HMAT_CA_NONE:
default:
-   cache_attrs.indexing = NODE_CACHE_OTHER;
+   target->cache_attrs.indexing = NODE_CACHE_OTHER;
break;
}
 
switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
case ACPI_HMAT_CP_WB:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
+   target->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
break;
case ACPI_HMAT_CP_WT:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
+   target->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
break;
case ACPI_HMAT_CP_NONE:
default:
-   cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
+   target->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
break;
}
 
-   node_add_cache(pxm_to_node(cache->memory_PD), &cache_attrs);
return 0;
 }
 
@@ -577,20 +581,34 @@ static __init void hmat_register_target_initiators(struct memory_target *target)
}
 }
 
+static __init void hmat_register_target_cache(struct memory_target *target)
+{
+   unsigned mem_nid = pxm_to_node(target->memory_pxm);
+   node_add_cache(mem_nid, &target->cache_attrs);
+}
+
 static __init void hmat_register_target_perf(struct memory_target *target)
 {
unsigned mem_nid = pxm_to_node(target->memory_pxm);
node_set_perf_attrs(mem_nid, &target->hmem_attrs, 0);
 }
 
+static __init void hmat_register_target(struct memory_target *target)
+{
+   if (!node_online(pxm_to_node(target->memory_pxm)))
+   return;
+
+   hmat_register_target_initiators(target);
+   hmat_register_target_cache(target);
+   hmat_register_target_perf(target);
+}
+
 static __init void hmat_register_targets(void)
 {
struct memory_target *target;
 
-   list_for_each_entry(target, &targets, node) {
-   hmat_register_target_initiators(target);
-   hmat_register_target_perf(target);
-   }
+   list_for_each_entry(target, &targets, node)
+   hmat_register_target(target);
 }
 
 static __init void hmat_free_structures(void)
-- 
2.14.4
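
One note on why the v2 revision earlier in this thread replaced the single
cache_attrs shown here with a per-target list: with one embedded struct, a
second memory-side cache entry for the same memory_PD simply overwrites the
first, so at most one cache level ever reaches node_add_cache(). A
standalone userspace sketch of the list-based model, with simplified list
plumbing and illustrative values (names mirror the patch, but this is not
kernel code):

#include <stdio.h>
#include <stdlib.h>

struct target_cache {
	struct target_cache *next;
	unsigned int level;
	unsigned long long size;
};

struct memory_target {
	unsigned int memory_pxm;
	struct target_cache *caches;	/* one entry per described cache level */
};

/* parse step: remember one cache description for the target */
static void add_cache(struct memory_target *t, unsigned int level,
		      unsigned long long size)
{
	struct target_cache *c = calloc(1, sizeof(*c));

	if (!c)
		return;
	c->level = level;
	c->size = size;
	c->next = t->caches;
	t->caches = c;
}

/* register step: walk the whole list once parsing has completed */
static void register_target_caches(struct memory_target *t)
{
	struct target_cache *c;

	for (c = t->caches; c; c = c->next)
		printf("pxm %u: register level %u cache of %llu bytes\n",
		       t->memory_pxm, c->level, c->size);
}

int main(void)
{
	struct memory_target t = { .memory_pxm = 2 };

	/* e.g. two memory-side caches in front of one NVDIMM node */
	add_cache(&t, 1, 1ULL << 30);
	add_cache(&t, 2, 4ULL << 30);
	register_target_caches(&t);

	while (t.caches) {		/* teardown */
		struct target_cache *c = t.caches;

		t.caches = c->next;
		free(c);
	}
	return 0;
}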