[PATCH v7 5/7] mm, hmm: Use devm semantics for hmm_devmem_{add, remove}

2018-10-12 Thread Dan Williams
devm semantics arrange for resources to be torn down when
device-driver-probe fails or when device-driver-release completes.
Similar to devm_memremap_pages() there is no need to support an explicit
remove operation when the users properly adhere to devm semantics.

Note that devm_kzalloc() automatically handles allocating node-local
memory.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/hmm.h |4 --
 mm/hmm.c|  127 ++-
 2 files changed, 25 insertions(+), 106 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dde947083d4e..5888ae9f6abf 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -499,8 +499,7 @@ struct hmm_devmem {
  * enough and allocate struct page for it.
  *
  * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
  */
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
  struct device *device,
@@ -508,7 +507,6 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
   struct device *device,
   struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/mm/hmm.c b/mm/hmm.c
index 774d684fa2b4..60e4b275ad78 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -939,7 +939,6 @@ static void hmm_devmem_ref_exit(void *data)
 
devmem = container_of(ref, struct hmm_devmem, ref);
percpu_ref_exit(ref);
-   devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
 }
 
 static void hmm_devmem_ref_kill(void *data)
@@ -950,7 +949,6 @@ static void hmm_devmem_ref_kill(void *data)
devmem = container_of(ref, struct hmm_devmem, ref);
percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
-   devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -988,7 +986,7 @@ static void hmm_devmem_radix_release(struct resource 
*resource)
	mutex_unlock(&hmm_devmem_lock);
 }
 
-static void hmm_devmem_release(struct device *dev, void *data)
+static void hmm_devmem_release(void *data)
 {
struct hmm_devmem *devmem = data;
struct resource *resource = devmem->resource;
@@ -996,11 +994,6 @@ static void hmm_devmem_release(struct device *dev, void 
*data)
struct zone *zone;
struct page *page;
 
-   if (percpu_ref_tryget_live(&devmem->ref)) {
-   dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-   percpu_ref_put(&devmem->ref);
-   }
-
/* pages are dead and unused, undo the arch mapping */
start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1126,19 +1119,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem 
*devmem)
return ret;
 }
 
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
-{
-   struct hmm_devmem *devmem = data;
-
-   return devmem->resource == match_data;
-}
-
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
-{
-   devres_release(devmem->device, &hmm_devmem_release,
-  &hmm_devmem_match, devmem->resource);
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
@@ -1166,8 +1146,7 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 
dev_pagemap_get_ops();
 
-   devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-  GFP_KERNEL, dev_to_node(device));
+   devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
if (!devmem)
return ERR_PTR(-ENOMEM);
 
@@ -1181,11 +1160,11 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
  0, GFP_KERNEL);
if (ret)
-   goto error_percpu_ref;
+   return ERR_PTR(ret);
 
-   ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+   ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
&devmem->ref);
if (ret)
-   goto error_devm_add_action;
+   return ERR_PTR(ret);
 
size = ALIGN(size, PA_SECTION_SIZE);
addr = min((unsigned long)iomem_resource.end,
@@ -1205,16 +1184,12 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 
devmem->resource = devm_request_mem_region(device, addr, size,
 

[PATCH v7 5/7] mm, hmm: Use devm semantics for hmm_devmem_{add, remove}

2018-10-12 Thread Dan Williams
devm semantics arrange for resources to be torn down when
device-driver-probe fails or when device-driver-release completes.
Similar to devm_memremap_pages() there is no need to support an explicit
remove operation when the users properly adhere to devm semantics.

Note that devm_kzalloc() automatically handles allocating node-local
memory.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jérôme Glisse <jglisse@redhat.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 include/linux/hmm.h |4 --
 mm/hmm.c|  127 ++-
 2 files changed, 25 insertions(+), 106 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dde947083d4e..5888ae9f6abf 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -499,8 +499,7 @@ struct hmm_devmem {
  * enough and allocate struct page for it.
  *
  * The device driver can wrap the hmm_devmem struct inside a private device
- * driver struct. The device driver must call hmm_devmem_remove() before the
- * device goes away and before freeing the hmm_devmem struct memory.
+ * driver struct.
  */
 struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
  struct device *device,
@@ -508,7 +507,6 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
   struct device *device,
   struct resource *res);
-void hmm_devmem_remove(struct hmm_devmem *devmem);
 
 /*
  * hmm_devmem_page_set_drvdata - set per-page driver data field
diff --git a/mm/hmm.c b/mm/hmm.c
index 774d684fa2b4..60e4b275ad78 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -939,7 +939,6 @@ static void hmm_devmem_ref_exit(void *data)
 
devmem = container_of(ref, struct hmm_devmem, ref);
percpu_ref_exit(ref);
-   devm_remove_action(devmem->device, &hmm_devmem_ref_exit, data);
 }
 
 static void hmm_devmem_ref_kill(void *data)
@@ -950,7 +949,6 @@ static void hmm_devmem_ref_kill(void *data)
devmem = container_of(ref, struct hmm_devmem, ref);
percpu_ref_kill(ref);
	wait_for_completion(&devmem->completion);
-   devm_remove_action(devmem->device, &hmm_devmem_ref_kill, data);
 }
 
 static int hmm_devmem_fault(struct vm_area_struct *vma,
@@ -988,7 +986,7 @@ static void hmm_devmem_radix_release(struct resource 
*resource)
	mutex_unlock(&hmm_devmem_lock);
 }
 
-static void hmm_devmem_release(struct device *dev, void *data)
+static void hmm_devmem_release(void *data)
 {
struct hmm_devmem *devmem = data;
struct resource *resource = devmem->resource;
@@ -996,11 +994,6 @@ static void hmm_devmem_release(struct device *dev, void 
*data)
struct zone *zone;
struct page *page;
 
-   if (percpu_ref_tryget_live(&devmem->ref)) {
-   dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
-   percpu_ref_put(&devmem->ref);
-   }
-
/* pages are dead and unused, undo the arch mapping */
start_pfn = (resource->start & ~(PA_SECTION_SIZE - 1)) >> PAGE_SHIFT;
npages = ALIGN(resource_size(resource), PA_SECTION_SIZE) >> PAGE_SHIFT;
@@ -1126,19 +1119,6 @@ static int hmm_devmem_pages_create(struct hmm_devmem 
*devmem)
return ret;
 }
 
-static int hmm_devmem_match(struct device *dev, void *data, void *match_data)
-{
-   struct hmm_devmem *devmem = data;
-
-   return devmem->resource == match_data;
-}
-
-static void hmm_devmem_pages_remove(struct hmm_devmem *devmem)
-{
-   devres_release(devmem->device, &hmm_devmem_release,
-  &hmm_devmem_match, devmem->resource);
-}
-
 /*
  * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
  *
@@ -1166,8 +1146,7 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 
dev_pagemap_get_ops();
 
-   devmem = devres_alloc_node(&hmm_devmem_release, sizeof(*devmem),
-  GFP_KERNEL, dev_to_node(device));
+   devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
if (!devmem)
return ERR_PTR(-ENOMEM);
 
@@ -1181,11 +1160,11 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
  0, GFP_KERNEL);
if (ret)
-   goto error_percpu_ref;
+   return ERR_PTR(ret);
 
-   ret = devm_add_action(device, hmm_devmem_ref_exit, &devmem->ref);
+   ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
&devmem->ref);
if (ret)
-   goto error_devm_add_action;
+   return ERR_PTR(ret);
 
size = ALIGN(size, PA_SECTION_SIZE);
addr = min((unsigned long)iomem_resource.end,
@@ -1205,16 +1184,12 @@ struct hmm_devmem *hmm_devmem_add(const struct 
hmm_devmem_ops *ops,
 
devmem->resource = devm_request_mem_region(device, addr, size,