Now that the mm core supports section-unaligned hotplug of ZONE_DEVICE
memory, we no longer need to add padding at pfn/dax device creation
time. The kernel will still honor padding established by older kernels.
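For reference, the trimming this removes amounted to the section-alignment
arithmetic sketched below (a minimal userspace illustration only;
SECTION_SIZE and the SECT_UP()/SECT_DOWN() helpers are stand-ins for the
kernel's PHYS_SECTION_ALIGN_UP()/_DOWN() macros, 128MiB matches the
default x86_64 section size, and the addresses are hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* illustrative stand-in for the default x86_64 section size */
    #define SECTION_SIZE    (128ULL << 20)
    #define SECT_DOWN(x)    ((x) & ~(SECTION_SIZE - 1))
    #define SECT_UP(x)      SECT_DOWN((x) + SECTION_SIZE - 1)

    int main(void)
    {
            /* hypothetical namespace straddling section boundaries */
            uint64_t start = 0x148080000ULL;
            uint64_t size  = 0x07ff00000ULL;

            /* pad the start up, and truncate the end down, to sections */
            uint64_t start_pad = SECT_UP(start) - start;
            uint64_t end_trunc = (start + size) - SECT_DOWN(start + size);

            printf("start_pad=%" PRIu64 " end_trunc=%" PRIu64 "\n",
                            start_pad, end_trunc);
            return 0;
    }

With section-unaligned hotplug in place, that capacity no longer needs to
be sacrificed at device creation time.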

Cc: Toshi Kani <toshi.k...@hpe.com>
Signed-off-by: Dan Williams <dan.j.willi...@intel.com>
---
 drivers/nvdimm/pfn_devs.c |   40 ++++++----------------------------------
 1 file changed, 6 insertions(+), 34 deletions(-)

diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cea8350fbc7e..1cf0d73be9b3 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -557,7 +557,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
-       u32 start_pad = 0, end_trunc = 0;
        resource_size_t start, size;
        struct nd_namespace_io *nsio;
        struct nd_region *nd_region;
@@ -590,42 +589,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
                return -ENXIO;
        }
 
-       memset(pfn_sb, 0, sizeof(*pfn_sb));
-
-       /*
-        * Check if pmem collides with 'System RAM' when section aligned and
-        * trim it accordingly
-        */
-       nsio = to_nd_namespace_io(&ndns->dev);
-       start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-       size = resource_size(&nsio->res);
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
-               start = nsio->res.start;
-               start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-       }
-
-       start = nsio->res.start;
-       size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
-               size = resource_size(&nsio->res);
-               end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
-       }
-
-       if (start_pad + end_trunc)
-               dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
-                               dev_name(&ndns->dev), start_pad + end_trunc);
-
        /*
         * Note, we use 64 here for the standard size of struct page,
         * debugging options may cause it to be larger in which case the
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
-       start += start_pad;
+       nsio = to_nd_namespace_io(&ndns->dev);
+       start = nsio->res.start;
        size = resource_size(&nsio->res);
-       npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+       npfns = (size - SZ_8K) / SZ_4K;
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                unsigned long memmap_size;
 
@@ -642,13 +615,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        else
                return -ENXIO;
 
-       if (offset + start_pad + end_trunc >= size) {
+       if (offset >= size) {
                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }
 
-       npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+       memset(pfn_sb, 0, sizeof(*pfn_sb));
+       npfns = (size - offset) / SZ_4K;
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
@@ -657,8 +631,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(2);
-       pfn_sb->start_pad = cpu_to_le32(start_pad);
-       pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);
