The current implementation relies on accessing region->pfns directly
within the pfn processing chain, making it difficult to use these
handlers with alternative pfn sources. This tight coupling limits
flexibility when processing pfns from different locations, such as
temporary arrays or external sources.

By threading the pfns pointer through the entire processing chain
(mshv_region_process_range, mshv_region_chunk_size, and all
handlers), we decouple the processing logic from the storage location.
This enables future enhancements like processing pfns from multiple
sources or implementing more sophisticated memory management strategies
without duplicating the core processing logic.

No functional change intended.

Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_regions.c |   50 +++++++++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
index cb42ee49c2e2f..87204b2b48290 100644
--- a/drivers/hv/mshv_regions.c
+++ b/drivers/hv/mshv_regions.c
@@ -31,6 +31,10 @@
 typedef int (*gfn_handler_t)(struct mshv_region *region,
                              u64 gfn, u64 count, u32 flags);
 
+typedef int (*pfn_handler_t)(struct mshv_region *region, u32 flags,
+                            u64 pfn_offset, u64 pfn_count,
+                            unsigned long *pfns, bool huge_page);
+
 static const struct mmu_interval_notifier_ops mshv_region_mni_ops;
 
 static inline bool mshv_pfn_valid(unsigned long pfn)
@@ -98,6 +102,7 @@ static int mshv_chunk_stride(unsigned long pfn, u64 gfn, u64 pfn_count)
  * @region    : Memory region whose PFN array is being walked.
  * @pfn_offset: Offset into region->mreg_pfns at which to start.
  * @pfn_count : Upper bound on the run length.
+ * @pfns      : Pointer to an array of PFNs corresponding to the region.
  * @huge_page : Out-parameter set to true if the run may be dispatched
  *              as a 2 MiB chunk; false for 4 KiB-stride dispatch.
  *
@@ -114,12 +119,13 @@ static int mshv_chunk_stride(unsigned long pfn, u64 gfn, u64 pfn_count)
  */
 static long mshv_region_chunk_size(struct mshv_region *region,
                                   u64 pfn_offset, u64 pfn_count,
-                                  bool *huge_page)
+                                  unsigned long *pfns, bool *huge_page)
 {
-       unsigned long *pfns = region->mreg_pfns + pfn_offset;
        u64 gfn = region->start_gfn + pfn_offset;
        u64 count = 0, stride;
 
+       pfns += pfn_offset;
+
        if (!mshv_pfn_valid(pfns[0])) {
                for (count = 1; count < pfn_count; count++) {
                        if (mshv_pfn_valid(pfns[count]))
@@ -158,6 +164,7 @@ static long mshv_region_chunk_size(struct mshv_region *region,
  * @flags     : Flags to pass to the handler.
  * @pfn_offset: Offset into the region's PFNs array to start processing.
  * @pfn_count : Number of PFNs to process.
+ * @pfns      : Pointer to an array of PFNs corresponding to the region.
  * @handler   : Callback function to handle each chunk of contiguous
  *              valid PFNs.
  *
@@ -173,11 +180,8 @@ static long mshv_region_chunk_size(struct mshv_region *region,
 static int mshv_region_process_range(struct mshv_region *region,
                                     u32 flags,
                                     u64 pfn_offset, u64 pfn_count,
-                                    int (*handler)(struct mshv_region *region,
-                                                   u32 flags,
-                                                   u64 pfn_offset,
-                                                   u64 pfn_count,
-                                                   bool huge_page))
+                                    unsigned long *pfns,
+                                    pfn_handler_t handler)
 {
        u64 end;
        long ret;
@@ -193,11 +197,12 @@ static int mshv_region_process_range(struct mshv_region *region,
                long count;
 
                count = mshv_region_chunk_size(region, pfn_offset, pfn_count,
-                                              &huge_page);
+                                              pfns, &huge_page);
                if (count < 0)
                        return count;
 
-               ret = handler(region, flags, pfn_offset, count, huge_page);
+               ret = handler(region, flags, pfn_offset, count, pfns,
+                             huge_page);
                if (ret < 0)
                        return ret;
 
@@ -266,16 +271,17 @@ struct mshv_region *mshv_region_create(struct mshv_partition *partition,
 static int mshv_region_chunk_share(struct mshv_region *region,
                                   u32 flags,
                                   u64 pfn_offset, u64 pfn_count,
+                                  unsigned long *pfns,
                                   bool huge_page)
 {
-       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[pfn_offset]))
                return -EINVAL;
 
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
-                                             region->mreg_pfns + pfn_offset,
+                                             pfns + pfn_offset,
                                              pfn_count,
                                              HV_MAP_GPA_READABLE |
                                              HV_MAP_GPA_WRITABLE,
@@ -288,22 +294,24 @@ static int mshv_region_share(struct mshv_region *region)
 
        return mshv_region_process_range(region, flags,
                                         0, region->nr_pfns,
+                                        region->mreg_pfns,
                                         mshv_region_chunk_share);
 }
 
 static int mshv_region_chunk_unshare(struct mshv_region *region,
                                     u32 flags,
                                     u64 pfn_offset, u64 pfn_count,
+                                    unsigned long *pfns,
                                     bool huge_page)
 {
-       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[pfn_offset]))
                return -EINVAL;
 
        if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
-                                             region->mreg_pfns + pfn_offset,
+                                             pfns + pfn_offset,
                                              pfn_count, 0,
                                              flags, false);
 }
@@ -314,12 +322,14 @@ static int mshv_region_unshare(struct mshv_region *region)
 
        return mshv_region_process_range(region, flags,
                                         0, region->nr_pfns,
+                                        region->mreg_pfns,
                                         mshv_region_chunk_unshare);
 }
 
 static int mshv_region_chunk_remap(struct mshv_region *region,
                                   u32 flags,
                                   u64 pfn_offset, u64 pfn_count,
+                                  unsigned long *pfns,
                                   bool huge_page)
 {
        /*
@@ -327,7 +337,7 @@ static int mshv_region_chunk_remap(struct mshv_region *region,
         * hypervisor track dirty pages, enabling precopy live
         * migration.
         */
-       if (!mshv_pfn_valid(region->mreg_pfns[pfn_offset]))
+       if (!mshv_pfn_valid(pfns[pfn_offset]))
                flags = HV_MAP_GPA_NO_ACCESS;
 
        if (huge_page)
@@ -336,15 +346,17 @@ static int mshv_region_chunk_remap(struct mshv_region *region,
        return hv_call_map_ram_pfns(region->partition->pt_id,
                                    region->start_gfn + pfn_offset,
                                    pfn_count, flags,
-                                   region->mreg_pfns + pfn_offset);
+                                   pfns + pfn_offset);
 }
 
 static int mshv_region_remap_pfns(struct mshv_region *region,
                                  u32 map_flags,
-                                 u64 pfn_offset, u64 pfn_count)
+                                 u64 pfn_offset, u64 pfn_count,
+                                 unsigned long *pfns)
 {
        return mshv_region_process_range(region, map_flags,
                                         pfn_offset, pfn_count,
+                                        pfns,
                                         mshv_region_chunk_remap);
 }
 
@@ -353,7 +365,8 @@ static int mshv_region_map(struct mshv_region *region)
        u32 map_flags = region->hv_map_flags;
 
        return mshv_region_remap_pfns(region, map_flags,
-                                     0, region->nr_pfns);
+                                     0, region->nr_pfns,
+                                     region->mreg_pfns);
 }
 
 static void mshv_region_invalidate_pfns(struct mshv_region *region,
@@ -668,7 +681,8 @@ static int mshv_region_collect_and_map(struct mshv_region *region,
        }
 
        ret = mshv_region_remap_pfns(region, region->hv_map_flags,
-                                    pfn_offset, pfn_count);
+                                    pfn_offset, pfn_count,
+                                    region->mreg_pfns);
 
        mutex_unlock(&region->mreg_mutex);
 out:



Reply via email to