Prevent infinite fault loops when guests access memory regions without
proper permissions. Currently, mshv_handle_gpa_intercept() attempts to
remap pages for all faults on movable memory regions, regardless of
whether the access type is permitted. When a guest writes to a read-only
region, the remap succeeds but the region remains read-only, causing
immediate re-fault and spinning the vCPU indefinitely.

Validate the intercept access type against region permissions before
attempting remaps. Reject write accesses to non-writable regions and
execute accesses to non-executable regions early, returning false to let
the VMM handle the intercept appropriately.

This also closes a potential DoS vector where malicious guests could
intentionally trigger these fault loops to consume host resources.

Fixes: b9a66cd5ccbb ("mshv: Add support for movable memory regions")
Signed-off-by: Stanislav Kinsburskii <[email protected]>
---
 drivers/hv/mshv_root_main.c |   15 ++++++++++++---
 include/hyperv/hvgdk_mini.h |    6 ++++++
 include/hyperv/hvhdk.h      |    4 ++--
 3 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 9b0acd49c129..bb9fe4985e95 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -657,7 +657,7 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
 {
        struct mshv_partition *p = vp->vp_partition;
        struct mshv_mem_region *region;
-       bool ret;
+       bool ret = false;
        u64 gfn;
 #if defined(CONFIG_X86_64)
        struct hv_x64_memory_intercept_message *msg =
@@ -668,6 +668,8 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
                (struct hv_arm64_memory_intercept_message *)
                vp->vp_intercept_msg_page->u.payload;
 #endif
+       enum hv_intercept_access_type access_type =
+               msg->header.intercept_access_type;
 
        gfn = HVPFN_DOWN(msg->guest_physical_address);
 
@@ -675,12 +677,19 @@ static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
        if (!region)
                return false;
 
+       if (access_type == HV_INTERCEPT_ACCESS_WRITE &&
+           !(region->hv_map_flags & HV_MAP_GPA_WRITABLE))
+               goto put_region;
+
+       if (access_type == HV_INTERCEPT_ACCESS_EXECUTE &&
+           !(region->hv_map_flags & HV_MAP_GPA_EXECUTABLE))
+               goto put_region;
+
        /* Only movable memory ranges are supported for GPA intercepts */
        if (region->mreg_type == MSHV_REGION_TYPE_MEM_MOVABLE)
                ret = mshv_region_handle_gfn_fault(region, gfn);
-       else
-               ret = false;
 
+put_region:
        mshv_region_put(region);
 
        return ret;
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 056ef7b6b360..98b15539e467 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -1532,4 +1532,10 @@ struct hv_mmio_write_input {
        u8 data[HV_HYPERCALL_MMIO_MAX_DATA_LENGTH];
 } __packed;
 
+enum hv_intercept_access_type {
+       HV_INTERCEPT_ACCESS_READ        = 0,
+       HV_INTERCEPT_ACCESS_WRITE       = 1,
+       HV_INTERCEPT_ACCESS_EXECUTE     = 2
+};
+
 #endif /* _HV_HVGDK_MINI_H */
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
index 245f3db53bf1..5e83d3714966 100644
--- a/include/hyperv/hvhdk.h
+++ b/include/hyperv/hvhdk.h
@@ -779,7 +779,7 @@ struct hv_x64_intercept_message_header {
        u32 vp_index;
        u8 instruction_length:4;
        u8 cr8:4; /* Only set for exo partitions */
-       u8 intercept_access_type;
+       u8 intercept_access_type; /* enum hv_intercept_access_type */
        union hv_x64_vp_execution_state execution_state;
        struct hv_x64_segment_register cs_segment;
        u64 rip;
@@ -825,7 +825,7 @@ union hv_arm64_vp_execution_state {
 struct hv_arm64_intercept_message_header {
        u32 vp_index;
        u8 instruction_length;
-       u8 intercept_access_type;
+       u8 intercept_access_type; /* enum hv_intercept_access_type */
        union hv_arm64_vp_execution_state execution_state;
        u64 pc;
        u64 cpsr;