When balloon batching is not supported by the hypervisor, the guest
frame number (GFN) must fit in 32 bits. However, due to a bug, this
check was mistakenly ignored. In practice, the balloon currently does
not work when total RAM is greater than 16TB, so this bug is unlikely
to be encountered.

Fixes: ef0f8f112984 ("VMware balloon: partially inline vmballoon_reserve_page.")

Reviewed-by: Xavier Deguillard <xdeguillard@vmware.com>
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 drivers/misc/vmw_balloon.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index a73b5d64403a..496607a9046a 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -448,7 +448,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
 
        pfn32 = (u32)pfn;
        if (pfn32 != pfn)
-               return -1;
+               return -EINVAL;
 
        STATS_INC(b->stats.lock[false]);
 
@@ -458,7 +458,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
 
        pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
        STATS_INC(b->stats.lock_fail[false]);
-       return 1;
+       return -EIO;
 }
 
 static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -595,11 +595,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
 
        locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
                                                                target);
-       if (locked > 0) {
+       if (locked) {
                STATS_INC(b->stats.refused_alloc[false]);
 
-               if (hv_status == VMW_BALLOON_ERROR_RESET ||
-                               hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+               if (locked == -EIO &&
+                   (hv_status == VMW_BALLOON_ERROR_RESET ||
+                    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
                        vmballoon_free_page(page, false);
                        return -EIO;
                }
@@ -615,7 +616,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
                } else {
                        vmballoon_free_page(page, false);
                }
-               return -EIO;
+               return locked;
        }
 
        /* track allocated page */
-- 
2.17.0

Reply via email to