Dropping the reference count of PageOffline() pages allows the offlining
code to skip them. However, we also have to convert PG_reserved to
another flag - let's use PG_dirty - so that has_unmovable_pages() will
handle them properly: PG_reserved pages are detected as unmovable right
away.
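
For context, the offlining path checks page by page whether a range can
be isolated. A rough sketch of the kind of check referred to above (the
helper name is made up for illustration; this is not the actual
has_unmovable_pages() code):

	/* Illustrative sketch only -- not the real mm/ implementation. */
	static bool sketch_page_blocks_offlining(struct page *page)
	{
		/* PG_reserved pages are detected as unmovable right away. */
		if (PageReserved(page))
			return true;

		/*
		 * A PageOffline() page whose reference count was dropped to
		 * zero (as this patch does for unplugged subblocks) can
		 * simply be skipped by the offlining code.
		 */
		if (PageOffline(page) && !page_count(page))
			return false;

		/* Anything else needs the usual movability checks. */
		return true;
	}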

We need the flag to tell whether we are onlining pages for the first
time, or whether we allocated them via alloc_contig_range().
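
As an illustration (the helper name below is hypothetical, not the
driver's actual API): the PG_dirty bit set in
virtio_mem_set_fake_offline() marks pages that were kept fake-offline
while onlining the memory block, i.e., pages the buddy has never seen.

	/* Sketch only; the helper name is made up for illustration. */
	static bool sketch_page_never_onlined(struct page *page)
	{
		/*
		 * Set for pages that were fake-offlined when onlining the
		 * memory block. Pages we got back via alloc_contig_range()
		 * do not carry the flag.
		 */
		return PageDirty(page);
	}

A page for which this returns true would eventually be handed to the
buddy for the first time (e.g., via generic_online_page()); the others
would merely have to be freed again.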

Properly take care of the offlining code also modifying the stats
(managed page counters), and add special handling for the case where
the driver gets unloaded.
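
A condensed, per-page sketch of the accounting pairing added below (the
real code iterates over unplugged subblocks, see the MEM_GOING_OFFLINE /
MEM_CANCEL_OFFLINE hunks):

	/* Sketch only -- condensed version of the notifier handling below. */
	static void sketch_going_offline(struct page *page)
	{
		/* Re-add to the managed counter so offlining subtracts it again. */
		adjust_managed_page_count(page, 1);
		/* Refcount 0 lets the offlining code skip the PageOffline() page. */
		page_ref_dec(page);
	}

	static void sketch_cancel_offline(struct page *page)
	{
		/* Offlining was aborted: undo both steps. */
		page_ref_inc(page);
		adjust_managed_page_count(page, -1);
	}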

Cc: "Michael S. Tsirkin" <m...@redhat.com>
Cc: Jason Wang <jasow...@redhat.com>
Cc: Oscar Salvador <osalva...@suse.de>
Cc: Michal Hocko <mho...@kernel.org>
Cc: Igor Mammedov <imamm...@redhat.com>
Cc: Dave Young <dyo...@redhat.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Pavel Tatashin <pasha.tatas...@soleen.com>
Cc: Stefan Hajnoczi <stefa...@redhat.com>
Cc: Vlastimil Babka <vba...@suse.cz>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 drivers/virtio/virtio_mem.c | 64 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 1 deletion(-)

diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 5a142a371222..a12a0f9c076b 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -564,6 +564,53 @@ static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id,
                virtio_mem_retry(vm);
 }
 
+static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
+                                           unsigned long mb_id)
+{
+       const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+       unsigned long pfn;
+       int sb_id, i;
+
+       for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+               if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       continue;
+               /*
+                * Drop our reference to the pages so the memory can get
+                * offlined and add the unplugged pages to the managed
+                * page counters (so offlining code can correctly subtract
+                * them again).
+                */
+               pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                              sb_id * vm->subblock_size);
+               adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+               for (i = 0; i < nr_pages; i++)
+                       page_ref_dec(pfn_to_page(pfn + i));
+       }
+}
+
+static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
+                                            unsigned long mb_id)
+{
+       const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+       unsigned long pfn;
+       int sb_id, i;
+
+       for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
+               if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+                       continue;
+               /*
+                * Get the reference we dropped when going offline and
+                * subtract the unplugged pages from the managed page
+                * counters.
+                */
+               pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
+                              sb_id * vm->subblock_size);
+               adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+               for (i = 0; i < nr_pages; i++)
+                       page_ref_inc(pfn_to_page(pfn + i));
+       }
+}
+
 /*
  * This callback will either be called synchonously from add_memory() or
  * asynchronously (e.g., triggered via user space). We have to be careful
@@ -611,6 +658,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
                        break;
                mutex_lock(&vm->hotplug_mutex);
                vm->hotplug_active = true;
+               virtio_mem_notify_going_offline(vm, mb_id);
                break;
        case MEM_GOING_ONLINE:
                spin_lock_irq(&vm->removal_lock);
@@ -636,6 +684,12 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
                mutex_unlock(&vm->hotplug_mutex);
                break;
        case MEM_CANCEL_OFFLINE:
+               if (!vm->hotplug_active)
+                       break;
+               virtio_mem_notify_cancel_offline(vm, mb_id);
+               vm->hotplug_active = false;
+               mutex_unlock(&vm->hotplug_mutex);
+               break;
        case MEM_CANCEL_ONLINE:
                if (!vm->hotplug_active)
                        break;
@@ -660,8 +714,11 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
                struct page *page = pfn_to_page(pfn);
 
                __SetPageOffline(page);
-               if (!onlined)
+               if (!onlined) {
                        SetPageDirty(page);
+                       /* FIXME: remove after cleanups */
+                       ClearPageReserved(page);
+               }
        }
 }
 
@@ -1719,6 +1776,11 @@ static void virtio_mem_remove(struct virtio_device *vdev)
                rc = virtio_mem_mb_remove(vm, mb_id);
                BUG_ON(rc);
        }
+       /*
+        * After we unregistered our callbacks, user space can no longer
+        * offline partially plugged online memory blocks. No need to worry
+        * about them.
+        */
 
        /* unregister callbacks */
        unregister_virtio_mem_device(vm);
-- 
2.23.0
