Under high levels of memory page churn, the array of pages in the
l1 page buffer pool can overflow and overwrite data in adjacent
memory pages.  This change verifies that there is enough room
before adding pages to the pool.
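
A simplified illustration of the failure mode (stand-in types and
helper names, not the actual OSv structures; only the 512-entry
capacity is taken from the assert added below):

    #include <cassert>
    #include <cstddef>

    // Stand-in for the per-CPU l1 pool: a fixed-size array used as a
    // stack of free pages.
    struct page_stack {
        void* pages[512];
        std::size_t nr = 0;

        void push(void* p)
        {
            // Without this check, a push on a full pool writes past
            // pages[511] into whatever happens to sit next to the
            // array in memory.
            assert(nr < 512);
            pages[nr++] = p;
        }
        void* pop()
        {
            // Likewise, a pop on an empty pool hands back pages[-1].
            assert(nr);
            return pages[--nr];
        }
    };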

Additionally, this change limits l1 page pool size management to the
l1 fill thread only.  Calls into l1::refill and l1::unfill now just
do the minimum amount of work necessary to continue.
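
Roughly, the new division of labour looks like the sketch below
(illustrative values and placeholder refill/unfill bodies; the
watermark and batch sizes here are stand-ins rather than the exact
constants used by the allocator):

    #include <cstddef>

    // Stand-in for the relevant l1 pool state (illustration only).
    struct pool_state {
        std::size_t nr = 0;                      // pages currently pooled
        static constexpr std::size_t max = 512;  // pool capacity
        static constexpr std::size_t watermark_lo = max / 4;
        static constexpr std::size_t watermark_hi = 3 * max / 4;
        static constexpr std::size_t batch = 32; // stand-in for page_batch::nr_pages
    };

    // Placeholders: each call exchanges at most one batch with the
    // global (l2) pool and returns.
    void refill(pool_state& p) { p.nr += pool_state::batch; }
    void unfill(pool_state& p) { p.nr -= pool_state::batch; }

    // Only the fill thread keeps iterating until the pool is back
    // around max / 2; other callers invoke refill()/unfill() once,
    // just enough to get themselves unstuck.
    void fill_thread_pass(pool_state& p)
    {
        if (p.nr < pool_state::watermark_lo) {
            while (p.nr + pool_state::batch < pool_state::max / 2) {
                refill(p);
            }
        }
        if (p.nr > pool_state::watermark_hi) {
            while (p.nr > pool_state::batch + pool_state::max / 2) {
                unfill(p);
            }
        }
    }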

Signed-off-by: Timmons C. Player <timmons.pla...@spirent.com>
---
 core/mempool.cc | 43 ++++++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 13 deletions(-)

diff --git a/core/mempool.cc b/core/mempool.cc
index 50c938f..1817a09 100644
--- a/core/mempool.cc
+++ b/core/mempool.cc
@@ -1099,8 +1099,16 @@ struct l1 {
     }
     static void* alloc_page_local();
     static bool free_page_local(void* v);
-    void* pop() { return _pages[--nr]; }
-    void push(void* page) { _pages[nr++] = page; }
+    void* pop()
+    {
+        assert(nr);
+        return _pages[--nr];
+    }
+    void push(void* page)
+    {
+        assert(nr < 512);
+        _pages[nr++] = page;
+    }
     void* top() { return _pages[nr - 1]; }
     void wake_thread() { _fill_thread.wake(); }
     static void fill_thread();
@@ -1151,12 +1159,10 @@ public:
        _fill_thread.start();
     }
 
-    page_batch* alloc_page_batch(l1& pbuf)
+    page_batch* alloc_page_batch()
     {
         page_batch* pb;
-    while (!(pb = try_alloc_page_batch()) &&
-                // Check again since someone else might change pbuf.nr when we sleep
-                (pbuf.nr + page_batch::nr_pages < pbuf.max / 2)) {
+        while (!(pb = try_alloc_page_batch())) {
             WITH_LOCK(migration_lock) {
                 DROP_LOCK(preempt_lock) {
                     refill();
@@ -1247,10 +1253,14 @@ void l1::fill_thread()
                 }
         });
         if (pbuf.nr < pbuf.watermark_lo) {
-            refill();
+            while (pbuf.nr + page_batch::nr_pages < pbuf.max / 2) {
+                refill();
+            }
         }
         if (pbuf.nr > pbuf.watermark_hi) {
-            unfill();
+            while (pbuf.nr > page_batch::nr_pages + pbuf.max / 2) {
+                unfill();
+            }
         }
     }
 }
@@ -1259,11 +1269,18 @@ void l1::refill()
 {
     SCOPE_LOCK(preempt_lock);
     auto& pbuf = get_l1();
-    while (pbuf.nr + page_batch::nr_pages < pbuf.max / 2) {
-        auto* pb = global_l2.alloc_page_batch(pbuf);
+    if (pbuf.nr + page_batch::nr_pages < pbuf.max / 2) {
+        auto* pb = global_l2.alloc_page_batch();
         if (pb) {
-            for (auto& page : pb->pages) {
-                pbuf.push(page);
+            // Other threads might have filled the array while we waited for
+            // the page batch.  Make sure there is enough room to add the pages
+            // we just acquired, otherwise return them.
+            if (pbuf.nr + page_batch::nr_pages <= pbuf.max) {
+                for (auto& page : pb->pages) {
+                    pbuf.push(page);
+                }
+            } else {
+                global_l2.free_page_batch(pb);
             }
         }
     }
@@ -1273,7 +1290,7 @@ void l1::unfill()
 {
     SCOPE_LOCK(preempt_lock);
     auto& pbuf = get_l1();
-    while (pbuf.nr > page_batch::nr_pages + pbuf.max / 2) {
+    if (pbuf.nr > page_batch::nr_pages + pbuf.max / 2) {
         auto* pb = static_cast<page_batch*>(pbuf.top());
         for (size_t i = 0 ; i < page_batch::nr_pages; i++) {
             pb->pages[i] = pbuf.pop();
-- 
2.7.4
