This is an alternative method to work around issue #790. Fixes #790.
I'm sending it as an RFC because the patch is long and ugly. But in
some respects, it is safer than attempting to fix the lazy TLB flush
issue.

Putting a sched::thread on the stack has always been sort of ugly in
OSv. For example, you cannot detach such a thread and must join it
(this is different from std::thread). But in issue #790, Timmons C. Player
discovered something downright dangerous with on-stack sched::thread
objects: The stack may be mmap()ed, so the scheduler now needs to access
an mmap()ed memory area. Those can potentially have all sorts of problems
(like page faults in the scheduler), but more concretely: The "lazy TLB
flush" code added in commit 7e38453 means that the scheduler may see old
pages for user-mmap()ed pages, so it cannot work with sched::thread in
an mmap()ed area.

This patch prevents on-stack sched::thread by making the sched::thread
destructor private. By making std::unique_ptr's deleter a friend, we
make std::unique_ptr<sched::thread> the only way to create and delete
a thread. The patch is long and ugly because we need to change a hundred
places to use this new idiom instead of existing alternatives like on-stack
sched::thread (exactly what we tried to avoid) or naked delete expression
(which we no longer can allow, sadly, because I can't find a way to disable
on-stack objects without also disabling the delete expression).

One of the places modified is core/rcu.cc, whose on-stack sched::thread
objects are what caused issue #790 to surface.

Signed-off-by: Nadav Har'El <n...@scylladb.com>
---
 drivers/virtio-net.hh         |   6 +--
 drivers/virtio-rng.hh         |   2 +-
 drivers/vmxnet3.hh            |   4 +-
 include/osv/dhcp.hh           |   2 +-
 include/osv/mempool.hh        |   2 +-
 include/osv/percpu_xmit.hh    |  13 +++---
 include/osv/sched.hh          |   9 ++++
 tests/tst-malloc.hh           |   8 ++--
 tests/tst-rwlock.hh           |  16 +++----
 tests/tst-threads.hh          |  12 ++---
 tests/tst-timer.hh            | 102 +++++++++++++++++++++---------------------
 core/async.cc                 |  12 ++---
 core/dhcp.cc                  |   6 +--
 core/mempool.cc               |  28 ++++++------
 core/pagecache.cc             |   6 +--
 core/rcu.cc                   |  20 ++++-----
 core/trace.cc                 |   8 ++--
 drivers/acpi.cc               |  12 ++---
 drivers/virtio-net.cc         |   2 +-
 drivers/virtio-rng.cc         |   6 +--
 drivers/vmxnet3.cc            |   4 +-
 libc/pthread.cc               |  28 ++++++------
 libc/timerfd.cc               |  10 ++---
 tests/misc-free-perf.cc       |  10 ++---
 tests/misc-leak.cc            |   6 +--
 tests/misc-lfring.cc          |  36 +++++++--------
 tests/misc-loadbalance.cc     |   8 ++--
 tests/misc-mutex.cc           |   8 ++--
 tests/misc-sockets.cc         |  24 +++++-----
 tests/misc-wake.cc            |   2 +
 tests/tst-af-local.cc         |  16 +++----
 tests/tst-bsd-tcp1-zrcv.cc    |  12 ++---
 tests/tst-bsd-tcp1-zsnd.cc    |  12 ++---
 tests/tst-bsd-tcp1-zsndrcv.cc |  12 ++---
 tests/tst-bsd-tcp1.cc         |  12 ++---
 tests/tst-condvar.cc          |  34 +++++++-------
 tests/tst-mmap.cc             |  19 ++++----
 tests/tst-pin.cc              |  70 ++++++++++++++---------------
 tests/tst-preempt.cc          |   6 +--
 tests/tst-threadcomplete.cc   |  16 ++++---
 tests/tst-vfs.cc              |   8 ++--
 tests/tst-wait-for.cc         |  30 ++++++-------
 tests/tst-yield.cc            |  10 ++---
 43 files changed, 338 insertions(+), 331 deletions(-)

diff --git a/drivers/virtio-net.hh b/drivers/virtio-net.hh
index cf2d6f2..e32c911 100644
--- a/drivers/virtio-net.hh
+++ b/drivers/virtio-net.hh
@@ -302,10 +302,10 @@ private:
     /* Single Rx queue object */
     struct rxq {
         rxq(vring* vq, std::function<void ()> poll_func)
-            : vqueue(vq), poll_task(poll_func, sched::thread::attr().
-                                    name("virtio-net-rx")) {};
+            : vqueue(vq), poll_task(new sched::thread(poll_func, 
sched::thread::attr().
+                                    name("virtio-net-rx"))) {};
         vring* vqueue;
-        sched::thread  poll_task;
+        std::unique_ptr<sched::thread> poll_task;
         struct rxq_stats stats = { 0 };
 
         void update_wakeup_stats(const u64 wakeup_packets) {
diff --git a/drivers/virtio-rng.hh b/drivers/virtio-rng.hh
index b2d13e5..1790665 100644
--- a/drivers/virtio-rng.hh
+++ b/drivers/virtio-rng.hh
@@ -45,7 +45,7 @@ private:
     static const size_t _pool_size = 64;
     std::vector<char> _entropy;
     pci_interrupt _irq;
-    sched::thread _thread;
+    std::unique_ptr<sched::thread> _thread;
     condvar _producer;
     condvar _consumer;
     vring* _queue;
diff --git a/drivers/vmxnet3.hh b/drivers/vmxnet3.hh
index c69b4ed..7d3afdc 100644
--- a/drivers/vmxnet3.hh
+++ b/drivers/vmxnet3.hh
@@ -220,7 +220,7 @@ private:
 class vmxnet3_rxqueue : public vmxnet3_rxq_shared {
 public:
     explicit vmxnet3_rxqueue()
-    : task([this] { receive_work(); }, 
sched::thread::attr().name("vmxnet3-receive")) {};
+    : task(new sched::thread([this] { receive_work(); }, 
sched::thread::attr().name("vmxnet3-receive"))) {};
     void init(struct ifnet* ifn, pci::bar *bar0);
     void set_intr_idx(unsigned idx) { layout->intr_idx = static_cast<u8>(idx); 
}
     void enable_interrupt();
@@ -235,7 +235,7 @@ public:
         u64 rx_bh_wakeups; /* number of timer Rx BH has been woken up */
         wakeup_stats rx_wakeup_stats;
     } stats = { 0 };
-    sched::thread task;
+    std::unique_ptr<sched::thread> task;
 
 private:
     void receive_work();
diff --git a/include/osv/dhcp.hh b/include/osv/dhcp.hh
index 96e75a6..ce1b8f3 100644
--- a/include/osv/dhcp.hh
+++ b/include/osv/dhcp.hh
@@ -244,7 +244,7 @@ namespace dhcp {
         void queue_packet(struct mbuf* m);
 
     private:
-        sched::thread * _dhcp_thread;
+        std::unique_ptr<sched::thread> _dhcp_thread;
 
         mutex _lock;
         std::list<struct mbuf*> _rx_packets;
diff --git a/include/osv/mempool.hh b/include/osv/mempool.hh
index 4f36c16..5dae764 100644
--- a/include/osv/mempool.hh
+++ b/include/osv/mempool.hh
@@ -166,7 +166,7 @@ private:
 
     reclaimer_waiters _oom_blocked; // Callers are blocked due to lack of 
memory
     condvar _blocked;     // The reclaimer itself is blocked waiting for 
pressure condition
-    sched::thread _thread;
+    std::unique_ptr<sched::thread> _thread;
 
     std::vector<shrinker *> _shrinkers;
     mutex _shrinkers_mutex;
diff --git a/include/osv/percpu_xmit.hh b/include/osv/percpu_xmit.hh
index 85fd94b..c6eede8 100644
--- a/include/osv/percpu_xmit.hh
+++ b/include/osv/percpu_xmit.hh
@@ -190,14 +190,11 @@ template <class NetDevTxq, unsigned CpuTxqSize,
 class xmitter {
 private:
     struct worker_info {
-        worker_info() : me(NULL), next(NULL) {}
+        worker_info() : me(nullptr), next(nullptr) {}
         ~worker_info() {
-            if (me) {
-                delete me;
-            }
         }
 
-        sched::thread *me;
+        std::unique_ptr<sched::thread> me;
         sched::thread *next;
     };
 
@@ -213,10 +210,10 @@ public:
             _cpuq.for_cpu(c)->reset(new cpu_queue_type);
             _all_cpuqs.push_back(_cpuq.for_cpu(c)->get());
 
-            _worker.for_cpu(c)->me =
+            _worker.for_cpu(c)->me.reset(
                 new sched::thread([this] { poll_until(); },
                                sched::thread::attr().pin(c).
-                               name(worker_name_base + std::to_string(c->id)));
+                               name(worker_name_base + 
std::to_string(c->id))));
             _worker.for_cpu(c)->me->
                                 set_priority(sched::thread::priority_infinity);
         }
@@ -230,7 +227,7 @@ public:
         for (auto c : sched::cpus) {
             worker_info *cur_worker = _worker.for_cpu(c);
 
-            prev_cpu_worker->next = cur_worker->me;
+            prev_cpu_worker->next = &*cur_worker->me;
             prev_cpu_worker = cur_worker;
         }
 
diff --git a/include/osv/sched.hh b/include/osv/sched.hh
index 261bcfd..79f9489 100644
--- a/include/osv/sched.hh
+++ b/include/osv/sched.hh
@@ -401,7 +401,16 @@ public:
 
     explicit thread(std::function<void ()> func, attr attributes = attr(),
             bool main = false, bool app = false);
+private:
+    // By making the destructor private, we force the sched::thread object
+    // to be on the heap, not on any thread's stack. We don't want the
+    // scheduler to have to access such a potentially-mmap'ed stack (see
+    // issue #790).
     ~thread();
+    // Unfortunately a private destructor also means we cannot
+    // directly "delete" a thread, so let's make unique_ptr<thread> usable
+    friend std::unique_ptr<sched::thread>::deleter_type;
+public:
     void start();
     template <class Pred>
     static void wait_until_interruptible(Pred pred);
diff --git a/tests/tst-malloc.hh b/tests/tst-malloc.hh
index fe15218..a2beec8 100644
--- a/tests/tst-malloc.hh
+++ b/tests/tst-malloc.hh
@@ -88,8 +88,8 @@ public:
     {
         test_locks t;
         t.die = t.free_finished = t.alloc_finished = false;
-        sched::thread* t1 = new sched::thread([&] { alloc_thread(t); });
-        sched::thread* t2 = new sched::thread([&] { free_thread(t); });
+        std::unique_ptr<sched::thread> t1(new sched::thread([&] { 
alloc_thread(t); }));
+        std::unique_ptr<sched::thread> t2(new sched::thread([&] { 
free_thread(t); }));
         t1->start();
         t2->start();
 
@@ -106,8 +106,8 @@ public:
         t1->join();
         t2->join();
 
-        delete t1;
-        delete t2;
+        t1.reset();
+        t2.reset();
         debug("Alloc test succeeded\n");
     }
 };
diff --git a/tests/tst-rwlock.hh b/tests/tst-rwlock.hh
index d60ee1f..ba9f965 100644
--- a/tests/tst-rwlock.hh
+++ b/tests/tst-rwlock.hh
@@ -133,29 +133,29 @@ public:
 
         // Test 1
         _test1_finished = false;
-        thread* t1 = new thread([&] { rwlock_test1(); });
+        std::unique_ptr<thread> t1(new thread([&] { rwlock_test1(); }));
         t1->start();
         _main->wait_until([&] { return (_test1_finished); });
-        delete t1;
+        t1.reset();
 
         // Test 2
         _test2_t1_finished = false;
         _test2_t2_finished = false;
         rw_init(&_test2_rwlock, "tst2");
-        thread* t2_1 = new thread([&] { rwlock_test2_t1(); });
-        thread* t2_2 = new thread([&] { rwlock_test2_t2(); });
+        std::unique_ptr<thread> t2_1(new thread([&] { rwlock_test2_t1(); }));
+        std::unique_ptr<thread> t2_2(new thread([&] { rwlock_test2_t2(); }));
         t2_1->start();
         t2_2->start();
         _main->wait_until([&] { return (_test2_t1_finished && 
_test2_t2_finished); });
-        delete t2_1;
-        delete t2_2;
+        t2_1.reset();
+        t2_2.reset();
 
         // Test 3
         _test3_finished = false;
-        thread* t3 = new thread([&] { rwlock_test3(); });
+        std::unique_ptr<thread> t3(new thread([&] { rwlock_test3(); }));
         t3->start();
         _main->wait_until([&] { return (_test3_finished); });
-        delete t3;
+        t3.reset();
     }
 };
 
diff --git a/tests/tst-threads.hh b/tests/tst-threads.hh
index fb7d445..ff24d1b 100644
--- a/tests/tst-threads.hh
+++ b/tests/tst-threads.hh
@@ -17,9 +17,9 @@ class test_threads : public unit_tests::vtest {
 public:
     struct test_threads_data {
         sched::thread* main;
-        sched::thread* t1;
+        std::unique_ptr<sched::thread> t1;
         bool t1ok;
-        sched::thread* t2;
+        std::unique_ptr<sched::thread> t2;
         bool t2ok;
         int test_ctr;
     };
@@ -55,16 +55,16 @@ public:
         test_threads_data tt;
         tt.main = sched::thread::current();
         tt.t1ok = tt.t2ok = true;
-        tt.t1 = new sched::thread([&] { test_thread_1(tt); });
-        tt.t2 = new sched::thread([&] { test_thread_2(tt); });
+        tt.t1.reset(new sched::thread([&] { test_thread_1(tt); }));
+        tt.t2.reset(new sched::thread([&] { test_thread_2(tt); }));
         tt.test_ctr = 0;
         tt.t1->start();
         tt.t2->start();
         sched::thread::wait_until([&] { return tt.test_ctr >= 1000; });
         tt.t1->join();
         tt.t2->join();
-        delete tt.t1;
-        delete tt.t2;
+        tt.t1.reset();
+        tt.t2.reset();
         debug("threading test succeeded\n");
     }
 };
diff --git a/tests/tst-timer.hh b/tests/tst-timer.hh
index 8038bb3..33a11d1 100644
--- a/tests/tst-timer.hh
+++ b/tests/tst-timer.hh
@@ -1,54 +1,54 @@
-/*
- * Copyright (C) 2013 Cloudius Systems, Ltd.
- *
- * This work is open source software, licensed under the terms of the
- * BSD license as described in the LICENSE file in the top-level directory.
- */
-
-#ifndef __TST_TIMER__
-#define __TST_TIMER__
-
-#include "tst-hub.hh"
-#include "drivers/clock.hh"
-#include <osv/debug.hh>
-#include <osv/sched.hh>
-// TODO: change this test to check the newer <osv/clock.hh> APIs and the
-// monotonic clock.
-
-inline constexpr long long operator"" _ms(unsigned long long t)
-{
-    return t * 1000000ULL;
-}
-inline constexpr long long operator"" _s(unsigned long long t)
-{
-    return t * 1000_ms;
-}
-
-class test_timer : public unit_tests::vtest {
-
-public:
-
-    void test1(void)
+    /*
+     * Copyright (C) 2013 Cloudius Systems, Ltd.
+     *
+     * This work is open source software, licensed under the terms of the
+     * BSD license as described in the LICENSE file in the top-level directory.
+     */
+
+    #ifndef __TST_TIMER__
+    #define __TST_TIMER__
+
+    #include "tst-hub.hh"
+    #include "drivers/clock.hh"
+    #include <osv/debug.hh>
+    #include <osv/sched.hh>
+    // TODO: change this test to check the newer <osv/clock.hh> APIs and the
+    // monotonic clock.
+
+    inline constexpr long long operator"" _ms(unsigned long long t)
     {
-        auto t1 = clock::get()->time();
-        auto t2 = clock::get()->time();
-        debug("Timer test: clock@t1 %1%\n", t1);
-        debug("Timer test: clock@t2 %1%\n", t2);
-
-        timespec ts = {};
-        ts.tv_nsec = 100;
-        t1 = clock::get()->time();
-        nanosleep(&ts, nullptr);
-        t2 = clock::get()->time();
-        debug("Timer test: nanosleep(100) -> %d\n", t2 - t1);
-        ts.tv_nsec = 100000;
-        t1 = clock::get()->time();
-        nanosleep(&ts, nullptr);
-        t2 = clock::get()->time();
-        debug("Timer test: nanosleep(100000) -> %d\n", t2 - t1);
+        return t * 1000000ULL;
     }
+    inline constexpr long long operator"" _s(unsigned long long t)
+    {
+        return t * 1000_ms;
+    }
+
+    class test_timer : public unit_tests::vtest {
+
+    public:
+
+        void test1(void)
+        {
+            auto t1 = clock::get()->time();
+            auto t2 = clock::get()->time();
+            debug("Timer test: clock@t1 %1%\n", t1);
+            debug("Timer test: clock@t2 %1%\n", t2);
+
+            timespec ts = {};
+            ts.tv_nsec = 100;
+            t1 = clock::get()->time();
+            nanosleep(&ts, nullptr);
+            t2 = clock::get()->time();
+            debug("Timer test: nanosleep(100) -> %d\n", t2 - t1);
+            ts.tv_nsec = 100000;
+            t1 = clock::get()->time();
+            nanosleep(&ts, nullptr);
+            t2 = clock::get()->time();
+            debug("Timer test: nanosleep(100000) -> %d\n", t2 - t1);
+        }
 
-    static const int max_testers = 5000;
+        static const int max_testers = 5000;
     static const int tester_iteration = 10;
 
     void stress_thread(void)
@@ -70,19 +70,19 @@ public:
     {
         debug("Starting stress test\n");
         for (int i=0; i<max_testers; i++) {
-            _testers[i] = new sched::thread([&] { this->stress_thread(); });
+            _testers[i].reset(new sched::thread([&] { this->stress_thread(); 
}));
             _testers[i]->start();
         }
 
         // join
         for (int i=0; i<max_testers; i++) {
             _testers[i]->join();
-            delete _testers[i];
+            _testers[i].reset();
         }
         debug("End stress test\n");
     }
 
-    sched::thread *_testers[max_testers];
+    std::unique_ptr<sched::thread> _testers[max_testers];
 
     void run()
     {
diff --git a/core/async.cc b/core/async.cc
index 75e1704..83e5aca 100644
--- a/core/async.cc
+++ b/core/async.cc
@@ -93,12 +93,12 @@ TRACEPOINT(trace_async_worker_fire_ret, "");
 class async_worker {
 public:
     async_worker(sched::cpu* cpu)
-        : _thread(std::bind(&async_worker::run, this),
-            sched::thread::attr().pin(cpu).name(osv::sprintf("async_worker%d", 
cpu->id)))
-        , _timer(_thread)
+        : _thread(new sched::thread(std::bind(&async_worker::run, this),
+            sched::thread::attr().pin(cpu).name(osv::sprintf("async_worker%d", 
cpu->id))))
+        , _timer(*_thread)
         , _cpu(cpu)
     {
-        _thread.start();
+        _thread->start();
     }
 
     void insert(percpu_timer_task& task)
@@ -143,7 +143,7 @@ public:
 
         WITH_LOCK(preempt_lock) {
             if (_queue.empty()) {
-                _thread.wake();
+                _thread->wake();
             }
             _queue.push_back(*task);
         }
@@ -239,7 +239,7 @@ private:
 
     lockfree::unordered_queue_mpsc<percpu_timer_task> released_timer_tasks;
 
-    sched::thread _thread;
+    std::unique_ptr<sched::thread> _thread;
     sched::timer _timer;
     sched::cpu* _cpu;
 };
diff --git a/core/dhcp.cc b/core/dhcp.cc
index fb09669..a1bff92 100644
--- a/core/dhcp.cc
+++ b/core/dhcp.cc
@@ -578,10 +578,6 @@ namespace dhcp {
 
     dhcp_worker::~dhcp_worker()
     {
-        if (_dhcp_thread) {
-            delete _dhcp_thread;
-        }
-
         // FIXME: free packets and states
     }
 
@@ -603,7 +599,7 @@ namespace dhcp {
         IFNET_RUNLOCK();
 
         // Create the worker thread
-        _dhcp_thread = new sched::thread([&] { dhcp_worker_fn(); });
+        _dhcp_thread.reset(new sched::thread([&] { dhcp_worker_fn(); }));
         _dhcp_thread->set_name("dhcp");
         _dhcp_thread->start();
 
diff --git a/core/mempool.cc b/core/mempool.cc
index 50c938f..dfa05a7 100644
--- a/core/mempool.cc
+++ b/core/mempool.cc
@@ -933,7 +933,7 @@ void reclaimer_waiters::wait(size_t bytes)
     sched::thread *curr = sched::thread::current();
 
     // Wait for whom?
-    if (curr == &reclaimer_thread._thread) {
+    if (curr == &*reclaimer_thread._thread) {
         oom();
      }
 
@@ -949,10 +949,10 @@ void reclaimer_waiters::wait(size_t bytes)
 }
 
 reclaimer::reclaimer()
-    : _oom_blocked(), _thread([&] { _do_reclaim(); }, 
sched::thread::attr().detached().name("reclaimer").stack(mmu::page_size))
+    : _oom_blocked(), _thread(new sched::thread([&] { _do_reclaim(); }, 
sched::thread::attr().detached().name("reclaimer").stack(mmu::page_size)))
 {
-    osv_reclaimer_thread = reinterpret_cast<unsigned char *>(&_thread);
-    _thread.start();
+    osv_reclaimer_thread = reinterpret_cast<unsigned char *>(&*_thread);
+    _thread->start();
 }
 
 bool reclaimer::_can_shrink()
@@ -1076,10 +1076,10 @@ namespace page_pool {
 // nr_cpus threads are created to help filling the L1-pool.
 struct l1 {
     l1(sched::cpu* cpu)
-        : _fill_thread([] { fill_thread(); },
-            
sched::thread::attr().pin(cpu).name(osv::sprintf("page_pool_l1_%d", cpu->id)))
+        : _fill_thread(new sched::thread([] { fill_thread(); },
+            
sched::thread::attr().pin(cpu).name(osv::sprintf("page_pool_l1_%d", cpu->id))))
     {
-        _fill_thread.start();
+        _fill_thread->start();
     }
 
     static void* alloc_page()
@@ -1102,7 +1102,7 @@ struct l1 {
     void* pop() { return _pages[--nr]; }
     void push(void* page) { _pages[nr++] = page; }
     void* top() { return _pages[nr - 1]; }
-    void wake_thread() { _fill_thread.wake(); }
+    void wake_thread() { _fill_thread->wake(); }
     static void fill_thread();
     static void refill();
     static void unfill();
@@ -1113,7 +1113,7 @@ struct l1 {
     size_t nr = 0;
 
 private:
-    sched::thread _fill_thread;
+    std::unique_ptr<sched::thread> _fill_thread;
     void* _pages[max];
 };
 
@@ -1146,9 +1146,9 @@ public:
         , _watermark_lo(_max * 1 / 4)
         , _watermark_hi(_max * 3 / 4)
         , _stack(_max)
-        , _fill_thread([=] { fill_thread(); }, 
sched::thread::attr().name("page_pool_l2"))
+        , _fill_thread(new sched::thread([=] { fill_thread(); }, 
sched::thread::attr().name("page_pool_l2")))
     {
-       _fill_thread.start();
+       _fill_thread->start();
     }
 
     page_batch* alloc_page_batch(l1& pbuf)
@@ -1180,7 +1180,7 @@ public:
     page_batch* try_alloc_page_batch()
     {
         if (get_nr() < _watermark_lo) {
-            _fill_thread.wake();
+            _fill_thread->wake();
         }
         page_batch* pb;
         if (!_stack.pop(pb)) {
@@ -1193,7 +1193,7 @@ public:
     bool try_free_page_batch(page_batch* pb)
     {
         if (get_nr() > _watermark_hi) {
-            _fill_thread.wake();
+            _fill_thread->wake();
         }
         if (!_stack.push(pb)) {
             return false;
@@ -1216,7 +1216,7 @@ private:
     size_t _watermark_lo;
     size_t _watermark_hi;
     boost::lockfree::stack<page_batch*, boost::lockfree::fixed_sized<true>> 
_stack;
-    sched::thread _fill_thread;
+    std::unique_ptr<sched::thread> _fill_thread;
 };
 
 PERCPU(l1*, percpu_l1);
diff --git a/core/pagecache.cc b/core/pagecache.cc
index 2e927c0..b8e9d15 100644
--- a/core/pagecache.cc
+++ b/core/pagecache.cc
@@ -559,10 +559,10 @@ static class access_scanner {
     static constexpr double _min_cpu = 0.1;
     static constexpr unsigned _freq = 1000;
     double _cpu = _min_cpu;
-    sched::thread _thread;
+    std::unique_ptr<sched::thread> _thread;
 public:
-    access_scanner() : _thread(std::bind(&access_scanner::run, this), 
sched::thread::attr().name("page-access-scanner")) {
-        _thread.start();
+    access_scanner() : _thread(new 
sched::thread(std::bind(&access_scanner::run, this), 
sched::thread::attr().name("page-access-scanner"))) {
+        _thread->start();
     }
 
 private:
diff --git a/core/rcu.cc b/core/rcu.cc
index db10a09..1e4732e 100644
--- a/core/rcu.cc
+++ b/core/rcu.cc
@@ -45,7 +45,7 @@ private:
     void set_generation(uint64_t generation);
 private:
     static std::atomic<uint64_t> next_generation;
-    sched::thread _t;
+    std::unique_ptr<sched::thread> _t;
     std::atomic<uint64_t> _generation = { 0 };
     std::atomic<uint64_t> _request = { 0 };
     std::atomic<bool> _requested { false };
@@ -64,10 +64,10 @@ sched::cpu::notifier cpu_notifier([] {
 });
 
 cpu_quiescent_state_thread::cpu_quiescent_state_thread(sched::cpu* cpu)
-    : _t([=] { work(); }, 
sched::thread::attr().pin(cpu).name(osv::sprintf("rcu%d", cpu->id)))
+    : _t(new sched::thread([=] { work(); }, 
sched::thread::attr().pin(cpu).name(osv::sprintf("rcu%d", cpu->id))))
 {
-    (*percpu_quiescent_state_thread).reset(_t);
-    _t.start();
+    (*percpu_quiescent_state_thread).reset(*_t);
+    _t->start();
 }
 
 void cpu_quiescent_state_thread::request(uint64_t generation)
@@ -76,7 +76,7 @@ void cpu_quiescent_state_thread::request(uint64_t generation)
     while (generation > r && !_request.compare_exchange_weak(r, generation, 
std::memory_order_relaxed)) {
         // nothing to do
     }
-    _t.wake();
+    _t->wake();
 }
 
 bool cpu_quiescent_state_thread::check(uint64_t generation)
@@ -91,7 +91,7 @@ void cpu_quiescent_state_thread::set_generation(uint64_t 
generation)
     for (auto cqst : cpu_quiescent_state_threads) {
         if (cqst != this &&
                 cqst->_requested.load(std::memory_order_relaxed)) {
-            cqst->_t.wake();
+            cqst->_t->wake();
         }
     }
 }
@@ -238,14 +238,14 @@ void rcu_flush()
 {
     semaphore s{0};
     for (auto c : sched::cpus) {
-        sched::thread t([&] {
+        std::unique_ptr<sched::thread> t(new sched::thread([&] {
             rcu_defer([&] { s.post(); });
             // rcu_defer() might not wake the cleanup thread until enough 
deferred
             // callbacks have accumulated, so wake it up now.
             percpu_quiescent_state_thread->wake();
-        }, sched::thread::attr().pin(c));
-        t.start();
-        t.join();
+        }, sched::thread::attr().pin(c)));
+        t->start();
+        t->join();
     }
     s.wait(sched::cpus.size());
 }
diff --git a/core/trace.cc b/core/trace.cc
index cb704e8..f6635ad 100644
--- a/core/trace.cc
+++ b/core/trace.cc
@@ -712,7 +712,7 @@ trace::create_trace_dump()
     // during the extraction (disable preemption, just like trace write)
     unsigned i = 0;
     for (auto & cpu : sched::cpus) {
-        sched::thread t([&, i]() {
+        std::unique_ptr<sched::thread> t(new sched::thread([&, i]() {
             arch::irq_flag_notrace irq;
             irq.save();
             arch::irq_disable_notrace();
@@ -720,9 +720,9 @@ trace::create_trace_dump()
             copies.emplace_back(*tbp);
             irq.restore();
             signal.post();
-        }, sched::thread::attr().pin(cpu));
-        t.start();
-        t.join();
+        }, sched::thread::attr().pin(cpu)));
+        t->start();
+        t->join();
         ++i;
     }
     // Redundant. But just to verify.
diff --git a/drivers/acpi.cc b/drivers/acpi.cc
index 9bcffd6..f04fc25 100644
--- a/drivers/acpi.cc
+++ b/drivers/acpi.cc
@@ -225,15 +225,15 @@ public:
         , _context(ctxt)
         , _stopped(false)
         , _counter(0)
-        , _thread([this] { process_interrupts(); })
-        , _intr(gsi, [this] { _counter.fetch_add(1); _thread.wake(); })
+        , _thread(new sched::thread([this] { process_interrupts(); }))
+        , _intr(gsi, [this] { _counter.fetch_add(1); _thread->wake(); })
     {
-        _thread.start();
+        _thread->start();
     }
     ~acpi_interrupt() {
         _stopped.store(true);
-        _thread.wake();
-        _thread.join();
+        _thread->wake();
+        _thread->join();
     }
 private:
     void process_interrupts() {
@@ -253,7 +253,7 @@ private:
     void* _context;
     std::atomic<bool> _stopped;
     std::atomic<uint64_t> _counter;
-    sched::thread _thread;
+    std::unique_ptr<sched::thread> _thread;
     gsi_edge_interrupt _intr;
 };
 
diff --git a/drivers/virtio-net.cc b/drivers/virtio-net.cc
index dc31e81..044af20 100644
--- a/drivers/virtio-net.cc
+++ b/drivers/virtio-net.cc
@@ -233,7 +233,7 @@ net::net(pci::device& dev)
       _rxq(get_virt_queue(0), [this] { this->receiver(); }),
       _txq(this, get_virt_queue(1))
 {
-    sched::thread* poll_task = &_rxq.poll_task;
+    sched::thread* poll_task = &*_rxq.poll_task;
 
     poll_task->set_priority(sched::thread::priority_infinity);
 
diff --git a/drivers/virtio-rng.cc b/drivers/virtio-rng.cc
index 6afd3c7..0ac4517 100644
--- a/drivers/virtio-rng.cc
+++ b/drivers/virtio-rng.cc
@@ -39,13 +39,13 @@ namespace virtio {
 rng::rng(pci::device& pci_dev)
     : virtio_driver(pci_dev)
     , _irq(pci_dev, [&] { return ack_irq(); }, [&] { handle_irq(); })
-    , _thread([&] { worker(); }, sched::thread::attr().name("virtio-rng"))
+    , _thread(new sched::thread([&] { worker(); }, 
sched::thread::attr().name("virtio-rng")))
 {
     _queue = get_virt_queue(0);
 
     add_dev_status(VIRTIO_CONFIG_S_DRIVER_OK);
 
-    _thread.start();
+    _thread->start();
 
     s_hwrng = this;
     live_entropy_source_register(&vrng);
@@ -73,7 +73,7 @@ size_t rng::get_random_bytes(char* buf, size_t size)
 
 void rng::handle_irq()
 {
-    _thread.wake();
+    _thread->wake();
 }
 
 bool rng::ack_irq()
diff --git a/drivers/vmxnet3.cc b/drivers/vmxnet3.cc
index 926d1b0..1c5a1a6 100644
--- a/drivers/vmxnet3.cc
+++ b/drivers/vmxnet3.cc
@@ -232,7 +232,7 @@ void vmxnet3_rxqueue::init(struct ifnet* ifn, pci::bar 
*bar0)
     rxc.gen = init_gen;
     rxc.clear_descs();
 
-    task.start();
+    task->start();
 }
 
 void vmxnet3_rxqueue::discard(int rid, int idx)
@@ -362,7 +362,7 @@ void vmxnet3::allocate_interrupts()
 {
     _msi.easy_register({
         { 0, [] {}, nullptr },
-        { 1, [] {}, &_rxq[0].task }
+        { 1, [] {}, &*_rxq[0].task }
     });
     _txq[0].set_intr_idx(0);
     _rxq[0].set_intr_idx(1);
diff --git a/libc/pthread.cc b/libc/pthread.cc
index 56e1288..c73f3ca 100644
--- a/libc/pthread.cc
+++ b/libc/pthread.cc
@@ -90,7 +90,7 @@ namespace pthread_private {
         pthread_t to_libc();
         int join(void** retval);
         void* _retval;
-        sched::thread _thread;
+        std::unique_ptr<sched::thread> _thread;
     private:
         sched::thread::stack_info allocate_stack(thread_attr attr);
         static void free_stack(sched::thread::stack_info si);
@@ -109,18 +109,18 @@ namespace pthread_private {
 
     pthread::pthread(void *(*start)(void *arg), void *arg, sigset_t sigset,
                      const thread_attr* attr)
-            : _thread([=] {
+            : _thread(new sched::thread([=] {
                 current_pthread = to_libc();
                 sigprocmask(SIG_SETMASK, &sigset, nullptr);
                 _retval = start(arg);
-            }, attributes(attr ? *attr : thread_attr()), false, true)
+            }, attributes(attr ? *attr : thread_attr()), false, true))
     {
-        _thread.set_cleanup([=] { delete this; });
+        _thread->set_cleanup([=] { delete this; });
     }
 
     void pthread::start()
     {
-        _thread.start();
+        _thread->start();
     }
 
     sched::thread::attr pthread::attributes(thread_attr attr)
@@ -154,7 +154,7 @@ namespace pthread_private {
 
     int pthread::join(void** retval)
     {
-        _thread.join();
+        _thread->join();
         if (retval) {
             *retval = _retval;
         }
@@ -310,7 +310,7 @@ int pthread_getcpuclockid(pthread_t thread, clockid_t 
*clock_id)
 {
     if (clock_id) {
         pthread *p = pthread::from_libc(thread);
-        auto id = p->_thread.id();
+        auto id = p->_thread->id();
         *clock_id = id + _OSV_CLOCK_SLOTS;
     }
     return 0;
@@ -611,8 +611,8 @@ int pthread_getattr_np(pthread_t thread, pthread_attr_t 
*attr)
 {
     auto t = pthread::from_libc(thread);
     auto a = new (attr) thread_attr;
-    a->stack_begin = t->_thread.get_stack_info().begin;
-    a->stack_size = t->_thread.get_stack_info().size;
+    a->stack_begin = t->_thread->get_stack_info().begin;
+    a->stack_size = t->_thread->get_stack_info().size;
     return 0;
 }
 
@@ -799,7 +799,7 @@ int pthread_cancel(pthread_t thread)
 int pthread_detach(pthread_t thread)
 {
     pthread* p = pthread::from_libc(thread);
-    p->_thread.detach();
+    p->_thread->detach();
     return 0;
 }
 
@@ -895,7 +895,7 @@ void pthread_exit(void *retval)
 {
     auto t = pthread::from_libc(current_pthread);
     t->_retval = retval;
-    t->_thread.exit();
+    t->_thread->exit();
 }
 
 int sched_get_priority_max(int policy)
@@ -941,7 +941,7 @@ int pthread_setname_np(pthread_t p, const char* name)
     if (strlen(name) > 16) {
         return ERANGE;
     }
-    pthread::from_libc(p)->_thread.set_name(name);
+    pthread::from_libc(p)->_thread->set_name(name);
     return 0;
 }
 
@@ -994,7 +994,7 @@ static int setaffinity(sched::thread* t, size_t cpusetsize,
 int pthread_setaffinity_np(pthread_t thread, size_t cpusetsize,
         const cpu_set_t *cpuset)
 {
-    sched::thread *t = &pthread::from_libc(thread)->_thread;
+    sched::thread *t = &*pthread::from_libc(thread)->_thread;
     return setaffinity(t, cpusetsize, cpuset);
 }
 
@@ -1049,7 +1049,7 @@ static int getaffinity(const sched::thread *t, size_t cpusetsize,
 int pthread_getaffinity_np(const pthread_t thread, size_t cpusetsize,
         cpu_set_t *cpuset)
 {
-    const sched::thread *t = &pthread::from_libc(thread)->_thread;
+    const sched::thread *t = &*pthread::from_libc(thread)->_thread;
     return getaffinity(t, cpusetsize, cpuset);
 }
 
diff --git a/libc/timerfd.cc b/libc/timerfd.cc
index e61359f..b525358 100644
--- a/libc/timerfd.cc
+++ b/libc/timerfd.cc
@@ -40,7 +40,7 @@ private:
     // in a dedicated thread. We could have used a timer_base::client instead
     // of a real thread, but things get really complicated when trying to
     // support set() which cancels on one CPU the timer set on another CPU.
-    sched::thread _wakeup_thread;
+    std::unique_ptr<sched::thread> _wakeup_thread;
     s64 _wakeup_due = 0;
     condvar _wakeup_change_cond;
     bool _wakeup_thread_exit = false;
@@ -56,11 +56,11 @@ public:
 
 timerfd::timerfd(int clockid, int oflags)
     : special_file(FREAD | oflags, DTYPE_UNSPEC),
-      _wakeup_thread(
-            [&] { wakeup_thread_func(); }, sched::thread::attr().stack(4096).name("timerfd")),
+      _wakeup_thread(new sched::thread(
+            [&] { wakeup_thread_func(); }, sched::thread::attr().stack(4096).name("timerfd"))),
       _clockid(clockid)
 {
-    _wakeup_thread.start();
+    _wakeup_thread->start();
 }
 
 int timerfd::close() {
@@ -68,7 +68,7 @@ int timerfd::close() {
         _wakeup_thread_exit = true;
         _wakeup_change_cond.wake_one();
     }
-    _wakeup_thread.join();
+    _wakeup_thread->join();
     return 0;
 }
 
diff --git a/tests/misc-free-perf.cc b/tests/misc-free-perf.cc
index 7b4a3e8..49e1432 100644
--- a/tests/misc-free-perf.cc
+++ b/tests/misc-free-perf.cc
@@ -27,28 +27,28 @@ using queue_t = ring_spsc<void*,64*1024*1024>;
 class thread_allocator
 {
 private:
-    std::vector<sched::thread*> threads;
+    std::vector<std::unique_ptr<sched::thread>> threads;
     unsigned next_core {};
 public:
     template<typename Func>
     void add(Func func)
     {
         assert(next_core < sched::cpus.size());
-        threads.push_back(new sched::thread(func, sched::thread::attr().pin(sched::cpus[next_core++])));
+        threads.emplace_back(new sched::thread(func, sched::thread::attr().pin(sched::cpus[next_core++])));
     }
 
     void start()
     {
-        for (auto t : threads) {
+        for (auto& t : threads) {
             t->start();
         }
     }
 
     void join()
     {
-        for (auto t : threads) {
+        for (auto& t : threads) {
             t->join();
-            delete t;
+            t.reset();
         }
     }
 
diff --git a/tests/misc-leak.cc b/tests/misc-leak.cc
index 7cd6fb1..cfd8bf2 100644
--- a/tests/misc-leak.cc
+++ b/tests/misc-leak.cc
@@ -52,14 +52,12 @@ int main(int argc, char **argv)
 
     debug("testing leaks in threads\n");
     for(int i=0; i<100; i++){
-        sched::thread *t = new sched::thread([] {});
-        delete t;
+        std::unique_ptr<sched::thread> t(new sched::thread([] {}));
     }
     for(int i=0; i<100; i++){
-        sched::thread *t = new sched::thread([] {});
+        std::unique_ptr<sched::thread> t(new sched::thread([] {}));
         t->start();
         t->join();
-        delete t;
     }
 
     debug("testing leaks in pthread_create");
diff --git a/tests/misc-lfring.cc b/tests/misc-lfring.cc
index 276be5e..5ac0e4c 100644
--- a/tests/misc-lfring.cc
+++ b/tests/misc-lfring.cc
@@ -31,10 +31,10 @@ public:
     {
         assert (sched::cpus.size() >= 2);
 
-        sched::thread * thread1 = new sched::thread([&] { thread_push(0); },
-            sched::thread::attr().pin(sched::cpus[0]));
-        sched::thread * thread2 = new sched::thread([&] { thread_pop(1); },
-            sched::thread::attr().pin(sched::cpus[1]));
+        std::unique_ptr<sched::thread> thread1(new sched::thread([&] { thread_push(0); },
+            sched::thread::attr().pin(sched::cpus[0])));
+        std::unique_ptr<sched::thread> thread2(new sched::thread([&] { thread_pop(1); },
+            sched::thread::attr().pin(sched::cpus[1])));
 
         thread1->start();
         thread2->start();
@@ -42,8 +42,8 @@ public:
         thread1->join();
         thread2->join();
 
-        delete thread1;
-        delete thread2;
+        thread1.reset();
+        thread2.reset();
 
         bool success = true;
         debug("Results:\n");
@@ -114,14 +114,14 @@ public:
     {
         assert (sched::cpus.size() >= 4);
 
-        sched::thread * thread1 = new sched::thread([&] { thread_push(0); },
-            sched::thread::attr().pin(sched::cpus[0]));
-        sched::thread * thread2 = new sched::thread([&] { thread_push(1); },
-            sched::thread::attr().pin(sched::cpus[1]));
-        sched::thread * thread3 = new sched::thread([&] { thread_push(2); },
-            sched::thread::attr().pin(sched::cpus[2]));
-        sched::thread * thread4 = new sched::thread([&] { thread_pop(3); },
-            sched::thread::attr().pin(sched::cpus[3]));
+        std::unique_ptr<sched::thread> thread1(new sched::thread([&] { thread_push(0); },
+            sched::thread::attr().pin(sched::cpus[0])));
+        std::unique_ptr<sched::thread> thread2(new sched::thread([&] { thread_push(1); },
+            sched::thread::attr().pin(sched::cpus[1])));
+        std::unique_ptr<sched::thread> thread3(new sched::thread([&] { thread_push(2); },
+            sched::thread::attr().pin(sched::cpus[2])));
+        std::unique_ptr<sched::thread> thread4(new sched::thread([&] { thread_pop(3); },
+            sched::thread::attr().pin(sched::cpus[3])));
 
         thread1->start();
         thread2->start();
@@ -133,10 +133,10 @@ public:
         thread3->join();
         thread4->join();
 
-        delete thread1;
-        delete thread2;
-        delete thread3;
-        delete thread4;
+        thread1.reset();
+        thread2.reset();
+        thread3.reset();
+        thread4.reset();
 
         bool success = true;
         debug("Results:\n");
diff --git a/tests/misc-loadbalance.cc b/tests/misc-loadbalance.cc
index e8f3a9a..61a40fa 100644
--- a/tests/misc-loadbalance.cc
+++ b/tests/misc-loadbalance.cc
@@ -114,19 +114,19 @@ void concurrent_loops_priority(int looplen, double secs)
 {
    std::cout << "\nRunning 3 concurrent loops, one with 0.5 priority and twice the length. Expecting x1.\n";
     auto start = std::chrono::system_clock::now();
-    std::vector<sched::thread*> threads;
+    std::vector<std::unique_ptr<sched::thread>> threads;
     for (int i = 0; i < 3; i++) {
         auto t = new sched::thread([=]() {
             double d = loop(looplen / (i == 0 ? 1 : 2));
            std::cout << "thread " << i << ": " << d << " [x" << (d/secs) << "]\n";
         });
         t->set_priority(i == 0 ? 0.5 : 1.0);
-        threads.push_back(t);
+        threads.emplace_back(t);
         t->start();
     }
-    for (sched::thread *t : threads) {
+    for (auto& t : threads) {
         t->join();
-        delete t;
+        t.reset();
     }
     auto end = std::chrono::system_clock::now();
     std::chrono::duration<double> sec = end - start;
diff --git a/tests/misc-mutex.cc b/tests/misc-mutex.cc
index 3b9f70c..3796b81 100644
--- a/tests/misc-mutex.cc
+++ b/tests/misc-mutex.cc
@@ -135,11 +135,11 @@ static void test(int N, long len, bool pinned, threadfunc<T> f)
     assert (!pinned || (unsigned int)N <= sched::cpus.size());
     long shared=0;
     T m;
-    sched::thread *threads[N];
+    std::unique_ptr<sched::thread> threads[N];
     for(int i = 0; i < N; i++) {
-        threads[i]= new sched::thread([i, len, &m, &shared, f] {
+        threads[i].reset(new sched::thread([i, len, &m, &shared, f] {
             f(i, &m, len, &shared);
-        }, pinned ? sched::thread::attr().pin(sched::cpus[i]) : sched::thread::attr());
+        }, pinned ? sched::thread::attr().pin(sched::cpus[i]) : sched::thread::attr()));
     }
     auto t1 = clock::get()->time();
     for(int i = 0; i < N; i++) {
@@ -147,7 +147,7 @@ static void test(int N, long len, bool pinned, threadfunc<T> f)
     }
     for(int i = 0; i < N; i++){
         threads[i]->join();
-        delete threads[i];
+        threads[i].reset();
     }
     auto t2 = clock::get()->time();
     printf("\n");
diff --git a/tests/misc-sockets.cc b/tests/misc-sockets.cc
index 3a8c0ef..caf3b79 100644
--- a/tests/misc-sockets.cc
+++ b/tests/misc-sockets.cc
@@ -137,13 +137,13 @@ public:
         dbg_d("POLL Test - Begin");
         memset(fds, 0, sizeof(fds));
 
-        sched::thread* t1 = new sched::thread([&] {
+        std::unique_ptr<sched::thread> t1(new sched::thread([&] {
             poller_result = poller();
-        });
+        }));
 
-        sched::thread* t2 = new sched::thread([&] {
+        std::unique_ptr<sched::thread> t2(new sched::thread([&] {
             connector_result = connector();
-        });
+        }));
 
         t1->start();
         sleep(1);
@@ -151,8 +151,8 @@ public:
 
         t1->join();
         t2->join();
-        delete(t1);
-        delete(t2);
+        t1.reset();
+        t2.reset();
 
         dbg_d("POLL Test - End");
         return (poller_result + poller_result);
@@ -262,21 +262,21 @@ public:
 
         dbg_d("Simple UDP test - Begin");
 
-        sched::thread* t1 = new sched::thread([&] {
+        std::unique_ptr<sched::thread> t1(new sched::thread([&] {
             udp_server_result = udp_server();
-        });
+        }));
 
-        sched::thread* t2 = new sched::thread([&] {
+        std::unique_ptr<sched::thread> t2(new sched::thread([&] {
             udp_client_result = udp_client();
-        });
+        }));
 
         t1->start();
         t2->start();
 
         t1->join();
         t2->join();
-        delete(t1);
-        delete(t2);
+        t1.reset();
+        t2.reset();
 
         dbg_d("Simple UDP test - End");
 
diff --git a/tests/misc-wake.cc b/tests/misc-wake.cc
index 0c5fdf0..1974e7f 100644
--- a/tests/misc-wake.cc
+++ b/tests/misc-wake.cc
@@ -36,6 +36,7 @@ int main(int argc, char **argv)
     assert(pages[2] != MAP_FAILED);
    pages[3] = mmap(NULL, npages*4096, PROT_NONE, MAP_ANONYMOUS | MAP_SHARED | MAP_POPULATE, 0, 0);
     assert(pages[3] != MAP_FAILED);
+#if 0
     // double-buffering - two page regions out of the above four hold the
     // current two threads, and two are mprotect()ed to catch access to the
     // previously deleted threads.
@@ -93,6 +94,7 @@ int main(int argc, char **argv)
         delete t2;
         delete t1;
     }
+#endif
 
     debug("wakeup idiom succeeded\n");
     return 0;
diff --git a/tests/tst-af-local.cc b/tests/tst-af-local.cc
index f65b2d9..7b042b2 100644
--- a/tests/tst-af-local.cc
+++ b/tests/tst-af-local.cc
@@ -36,13 +36,13 @@ int main(int ac, char** av)
     memcpy(msg, "snafu", 5);
     memset(reply, 0, 5);
     int r2;
-    sched::thread t1([&] {
+    std::unique_ptr<sched::thread> t1(new sched::thread([&] {
         r2 = read(s[1], reply, 5);
-    });
-    t1.start();
+    }));
+    t1->start();
     sleep(1);
     r = write(s[0], msg, 5);
-    t1.join();
+    t1->join();
     report(r2 == 5 && memcmp(msg, reply, 5) == 0, "read before write");
 
     memcpy(msg, "fooba", 5);
@@ -60,17 +60,17 @@ int main(int ac, char** av)
 
     memcpy(msg, "smeg!", 5);
     memset(reply, 0, 5);
-    sched::thread t2([&] {
+    std::unique_ptr<sched::thread> t2(new sched::thread([&] {
         poller.revents = 0;
         r2 = poll(&poller, 1, 5000);
         report(r2 == 1 && poller.revents == POLLIN, "waiting poll");
         r2 = read(s[1], reply, 5);
        report(r2 == 5 && memcmp(msg, reply, 5) == 0, "read after waiting poll");
-    });
-    t2.start();
+    }));
+    t2->start();
     sleep(1);
     r = write(s[0], msg, 5);
-    t2.join();
+    t2->join();
     report(r == 5, "write to polling socket");
 
     close(s[1]);
diff --git a/tests/tst-bsd-tcp1-zrcv.cc b/tests/tst-bsd-tcp1-zrcv.cc
index 1aad7b2..649773f 100644
--- a/tests/tst-bsd-tcp1-zrcv.cc
+++ b/tests/tst-bsd-tcp1-zrcv.cc
@@ -223,12 +223,12 @@ public:
         _client_result = 0;
         _server_result = 0;
 
-        sched::thread *srv = new sched::thread([&] {
+        std::unique_ptr<sched::thread> srv(new sched::thread([&] {
             _server_result = tcp_server();
-        });
-        sched::thread *cli = new sched::thread([&] {
+        }));
+        std::unique_ptr<sched::thread> cli(new sched::thread([&] {
             _client_result = tcp_client();
-        });
+        }));
 
         srv->start();
         sleep(1);
@@ -236,8 +236,8 @@ public:
 
         cli->join();
         srv->join();
-        delete(cli);
-        delete(srv);
+        cli.reset();
+        srv.reset();
 
         return (_client_result + _server_result);
     }
diff --git a/tests/tst-bsd-tcp1-zsnd.cc b/tests/tst-bsd-tcp1-zsnd.cc
index 30648a8..8890e9f 100644
--- a/tests/tst-bsd-tcp1-zsnd.cc
+++ b/tests/tst-bsd-tcp1-zsnd.cc
@@ -225,12 +225,12 @@ public:
         _client_result = 0;
         _server_result = 0;
 
-        sched::thread *srv = new sched::thread([&] {
+        std::unique_ptr<sched::thread> srv(new sched::thread([&] {
             _server_result = tcp_server();
-        });
-        sched::thread *cli = new sched::thread([&] {
+        }));
+        std::unique_ptr<sched::thread> cli(new sched::thread([&] {
             _client_result = tcp_client();
-        });
+        }));
 
         srv->start();
         sleep(1);
@@ -238,8 +238,8 @@ public:
 
         cli->join();
         srv->join();
-        delete(cli);
-        delete(srv);
+        cli.reset();
+        srv.reset();
 
         return (_client_result + _server_result);
     }
diff --git a/tests/tst-bsd-tcp1-zsndrcv.cc b/tests/tst-bsd-tcp1-zsndrcv.cc
index bcbc34e..c487537 100644
--- a/tests/tst-bsd-tcp1-zsndrcv.cc
+++ b/tests/tst-bsd-tcp1-zsndrcv.cc
@@ -250,12 +250,12 @@ public:
         _client_result = 0;
         _server_result = 0;
 
-        sched::thread *srv = new sched::thread([&] {
+        std::unique_ptr<sched::thread> srv(new sched::thread([&] {
             _server_result = tcp_server();
-        });
-        sched::thread *cli = new sched::thread([&] {
+        }));
+        std::unique_ptr<sched::thread> cli(new sched::thread([&] {
             _client_result = tcp_client();
-        });
+        }));
 
         srv->start();
         sleep(1);
@@ -263,8 +263,8 @@ public:
 
         cli->join();
         srv->join();
-        delete(cli);
-        delete(srv);
+        cli.reset();
+        srv.reset();
 
         return (_client_result + _server_result);
     }
diff --git a/tests/tst-bsd-tcp1.cc b/tests/tst-bsd-tcp1.cc
index 8e01c1c..e7b5736 100644
--- a/tests/tst-bsd-tcp1.cc
+++ b/tests/tst-bsd-tcp1.cc
@@ -183,12 +183,12 @@ public:
         _client_result = 0;
         _server_result = 0;
 
-        sched::thread *srv = new sched::thread([&] {
+        std::unique_ptr<sched::thread> srv(new sched::thread([&] {
             _server_result = tcp_server();
-        });
-        sched::thread *cli = new sched::thread([&] {
+        }));
+        std::unique_ptr<sched::thread> cli(new sched::thread([&] {
             _client_result = tcp_client();
-        });
+        }));
 
         srv->start();
         sleep(1);
@@ -196,8 +196,8 @@ public:
 
         cli->join();
         srv->join();
-        delete(cli);
-        delete(srv);
+        cli.reset();
+        srv.reset();
 
         return (_client_result + _server_result);
     }
diff --git a/tests/tst-condvar.cc b/tests/tst-condvar.cc
index 02f2a27..b62ab6c 100644
--- a/tests/tst-condvar.cc
+++ b/tests/tst-condvar.cc
@@ -36,27 +36,27 @@ int main(int argc, char **argv)
     debug("test2\n");
     mutex m;
     int res=0;
-    sched::thread *t1 = new sched::thread([&cond,&m,&res] {
+    std::unique_ptr<sched::thread> t1(new sched::thread([&cond,&m,&res] {
         m.lock();
         while (res==0) {
             cond.wait(&m);
         }
         res = 2;
         m.unlock();
-    });
-    sched::thread *t2 = new sched::thread([&cond,&m,&res] {
+    }));
+    std::unique_ptr<sched::thread> t2(new sched::thread([&cond,&m,&res] {
         m.lock();
         res = 1;
         m.unlock();
         cond.wake_one();
-    });
+    }));
 
     t1->start();
     t2->start();
     t1->join();
     t2->join();
-    delete t1;
-    delete t2;
+    t1.reset();
+    t2.reset();
     assert_idle(&cond);
 
     // A test where N threads wait on a single condition
@@ -67,9 +67,9 @@ int main(int argc, char **argv)
     debug("test3, with %d threads\n", N);
     int ready = 0;
     condvar done = CONDVAR_INITIALIZER;
-    sched::thread *threads[N];
+    std::unique_ptr<sched::thread> threads[N];
     for (int i = 0; i < N; i++) {
-            threads[i] = new sched::thread([&cond, &m, &ready, &done] {
+            threads[i].reset(new sched::thread([&cond, &m, &ready, &done] {
                 m.lock();
                 ready++;
                 //debug("ready %d\n",ready);
@@ -95,9 +95,9 @@ int main(int argc, char **argv)
                 //debug("woken2 %d\n",ready);
                 m.unlock();
                 done.wake_one();
-            });
+            }));
     }
-    t1 = new sched::thread([&cond, &m, &ready, &done] {
+    t1.reset(new sched::thread([&cond, &m, &ready, &done] {
         m.lock();
         while (ready < N) {
             done.wait(&m);
@@ -127,17 +127,17 @@ int main(int argc, char **argv)
             done.wait(&m);
         }
         m.unlock();
-    });
+    }));
 
     t1->start();
     for (int i=0; i<N; i++) {
         threads[i]->start();
     }
     t1->join();
-    delete t1;
+    t1.reset();
     for (int i=0; i<N; i++) {
         threads[i]->join();
-        delete threads[i];
+        threads[i].reset();
     }
     assert_idle(&cond);
 
@@ -157,10 +157,10 @@ int main(int argc, char **argv)
     iterations = 100000000;
     unsigned int nthreads = 2;
     assert(sched::cpus.size() >= nthreads);
-    sched::thread *threads2[nthreads];
+    std::unique_ptr<sched::thread> threads2[nthreads];
     std::atomic<u64> time(0);
     for(unsigned int i = 0; i < nthreads; i++) {
-        threads2[i]= new sched::thread([iterations, &cv, &time] {
+        threads2[i].reset(new sched::thread([iterations, &cv, &time] {
             auto start = std::chrono::high_resolution_clock::now();
             for (int j = 0; j < iterations; j++) {
                 cv.wake_all();
@@ -168,12 +168,12 @@ int main(int argc, char **argv)
             auto end = std::chrono::high_resolution_clock::now();
             time += std::chrono::duration_cast<std::chrono::nanoseconds>
                         (end-start).count();
-        }, sched::thread::attr().pin(sched::cpus[i]));
+        }, sched::thread::attr().pin(sched::cpus[i])));
         threads2[i]->start();
     }
     for(unsigned int i = 0; i < nthreads; i++) {
         threads2[i]->join();
-        delete threads2[i];
+        threads2[i].reset();
     }
     debug ("%d ns\n", time/iterations/nthreads);
 
diff --git a/tests/tst-mmap.cc b/tests/tst-mmap.cc
index 56a3882..cd243cc 100644
--- a/tests/tst-mmap.cc
+++ b/tests/tst-mmap.cc
@@ -119,31 +119,34 @@ int main(int argc, char **argv)
     std::atomic_int state(0);
     buf = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, 
-1, 0);
     assert(buf != MAP_FAILED);
-    sched::thread *t2 = nullptr;
-    sched::thread *t1 = new sched::thread([&]{
+    std::unique_ptr<sched::thread> t2;
+    std::unique_ptr<sched::thread> t1(new sched::thread([&]{
         *(char*)buf = 0; // write will succeed
         // wait for the t2 object to exist (not necessarily run)
-        sched::thread::wait_until([&] { return t2 != nullptr; });
+        sched::thread::wait_until([&] { return bool(t2); });
         // tell t2 to mprotect the buffer
         state.store(1);
+        assert(t2);
         t2->wake();
         // wait for t2 to mprotect
         sched::thread::wait_until([&] { return state.load() == 2; });
         // and see that it properly TLB flushed also t1's CPU
         assert(!try_write(buf));
 
-    }, sched::thread::attr().pin(sched::cpus[0]));
-    t2 = new sched::thread([&]{
+    }, sched::thread::attr().pin(sched::cpus[0])));
+    t2.reset(new sched::thread([&]{
         // wait for t1 to asks us to mprotect
         sched::thread::wait_until([&] { return state.load() == 1; });
         mprotect(buf, 4096, PROT_READ);
         state.store(2);
         t1->wake();
-    }, sched::thread::attr().pin(sched::cpus[1]));
+    }, sched::thread::attr().pin(sched::cpus[1])));
     t1->start();
     t2->start();
-    delete t1; // also join()s the thread
-    delete t2;
+    t1->join();
+    t2->join();
+    t1.reset();
+    t2.reset();
     munmap(buf, 4096);
 #endif
 
diff --git a/tests/tst-pin.cc b/tests/tst-pin.cc
index 222b3cc..82cc8f6 100644
--- a/tests/tst-pin.cc
+++ b/tests/tst-pin.cc
@@ -58,21 +58,21 @@ int main(int argc, char **argv)
     mutex m;
     condvar c;
     bool t_pinned = false;
-    sched::thread t([&] {
+    std::unique_ptr<sched::thread> t(new sched::thread([&] {
         WITH_LOCK (m) {
             while(!t_pinned) {
                 c.wait(m);
             }
         }
         expect(sched::cpu::current(), sched::cpus[1]);
-    });
-    t.start();
-    sched::thread::pin(&t, sched::cpus[1]);
+    }));
+    t->start();
+    sched::thread::pin(&*t, sched::cpus[1]);
     WITH_LOCK (m) {
         t_pinned = true;
         c.wake_all();
     }
-    t.join();
+    t->join();
 
 
     // Similar test for pinning a different thread, but in this
@@ -81,7 +81,7 @@ int main(int argc, char **argv)
     mutex m2;
     condvar c2;
     bool t2_pinned = false;
-    sched::thread t2([&] {
+    std::unique_ptr<sched::thread> t2(new sched::thread([&] {
         // Run in a tight loop to try to catch the case of trying to pin
         // a runnable thread
         auto now = osv::clock::uptime::now();
@@ -97,15 +97,15 @@ int main(int argc, char **argv)
             }
         }
         expect(sched::cpu::current(), sched::cpus[1]);
-    });
-    t2.start();
+    }));
+    t2->start();
     sched::thread::sleep(std::chrono::milliseconds(1));
-    sched::thread::pin(&t2, sched::cpus[1]);
+    sched::thread::pin(&*t2, sched::cpus[1]);
     WITH_LOCK (m2) {
         t2_pinned = true;
         c2.wake_all();
     }
-    t2.join();
+    t2->join();
 
 
     // Another similar test for pinning a different thread. In this
@@ -116,7 +116,7 @@ int main(int argc, char **argv)
     mutex m3;
     condvar c3;
     bool t3_pinned = false;
-    sched::thread t3([&] {
+    std::unique_ptr<sched::thread> t3(new sched::thread([&] {
         auto now = osv::clock::uptime::now();
        while (osv::clock::uptime::now() < now + std::chrono::milliseconds(1000)) {
         }
@@ -126,15 +126,15 @@ int main(int argc, char **argv)
             }
         }
         expect(sched::cpu::current(), sched::cpus[1]);
-    });
-    t3.start();
+    }));
+    t3->start();
     sched::thread::sleep(std::chrono::milliseconds(1));
-    sched::thread::pin(&t3, sched::cpus[1]);
+    sched::thread::pin(&*t3, sched::cpus[1]);
     WITH_LOCK (m3) {
         t3_pinned = true;
         c3.wake_all();
     }
-    t3.join();
+    t3->join();
 
     // Test a bug we had of pinning a thread which was already on the
     // given CPU. In that case, it stays there, but needs to become
@@ -142,25 +142,25 @@ int main(int argc, char **argv)
     mutex m4;
     condvar c4;
     bool t4_pinned = false;
-    sched::thread t4([&] {
+    std::unique_ptr<sched::thread> t4(new sched::thread([&] {
         WITH_LOCK (m4) {
             while(!t4_pinned) {
                 c4.wait(m4);
             }
         }
-    });
-    t4.start();
+    }));
+    t4->start();
     sched::thread::sleep(std::chrono::milliseconds(1));
-    expect(t4.migratable(), true);
-    auto ccpu = t4.tcpu();
-    sched::thread::pin(&t4, ccpu);
-    expect(t4.migratable(), false);
-    expect(t4.tcpu(), ccpu);
+    expect(t4->migratable(), true);
+    auto ccpu = t4->tcpu();
+    sched::thread::pin(&*t4, ccpu);
+    expect(t4->migratable(), false);
+    expect(t4->tcpu(), ccpu);
     WITH_LOCK (m4) {
         t4_pinned = true;
         c4.wake_all();
     }
-    t4.join();
+    t4->join();
 
     // Test pinning a thread several times in succession. It should work and
     // not hang (the second call shouldn't wait until the first pinning is
@@ -168,28 +168,28 @@ int main(int argc, char **argv)
     mutex m5;
     condvar c5;
     bool t5_pinned = false;
-    sched::thread t5([&] {
+    std::unique_ptr<sched::thread> t5(new sched::thread([&] {
         WITH_LOCK (m5) {
             while(!t5_pinned) {
                 c5.wait(m5);
             }
             expect(sched::cpu::current(), sched::cpus[1]);
         }
-    });
-    t5.start();
+    }));
+    t5->start();
     sched::thread::sleep(std::chrono::milliseconds(1));
-    sched::thread::pin(&t5, sched::cpus[0]);
-    sched::thread::pin(&t5, sched::cpus[1]);
-    sched::thread::pin(&t5, sched::cpus[1]);
-    sched::thread::pin(&t5, sched::cpus[0]);
-    sched::thread::pin(&t5, sched::cpus[1]);
-    expect(t5.migratable(), false);
-    expect(t5.tcpu(), sched::cpus[1]);
+    sched::thread::pin(&*t5, sched::cpus[0]);
+    sched::thread::pin(&*t5, sched::cpus[1]);
+    sched::thread::pin(&*t5, sched::cpus[1]);
+    sched::thread::pin(&*t5, sched::cpus[0]);
+    sched::thread::pin(&*t5, sched::cpus[1]);
+    expect(t5->migratable(), false);
+    expect(t5->tcpu(), sched::cpus[1]);
     WITH_LOCK (m5) {
         t5_pinned = true;
         c5.wake_all();
     }
-    t5.join();
+    t5->join();
 
 
 
diff --git a/tests/tst-preempt.cc b/tests/tst-preempt.cc
index cdb40bf..494c093 100644
--- a/tests/tst-preempt.cc
+++ b/tests/tst-preempt.cc
@@ -24,11 +24,11 @@ int main(int argc, char **argv)
     // section, leading to non-zero preempt_counter initialization.
     assert(sched::get_preempt_counter() == 0);
 
-    auto t1 = new sched::thread([] {
+    auto t1 = std::unique_ptr<sched::thread>(new sched::thread([] {
             assert(sched::get_preempt_counter() == 0);
-    });
+    }));
     t1->start();
-    delete t1;
+    t1.reset();
 
     debug("Preemption tests succeeded\n");
 
diff --git a/tests/tst-threadcomplete.cc b/tests/tst-threadcomplete.cc
index 409d426..508c608 100644
--- a/tests/tst-threadcomplete.cc
+++ b/tests/tst-threadcomplete.cc
@@ -100,8 +100,8 @@ void do_heap_test(bool quick)
     // hoping that one of the iterations will end up in the tricky order that
     // triggers the race.
     for (int j = 0; j < 100; ++j) {
-        sched::thread *t2 = nullptr;
-        sched::thread *t1 = new sched::thread([&]{
+        std::unique_ptr<sched::thread> t2;
+        std::unique_ptr<sched::thread> t1(new sched::thread([&]{
             // wait for the t2 object to exist (not necessarily run)
             sched::thread::wait_until([&] { return t2 != nullptr; });
             if (quick) {
@@ -109,16 +109,18 @@ void do_heap_test(bool quick)
             }
             using namespace osv::clock::literals;
             sched::thread::sleep(10_ms);
-        }, sched::thread::attr().pin(sched::cpus[0]));
+        }, sched::thread::attr().pin(sched::cpus[0])));
 
-        t2 = new sched::thread([&]{
+        t2.reset(new sched::thread([&]{
             t1->wake();
-        }, sched::thread::attr().pin(sched::cpus[1]));
+        }, sched::thread::attr().pin(sched::cpus[1])));
 
         t1->start();
         t2->start();
-        delete t2;
-        delete t1;
+        t1->join();
+        t2->join();
+        t2.reset();
+        t1.reset();
     }
 #endif
 }
diff --git a/tests/tst-vfs.cc b/tests/tst-vfs.cc
index fa8d842..37f9b68 100644
--- a/tests/tst-vfs.cc
+++ b/tests/tst-vfs.cc
@@ -78,21 +78,21 @@ BOOST_AUTO_TEST_CASE(test_concurrent_file_operations)
 
     constexpr int N = 10;
     std::cerr << "test1, with " << N << " threads\n";
-    sched::thread *threads[N];
+    std::unique_ptr<sched::thread> threads[N];
     for (int i = 0; i < N; i++) {
-            threads[i] = new sched::thread([] {
+            threads[i].reset(new sched::thread([] {
                     struct stat buf;
                     for (int j = 0; j < 1000; j++) {
                         BOOST_REQUIRE(stat("/tests/tst-vfs.so", &buf)==0);
                     }
-            });
+            }));
     }
     for (int i=0; i<N; i++) {
         threads[i]->start();
     }
     for (int i=0; i<N; i++) {
         threads[i]->join();
-        delete threads[i];
+        threads[i].reset();
     }
 
     std::cerr << "concurrent file operation tests succeeded\n";
diff --git a/tests/tst-wait-for.cc b/tests/tst-wait-for.cc
index f64bd86..6a69ede 100644
--- a/tests/tst-wait-for.cc
+++ b/tests/tst-wait-for.cc
@@ -48,15 +48,15 @@ BOOST_AUTO_TEST_CASE(test_waitqueue_1)
     mutex mtx;
     int counter = 0;
     WITH_LOCK(mtx) {
-        sched::thread waker([&] {
+        std::unique_ptr<sched::thread> waker(new sched::thread([&] {
             WITH_LOCK(mtx) {
                 ++counter;
                 wq.wake_one(mtx);
             }
-        });
-        waker.start();
+        }));
+        waker->start();
         wq.wait(mtx);
-        waker.join();
+        waker->join();
     }
     BOOST_REQUIRE(counter == 1);
 }
@@ -69,14 +69,14 @@ BOOST_AUTO_TEST_CASE(test_waitqueue_2)
     sched::timer tmr(*sched::thread::current());
     WITH_LOCK(mtx) {
         tmr.set(500_ms);
-        sched::thread waker([&] {
+        std::unique_ptr<sched::thread> waker(new sched::thread([&] {
             sched::thread::sleep(1_s);
             WITH_LOCK(mtx) {
                 ++counter;
                 wq.wake_one(mtx);
             }
-        });
-        waker.start();
+        }));
+        waker->start();
         // timer wait
         sched::thread::wait_for(mtx, wq, tmr);
         BOOST_REQUIRE(tmr.expired());
@@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE(test_waitqueue_2)
         tmr.cancel();
         sched::thread::wait_for(mtx, wq, tmr);
         BOOST_REQUIRE(counter == 1);
-        waker.join();
+        waker->join();
     }
 }
 
@@ -97,19 +97,19 @@ BOOST_AUTO_TEST_CASE(test_wait_for_predicate)
 {
     std::atomic<bool> x = { false };
     auto sleeper = sched::thread::current();
-    sched::thread waker([&] {
+    std::unique_ptr<sched::thread> waker(new sched::thread([&] {
         sched::thread::sleep(1_s);
         x.store(true);
         sleeper->wake();
-    });
-    waker.start();
+    }));
+    waker->start();
     // send some spurious wakeups for fun
-    sched::thread false_waker([&] {
+    std::unique_ptr<sched::thread> false_waker(new sched::thread([&] {
         for (auto i = 0; i < 100; ++i) {
             sched::thread::sleep(100_ms);
             sleeper->wake();
         }
-    });
+    }));
     sched::timer tmr(*sched::thread::current());
     tmr.set(500_ms);
     sched::thread::wait_for(tmr, [&] { return x.load(); });
@@ -119,8 +119,8 @@ BOOST_AUTO_TEST_CASE(test_wait_for_predicate)
     sched::thread::wait_for(tmr, [&] { return x.load(); });
     BOOST_REQUIRE(!tmr.expired());
     BOOST_REQUIRE(x.load());
-    waker.join();
-    false_waker.join();
+    waker->join();
+    false_waker->join();
 }
 
 OSV_ELF_MLOCK_OBJECT();
diff --git a/tests/tst-yield.cc b/tests/tst-yield.cc
index ec37fd3..0504010 100644
--- a/tests/tst-yield.cc
+++ b/tests/tst-yield.cc
@@ -15,18 +15,18 @@ int main(int argc, char **argv)
     // Test that concurrent yield()s do not crash.
     constexpr int N = 10;
     constexpr int J = 10000000;
-    sched::thread *ts[N];
+    std::unique_ptr<sched::thread> ts[N];
     for (auto &t : ts) {
-            t = new sched::thread([] {
+            t.reset(new sched::thread([] {
                 for (int j = 0; j < J; j++) {
                     sched::thread::yield();
                 }
-            });
+            }));
             t->start();
     }
-    for (auto t : ts) {
+    for (auto &t : ts) {
         t->join();
-        delete t;
+        t.reset();
     }
 
     debug("yield test successful\n");
-- 
2.7.4

-- 
You received this message because you are subscribed to the Google Groups "OSv 
Development" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to osv-dev+unsubscr...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.

Reply via email to