---
 bin/varnishd/storage/storage_persistent.c      |    8 ++++--
 bin/varnishd/storage/storage_persistent.h      |    1 +
 bin/varnishd/storage/storage_persistent_silo.c |   36 +++++++++++++++++++++---
 3 files changed, 38 insertions(+), 7 deletions(-)

diff --git a/bin/varnishd/storage/storage_persistent.c b/bin/varnishd/storage/storage_persistent.c
index 4306e4d..a41d52a 100644
--- a/bin/varnishd/storage/storage_persistent.c
+++ b/bin/varnishd/storage/storage_persistent.c
@@ -680,6 +680,8 @@ smp_allocx(struct stevedore *st, size_t min_size, size_t max_size,
                        (*so)->ptr = 0;;
                        sg->objs = *so;
                        *idx = ++sg->p.lobjlist;
+                       /* Add ref early so segment will stick around */
+                       sg->nobj++;
                }
                (void)smp_spaceleft(sc, sg);    /* for the assert */
        }
@@ -740,20 +742,20 @@ smp_allocobj(struct stevedore *stv, struct busyobj *bo, struct objcore **ocp,
        oc = o->objcore;
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        oc->flags |= OC_F_LRUDONTMOVE;
+       smp_init_oc(oc, sg, objidx);
 
        Lck_Lock(&sc->mtx);
        sg->nfixed++;
-       sg->nobj++;
+       assert(sg->nobj > 0);   /* Our ref added by smp_allocx() */
 
        /* We have to do this somewhere, might as well be here... */
+       so = smp_find_so(sg, objidx); /* Might have changed during unlock */
        assert(sizeof so->hash == DIGEST_LEN);
        memcpy(so->hash, oc->objhead->digest, DIGEST_LEN);
        so->ttl = EXP_Grace(NULL, o);
        so->ptr = (uint8_t*)o - sc->base;
        so->ban = BAN_Time(oc->ban);
 
-       smp_init_oc(oc, sg, objidx);
-
        Lck_Unlock(&sc->mtx);
        return (o);
 }
diff --git a/bin/varnishd/storage/storage_persistent.h b/bin/varnishd/storage/storage_persistent.h
index 9e0642d..0496a95 100644
--- a/bin/varnishd/storage/storage_persistent.h
+++ b/bin/varnishd/storage/storage_persistent.h
@@ -204,6 +204,7 @@ void smp_load_seg(struct worker *, const struct smp_sc *sc, struct smp_seg *sg);
 void smp_new_seg(struct smp_sc *sc);
 void smp_close_seg(struct smp_sc *sc, struct smp_seg *sg);
 void smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx);
+struct smp_object * smp_find_so(const struct smp_seg *sg, unsigned priv2);
 void smp_sync_segs(struct smp_sc *sc);
 
 /* storage_persistent_subr.c */
diff --git a/bin/varnishd/storage/storage_persistent_silo.c b/bin/varnishd/storage/storage_persistent_silo.c
index e432302..07b6c9a 100644
--- a/bin/varnishd/storage/storage_persistent_silo.c
+++ b/bin/varnishd/storage/storage_persistent_silo.c
@@ -43,6 +43,7 @@
 #include "hash/hash_slinger.h"
 #include "vsha256.h"
 #include "vtim.h"
+#include "vmb.h"
 
 #include "persistent.h"
 #include "storage/storage_persistent.h"
@@ -267,6 +268,7 @@ smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
        sg->flags |= SMP_SEG_SYNCSIGNS;
 
        /* Remove the new flag and request sync of segment list */
+       VMB();                  /* See comments in smp_oc_getobj() */
        sg->flags &= ~SMP_SEG_NEW;
        smp_sync_segs(sc);
 }
@@ -298,9 +300,11 @@ smp_check_reserve(struct smp_sc *sc)
 }
 
 /*---------------------------------------------------------------------
+ * Find the struct smp_object in the segment's object list by
+ * its objindex (oc->priv2)
  */
 
-static struct smp_object *
+struct smp_object *
 smp_find_so(const struct smp_seg *sg, unsigned priv2)
 {
        struct smp_object *so;
@@ -401,16 +405,33 @@ smp_oc_getobj(struct dstat *ds, struct objcore *oc)
        struct storage *st;
        uint64_t l;
        int bad;
+       int has_lock;
 
+       CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        /* Some calls are direct, but they should match anyway */
        assert(oc->methods->getobj == smp_oc_getobj);
 
-       CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        if (ds == NULL)
                AZ(oc->flags & OC_F_NEEDFIXUP);
 
        CAST_OBJ_NOTNULL(sg, oc->priv, SMP_SEG_MAGIC);
+       if (sg->flags & SMP_SEG_NEW) {
+               /* Segment is new and can be closed and compacted at
+                * any time. We need to keep a lock during access to
+                * the objlist. */
+               Lck_Lock(&sg->sc->mtx);
+               has_lock = 1;
+       } else {
+               /* Since the NEW flag is only removed after the
+                * compacting and a memory barrier, not seeing the
+                * flag means any compacting has finished and its
+                * changes are visible to us. Should be safe to
+                * proceed without locks. */
+               has_lock = 0;
+       }
        so = smp_find_so(sg, oc->priv2);
+       AN(so);
+       AN(so->ptr);
 
        o = (void*)(sg->sc->base + so->ptr);
        /*
@@ -426,11 +447,17 @@ smp_oc_getobj(struct dstat *ds, struct objcore *oc)
         * If this flag is not set, it will not be, and the lock is not
         * needed to test it.
         */
-       if (!(oc->flags & OC_F_NEEDFIXUP))
+       if (!(oc->flags & OC_F_NEEDFIXUP)) {
+               if (has_lock)
+                       Lck_Unlock(&sg->sc->mtx);
                return (o);
+       }
 
        AN(ds);
-       Lck_Lock(&sg->sc->mtx);
+       if (!has_lock) {
+               Lck_Lock(&sg->sc->mtx);
+               has_lock = 1;
+       }
        /* Check again, we might have raced. */
        if (oc->flags & OC_F_NEEDFIXUP) {
                /* We trust caller to have a refcnt for us */
@@ -457,6 +484,7 @@ smp_oc_getobj(struct dstat *ds, struct objcore *oc)
                ds->n_vampireobject--;
                oc->flags &= ~OC_F_NEEDFIXUP;
        }
+       AN(has_lock);
        Lck_Unlock(&sg->sc->mtx);
        EXP_Rearm(o);
        return (o);
-- 
1.7.9.5
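
For reviewers who want the allocation change in isolation, here is a minimal,
self-contained sketch (plain pthreads C, not Varnish code; seg, slot,
seg_alloc_slot and seg_fill_slot are made-up names) of the pattern the
smp_allocx()/smp_allocobj() hunks introduce: take the segment ref while the
lock is still held, then re-find the object slot after relocking, since the
object list may have moved in between.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct slot {
	unsigned	id;
	/* ... hash, ttl, object pointer ... */
};

struct seg {
	pthread_mutex_t	lock;
	unsigned	nobj;		/* refs held by objects in this segment */
	unsigned	nslot;
	struct slot	*slots;		/* object list; may move while unlocked */
};

/* Allocator, cf. smp_allocx(): the ref (nobj++) is taken while the lock
 * is still held, so the segment sticks around even though the caller
 * only finishes the object after the lock has been dropped. */
static unsigned
seg_alloc_slot(struct seg *sg)
{
	struct slot *ns;
	unsigned idx;

	pthread_mutex_lock(&sg->lock);
	idx = sg->nslot++;
	ns = realloc(sg->slots, sg->nslot * sizeof *ns);
	assert(ns != NULL);
	sg->slots = ns;
	sg->slots[idx].id = idx;
	sg->nobj++;			/* ref added before unlocking */
	pthread_mutex_unlock(&sg->lock);
	return (idx);
}

/* Caller, cf. smp_allocobj(): relock, assert the ref taken above, and
 * look the slot up again by index, because the list may have been moved
 * in the meantime (here by realloc(), in the patch by compaction). */
static struct slot *
seg_fill_slot(struct seg *sg, unsigned idx)
{
	struct slot *sl;

	pthread_mutex_lock(&sg->lock);
	assert(sg->nobj > 0);		/* our ref from seg_alloc_slot() */
	sl = &sg->slots[idx];		/* re-find, cf. smp_find_so() */
	/* ... fill in hash, ttl, object pointer ... */
	pthread_mutex_unlock(&sg->lock);
	return (sl);
}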

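Likewise, a sketch of the publish pattern that the VMB() in smp_close_seg()
and the SMP_SEG_NEW test in smp_oc_getobj() rely on. C11 atomics and a
seq_cst fence stand in for Varnish's VMB() and Lck_* API here, and all names
are illustrative, not the real ones.

#include <pthread.h>
#include <stdatomic.h>

#define SEG_NEW		(1U << 0)	/* segment still open for writing */

struct seg {
	_Atomic unsigned	flags;
	pthread_mutex_t		lock;	/* guards objlist while SEG_NEW is set */
	/* ... object list ... */
};

/* Writer, cf. smp_close_seg(): finish compacting the object list, issue
 * a full barrier (VMB() in the patch), and only then clear the flag, so
 * the compacted list is visible before the flag change is. */
static void
seg_close(struct seg *sg)
{
	/* ... compact the object list ... */
	atomic_thread_fence(memory_order_seq_cst);	/* stands in for VMB() */
	atomic_fetch_and(&sg->flags, ~SEG_NEW);
}

/* Reader, cf. smp_oc_getobj(): a segment that is still NEW may be
 * compacted under us, so hold the lock; once the flag is gone the list
 * is stable and can be read without it. */
static void
seg_lookup(struct seg *sg)
{
	int has_lock = 0;

	if (atomic_load(&sg->flags) & SEG_NEW) {
		pthread_mutex_lock(&sg->lock);
		has_lock = 1;
	}
	/* ... smp_find_so()-style lookup in the object list ... */
	if (has_lock)
		pthread_mutex_unlock(&sg->lock);
}

The point is simply that the flag is cleared only after the barrier, so a
reader that no longer sees SMP_SEG_NEW also sees the compacted object list
and can skip the lock.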
