Defer writing the segment list to the silo housekeeping thread. This
allows us to avoid waiting for disk I/O while holding the silo mutex.
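
For illustration, a minimal sketch of the idea (simplified stand-in
types and names, not the actual silo code): callers merely set a flag
while holding the mutex, and the housekeeping thread performs the
slow write-and-fsync with the mutex dropped:

#include <pthread.h>
#include <stdio.h>

#define SC_SYNC		(1u << 0)	/* stand-in for SMP_SC_SYNC */

struct silo {
	pthread_mutex_t	mtx;
	unsigned	flags;
};

/* Called with sc->mtx held: cheap, only requests a sync. */
void
request_sync(struct silo *sc)
{
	sc->flags |= SC_SYNC;
}

/* One iteration of the housekeeping loop. */
void
housekeeping_once(struct silo *sc)
{
	pthread_mutex_lock(&sc->mtx);
	if (sc->flags & SC_SYNC) {
		sc->flags &= ~SC_SYNC;
		/* Drop the mutex around the disk I/O. */
		pthread_mutex_unlock(&sc->mtx);
		printf("write + fsync segment list\n");
		pthread_mutex_lock(&sc->mtx);
	}
	pthread_mutex_unlock(&sc->mtx);
}

int
main(void)
{
	struct silo sc = { PTHREAD_MUTEX_INITIALIZER, 0 };

	pthread_mutex_lock(&sc.mtx);
	request_sync(&sc);
	pthread_mutex_unlock(&sc.mtx);
	housekeeping_once(&sc);
	return (0);
}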
---
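
A background note for reviewers on the durability argument (a hedged
sketch, not part of the patch to be applied; write_copy() and
save_seglist() are hypothetical helpers): writing copy 1 and fsync()ing
it before touching copy 2 means a crash at any point leaves at least
one self-consistent copy on disk, provided the kernel honours fsync():

#include <fcntl.h>
#include <sys/types.h>
#include <unistd.h>

/* Write one copy at the given offset and make it durable. */
static int
write_copy(int fd, off_t off, const void *buf, size_t len)
{
	if (pwrite(fd, buf, len, off) != (ssize_t)len)
		return (-1);
	return (fsync(fd));
}

/* Update both copies strictly in order: copy 2 is only touched
 * once copy 1 is known to be on stable storage. */
static int
save_seglist(int fd, off_t off1, off_t off2, const void *buf, size_t len)
{
	if (write_copy(fd, off1, buf, len))
		return (-1);
	return (write_copy(fd, off2, buf, len));
}

int
main(void)
{
	static const char seglist[] = "segment list";
	int fd = open("silo.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return (1);
	return (save_seglist(fd, 0, 4096, seglist, sizeof seglist) ? 1 : 0);
}
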
 bin/varnishd/storage/storage_persistent.c      |   67 +++++++++++++++++++++++-
 bin/varnishd/storage/storage_persistent.h      |    3 +-
 bin/varnishd/storage/storage_persistent_silo.c |   66 ++++++-----------------
 3 files changed, 83 insertions(+), 53 deletions(-)

diff --git a/bin/varnishd/storage/storage_persistent.c b/bin/varnishd/storage/storage_persistent.c
index 914de31..0575c11 100644
--- a/bin/varnishd/storage/storage_persistent.c
+++ b/bin/varnishd/storage/storage_persistent.c
@@ -342,6 +342,64 @@ smp_open_segs(struct smp_sc *sc, struct smp_signspace *spc)
 }
 
 /*--------------------------------------------------------------------
+ * Write the segment list back to the silo.
+ *
+ * We write the first copy, sync it synchronously, then write the
+ * second copy and sync it synchronously.
+ *
+ * Provided the kernel doesn't lie, that means we will always have
+ * at least one valid copy in the silo.
+ */
+
+static void
+smp_save_segs(struct smp_sc *sc)
+{
+       struct smp_segptr *ss;
+       struct smp_seg *sg, *sg2;
+       uint64_t length;
+
+       Lck_AssertHeld(&sc->mtx);
+       sc->flags &= ~SMP_SC_SYNC;
+
+       /*
+        * Remove empty segments from the front of the list
+        * before we write the segments to disk.
+        */
+       VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
+               if (sg->nobj > 0)
+                       break;
+               if (sg == sc->cur_seg)
+                       continue;
+               VTAILQ_REMOVE(&sc->segments, sg, list);
+               LRU_Free(sg->lru);
+               FREE_OBJ(sg);
+       }
+
+       Lck_Unlock(&sc->mtx);
+       AZ(smp_chk_signspace(&sc->seg1)); /* Page in */
+       smp_reset_signspace(&sc->seg1);
+       Lck_Lock(&sc->mtx);
+
+       /* First, fill in seg1 while holding the lock */
+       ss = SIGNSPACE_FRONT(&sc->seg1);
+       length = 0;
+       VTAILQ_FOREACH(sg, &sc->segments, list) {
+               assert(sg->p.offset < sc->mediasize);
+               assert(sg->p.offset + sg->p.length <= sc->mediasize);
+               *ss = sg->p;
+               ss++;
+               length += sizeof *ss;
+       }
+
+       Lck_Unlock(&sc->mtx);
+       smp_append_signspace(&sc->seg1, length);
+       smp_sync_sign(&sc->seg1.ctx); /* Sync without lock */
+       /* Copy seg1 to seg2 */
+       smp_copy_signspace(&sc->seg2, &sc->seg1);
+       smp_sync_sign(&sc->seg2.ctx);
+       Lck_Lock(&sc->mtx);
+}
+/*--------------------------------------------------------------------
  * Silo worker thread
  */
 
@@ -368,8 +426,7 @@ smp_thread(struct worker *wrk, void *priv)
        /* Housekeeping loop */
        Lck_Lock(&sc->mtx);
        while (!(sc->flags & SMP_SC_STOP)) {
-               sg = VTAILQ_FIRST(&sc->segments);
-               if (sg != NULL && sg != sc->cur_seg && sg->nobj == 0)
+               if (sc->flags & SMP_SC_SYNC)
                        smp_save_segs(sc);
 
                Lck_Unlock(&sc->mtx);
@@ -733,6 +790,12 @@ debug_persistent(struct cli *cli, const char * const * av, void *priv)
        if (!strcmp(av[3], "sync")) {
                if (sc->cur_seg != NULL)
                        smp_close_seg(sc, sc->cur_seg);
+               smp_sync_segs(sc);
+               while (sc->flags & SMP_SC_SYNC) {
+                       Lck_Unlock(&sc->mtx);
+                       VTIM_sleep(0.1);
+                       Lck_Lock(&sc->mtx);
+               }
                smp_new_seg(sc);
        } else if (!strcmp(av[3], "dump")) {
                debug_report_silo(cli, sc, 1);
diff --git a/bin/varnishd/storage/storage_persistent.h b/bin/varnishd/storage/storage_persistent.h
index c2cbac3..0c4e3a3 100644
--- a/bin/varnishd/storage/storage_persistent.h
+++ b/bin/varnishd/storage/storage_persistent.h
@@ -108,6 +108,7 @@ struct smp_sc {
        unsigned                flags;
 #define SMP_SC_LOADED          (1 << 0)
 #define SMP_SC_STOP            (1 << 1)
+#define SMP_SC_SYNC            (1 << 2)
 
        const struct stevedore  *stevedore;
        int                     fd;
@@ -194,7 +195,7 @@ void smp_load_seg(struct worker *, const struct smp_sc *sc, struct smp_seg *sg);
 void smp_new_seg(struct smp_sc *sc);
 void smp_close_seg(struct smp_sc *sc, struct smp_seg *sg);
 void smp_init_oc(struct objcore *oc, struct smp_seg *sg, unsigned objidx);
-void smp_save_segs(struct smp_sc *sc);
+void smp_sync_segs(struct smp_sc *sc);
 
 /* storage_persistent_subr.c */
 
diff --git a/bin/varnishd/storage/storage_persistent_silo.c b/bin/varnishd/storage/storage_persistent_silo.c
index 7ca79d6..fb384ee 100644
--- a/bin/varnishd/storage/storage_persistent_silo.c
+++ b/bin/varnishd/storage/storage_persistent_silo.c
@@ -48,59 +48,14 @@
 #include "storage/storage_persistent.h"
 
 /*--------------------------------------------------------------------
- * Write the segmentlist back to the silo.
- *
- * We write the first copy, sync it synchronously, then write the
- * second copy and sync it synchronously.
- *
- * Provided the kernel doesn't lie, that means we will always have
- * at least one valid copy on in the silo.
+ * Signal smp_thread() to sync the segment list to disk
  */
 
-static void
-smp_save_seg(const struct smp_sc *sc, struct smp_signspace *spc)
-{
-       struct smp_segptr *ss;
-       struct smp_seg *sg;
-       uint64_t length;
-
-       Lck_AssertHeld(&sc->mtx);
-       smp_reset_signspace(spc);
-       ss = SIGNSPACE_DATA(spc);
-       length = 0;
-       VTAILQ_FOREACH(sg, &sc->segments, list) {
-               assert(sg->p.offset < sc->mediasize);
-               assert(sg->p.offset + sg->p.length <= sc->mediasize);
-               *ss = sg->p;
-               ss++;
-               length += sizeof *ss;
-       }
-       smp_append_signspace(spc, length);
-       smp_sync_sign(&spc->ctx);
-}
-
 void
-smp_save_segs(struct smp_sc *sc)
+smp_sync_segs(struct smp_sc *sc)
 {
-       struct smp_seg *sg, *sg2;
-
        Lck_AssertHeld(&sc->mtx);
-
-       /*
-        * Remove empty segments from the front of the list
-        * before we write the segments to disk.
-        */
-       VTAILQ_FOREACH_SAFE(sg, &sc->segments, list, sg2) {
-               if (sg->nobj > 0)
-                       break;
-               if (sg == sc->cur_seg)
-                       continue;
-               VTAILQ_REMOVE(&sc->segments, sg, list);
-               LRU_Free(sg->lru);
-               FREE_OBJ(sg);
-       }
-       smp_save_seg(sc, &sc->seg1);
-       smp_save_seg(sc, &sc->seg2);
+       sc->flags |= SMP_SC_SYNC;
 }
 
 /*--------------------------------------------------------------------
@@ -177,6 +132,12 @@ smp_new_seg(struct smp_sc *sc)
        AZ(sc->cur_seg);
        Lck_AssertHeld(&sc->mtx);
 
+       if (sc->flags & SMP_SC_STOP) {
+               /* Housekeeping thread is stopping; don't allow new
+                * segments, as there is no one around to persist them */
+               return;
+       }
+
        /* XXX: find where it goes in silo */
 
        memset(&tmpsg, 0, sizeof tmpsg);
@@ -303,8 +264,8 @@ smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
        smp_reset_sign(sg->ctx);
        smp_sync_sign(sg->ctx);
 
-       /* Save segment list */
-       smp_save_segs(sc);
+       /* Request sync of segment list */
+       smp_sync_segs(sc);
        sc->free_offset = smp_segend(sg);
 }
 
@@ -524,6 +485,11 @@ smp_oc_freeobj(struct objcore *oc)
        sg->nobj--;
        sg->nfixed--;
 
+       if (sg->nobj == 0 && sg == VTAILQ_FIRST(&sg->sc->segments)) {
+               /* Request a sync to prune the now-empty segment at the front */
+               sg->sc->flags |= SMP_SC_SYNC;
+       }
+
        Lck_Unlock(&sg->sc->mtx);
 }
 
-- 
1.7.9.5

