Hi,

Below is a summary of counters removed and renamed in master wrt 4.0 and
IMO what we need to bring back for 4.1. Unless specifically mentioned they
are relative to MAIN.

Removed:

backend_toolate - IIUC cannot happen anymore
client_req_413 - unused in 4.0
fetch_close
fetch_oldhttp - used to indicate backends < 1.1
VBE.<backend exp>.vcls - no longer needed

Renamed:

fetch_none (was fetch_zero)
sc_pipe_overflow (was sess_pipe_overflow)
rx_body (was client_req_411)

Broken:

backend_conn is never incremented (#1725)

To be in parity with 4.0 we should address #1725.
Not sure if there is any value in fetch_oldhttp or fetch_close.

As for counters we might want to add, several people have mentioned the
need for per-backend errors. There are 3 places in cache_backend.c that
have the "XXX: Per backend stats ?" comment that we can start with and what
the attached patch covers.
That said, this might be post 4.1 material.

Haven't looked at #1725 yet.

f.-
From 8f9945a2eff3e502b9f73fd273340922824ae024 Mon Sep 17 00:00:00 2001
From: "Federico G. Schwindt" <[email protected]>
Date: Mon, 13 Jul 2015 19:30:09 +0100
Subject: [PATCH] Add some error counters to VBE

Add a new set of tests for fields and use it to test most VBE counters.
---
 bin/varnishd/cache/cache_backend.c |  12 +++--
 bin/varnishtest/tests/README       |   1 +
 bin/varnishtest/tests/f00000.vtc   | 100 +++++++++++++++++++++++++++++++++++++
 include/tbl/vsc_fields.h           |  12 +++++
 4 files changed, 122 insertions(+), 3 deletions(-)
 create mode 100644 bin/varnishtest/tests/f00000.vtc

diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index 0591ec7..d9ce95a 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -72,13 +72,17 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
 	AN(bp->vsc);
 
 	if (!VBE_Healthy(bp, NULL)) {
-		// XXX: per backend stats ?
+		Lck_Lock(&bp->mtx);
+		bp->vsc->unhealthy++;
+		Lck_Unlock(&bp->mtx);
 		VSC_C_main->backend_unhealthy++;
 		return (NULL);
 	}
 
 	if (bp->max_connections > 0 && bp->n_conn >= bp->max_connections) {
-		// XXX: per backend stats ?
+		Lck_Lock(&bp->mtx);
+		bp->vsc->busy++;
+		Lck_Unlock(&bp->mtx);
 		VSC_C_main->backend_busy++;
 		return (NULL);
 	}
@@ -93,7 +97,9 @@ vbe_dir_getfd(struct worker *wrk, struct backend *bp, struct busyobj *bo)
 	FIND_TMO(connect_timeout, tmod, bo, bp);
 	vc = VBT_Get(bp->tcp_pool, tmod, bp, wrk);
 	if (vc == NULL) {
-		// XXX: Per backend stats ?
+		Lck_Lock(&bp->mtx);
+		bp->vsc->fail++;
+		Lck_Unlock(&bp->mtx);
 		VSC_C_main->backend_fail++;
 		bo->htc = NULL;
 		return (NULL);
diff --git a/bin/varnishtest/tests/README b/bin/varnishtest/tests/README
index 85b309b..5f9d931 100644
--- a/bin/varnishtest/tests/README
+++ b/bin/varnishtest/tests/README
@@ -18,6 +18,7 @@ Naming scheme
 	id ~ [c] --> Complex functionality tests
 	id ~ [d] --> Director VMOD tests
 	id ~ [e] --> ESI tests
+	id ~ [f] --> Field tests
 	id ~ [g] --> GZIP tests
 	id ~ [j] --> JAIL tests
 	id ~ [l] --> VSL tests
diff --git a/bin/varnishtest/tests/f00000.vtc b/bin/varnishtest/tests/f00000.vtc
new file mode 100644
index 0000000..c58d128
--- /dev/null
+++ b/bin/varnishtest/tests/f00000.vtc
@@ -0,0 +1,100 @@
+varnishtest "VBE.*.* fields"
+
+server s1 -repeat 10 {
+	rxreq
+	txresp -body "foo"
+} -start
+
+server s2 {
+	rxreq
+	sema r1 sync 2
+	sema r2 sync 2
+	txresp
+} -start
+
+varnish v1 -vcl {
+	backend s1 {
+		.host = "${s1_addr}"; .port = "${s1_port}";
+		.probe = { }
+	}
+	backend s2 {
+		.host = "${s2_addr}"; .port = "${s2_port}";
+		.max_connections = 1;
+	}
+	backend s3 {
+		.host = "${bad_ip}"; .port = "9090";
+		.connect_timeout = 0.5s;
+	}
+	sub vcl_recv {
+		if (req.url ~ "1") {
+			set req.backend_hint = s1;
+		} elif (req.url ~ "2") {
+			set req.backend_hint = s2;
+		} else {
+			set req.backend_hint = s3;
+		}
+		return (pass);
+	}
+} -start
+
+client c1 {
+	txreq -url /1
+	rxresp
+} -run
+
+varnish v1 -expect VBE.vcl1.s1.happy > 0
+varnish v1 -expect VBE.vcl1.s1.busy == 0
+varnish v1 -expect VBE.vcl1.s1.fail == 0
+varnish v1 -expect VBE.vcl1.s1.unhealthy == 0
+varnish v1 -expect VBE.vcl1.s1.conn == 1
+varnish v1 -expect VBE.vcl1.s1.req == 1
+varnish v1 -expect VBE.vcl1.s1.bereq_hdrbytes > 0
+varnish v1 -expect VBE.vcl1.s1.bereq_bodybytes > 0
+varnish v1 -expect VBE.vcl1.s1.beresp_hdrbytes > 0
+varnish v1 -expect VBE.vcl1.s1.beresp_bodybytes > 0
+
+varnish v1 -cliok "backend.set_health s1 sick"
+
+client c1 {
+	txreq -url /1
+	rxresp
+} -run
+
+varnish v1 -expect VBE.vcl1.s1.unhealthy == 1
+
+client c1 {
+	txreq -url /2
+	rxresp
+} -start
+
+client c2 {
+	sema r1 sync 2
+	txreq -url /2
+	rxresp
+} -run
+
+sema r2 sync 2
+client c1 -wait
+
+varnish v1 -expect VBE.vcl1.s2.conn == 1
+varnish v1 -expect VBE.vcl1.s2.req == 1
+varnish v1 -expect VBE.vcl1.s2.busy == 1
+
+client c1 {
+	txreq -url /3
+	rxresp
+} -run
+
+varnish v1 -expect VBE.vcl1.s3.happy == 0
+varnish v1 -expect VBE.vcl1.s3.busy == 0
+varnish v1 -expect VBE.vcl1.s3.fail == 1
+varnish v1 -expect VBE.vcl1.s3.unhealthy == 0
+varnish v1 -expect VBE.vcl1.s3.conn == 0
+varnish v1 -expect VBE.vcl1.s3.req == 0
+
+varnish v1 -expect client_req == 5
+varnish v1 -expect s_fetch == 5
+varnish v1 -expect backend_unhealthy == 1
+varnish v1 -expect backend_fail == 1
+varnish v1 -expect backend_req == 2
+varnish v1 -expect backend_busy == 1
diff --git a/include/tbl/vsc_fields.h b/include/tbl/vsc_fields.h
index 7d45f5c..f5b5234 100644
--- a/include/tbl/vsc_fields.h
+++ b/include/tbl/vsc_fields.h
@@ -221,6 +221,18 @@ VSC_F(req,			uint64_t, 0, 'c', 'i', info,
     "Backend requests sent",
 	""
 )
+VSC_F(unhealthy,		uint64_t, 0, 'c', 'i', info,
+    "Backend conn. not attempted",
+	""
+)
+VSC_F(busy,			uint64_t, 0, 'c', 'i', info,
+    "Backend conn. too many",
+	""
+)
+VSC_F(fail,			uint64_t, 0, 'c', 'i', info,
+    "Backend conn. failures",
+	""
+)
 
 #endif
 
-- 
2.1.4

_______________________________________________
varnish-dev mailing list
[email protected]
https://www.varnish-cache.org/lists/mailman/listinfo/varnish-dev

Reply via email to