# HG changeset patch
# User Andrew Wheat <andrew.wheat@bbc.co.uk>
# Date 1543845093 0
#      Mon Dec 03 13:51:33 2018 +0000
# Node ID 46fb72016dd94678f9eebabe6e3381d3839f7995
# Parent  51656beb99961285c620eedcbcf232bad0bdcdfe
Add tests for enabling proxy_cache_lock without serving stale.

When the upstream sets a 'Cache-Control' header, the response is
cached for the specified duration.

The second test covers the case where proxy_cache_valid, rather than an
upstream 'Cache-Control' header, is used to define the cache duration.

diff -r 51656beb9996 -r 46fb72016dd9 proxy_cache_lock_stale.t
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/proxy_cache_lock_stale.t	Mon Dec 03 13:51:33 2018 +0000
@@ -0,0 +1,171 @@
+#!/usr/bin/perl
+
+# (C) Andrew Wheat
+
+# Tests for http proxy cache lock.
+
+###############################################################################
+
+use warnings;
+use strict;
+
+use Test::More;
+
+BEGIN { use FindBin; chdir($FindBin::Bin); }
+
+use lib 'lib';
+use Test::Nginx qw/ :DEFAULT http_end /;
+
+###############################################################################
+
+select STDERR; $| = 1;
+select STDOUT; $| = 1;
+
+my $t = Test::Nginx->new()->has(qw/http proxy cache/)->plan(12)
+	->write_file_expand('nginx.conf', <<'EOF');
+
+%%TEST_GLOBALS%%
+
+daemon off;
+
+events {
+}
+
+http {
+    %%TEST_GLOBALS_HTTP%%
+
+    proxy_cache_path   %%TESTDIR%%/cache  levels=1:2
+                       keys_zone=NAME:1m;
+
+    server {
+        listen       127.0.0.1:8080;
+        server_name  localhost;
+
+        location /stale_off {
+            proxy_pass    http://127.0.0.1:8081;
+            proxy_cache   NAME;
+
+            proxy_cache_lock on;
+            proxy_cache_use_stale off;
+            add_header X-Cache-Status $upstream_cache_status;
+        }
+
+        location /stale_on {
+            proxy_pass    http://127.0.0.1:8081;
+            proxy_cache   NAME;
+
+            proxy_cache_lock on;
+            proxy_cache_use_stale updating;
+            add_header X-Cache-Status $upstream_cache_status;
+        }
+    }
+}
+
+EOF
+
+$t->run_daemon(\&http_daemon);
+$t->run();
+$t->waitforsocket('127.0.0.1:' . port(8081));
+
+###############################################################################
+
+# cache misses trigger the cache lock with proxy_cache_use_stale on and off.
+# issue parallel requests to both locations to warm the cache.
+
+my @stale_on_sockets;
+for my $i (1 .. 2) {
+	$stale_on_sockets[$i] = http_get('/stale_on', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_on_sockets[$i]), qr/request 1/, 'first set of use_stale on ' . $i);
+}
+like(http_get('/stale_on'), qr/request 1/, 'first request cached');
+
+my @stale_off_sockets;
+for my $i (1 .. 2) {
+	$stale_off_sockets[$i] = http_get('/stale_off', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_off_sockets[$i]), qr/request 1/, 'first set of use_stale off ' . $i);
+}
+like(http_get('/stale_off'), qr/request 1/, 'first request cached');
+
+# wait for the cache to expire; cache-control/max-age is set to 2s, so wait double that.
+sleep 4;
+
+# make the requests again, with 'proxy_cache_use_stale' set to updating, we should get the
+# cached version for two requests, and a new response from the upstream server.
+for my $i (1 .. 3) {
+	$stale_on_sockets[$i] = http_get('/stale_on', start => 1);
+}
+my %counts;
+for my $i (1 .. 3) {
+	(my $req) = (http_end($stale_on_sockets[$i]) =~ qr/(request \d+)/);
+	$counts{$req} = 0 if not $counts{$req};
+	$counts{$req}++;
+}
+
+is($counts{'request 1'}, 2, 'two of the requests come back with the cached response and a x-cache-status of UPDATING');
+is($counts{'request 2'}, 1, 'one of the requests come back as the new response from the upstream with a x-cache-status of EXPIRED');
+
+like(http_get('/stale_on'), qr/request 2/, 'subsequent request for use_stale on is cached');
+
+# With 'proxy_cache_use_stale' set to 'off' we should see the same behaviour as the first set
+# of requests; all the requests get blocked while a single request is made to the upstream
+# server. The response from that single request should be the response for all of the
+# downstream requests.
+for my $i (1 .. 2) {
+	$stale_off_sockets[$i] = http_get('/stale_off', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_off_sockets[$i]), qr/request 2/, 'second set of use_stale off ' . $i);
+}
+like(http_get('/stale_off'), qr/request 2/, 'subsequent requests for /stale_off are cached once again.');
+
+
+
+###############################################################################
+
+sub http_daemon {
+	my $server = IO::Socket::INET->new(
+		Proto => 'tcp',
+		LocalAddr => '127.0.0.1:' . port(8081),
+		Listen => 5,
+		Reuse => 1
+	)
+		or die "Can't create listening socket: $!\n";
+
+	my %counts;
+
+	while (my $client = $server->accept()) {
+		$client->autoflush(1);
+
+		my $uri = '';
+		while (<$client>) {
+
+			$uri = $1 if /GET (.*) HTTP/;
+			if (not $counts{$uri}) {
+				$counts{$uri} = 0
+			}
+
+			last if /^\x0d?\x0a?$/;
+		}
+
+		next unless $uri;
+
+		select(undef, undef, undef, 1.1);
+
+		$counts{$uri}++;
+
+		print $client <<"EOF";
+HTTP/1.1 200 OK
+Cache-Control: max-age=2
+Connection: close
+Request-Number: $counts{$uri}
+
+request $counts{$uri}
+EOF
+	}
+}
+
+###############################################################################
diff -r 51656beb9996 -r 46fb72016dd9 proxy_cache_lock_stale_valid.t
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/proxy_cache_lock_stale_valid.t	Mon Dec 03 13:51:33 2018 +0000
@@ -0,0 +1,171 @@
+#!/usr/bin/perl
+
+# (C) Andrew Wheat
+
+# Tests for http proxy cache lock with proxy_cache_valid defining expiry.
+
+###############################################################################
+
+use warnings;
+use strict;
+
+use Test::More;
+
+BEGIN { use FindBin; chdir($FindBin::Bin); }
+
+use lib 'lib';
+use Test::Nginx qw/ :DEFAULT http_end /;
+
+###############################################################################
+
+select STDERR; $| = 1;
+select STDOUT; $| = 1;
+
+my $t = Test::Nginx->new()->has(qw/http proxy cache/)->plan(12)
+	->write_file_expand('nginx.conf', <<'EOF');
+
+%%TEST_GLOBALS%%
+
+daemon off;
+
+events {
+}
+
+http {
+    %%TEST_GLOBALS_HTTP%%
+
+    proxy_cache_path   %%TESTDIR%%/cache  levels=1:2
+                       keys_zone=NAME:1m;
+    proxy_cache_valid  2s;
+
+    server {
+        listen       127.0.0.1:8080;
+        server_name  localhost;
+
+        location /stale_off {
+            proxy_pass    http://127.0.0.1:8081;
+            proxy_cache   NAME;
+
+            proxy_cache_lock on;
+            proxy_cache_use_stale off;
+            add_header X-Cache-Status $upstream_cache_status;
+        }
+
+        location /stale_on {
+            proxy_pass    http://127.0.0.1:8081;
+            proxy_cache   NAME;
+
+            proxy_cache_lock on;
+            proxy_cache_use_stale updating;
+            add_header X-Cache-Status $upstream_cache_status;
+        }
+    }
+}
+
+EOF
+
+$t->run_daemon(\&http_daemon);
+$t->run();
+$t->waitforsocket('127.0.0.1:' . port(8081));
+
+###############################################################################
+
+# cache misses trigger the cache lock with proxy_cache_use_stale on and off.
+# issue parallel requests to both locations to warm the cache.
+
+my @stale_on_sockets;
+for my $i (1 .. 2) {
+	$stale_on_sockets[$i] = http_get('/stale_on', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_on_sockets[$i]), qr/request 1/, 'first set of use_stale on ' . $i);
+}
+like(http_get('/stale_on'), qr/request 1/, 'first request cached');
+
+my @stale_off_sockets;
+for my $i (1 .. 2) {
+	$stale_off_sockets[$i] = http_get('/stale_off', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_off_sockets[$i]), qr/request 1/, 'first set of use_stale off ' . $i);
+}
+like(http_get('/stale_off'), qr/request 1/, 'first request cached');
+
+# wait for the cache to expire; proxy_cache_valid is set to 2s, so wait double that.
+sleep 4;
+
+# make the requests again, with 'proxy_cache_use_stale' set to updating, we should get the
+# cached version for two requests, and a new response from the upstream server.
+for my $i (1 .. 3) {
+	$stale_on_sockets[$i] = http_get('/stale_on', start => 1);
+}
+my %counts;
+for my $i (1 .. 3) {
+	(my $req) = (http_end($stale_on_sockets[$i]) =~ qr/(request \d+)/);
+	$counts{$req} = 0 if not $counts{$req};
+	$counts{$req}++;
+}
+
+is($counts{'request 1'}, 2, 'two of the requests come back with the cached response and a x-cache-status of UPDATING');
+is($counts{'request 2'}, 1, 'one of the requests come back as the new response from the upstream with a x-cache-status of EXPIRED');
+
+like(http_get('/stale_on'), qr/request 2/, 'subsequent request for use_stale on is cached');
+
+# With 'proxy_cache_use_stale' set to 'off' we should see the same behaviour as the first set
+# of requests; all the requests get blocked while a single request is made to the upstream
+# server. The response from that single request should be the response for all of the
+# downstream requests.
+for my $i (1 .. 2) {
+	$stale_off_sockets[$i] = http_get('/stale_off', start => 1);
+}
+for my $i (1 .. 2) {
+	like(http_end($stale_off_sockets[$i]), qr/request 2/, 'second set of use_stale off ' . $i);
+}
+like(http_get('/stale_off'), qr/request 2/, 'subsequent requests for /stale_off are cached once again.');
+
+
+
+###############################################################################
+
+sub http_daemon {
+	my $server = IO::Socket::INET->new(
+		Proto => 'tcp',
+		LocalAddr => '127.0.0.1:' . port(8081),
+		Listen => 5,
+		Reuse => 1
+	)
+		or die "Can't create listening socket: $!\n";
+
+	my %counts;
+
+	while (my $client = $server->accept()) {
+		$client->autoflush(1);
+
+		my $uri = '';
+		while (<$client>) {
+
+			$uri = $1 if /GET (.*) HTTP/;
+			if (not $counts{$uri}) {
+				$counts{$uri} = 0
+			}
+
+			last if /^\x0d?\x0a?$/;
+		}
+
+		next unless $uri;
+
+		select(undef, undef, undef, 1.1);
+
+		$counts{$uri}++;
+
+		print $client <<"EOF";
+HTTP/1.1 200 OK
+Connection: close
+Request-Number: $counts{$uri}
+
+request $counts{$uri}
+EOF
+	}
+}
+
+###############################################################################
