Here is an interesting patch. I don't think this is quite what we should commit, but
it was entertaining to see how well this exercised the chunk filter. This patch does
a non-blocking read each and every time and only falls back to a blocking read when
we get APR_EAGAIN on a non-blocking read. We get a wildly varying number of bytes
read on each non-blocking read :-). The 'correct' fix is to do a non-blocking read
once, then make subsequent reads blocking (a rough sketch of that idea follows the
patch). Will commit something tomorrow.

Bill

cvs diff -u protocol.c
Index: protocol.c
===================================================================
RCS file: /home/cvs/httpd-2.0/server/protocol.c,v
retrieving revision 1.28
diff -u -r1.28 protocol.c
--- protocol.c 2001/06/27 20:18:09 1.28
+++ protocol.c 2001/07/04 02:58:13
@@ -868,8 +868,23 @@
             send_it = 1;
         }
         if (e->length == -1) { /* if length unknown */
-            rv = apr_bucket_read(e, &ignored, &length, APR_BLOCK_READ);
+            rv = apr_bucket_read(e, &ignored, &length, APR_NONBLOCK_READ);
+            if (rv == APR_EAGAIN) {
+                /* If we can chunk the output, flush the filter chain to
+                 * the network then do a blocking read.
+                 */
+                if (r->proto_num >= HTTP_VERSION(1,1)) {
+                    apr_bucket_brigade *split;
+                    split = apr_brigade_split(b, e);
+                    rv = ap_fflush(f, b);
+                    if (rv != APR_SUCCESS)
+                        return rv;
+                    b = split;
+                }
+                rv = apr_bucket_read(e, &ignored, &length, APR_BLOCK_READ);
+            }
             if (rv != APR_SUCCESS) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "ap_content_length_filter: apr_bucket_read() failed");
                 return rv;
             }
         }
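
For illustration only, here is a minimal sketch of the 'correct' fix described above:
probe the first unknown-length bucket with a non-blocking read, then make every later
read blocking. The function name read_unknown_lengths and the probed flag are made up
for this sketch, and it leaves out the flush-before-blocking-read step from the patch
above, which a real filter would still want when the response can be chunked.

#include "apr_buckets.h"

/* Sketch only, not the committed fix: try a non-blocking read once per
 * call, then make every subsequent read blocking.
 */
static apr_status_t read_unknown_lengths(apr_bucket_brigade *b)
{
    apr_bucket *e;
    int probed = 0;   /* have we already done our one non-blocking read? */

    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e)) {
        const char *ignored;
        apr_size_t length;
        apr_status_t rv;

        if (e->length != -1) {
            continue;             /* length already known; nothing to read */
        }

        if (!probed) {
            /* First unknown-length bucket: see what is available right now. */
            rv = apr_bucket_read(e, &ignored, &length, APR_NONBLOCK_READ);
            probed = 1;
            if (rv == APR_SUCCESS) {
                continue;         /* got the length without blocking */
            }
            if (rv != APR_EAGAIN) {
                return rv;        /* real error */
            }
            /* APR_EAGAIN: fall through and read this bucket blocking. */
        }

        rv = apr_bucket_read(e, &ignored, &length, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    return APR_SUCCESS;
}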