Hello community,

here is the log from the commit of package libserf for openSUSE:Factory checked in at 2014-10-23 14:20:57
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/libserf (Old)
 and      /work/SRC/openSUSE:Factory/.libserf.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "libserf"

Changes:
--------
--- /work/SRC/openSUSE:Factory/libserf/libserf.changes 2014-08-13 08:49:17.000000000 +0200
+++ /work/SRC/openSUSE:Factory/.libserf.new/libserf.changes 2014-10-23 14:21:16.000000000 +0200
@@ -1,0 +2,11 @@
+Wed Oct 22 18:24:36 UTC 2014 - [email protected]
+
+- Serf 1.3.8
+  This release fixes a problem with handling very large gzip-
+  encoded HTTP responses and disables SSLv2 and SSLv3.
+  * CRC calculation error for gzipped http responses > 4GB.
+  * SSPI CredHandle not freed when APR pool is destroyed.
+  * Disable SSLv2 and SSLv3 as both are broken [boo#901968]
+- disable running tests due to memory leak in test fixture
+
+-------------------------------------------------------------------

Old:
----
  serf-1.3.7.tar.bz2

New:
----
  serf-1.3.8.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ libserf.spec ++++++
--- /var/tmp/diff_new_pack.OK44bQ/_old 2014-10-23 14:21:18.000000000 +0200
+++ /var/tmp/diff_new_pack.OK44bQ/_new 2014-10-23 14:21:18.000000000 +0200
@@ -23,7 +23,7 @@
 %define minor 3
 %define SHLIBVER %{major}.%{minor}.0
 Name:           libserf
-Version:        1.3.7
+Version:        1.3.8
 Release:        0
 Summary:        High-Performance Asynchronous HTTP Client Library
 License:        Apache-2.0
@@ -104,12 +104,16 @@
 rm -f "%{buildroot}%{_libdir}"/lib*.a

 %check
+# test failing due to memory leak
+# https://groups.google.com/d/msg/serf-dev/qoTK8BfRfrI/EJL7xj4PmT4J
+%if 0
%if 0%{?suse_version} > 1210
 scons \
     CFLAGS="%{optflags}" \
     check \
     %{?_smp_mflags}
 %endif
+%endif

 %post -n libserf-%{major}-%{major} -p /sbin/ldconfig

++++++ serf-1.3.7.tar.bz2 -> serf-1.3.8.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/CHANGES new/serf-1.3.8/CHANGES
--- old/serf-1.3.7/CHANGES 2014-08-11 17:17:02.000000000 +0200
+++ new/serf-1.3.8/CHANGES 2014-10-20 21:08:16.000000000 +0200
@@ -1,10 +1,18 @@
+Serf 1.3.8 [2014-10-20, from /tags/1.3.8, rxxxx]
+Fix issue #152: CRC calculation error for gzipped http responses > 4GB.
+Fix issue #153: SSPI CredHandle not freed when APR pool is destroyed.
+Fix issue #154: Disable SSLv2 and SSLv3 as both are broken.
+
+
 Serf 1.3.7 [2014-08-11, from /tags/1.3.7, r2411]
 Handle NUL bytes in fields of an X.509 certificate. (r2393, r2399)

+
 Serf 1.3.6 [2014-06-09, from /tags/1.3.6, r2372]
 Revert r2319 from serf 1.3.5: this change was making serf call handle_response
 multiple times in case of an error response, leading to unexpected behavior.

+
 Serf 1.3.5 [2014-04-27, from /tags/1.3.5, r2355]
 Fix issue #125: no reverse lookup during Negotiate authentication for proxies.
 Fix a crash caused by incorrect reuse of the ssltunnel CONNECT request (r2316)

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/auth/auth_spnego_sspi.c new/serf-1.3.8/auth/auth_spnego_sspi.c
--- old/serf-1.3.7/auth/auth_spnego_sspi.c 2014-02-04 20:41:14.000000000 +0100
+++ new/serf-1.3.8/auth/auth_spnego_sspi.c 2014-10-19 20:38:11.000000000 +0200
@@ -95,8 +95,8 @@
     }

     if (SecIsValidHandle(&ctx->sspi_credentials)) {
-        FreeCredentialsHandle(&ctx->sspi_context);
-        SecInvalidateHandle(&ctx->sspi_context);
+        FreeCredentialsHandle(&ctx->sspi_credentials);
+        SecInvalidateHandle(&ctx->sspi_credentials);
     }

     return APR_SUCCESS;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/buckets/deflate_buckets.c new/serf-1.3.8/buckets/deflate_buckets.c
--- old/serf-1.3.7/buckets/deflate_buckets.c 2011-06-24 04:03:57.000000000 +0200
+++ new/serf-1.3.8/buckets/deflate_buckets.c 2014-10-19 20:38:11.000000000 +0200
@@ -141,7 +141,6 @@
                                         const char **data, apr_size_t *len)
 {
     deflate_context_t *ctx = bucket->data;
-    unsigned long compCRC, compLen;
     apr_status_t status;
     const char *private_data;
     apr_size_t private_len;
@@ -186,17 +185,25 @@
             ctx->state++;
             break;
         case STATE_VERIFY:
+        {
+            unsigned long compCRC, compLen, actualLen;
+
             /* Do the checksum computation. */
             compCRC = getLong((unsigned char*)ctx->hdr_buffer);
             if (ctx->crc != compCRC) {
                 return SERF_ERROR_DECOMPRESSION_FAILED;
             }
             compLen = getLong((unsigned char*)ctx->hdr_buffer + 4);
-            if (ctx->zstream.total_out != compLen) {
+            /* The length in the trailer is modulo 2^32, so do the same for
+               the actual length. */
+            actualLen = ctx->zstream.total_out;
+            actualLen &= 0xFFFFFFFF;
+            if (actualLen != compLen) {
                 return SERF_ERROR_DECOMPRESSION_FAILED;
             }
             ctx->state++;
             break;
+        }
         case STATE_INIT:
             zRC = inflateInit2(&ctx->zstream, ctx->windowSize);
             if (zRC != Z_OK) {
@@ -264,10 +271,14 @@
             ctx->zstream.next_in = (unsigned char*)private_data;
             ctx->zstream.avail_in = private_len;
         }
-        zRC = Z_OK;
-        while (ctx->zstream.avail_in != 0) {
-            /* We're full, clear out our buffer, reset, and return. */
-            if (ctx->zstream.avail_out == 0) {
+
+        while (1) {
+
+            zRC = inflate(&ctx->zstream, Z_NO_FLUSH);
+
+            /* We're full or zlib requires more space. In either case, clear
+               out our buffer, reset, and return. */
+            if (zRC == Z_BUF_ERROR || ctx->zstream.avail_out == 0) {
                 serf_bucket_t *tmp;
                 ctx->zstream.next_out = ctx->buffer;
                 private_len = ctx->bufferSize - ctx->zstream.avail_out;
@@ -283,7 +294,6 @@
                 ctx->zstream.avail_out = ctx->bufferSize;
                 break;
             }
-            zRC = inflate(&ctx->zstream, Z_NO_FLUSH);

             if (zRC == Z_STREAM_END) {
                 serf_bucket_t *tmp;
@@ -330,9 +340,13 @@
                 break;
             }
+
+            /* Any other error? */
             if (zRC != Z_OK) {
                 return SERF_ERROR_DECOMPRESSION_FAILED;
             }
+
+            /* As long as zRC == Z_OK, just keep looping. */
         }
         /* Okay, we've inflated. Try to read. */
         status = serf_bucket_read(ctx->inflate_stream, requested, data,
@@ -340,8 +354,13 @@
         /* Hide EOF. */
         if (APR_STATUS_IS_EOF(status)) {
             status = ctx->stream_status;
-            /* If our stream is finished too, return SUCCESS so
-             * we'll iterate one more time.
+
+            /* If the inflation wasn't finished, return APR_SUCCESS. */
+            if (zRC != Z_STREAM_END)
+                return APR_SUCCESS;
+
+            /* If our stream is finished too and all data was inflated,
+             * return SUCCESS so we'll iterate one more time.
              */
             if (APR_STATUS_IS_EOF(status)) {
                 /* No more data to read from the stream, and everything
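The STATE_VERIFY hunk above is the core of the issue #152 fix: the ISIZE field of a gzip trailer stores the uncompressed length modulo 2^32, while zlib's total_out counter is an unsigned long (64 bits on typical LP64 platforms), so for responses larger than 4GB the two values could never match. Below is a minimal standalone sketch of the corrected comparison; get_32bit() and verify_gzip_trailer() are hypothetical stand-ins for serf's getLong() and the STATE_VERIFY logic, not part of serf's API:

    /* Hypothetical stand-in for serf's getLong(): read a little-endian
       32-bit value from buf. */
    static unsigned long get_32bit(const unsigned char *buf)
    {
        return (unsigned long)buf[0]
             | ((unsigned long)buf[1] << 8)
             | ((unsigned long)buf[2] << 16)
             | ((unsigned long)buf[3] << 24);
    }

    /* Check an 8-byte gzip trailer (CRC32, then ISIZE) against the CRC and
       byte count of the actually inflated data; 0 on success, -1 on error. */
    static int verify_gzip_trailer(const unsigned char trailer[8],
                                   unsigned long actual_crc,
                                   unsigned long actual_len)
    {
        if (get_32bit(trailer) != actual_crc)
            return -1;

        /* ISIZE holds only the length modulo 2^32; reduce the real byte
           count the same way before comparing, as the fix above does. */
        if ((actual_len & 0xFFFFFFFFUL) != get_32bit(trailer + 4))
            return -1;

        return 0;
    }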
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/buckets/ssl_buckets.c new/serf-1.3.8/buckets/ssl_buckets.c
--- old/serf-1.3.7/buckets/ssl_buckets.c 2014-08-06 04:24:00.000000000 +0200
+++ new/serf-1.3.8/buckets/ssl_buckets.c 2014-10-19 20:38:11.000000000 +0200
@@ -1317,7 +1317,9 @@
     ssl_ctx->pool = serf_bucket_allocator_get_pool(allocator);
     ssl_ctx->allocator = allocator;

+    /* Use the best possible protocol version, but disable the broken SSLv2/3 */
     ssl_ctx->ctx = SSL_CTX_new(SSLv23_client_method());
+    SSL_CTX_set_options(ssl_ctx->ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);

     SSL_CTX_set_client_cert_cb(ssl_ctx->ctx, ssl_need_client_cert);
     ssl_ctx->cached_cert = 0;
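The two-line ssl_buckets.c change is the usual OpenSSL recipe for issue #154 (boo#901968 tracks the POODLE attack against SSLv3): SSLv23_client_method() negotiates the highest protocol version both peers support, and SSL_CTX_set_options() then removes SSLv2 and SSLv3 from that negotiation. A minimal sketch of the same idiom outside serf, assuming the OpenSSL 1.0.x-era API; error reporting and serf's client-certificate callback are left out:

    #include <openssl/ssl.h>

    /* Create a client context that speaks TLS but never SSLv2 or SSLv3. */
    static SSL_CTX *create_client_ctx(void)
    {
        SSL_CTX *ctx;

        SSL_library_init();

        /* Despite its name, SSLv23_client_method() means "highest version
           available", not "SSLv2 or SSLv3 only". */
        ctx = SSL_CTX_new(SSLv23_client_method());
        if (!ctx)
            return NULL;

        /* Mask out the broken protocol versions. */
        SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);

        return ctx;
    }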
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/serf.h new/serf-1.3.8/serf.h
--- old/serf-1.3.7/serf.h 2014-08-04 20:11:28.000000000 +0200
+++ new/serf-1.3.8/serf.h 2014-10-19 20:20:09.000000000 +0200
@@ -1062,7 +1062,7 @@
 /* Version info */
 #define SERF_MAJOR_VERSION 1
 #define SERF_MINOR_VERSION 3
-#define SERF_PATCH_VERSION 7
+#define SERF_PATCH_VERSION 8

 /* Version number string */
 #define SERF_VERSION_STRING APR_STRINGIFY(SERF_MAJOR_VERSION) "." \
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/test/test_buckets.c new/serf-1.3.8/test/test_buckets.c
--- old/serf-1.3.7/test/test_buckets.c 2013-09-29 08:37:46.000000000 +0200
+++ new/serf-1.3.8/test/test_buckets.c 2014-10-19 20:38:11.000000000 +0200
@@ -16,6 +16,8 @@
 #include <apr.h>
 #include <apr_pools.h>
 #include <apr_strings.h>
+#include <apr_random.h>
+#include <zlib.h>

 #include "serf.h"
 #include "test_serf.h"
@@ -1218,6 +1220,347 @@
     }
 }

+static apr_status_t deflate_compress(const char **data, apr_size_t *len,
+                                     z_stream *zdestr,
+                                     const char *orig, apr_size_t orig_len,
+                                     int last,
+                                     apr_pool_t *pool)
+{
+    int zerr;
+    apr_size_t buf_size;
+    void *write_buf;
+
+    /* The largest buffer we should need is 0.1% larger than the
+       uncompressed data, + 12 bytes. This info comes from zlib.h.
+       Note: This isn't sufficient when using Z_NO_FLUSH and extremely
+       compressible data. Use a buffer bigger than what we need. */
+// buf_size = orig_len + (orig_len / 1000) + 12;
+    buf_size = 100000;
+
+    write_buf = apr_palloc(pool, buf_size);
+
+    zdestr->next_in = (Bytef *)orig;  /* Casting away const! */
+    zdestr->avail_in = (uInt)orig_len;
+
+    zerr = Z_OK;
+    zdestr->next_out = write_buf;
+    zdestr->avail_out = (uInt)buf_size;
+
+    while ((last && zerr != Z_STREAM_END) ||
+           (!last && zdestr->avail_in > 0))
+    {
+        zerr = deflate(zdestr, last ? Z_FINISH : Z_NO_FLUSH);
+        if (zerr < 0)
+            return APR_EGENERAL;
+    }
+
+    *data = write_buf;
+    *len = buf_size - zdestr->avail_out;
+
+    return APR_SUCCESS;
+}
+
+/* Reads the bucket until EOF is found and compares the read data with the
+   zero-terminated string PATTERN (repeated as often as necessary). Reports
+   all failures using CuTest.
+ */
+static void read_bucket_and_check_pattern(CuTest *tc, serf_bucket_t *bkt,
+                                          const char *pattern,
+                                          apr_size_t expected_len)
+{
+    apr_status_t status;
+    const char *expected;
+    const apr_size_t pattern_len = strlen(pattern);
+
+    apr_size_t exp_rem = 0;
+    apr_size_t actual_len = 0;
+
+    do
+    {
+        const char *data;
+        apr_size_t act_rem;
+
+        status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL, &data, &act_rem);
+
+        CuAssert(tc, "Got error during bucket reading.",
+                 !SERF_BUCKET_READ_ERROR(status));
+
+        actual_len += act_rem;
+
+        while (act_rem > 0) {
+            apr_size_t bytes_to_compare;
+
+            if (exp_rem == 0) {
+                expected = pattern;
+                exp_rem = pattern_len;
+            }
+
+            bytes_to_compare = act_rem < exp_rem ? act_rem : exp_rem;
+            CuAssert(tc, "Read data is not equal to expected.",
+                     strncmp(expected, data, bytes_to_compare) == 0);
+            data += bytes_to_compare;
+            act_rem -= bytes_to_compare;
+
+            expected += bytes_to_compare;
+            exp_rem -= bytes_to_compare;
+        }
+    } while (!APR_STATUS_IS_EOF(status));
+
+    CuAssertIntEquals_Msg(tc, "Read less data than expected.", 0, exp_rem);
+    CuAssertIntEquals_Msg(tc, "Read less/more data than expected.", actual_len,
+                          expected_len);
+}
+
+static void deflate_buckets(CuTest *tc, int nr_of_loops)
+{
+    const char *msg = "12345678901234567890123456789012345678901234567890";
+
+    test_baton_t *tb = tc->testBaton;
+    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(tb->pool, NULL,
+                                                              NULL);
+    z_stream zdestr;
+    int i;
+    const char gzip_header[10] =
+        { '\037', '\213', Z_DEFLATED, 0,
+          0, 0, 0, 0, /* mtime */
+          0, 0x03     /* Unix OS_CODE */
+        };
+
+    serf_bucket_t *aggbkt = serf_bucket_aggregate_create(alloc);
+    serf_bucket_t *defbkt = serf_bucket_deflate_create(aggbkt, alloc,
+                                                       SERF_DEFLATE_GZIP);
+    serf_bucket_t *strbkt;
+
+#if 0 /* Enable logging */
+    {
+        serf_config_t *config;
+
+        serf_context_t *ctx = serf_context_create(tb->pool);
+        /* status = */ serf__config_store_get_config(ctx, NULL, &config,
+                                                     tb->pool);
+
+        serf_bucket_set_config(defbkt, config);
+    }
+#endif
+
+    memset(&zdestr, 0, sizeof(z_stream));
+    /* HTTP uses raw deflate format, so window size => -15 */
+    CuAssert(tc, "zlib init failed.",
+             deflateInit2(&zdestr, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8,
+                          Z_DEFAULT_STRATEGY) == Z_OK);
+
+    strbkt = SERF_BUCKET_SIMPLE_STRING_LEN(gzip_header, 10, alloc);
+    serf_bucket_aggregate_append(aggbkt, strbkt);
+
+    for (i = 0; i < nr_of_loops; i++) {
+        const char *data;
+        apr_size_t len;
+
+        if (i == nr_of_loops - 1) {
+            CuAssertIntEquals(tc, APR_SUCCESS,
+                              deflate_compress(&data, &len, &zdestr, msg,
+                                               strlen(msg), 1, tb->pool));
+        } else {
+            CuAssertIntEquals(tc, APR_SUCCESS,
+                              deflate_compress(&data, &len, &zdestr, msg,
+                                               strlen(msg), 0, tb->pool));
+        }
+
+        if (len == 0)
+            continue;
+
+        strbkt = SERF_BUCKET_SIMPLE_STRING_LEN(data, len, alloc);
+
+        serf_bucket_aggregate_append(aggbkt, strbkt);
+    }
+
+    tb->user_baton_l = APR_EOF;
+    read_bucket_and_check_pattern(tc, defbkt, msg, nr_of_loops * strlen(msg));
+}
+
+static void test_deflate_buckets(CuTest *tc)
+{
+    int i;
+
+    for (i = 1; i < 1000; i++) {
+        deflate_buckets(tc, i);
+    }
+}
+
+static apr_status_t discard_data(serf_bucket_t *bkt,
+                                 apr_size_t *read_len)
+{
+    const char *data;
+    apr_size_t data_len;
+    apr_status_t status;
+    apr_size_t read;
+
+    read = 0;
+
+    do
+    {
+        status = serf_bucket_read(bkt, SERF_READ_ALL_AVAIL, &data, &data_len);
+
+        if (!SERF_BUCKET_READ_ERROR(status)) {
+            read += data_len;
+        }
+    } while (status == APR_SUCCESS);
+
+    *read_len = read;
+    return status;
+}
+static apr_status_t hold_open(void *baton, serf_bucket_t *aggbkt)
+{
+    test_baton_t *tb = baton;
+
+    return tb->user_baton_l;
+}
+
+static void put_32bit(unsigned char *buf, unsigned long x)
+{
+    buf[0] = (unsigned char)(x & 0xFF);
+    buf[1] = (unsigned char)((x & 0xFF00) >> 8);
+    buf[2] = (unsigned char)((x & 0xFF0000) >> 16);
+    buf[3] = (unsigned char)((x & 0xFF000000) >> 24);
+}
+
+static serf_bucket_t *
+create_gzip_deflate_bucket(serf_bucket_t *stream, z_stream *outzstr,
+                           serf_bucket_alloc_t *alloc)
+{
+    serf_bucket_t *strbkt;
+    serf_bucket_t *defbkt = serf_bucket_deflate_create(stream, alloc,
+                                                       SERF_DEFLATE_GZIP);
+    int zerr;
+    const char gzip_header[10] =
+        { '\037', '\213', Z_DEFLATED, 0,
+          0, 0, 0, 0, /* mtime */
+          0, 0x03     /* Unix OS_CODE */
+        };
+
+    memset(outzstr, 0, sizeof(z_stream));
+
+    /* HTTP uses raw deflate format, so window size => -15 */
+    zerr = deflateInit2(outzstr, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8,
+                        Z_DEFAULT_STRATEGY);
+    if (zerr != Z_OK)
+        return NULL;
+
+    strbkt = SERF_BUCKET_SIMPLE_STRING_LEN(gzip_header, 10, alloc);
+    serf_bucket_aggregate_append(stream, strbkt);
+
+    return defbkt;
+}
+
+/* Test for issue #152: the trailer of gzipped data only stores the 4 least
+   significant bytes of the length (the length modulo 2^32), so when the
+   uncompressed data is >4GB we can't just compare the actual length with
+   the expected length. */
+static void test_deflate_4GBplus_buckets(CuTest *tc)
+{
+    test_baton_t *tb = tc->testBaton;
+    serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(tb->pool, NULL,
+                                                              NULL);
+    int i;
+    unsigned char gzip_trailer[8];
+    z_stream zdestr;
+    serf_bucket_t *aggbkt = serf_bucket_aggregate_create(alloc);
+    serf_bucket_t *defbkt = create_gzip_deflate_bucket(aggbkt, &zdestr, alloc);
+    serf_bucket_t *strbkt;
+    apr_pool_t *iter_pool;
+    apr_size_t actual_size;
+    unsigned long unc_crc = 0;
+    unsigned long unc_length = 0;
+
+#define NR_OF_LOOPS 550000
+#define BUFSIZE 8096
+    unsigned char uncompressed[BUFSIZE];
+
+    serf_bucket_aggregate_hold_open(aggbkt, hold_open, tb);
+    tb->user_baton_l = APR_EAGAIN;
+
+#if 0 /* Enable logging */
+    {
+        serf_config_t *config;
+
+        serf_context_t *ctx = serf_context_create(tb->pool);
+        /* status = */ serf__config_store_get_config(ctx, NULL, &config,
+                                                     tb->pool);
+
+        serf_bucket_set_config(defbkt, config);
+    }
+#endif
+
+    apr_pool_create(&iter_pool, tb->pool);
+
+    actual_size = 0;
+    for (i = 0; i < NR_OF_LOOPS; i++) {
+        const char *data;
+        apr_size_t len;
+        apr_size_t read_len;
+        serf_bucket_alloc_t *iter_alloc;
+        apr_status_t status;
+
+        apr_pool_clear(iter_pool);
+        iter_alloc = serf_bucket_allocator_create(iter_pool, NULL, NULL);
+
+        if (i % 1000 == 0)
+            printf("%d\n", i);
+
+        status = apr_generate_random_bytes(uncompressed, BUFSIZE);
+        CuAssertIntEquals(tc, APR_SUCCESS, status);
+
+        unc_crc = crc32(unc_crc, (const Bytef *)uncompressed, BUFSIZE);
+        unc_length += BUFSIZE;
+
+        if (i == NR_OF_LOOPS - 1) {
+            CuAssertIntEquals(tc, APR_SUCCESS,
+                              deflate_compress(&data, &len, &zdestr,
+                                               (const char *)uncompressed,
+                                               BUFSIZE, 1, iter_pool));
+        } else {
+            CuAssertIntEquals(tc, APR_SUCCESS,
+                              deflate_compress(&data, &len, &zdestr,
+                                               (const char *)uncompressed,
+                                               BUFSIZE, 0, iter_pool));
+        }
+
+        if (len == 0)
+            continue;
+
+        strbkt = serf_bucket_simple_copy_create(data, len, iter_alloc);
+        serf_bucket_aggregate_append(aggbkt, strbkt);
+
+        /* Start reading inflated data */
+        status = discard_data(defbkt, &read_len);
+        CuAssert(tc, "Got error during discarding of compressed data.",
+                 !SERF_BUCKET_READ_ERROR(status));
+
+        actual_size += read_len;
+    }
+    put_32bit(&gzip_trailer[0], unc_crc);
+    put_32bit(&gzip_trailer[4], unc_length);
+    strbkt = SERF_BUCKET_SIMPLE_STRING_LEN((const char *)gzip_trailer,
+                                           sizeof(gzip_trailer), alloc);
+    serf_bucket_aggregate_append(aggbkt, strbkt);
+
+    tb->user_baton_l = APR_EOF;
+
+    while (1) {
+        apr_size_t read_len;
+        apr_status_t status = discard_data(defbkt, &read_len);
+        CuAssert(tc, "Got error during discarding of compressed data.",
+                 !SERF_BUCKET_READ_ERROR(status));
+        actual_size += read_len;
+        if (status == APR_EOF)
+            break;
+    }
+
+    CuAssertIntEquals(tc, NR_OF_LOOPS * BUFSIZE, actual_size);
+#undef NR_OF_LOOPS
+#undef BUFSIZE
+}
+
 CuSuite *test_buckets(void)
 {
     CuSuite *suite = CuSuiteNew();
@@ -1243,6 +1586,13 @@
     SUITE_ADD_TEST(suite, test_random_eagain_in_response);
     SUITE_ADD_TEST(suite, test_dechunk_buckets);
     SUITE_ADD_TEST(suite, test_response_no_body_expected);
+    SUITE_ADD_TEST(suite, test_deflate_buckets);
+#if 0
+    /* This test for issue #152 takes a lot of time generating 4GB+ of random
+       data, so it's disabled by default. */
+    SUITE_ADD_TEST(suite, test_deflate_4GBplus_buckets);
+#endif
+
 #if 0
     SUITE_ADD_TEST(suite, test_serf_default_read_iovec);
 #endif
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/serf-1.3.7/test/test_serf.h new/serf-1.3.8/test/test_serf.h
--- old/serf-1.3.7/test/test_serf.h 2013-06-23 10:43:58.000000000 +0200
+++ new/serf-1.3.8/test/test_serf.h 2014-10-19 20:24:37.000000000 +0200
@@ -108,8 +108,9 @@
     const char *serv_url;
     serf_connection_setup_t conn_setup;

-    /* An extra baton which can be freely used by tests. */
+    /* Extra batons which can be freely used by tests. */
     void *user_baton;
+    long user_baton_l;

     /* Flags that can be used to report situations, e.g. that a callback
        was called. */

-- 
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
