Hello community,

here is the log from the commit of package libs3 for openSUSE:Factory checked 
in at 2016-11-14 20:14:26
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/libs3 (Old)
 and      /work/SRC/openSUSE:Factory/.libs3.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "libs3"

Changes:
--------
--- /work/SRC/openSUSE:Factory/libs3/libs3.changes      2016-05-04 08:20:08.000000000 +0200
+++ /work/SRC/openSUSE:Factory/.libs3.new/libs3.changes 2016-11-14 20:14:27.000000000 +0100
@@ -1,0 +2,15 @@
+Fri Nov  4 13:40:47 UTC 2016 - [email protected]
+
+- Updated s3-aws4.diff to new submission
+- Added s3-revert-pr51.diff, needed for s3-aws4.diff to work
+- Correct version to be 3.0~gitN, since the 2.0 release is already
+  way past.
+
+-------------------------------------------------------------------
+Wed Aug 17 10:24:00 UTC 2016 - [email protected]
+
+- Update to new snapshot 2.0~git195
+* Add multipart copy API and support inside s3 executable
+- Add s3-aws4.diff to support AWS4-HMAC-SHA256
+
+-------------------------------------------------------------------

Old:
----
  debian.libs3-2.install
  libs3-2.0~git193.tar.xz

New:
----
  debian.libs3-3.install
  debian.series
  libs3-3.0~git204.tar.xz
  s3-aws4.diff

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ libs3.spec ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -1,7 +1,7 @@
 #
 # spec file for package libs3
 #
-# Copyright (c) 2012 SUSE LINUX Products GmbH, Nuernberg, Germany.
+# Copyright (c) 2016 SUSE LINUX GmbH, Nuernberg, Germany.
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -16,25 +16,26 @@
 #
 
 
-%define version_unconverted 2.0~git193
+%define version_unconverted 3.0~git204
 
 Name:           libs3
-%define lname  libs3-2
-Version:        2.0~git193
+%define lname  libs3-3
+Version:        3.0~git204
 Release:        0
 Summary:        C Library and tools for Amazon S3 access
 License:        LGPL-3.0+
 Group:          Development/Libraries/C and C++
-URL:            https://aws.amazon.com/developertools/Amazon-S3/1648
+Url:            https://aws.amazon.com/developertools/Amazon-S3/1648
 
-#Git-Clone:    git://github.com/bji/libs3
 Source:         %name-%version.tar.xz
-Patch1:         s3-am.diff
-BuildRoot:      %_tmppath/%name-%version-build
+Patch1:         s3-aws4.diff
+Patch2:         s3-am.diff
+BuildRoot:      %{_tmppath}/%{name}-%{version}-build
 BuildRequires:  automake
 BuildRequires: libtool >= 2
 BuildRequires:  pkg-config
 BuildRequires:  xz
+BuildRequires:  pkgconfig(libcrypto)
 BuildRequires: pkgconfig(libcurl)
 BuildRequires:  pkgconfig(libxml-2.0)
 
@@ -71,7 +72,7 @@
 
 %prep
 %setup -q
-%patch -P 1 -p1
+%patch -P 1 -P 2 -p1
 
 %build
 mkdir -p m4
@@ -88,7 +89,7 @@
 
 %files -n %lname
 %defattr(-,root,root)
-%_libdir/libs3.so.2*
+%_libdir/libs3.so.3*
 %doc COPYING LICENSE
 
 %files devel

++++++ _service ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -3,7 +3,7 @@
                <param name="scm">git</param>
                <param name="url">git://github.com/bji/libs3</param>
                <param name="parent-tag">7f08779e5ee4b0bb99f8baae018fa35da42352e0</param>
-               <param name="versionformat">2.0~git@TAG_OFFSET@</param>
+               <param name="versionformat">3.0~git@TAG_OFFSET@</param>
        </service>
        <service name="recompress" mode="localonly">
                <param name="file">*.tar</param>

++++++ debian.changelog ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -1,5 +1,5 @@
-libs3 (2.0~git193) stable; urgency=low
+libs3 (3.0~git204) stable; urgency=low
 
-  * bla
+  * OBS needs a way to convert .changes to debian.changelog
 
- -- Zarafa Development <[email protected]>  Tue, 20 Jan 2015 10:25:03 +0100
+ -- Development <[email protected]>  Fri, 04 Nov 2016 00:00:00 +0000

++++++ debian.control ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -2,11 +2,12 @@
 Section: net
 Priority: extra
 Maintainer: Laszlo Boszormenyi (GCS) <[email protected]>
-Build-Depends: debhelper (>= 8), libxml2-dev, libcurl4-gnutls-dev
+Build-Depends: debhelper (>= 8), libxml2-dev, libcurl4-gnutls-dev,
+ libssl-dev, autotools-dev, dh-autoreconf, pkg-config
 Standards-Version: 3.9.3
 Homepage: http://libs3.ischo.com/index.html
 
-Package: libs3-2
+Package: libs3-3
 Architecture: any
 Depends: ${shlibs:Depends}, ${misc:Depends}
 Description: C Library and Tools for Amazon S3 Access
@@ -17,7 +18,7 @@
 Package: libs3-dev
 Architecture: any
 Section: libdevel
-Depends: ${misc:Depends}, libs3-2 (= ${binary:Version})
+Depends: ${misc:Depends}, libs3-3 (= ${binary:Version})
 Description: C Development Library for Amazon S3 Access
  This package includes the libs3 header, needed to compile applications
  against libs3.

++++++ debian.libs3-2.install -> debian.libs3-3.install ++++++

++++++ debian.libs3-dev.install ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -1,3 +1,2 @@
 usr/include/
-usr/lib/libs3.a
 usr/lib/libs3.so

++++++ debian.rules ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -1,63 +1,8 @@
 #!/usr/bin/make -f
-# Uncomment this to turn on verbose mode.
-#export DH_VERBOSE=1
-
-DESTDIR = $(CURDIR)/debian/tmp/usr/
-
 # These are used for cross-compiling and for saving the configure script
 # from having to guess our platform (since we know it already)
 DEB_HOST_GNU_TYPE   ?= $(shell dpkg-architecture -qDEB_HOST_GNU_TYPE)
 DEB_BUILD_GNU_TYPE  ?= $(shell dpkg-architecture -qDEB_BUILD_GNU_TYPE)
 
-ifneq (,$(findstring debug,$(DEB_BUILD_OPTIONS)))
-       CFLAGS += -g
-endif
-ifeq (,$(findstring nostrip,$(DEB_BUILD_OPTIONS)))
-       INSTALL_PROGRAM += -s
-endif
-
-clean:
-       dh_testdir
-
-       rm -f install-stamp build-stamp
-       $(MAKE) clean
-       dh_clean
-
-build-arch: build-stamp
-build-indep: build-stamp
-
-build: build-arch build-indep
-
-build-stamp:
-       dh_testdir
-       $(MAKE)
-       touch $@
-
-install: install-stamp
-
-install-stamp: build
-       dh_testdir
-
-       $(MAKE) install DESTDIR=$(DESTDIR)
-       touch $@
-
-binary-indep: build install
-
-binary-arch: build install
-       dh_testdir
-       dh_testroot
-       dh_installdocs -a
-       dh_installchangelogs -a
-       dh_install -a
-       dh_makeshlibs -a
-       dh_strip -a
-       dh_compress -a
-       dh_fixperms -a
-       dh_installdeb -a
-       dh_shlibdeps -a
-       dh_gencontrol -a
-       dh_md5sums -a
-       dh_builddeb -a
-
-binary: binary-indep binary-arch
-.PHONY: clean build binary-indep binary-arch binary install
+%:
+       dh $@ --parallel --with autoreconf

++++++ debian.series ++++++
s3-aws4.diff -p1
s3-am.diff -p1
++++++ libs3-2.0~git193.tar.xz -> libs3-3.0~git204.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libs3-2.0~git193/inc/libs3.h new/libs3-3.0~git204/inc/libs3.h
--- old/libs3-2.0~git193/inc/libs3.h    2016-03-31 13:53:27.000000000 +0200
+++ new/libs3-3.0~git204/inc/libs3.h    2016-11-04 22:20:37.000000000 +0100
@@ -1963,6 +1963,57 @@
 
 
 /**
+ * Copies a portion of an object from one location to another.  The object may
+ * be copied back to itself, which is useful for replacing metadata without
+ * changing the object.  Required when doing >5GB object copies.
+ *
+ * @param bucketContext gives the source bucket and associated parameters for
+ *        this request
+ * @param key is the source key
+ * @param destinationBucket gives the destination bucket into which to copy
+ *        the object.  If NULL, the source bucket will be used.
+ * @param destinationKey gives the destination key into which to copy the
+ *        object.  If NULL, the source key will be used.
+ * @param partNo is the sequence number of any multipart upload, 0 = non-multipart
+ * @param uploadId is the ID returned for a multipart initialize request, ignored
+ *        if partNo = 0
+ * @param startOffset is the starting point in the original object to copy.
+ * @param count is the number of bytes starting at startOffset in the
+ *        original object to copy.  0 indicates no-range (i.e. all)
+ * @param putProperties optionally provides properties to apply to the object
+ *        that is being put to.  If not supplied (i.e. NULL is passed in),
+ *        then the copied object will retain the metadata of the source
+ *        object.
+ * @param lastModifiedReturn returns the last modified date of the copied
+ *        object
+ * @param eTagReturnSize specifies the number of bytes provided in the
+ *        eTagReturn buffer
+ * @param eTagReturn is a buffer into which the resulting eTag of the copied
+ *        object will be written
+ * @param requestContext if non-NULL, gives the S3RequestContext to add this
+ *        request to, and does not perform the request immediately.  If NULL,
+ *        performs the request immediately and synchronously.
+ * @param handler gives the callbacks to call as the request is processed and
+ *        completed
+ * @param callbackData will be passed in as the callbackData parameter to
+ *        all callbacks for this request
+ **/
+void S3_copy_object_range(const S3BucketContext *bucketContext,
+                          const char *key, const char *destinationBucket,
+                          const char *destinationKey,
+                          const int partNo, const char *uploadId,
+                          const unsigned long startOffset, const unsigned long count,
+                          const S3PutProperties *putProperties,
+                          int64_t *lastModifiedReturn, int eTagReturnSize,
+                          char *eTagReturn, S3RequestContext *requestContext,
+                          const S3ResponseHandler *handler, void *callbackData);
+
+
+/**
  * Gets an object from S3.  The contents of the object are returned in the
  * handler's getObjectDataCallback.
  *
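
For reference, here is a minimal sketch of driving the new
S3_copy_object_range() API declared above.  All bucket names, credentials
and the upload ID are placeholders, the callbacks are no-ops, and
S3_initialize() is assumed to have been called already:

    #include <stdint.h>
    #include "libs3.h"

    /* No-op callbacks; a real caller would inspect status and properties. */
    static S3Status propsCb(const S3ResponseProperties *p, void *d)
    {
        (void)p; (void)d;
        return S3StatusOK;
    }

    static void completeCb(S3Status s, const S3ErrorDetails *e, void *d)
    {
        (void)s; (void)e; (void)d;
    }

    static void copy_first_part(void)
    {
        S3BucketContext src = {
            0,                    /* hostName: default endpoint */
            "source-bucket",      /* placeholder bucket */
            S3ProtocolHTTPS,
            S3UriStylePath,
            "ACCESS_KEY_ID",      /* placeholder credentials */
            "SECRET_ACCESS_KEY",
            0                     /* securityToken */
        };
        S3ResponseHandler handler = { &propsCb, &completeCb };
        int64_t lastModified;
        char eTag[512];

        /* Copy the first 5 MiB of "bigkey" into part 1 of an open multipart
         * upload on "dest-bucket".  count is length - 1 because the copy
         * range is inclusive on the wire. */
        S3_copy_object_range(&src, "bigkey", "dest-bucket", "bigkey",
                             1, "UPLOAD_ID",
                             0, 5 * 1024 * 1024 - 1,
                             NULL,                 /* keep source metadata */
                             &lastModified, (int)sizeof(eTag), eTag,
                             NULL,                 /* run synchronously */
                             &handler, NULL);
    }
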
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libs3-2.0~git193/src/object.c new/libs3-3.0~git204/src/object.c
--- old/libs3-2.0~git193/src/object.c   2016-03-31 13:53:27.000000000 +0200
+++ new/libs3-3.0~git204/src/object.c   2016-11-04 22:20:37.000000000 +0100
@@ -174,6 +174,27 @@
                     char *eTagReturn, S3RequestContext *requestContext,
                     const S3ResponseHandler *handler, void *callbackData)
 {
+    /* Use the range copier with 0 length */
+    S3_copy_object_range(bucketContext, key,
+                         destinationBucket, destinationKey,
+                         0, NULL, // No multipart
+                         0, 0, // No length => std. copy of < 5GB
+                         putProperties,
+                         lastModifiedReturn, eTagReturnSize,
+                         eTagReturn, requestContext,
+                         handler, callbackData);
+}
+
+
+void S3_copy_object_range(const S3BucketContext *bucketContext, const char *key,
+                          const char *destinationBucket, const char *destinationKey,
+                          const int partNo, const char *uploadId,
+                          const unsigned long startOffset, const unsigned long count,
+                          const S3PutProperties *putProperties,
+                          int64_t *lastModifiedReturn, int eTagReturnSize,
+                          char *eTagReturn, S3RequestContext *requestContext,
+                          const S3ResponseHandler *handler, void *callbackData)
+{
     // Create the callback data
     CopyObjectData *data = 
         (CopyObjectData *) malloc(sizeof(CopyObjectData));
@@ -197,6 +218,14 @@
     data->eTagReturnLen = 0;
     string_buffer_initialize(data->lastModified);
 
+    // If there's a sequence ID > 0 then add a subResource, otherwise pass in NULL
+    char subResource[512];
+    char *subRsrc = NULL;
+    if (partNo > 0) {
+        snprintf(subResource, 512, "partNumber=%d&uploadId=%s", partNo, uploadId);
+        subRsrc = subResource;
+    }
+
     // Set up the RequestParams
     RequestParams params =
     {
@@ -211,12 +240,12 @@
           bucketContext->securityToken },             // securityToken
         destinationKey ? destinationKey : key,        // key
         0,                                            // queryParams
-        0,                                            // subResource
+        subRsrc,                                      // subResource
         bucketContext->bucketName,                    // copySourceBucketName
         key,                                          // copySourceKey
         0,                                            // getConditions
-        0,                                            // startByte
-        0,                                            // byteCount
+        startOffset,                                  // startByte
+        count,                                        // byteCount
         putProperties,                                // putProperties
         &copyObjectPropertiesCallback,                // propertiesCallback
         0,                                            // toS3Callback
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libs3-2.0~git193/src/request.c new/libs3-3.0~git204/src/request.c
--- old/libs3-2.0~git193/src/request.c  2016-03-31 13:53:27.000000000 +0200
+++ new/libs3-3.0~git204/src/request.c  2016-11-04 22:20:37.000000000 +0100
@@ -358,6 +358,12 @@
                            params->copySourceBucketName,
                            params->copySourceKey);
         }
+        // If byteCount != 0 then we're just copying a range, add header
+        if (params->byteCount > 0) {
+            headers_append(1, "x-amz-copy-source-range: bytes=%llu-%llu",
+                           (unsigned long long)params->startByte,
+                           (unsigned long long)params->startByte + params->byteCount);
+        }
         // And the x-amz-metadata-directive header
         if (properties) {
             headers_append(1, "%s", "x-amz-metadata-directive: REPLACE");
@@ -1372,7 +1378,11 @@
         return S3StatusConnectionFailed;
     case CURLE_PARTIAL_FILE:
         return S3StatusOK;
+#if LIBCURL_VERSION_NUM >= 0x071101 /* 7.17.1 */
     case CURLE_PEER_FAILED_VERIFICATION:
+#else
+    case CURLE_SSL_PEER_CERTIFICATE:
+#endif
     case CURLE_SSL_CACERT:
         return S3StatusServerFailedVerification;
     default:
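
A note on the header added above: x-amz-copy-source-range takes an
inclusive "bytes=first-last" span, and the end byte is computed as
startByte + byteCount, which is why the caller in s3.c passes the part
length minus one as the count ("Inclusive for copies").  A self-contained
sketch of the arithmetic, with an assumed 5 MiB chunk:

    #include <stdio.h>

    int main(void)
    {
        /* byteCount is length - 1 because the end byte is inclusive */
        unsigned long long startByte = 0;
        unsigned long long byteCount = 5ULL * 1024 * 1024 - 1;
        char header[128];

        snprintf(header, sizeof(header),
                 "x-amz-copy-source-range: bytes=%llu-%llu",
                 startByte, startByte + byteCount);
        puts(header);  /* x-amz-copy-source-range: bytes=0-5242879 */
        return 0;
    }
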
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libs3-2.0~git193/src/s3.c new/libs3-3.0~git204/src/s3.c
--- old/libs3-2.0~git193/src/s3.c       2016-03-31 13:53:27.000000000 +0200
+++ new/libs3-3.0~git204/src/s3.c       2016-11-04 22:20:37.000000000 +0100
@@ -2063,7 +2063,6 @@
     return S3StatusOK;
 }
 
-
 static int multipartPutXmlCallback(int bufferSize, char *buffer,
                                    void *callbackData)
 {
@@ -2132,7 +2131,8 @@
     return 0;
 }
 
-static void put_object(int argc, char **argv, int optindex)
+static void put_object(int argc, char **argv, int optindex,
+                       const char *srcBucketName, const char *srcKey, unsigned long long srcSize)
 {
     if (optindex == argc) {
         fprintf(stderr, "\nERROR: Missing parameter: bucket/key\n");
@@ -2283,7 +2283,12 @@
     data.gb = 0;
     data.noStatus = noStatus;
 
-    if (filename) {
+    if (srcSize) {
+        // This is really a COPY multipart, not a put, so take from source object
+        contentLength = srcSize;
+        data.infile = NULL;
+    }
+    else if (filename) {
         if (!contentLength) {
             struct stat statbuf;
             // Stat the file to get its length
@@ -2459,16 +2464,43 @@
             partData.put_object_data = data;
             partContentLength = ((contentLength > MULTIPART_CHUNK_SIZE) ?
                                  MULTIPART_CHUNK_SIZE : contentLength);
-            printf("Sending Part Seq %d, length=%d\n", seq, partContentLength);
+            printf("%s Part Seq %d, length=%d\n", srcSize ? "Copying" : 
"Sending", seq, partContentLength);
             partData.put_object_data.contentLength = partContentLength;
             partData.put_object_data.originalContentLength = partContentLength;
             partData.put_object_data.totalContentLength = todoContentLength;
             partData.put_object_data.totalOriginalContentLength = totalContentLength;
             putProperties.md5 = 0;
             do {
-                S3_upload_part(&bucketContext, key, &putProperties,
-                               &putObjectHandler, seq, manager.upload_id,
-                               partContentLength,0, &partData);
+                if (srcSize) {
+                    S3BucketContext srcBucketContext =
+                    {
+                        0,
+                        srcBucketName,
+                        protocolG,
+                        uriStyleG,
+                        accessKeyIdG,
+                        secretAccessKeyG,
+                        0
+                    };
+
+                    S3ResponseHandler copyResponseHandler = { &responsePropertiesCallback, &responseCompleteCallback };
+                    int64_t lastModified;
+
+                    unsigned long long startOffset = (unsigned long long)MULTIPART_CHUNK_SIZE * (unsigned long long)(seq-1);
+                    unsigned long long count = partContentLength - 1; // Inclusive for copies
+                    // The default copy callback tries to set this for us, need to allocate here
+                    manager.etags[seq-1] = malloc(512); // TBD - magic #!  Is there a max etag defined?
+                    S3_copy_object_range(&srcBucketContext, srcKey, bucketName, key,
+                         seq, manager.upload_id,
+                         startOffset, count,
+                         &putProperties,
+                         &lastModified, 512 /*TBD - magic # */, manager.etags[seq-1], 0,
+                         &copyResponseHandler, 0);
+                } else {
+                    S3_upload_part(&bucketContext, key, &putProperties,
+                                   &putObjectHandler, seq, manager.upload_id,
+                                   partContentLength,0, &partData);
+                }
             } while (S3_status_is_retryable(statusG) && should_retry());
             if (statusG != S3StatusOK) {
                 printError();
@@ -2519,6 +2551,30 @@
 
 
 // copy object ---------------------------------------------------------------
+static S3Status copyListKeyCallback(int isTruncated, const char *nextMarker,
+                                    int contentsCount,
+                                    const S3ListBucketContent *contents,
+                                    int commonPrefixesCount,
+                                    const char **commonPrefixes,
+                                    void *callbackData)
+{
+    unsigned long long *size = (unsigned long long *)callbackData;
+
+    // These are unused, avoid warnings in a hopefully portable way
+    (void)(nextMarker);
+    (void)(commonPrefixesCount);
+    (void)(commonPrefixes);
+    (void)(isTruncated);
+
+    if (contentsCount != 1) {
+        // We either have no matches or multiple ones...can't perform the operation
+        return S3StatusErrorUnexpectedContent;
+    }
+
+    *size = (unsigned long long) contents->size;
+    return S3StatusOK;
+}
+
 
 static void copy_object(int argc, char **argv, int optindex)
 {
@@ -2541,6 +2597,7 @@
 
     const char *sourceBucketName = argv[optindex++];
     const char *sourceKey = slash;
+    unsigned long long sourceSize = 0;
 
     if (optindex == argc) {
         fprintf(stderr, "\nERROR: Missing parameter: "
@@ -2548,6 +2605,37 @@
         usageExit(stderr);
     }
 
+    S3_init();
+    S3BucketContext listBucketContext =
+    {
+        0,
+        sourceBucketName,
+        protocolG,
+        uriStyleG,
+        accessKeyIdG,
+        secretAccessKeyG,
+        0
+    };
+    S3ListBucketHandler listBucketHandler =
+    {
+        { &responsePropertiesCallback, &responseCompleteCallback },
+        &copyListKeyCallback
+    };
+    // Find size of existing key to determine if MP required
+    do {
+        S3_list_bucket(&listBucketContext, sourceKey, NULL,
+                       ".", 1, 0, &listBucketHandler, &sourceSize);
+    } while (S3_status_is_retryable(statusG) && should_retry());
+    if (statusG != S3StatusOK) {
+        fprintf(stderr, "\nERROR: Unable to get source object size\n");
+        exit(1);
+    }
+    if (sourceSize > MULTIPART_CHUNK_SIZE) {
+        printf("\nUsing multipart copy because object size %llu is above 
%d.\n", sourceSize, MULTIPART_CHUNK_SIZE);
+        put_object(argc, argv, optindex, sourceBucketName, sourceKey, 
sourceSize);
+        return;
+    }
+
     // Split bucket/key
     slash = argv[optindex];
     while (*slash && (*slash != '/')) {
@@ -2663,7 +2751,6 @@
         }
     }
 
-    S3_init();
     
     S3BucketContext bucketContext =
     {
@@ -3664,7 +3751,7 @@
         }
     }
     else if (!strcmp(command, "put")) {
-        put_object(argc, argv, optind);
+        put_object(argc, argv, optind, NULL, NULL, 0);
     }
     else if (!strcmp(command, "copy")) {
         copy_object(argc, argv, optind);
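
To summarize the flow added above: copy_object first lists the source key
to learn its size and, when that size exceeds MULTIPART_CHUNK_SIZE, hands
off to put_object, which issues one S3_copy_object_range per part instead
of uploading data.  A standalone sketch of the per-part offset math,
assuming the 15 MiB MULTIPART_CHUNK_SIZE that s3.c uses:

    #include <stdio.h>

    #define MULTIPART_CHUNK_SIZE (15 << 20)  /* assumed 15 MiB, as in s3.c */

    int main(void)
    {
        unsigned long long srcSize = 40ULL << 20;  /* hypothetical 40 MiB object */
        unsigned long long todo = srcSize;
        int seq;

        for (seq = 1; todo > 0; seq++) {
            unsigned long long partLen =
                (todo > MULTIPART_CHUNK_SIZE) ? MULTIPART_CHUNK_SIZE : todo;
            unsigned long long startOffset =
                (unsigned long long)MULTIPART_CHUNK_SIZE * (seq - 1);

            /* The copy passes partLen - 1 as count, so part seq covers
             * exactly these (inclusive) bytes: */
            printf("part %d: bytes %llu-%llu\n",
                   seq, startOffset, startOffset + partLen - 1);
            todo -= partLen;
        }
        return 0;
    }
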
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libs3-2.0~git193/test/test.sh new/libs3-3.0~git204/test/test.sh
--- old/libs3-2.0~git193/test/test.sh   2016-03-31 13:53:27.000000000 +0200
+++ new/libs3-3.0~git204/test/test.sh   2016-11-04 22:20:37.000000000 +0100
@@ -28,106 +28,132 @@
     S3_COMMAND=s3
 fi
 
+failures=0
+
 TEST_BUCKET=${TEST_BUCKET_PREFIX}.testbucket
 
 # Create the test bucket
 echo "$S3_COMMAND create $TEST_BUCKET"
 $S3_COMMAND create $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # List to find it
 echo "$S3_COMMAND list | grep $TEST_BUCKET"
 $S3_COMMAND list | grep $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Test it
 echo "$S3_COMMAND test $TEST_BUCKET"
 $S3_COMMAND test $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # List to ensure that it is empty
 echo "$S3_COMMAND list $TEST_BUCKET"
 $S3_COMMAND list $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Put some data
 rm -f seqdata
 seq 1 10000 > seqdata
 echo "$S3_COMMAND put $TEST_BUCKET/testkey filename=seqdata noStatus=1"
 $S3_COMMAND put $TEST_BUCKET/testkey filename=seqdata noStatus=1
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 rm -f testkey
 # Get the data and make sure that it matches
 echo "$S3_COMMAND get $TEST_BUCKET/testkey filename=testkey"
 $S3_COMMAND get $TEST_BUCKET/testkey filename=testkey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 diff seqdata testkey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 rm -f seqdata testkey
 
 # Delete the file
 echo "$S3_COMMAND delete $TEST_BUCKET/testkey"
 $S3_COMMAND delete $TEST_BUCKET/testkey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Remove the test bucket
 echo "$S3_COMMAND delete $TEST_BUCKET"
 $S3_COMMAND delete $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Make sure it's not there
 echo "$S3_COMMAND list | grep $TEST_BUCKET"
 $S3_COMMAND list | grep $TEST_BUCKET
+failures=$(($failures + (($? == 1) ? 0 : 1)))
 
 # Now create it again
 echo "$S3_COMMAND create $TEST_BUCKET"
 $S3_COMMAND create $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Put 10 files in it
 for i in `seq 0 9`; do
     echo "echo \"Hello\" | $S3_COMMAND put $TEST_BUCKET/key_$i"
     echo "Hello" | $S3_COMMAND put $TEST_BUCKET/key_$i
+    failures=$(($failures + (($? == 0) ? 0 : 1)))
 done
 
 # List with all details
 echo "$S3_COMMAND list $TEST_BUCKET"
 $S3_COMMAND list $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 COPY_BUCKET=${TEST_BUCKET_PREFIX}.copybucket
 
 # Create another test bucket and copy a file into it
 echo "$S3_COMMAND create $COPY_BUCKET"
 $S3_COMMAND create $COPY_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 echo <<EOF
 $S3_COMMAND copy $TEST_BUCKET/key_5 $COPY_BUCKET/copykey
 EOF
 $S3_COMMAND copy $TEST_BUCKET/key_5 $COPY_BUCKET/copykey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # List the copy bucket
 echo "$S3_COMMAND list $COPY_BUCKET"
 $S3_COMMAND list $COPY_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Compare the files
 rm -f key_5 copykey
 echo "$S3_COMMAND get $TEST_BUCKET/key_5 filename=key_5"
 $S3_COMMAND get $TEST_BUCKET/key_5 filename=key_5
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 echo "$S3_COMMAND get $COPY_BUCKET/copykey filename=copykey"
 $S3_COMMAND get $COPY_BUCKET/copykey filename=copykey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 diff key_5 copykey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 rm -f key_5 copykey
 
 # Delete the files
 for i in `seq 0 9`; do
     echo "$S3_COMMAND delete $TEST_BUCKET/key_$i"
     $S3_COMMAND delete $TEST_BUCKET/key_$i
+    failures=$(($failures + (($? == 0) ? 0 : 1)))
 done
 echo "$S3_COMMAND delete $COPY_BUCKET/copykey"
 $S3_COMMAND delete $COPY_BUCKET/copykey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Delete the copy bucket
 echo "$S3_COMMAND delete $COPY_BUCKET"
 $S3_COMMAND delete $COPY_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Now create a new zero-length file
 echo "$S3_COMMAND put $TEST_BUCKET/aclkey < /dev/null"
 $S3_COMMAND put $TEST_BUCKET/aclkey < /dev/null
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Get the bucket acl
 rm -f acl
 echo "$S3_COMMAND getacl $TEST_BUCKET filename=acl"
 $S3_COMMAND getacl $TEST_BUCKET filename=acl
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Add READ for all AWS users, and READ_ACP for everyone
 echo <<EOF >> acl
@@ -138,18 +164,22 @@
 EOF
 echo "$S3_COMMAND setacl $TEST_BUCKET filename=acl"
 $S3_COMMAND setacl $TEST_BUCKET filename=acl
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Test to make sure that it worked
 rm -f acl_new
 echo "$S3_COMMAND getacl $TEST_BUCKET filename=acl_new"
 $S3_COMMAND getacl $TEST_BUCKET filename=acl_new
-diff acl acl_new
+failures=$(($failures + (($? == 0) ? 0 : 1)))
+diff -B acl acl_new
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 rm -f acl acl_new
 
 # Get the key acl
 rm -f acl
 echo "$S3_COMMAND getacl $TEST_BUCKET/aclkey filename=acl"
 $S3_COMMAND getacl $TEST_BUCKET/aclkey filename=acl
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Add READ for all AWS users, and READ_ACP for everyone
 echo <<EOF >> acl
@@ -160,25 +190,44 @@
 EOF
 echo "$S3_COMMAND setacl $TEST_BUCKET/aclkey filename=acl"
 $S3_COMMAND setacl $TEST_BUCKET/aclkey filename=acl
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 
 # Test to make sure that it worked
 rm -f acl_new
 echo "$S3_COMMAND getacl $TEST_BUCKET/aclkey filename=acl_new"
 $S3_COMMAND getacl $TEST_BUCKET/aclkey filename=acl_new
-diff acl acl_new
+failures=$(($failures + (($? == 0) ? 0 : 1)))
+diff -B acl acl_new
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 rm -f acl acl_new
 
 # Check multipart file upload (>15MB)
 dd if=/dev/zero of=mpfile bs=1024k count=30
 echo "$S3_COMMAND put $TEST_BUCKET/mpfile filename=mpfile"
 $S3_COMMAND put $TEST_BUCKET/mpfile filename=mpfile
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 echo "$S3_COMMAND get $TEST_BUCKET/mpfile filename=mpfile.get"
 $S3_COMMAND get $TEST_BUCKET/mpfile filename=mpfile.get
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 diff mpfile mpfile.get
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 rm -f mpfile mpfile.get
 
-# Remove the test file
+# Remove the test files
+echo "$S3_COMMAND delete $TEST_BUCKET/mpfile"
+$S3_COMMAND delete $TEST_BUCKET/mpfile
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 echo "$S3_COMMAND delete $TEST_BUCKET/aclkey"
 $S3_COMMAND delete $TEST_BUCKET/aclkey
+failures=$(($failures + (($? == 0) ? 0 : 1)))
 echo "$S3_COMMAND delete $TEST_BUCKET"
 $S3_COMMAND delete $TEST_BUCKET
+failures=$(($failures + (($? == 0) ? 0 : 1)))
+
+if [ ${failures} = 0 ]; then
+    echo "all tests completed successfully"
+else
+    echo "tests completed with ${failures} failures"
+fi
+
+exit ${failures}

++++++ libs3.dsc ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -1,11 +1,11 @@
 Format: 1.0
 Source: libs3
-Binary: libs3-2, libs3-dev
+Binary: libs3-3, libs3-dev
 Architecture: any
 DEBTRANSFORM-RELEASE: 1
-Version: 2.0~git193
-Maintainer: Laszlo Boszormenyi (GCS) <[email protected]>
+Version: 3.0~git204
 Homepage: http://libs3.ischo.com/index.html
 Standards-Version: 3.9.3
-Build-Depends: debhelper (>= 8), libxml2-dev, libcurl4-gnutls-dev
+Build-Depends: debhelper (>= 8), libxml2-dev, libcurl4-gnutls-dev,
+ libssl-dev, autotools-dev, dh-autoreconf, pkg-config
 Files:

++++++ s3-am.diff ++++++
--- /var/tmp/diff_new_pack.S4wh20/_old  2016-11-14 20:14:29.000000000 +0100
+++ /var/tmp/diff_new_pack.S4wh20/_new  2016-11-14 20:14:29.000000000 +0100
@@ -21,7 +21,7 @@
 +++ libs3-2.0~git193/Makefile.am
 @@ -0,0 +1,14 @@
 +ACLOCAL_AMFLAGS = -I m4
-+AM_CPPFLAGS = ${curl_CFLAGS} ${xml_CFLAGS} -Iinc -DLIBS3_VER_MAJOR=\"2\" -DLIBS3_VER_MINOR=\"0\" -DLIBS3_VER=\"2.0\" -D_POSIX_C_SOURCE=200112L
++AM_CPPFLAGS = ${curl_CFLAGS} ${xml_CFLAGS} -Iinc -DLIBS3_VER_MAJOR=\"3\" -DLIBS3_VER_MINOR=\"0\" -DLIBS3_VER=\"3.0\" -D_POSIX_C_SOURCE=200112L
 +AM_CFLAGS = -Wall -Wshadow -Wextra
 +bin_PROGRAMS = s3
 +noinst_PROGRAMS = testsimplexml
@@ -29,8 +29,8 @@
 +s3_SOURCES = src/s3.c
 +s3_LDADD = libs3.la
 +libs3_la_SOURCES = src/acl.c src/bucket.c src/error_parser.c src/general.c src/object.c src/request.c src/request_context.c src/response_headers_handler.c src/service_access_logging.c src/service.c src/simplexml.c src/util.c src/multipart.c
-+libs3_la_LDFLAGS = -version-number 2:0:0 -export-symbols-regex '^(S3|simplexml)_'
-+libs3_la_LIBADD = -lpthread ${curl_LIBS} ${xml_LIBS}
++libs3_la_LDFLAGS = -version-number 3:0:0 -export-symbols-regex '^(S3|simplexml)_'
++libs3_la_LIBADD = -lpthread ${curl_LIBS} ${crypto_LIBS} ${xml_LIBS}
 +include_HEADERS = inc/libs3.h
 +testsimplexml_SOURCES = src/testsimplexml.c
 +testsimplexml_LDADD = libs3.la
@@ -38,14 +38,15 @@
 ===================================================================
 --- /dev/null
 +++ libs3-2.0~git193/configure.ac
-@@ -0,0 +1,12 @@
-+AC_INIT([libs3], [2.0])
+@@ -0,0 +1,13 @@
++AC_INIT([libs3], [3.0])
 +AC_CONFIG_AUX_DIR([build-aux])
 +AC_CONFIG_MACRO_DIR([m4])
 +AM_INIT_AUTOMAKE([foreign subdir-objects tar-pax])
 +AC_PROG_CC
 +AC_DISABLE_STATIC
 +LT_INIT
++PKG_CHECK_MODULES([crypto], [libcrypto])
 +PKG_CHECK_MODULES([curl], [libcurl])
 +PKG_CHECK_MODULES([xml], [libxml-2.0])
 +rm -f GNUmakefile

++++++ s3-aws4.diff ++++++
++++ 3323 lines (skipped)

