Hello community,

here is the log from the commit of package libcontainers-common for openSUSE:Factory checked in at 2019-04-04 12:02:23
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/libcontainers-common (Old)
 and      /work/SRC/openSUSE:Factory/.libcontainers-common.new.3908 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "libcontainers-common"

Thu Apr  4 12:02:23 2019 rev:13 rq:690415 version:20190401

Changes:
--------
--- /work/SRC/openSUSE:Factory/libcontainers-common/libcontainers-common.changes       2019-03-01 20:26:34.622052549 +0100
+++ /work/SRC/openSUSE:Factory/.libcontainers-common.new.3908/libcontainers-common.changes     2019-04-04 12:02:28.189365108 +0200
@@ -1,0 +2,13 @@
+Mon Apr  1 14:24:17 UTC 2019 - Richard Brown <rbr...@suse.com>
+
+- Update to libpod v1.2.0
+  * Rootless Podman can now be used with a single UID and GID, without requiring a full 65536 UIDs/GIDs to be allocated in /etc/subuid and /etc/subgid
+  * Move pkg/util default storage functions from libpod to containers/storage
+- Update to image v1.5
+  * Minor behind-the-scenes bugfixes; no user-facing changes
+- Update to storage v1.12.1
+  * Move pkg/util default storage functions from libpod to containers/storage
+  * containers/storage no longer depends on containers/image
+- Version 20190401
+
+-------------------------------------------------------------------
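
For context on the rootless change above: before libpod v1.2.0, rootless
Podman required a subordinate ID range in /etc/subuid and /etc/subgid,
typically allocated like this (the user name and range here are only an
illustration):

    # /etc/subuid and /etc/subgid -- hypothetical entries
    alice:100000:65536

With v1.2.0 such a range is no longer mandatory; rootless Podman can fall
back to operating with only the caller's own UID and GID.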

Old:
----
  image-1.3.tar.xz
  libpod-1.1.0.tar.xz
  storage-1.10.tar.xz

New:
----
  image-1.5.tar.xz
  libpod-1.2.0.tar.xz
  storage-1.12.1.tar.xz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ libcontainers-common.spec ++++++
--- /var/tmp/diff_new_pack.kuixG5/_old  2019-04-04 12:02:32.885368870 +0200
+++ /var/tmp/diff_new_pack.kuixG5/_new  2019-04-04 12:02:32.889368873 +0200
@@ -16,16 +16,16 @@
 #
 
 # libpodver - version from containers/libpod
-%define libpodver 1.1.0
+%define libpodver 1.2.0
 
 # storagever - version from containers/storage
-%define storagever 1.10
+%define storagever 1.12.1
 
 # imagever - version from containers/image
-%define imagever 1.3
+%define imagever 1.5
 
 Name:           libcontainers-common
-Version:        20190219
+Version:        20190401
 Release:        0
 Summary:        Configuration files common to github.com/containers
 License:        Apache-2.0
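
The three %define macros above are expanded wherever the spec refers to
the bundled components; a hedged illustration (the real Source lines in
the spec may be spelled differently):

    Source1:        storage-%{storagever}.tar.xz
    Source2:        image-%{imagever}.tar.xz
    Source3:        libpod-%{libpodver}.tar.xz

Keeping the versions in one place means a rebase only touches these
defines and the _service file below.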

++++++ _service ++++++
--- /var/tmp/diff_new_pack.kuixG5/_old  2019-04-04 12:02:32.929368905 +0200
+++ /var/tmp/diff_new_pack.kuixG5/_new  2019-04-04 12:02:32.933368908 +0200
@@ -4,24 +4,24 @@
 <param name="url">https://github.com/containers/storage.git</param>
 <param name="scm">git</param>
 <param name="filename">storage</param>
-<param name="versionformat">1.10</param>
-<param name="revision">v1.10</param>
+<param name="versionformat">1.12.1</param>
+<param name="revision">v1.12.1</param>
 </service>
 
 <service name="tar_scm" mode="disabled">
 <param name="url">https://github.com/containers/image.git</param>
 <param name="scm">git</param>
 <param name="filename">image</param>
-<param name="versionformat">1.3</param>
-<param name="revision">v1.3</param>
+<param name="versionformat">1.5</param>
+<param name="revision">v1.5</param>
 </service>
 
 <service name="tar_scm" mode="disabled">
 <param name="url">https://github.com/containers/libpod.git</param>
 <param name="scm">git</param>
 <param name="filename">libpod</param>
-<param name="versionformat">1.1.0</param>
-<param name="revision">v1.1.0</param>
+<param name="versionformat">1.2.0</param>
+<param name="revision">v1.2.0</param>
 </service>
 
 <service name="recompress" mode="disabled">
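
All services are declared with mode="disabled", so the tarballs are not
regenerated at build time; the maintainer refreshes them manually,
roughly as follows (assuming a checked-out package and a current osc):

    osc service runall     # run the disabled tar_scm/recompress services
    osc addremove          # pick up the new and dropped tarballs
    osc commit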

++++++ image-1.3.tar.xz -> image-1.5.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/.travis.yml new/image-1.5/.travis.yml
--- old/image-1.3/.travis.yml   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/.travis.yml   2019-02-26 15:39:32.000000000 +0100
@@ -21,7 +21,7 @@
   -e TRAVIS=$TRAVIS -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE
   -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST -e 
TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
   -e TRAVIS_BRANCH=$TRAVIS_BRANCH -e TRAVIS_COMMIT=$TRAVIS_COMMIT
-  -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache -e HOME=/gopath
+  -e GOPATH=/gopath -e TRASH_CACHE=/gopath/.trashcache
   -v /etc/passwd:/etc/passwd -v /etc/sudoers:/etc/sudoers -v 
/etc/sudoers.d:/etc/sudoers.d
   -v /var/run:/var/run:z -v $HOME/gopath:/gopath:Z
   -w /gopath/src/github.com/containers/image image-test bash -c 
"PATH=$PATH:/gopath/bin make cross tools .gitvalidation validate test 
test-skopeo SUDO=sudo BUILDTAGS=\"$BUILDTAGS\""
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/MAINTAINERS new/image-1.5/MAINTAINERS
--- old/image-1.3/MAINTAINERS   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/MAINTAINERS   2019-02-26 15:39:32.000000000 +0100
@@ -3,3 +3,4 @@
 Miloslav Trmac <m...@redhat.com> (@mtrmac)
 Dan Walsh <dwa...@redhat.com> (@dwalsh)
 Nalin Dahyabhai <na...@redhat.com> (@nalind)
+Valentin Rothberg <rothb...@redhat.com> (@vrothberg)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/copy/copy.go new/image-1.5/copy/copy.go
--- old/image-1.3/copy/copy.go  2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/copy/copy.go  2019-02-26 15:39:32.000000000 +0100
@@ -6,24 +6,29 @@
        "fmt"
        "io"
        "io/ioutil"
+       "os"
        "reflect"
        "runtime"
        "strings"
        "sync"
        "time"
 
+       "github.com/containers/image/docker/reference"
        "github.com/containers/image/image"
+       "github.com/containers/image/manifest"
        "github.com/containers/image/pkg/blobinfocache"
        "github.com/containers/image/pkg/compression"
        "github.com/containers/image/signature"
        "github.com/containers/image/transports"
        "github.com/containers/image/types"
        "github.com/klauspost/pgzip"
-       "github.com/opencontainers/go-digest"
+       digest "github.com/opencontainers/go-digest"
        "github.com/pkg/errors"
        "github.com/sirupsen/logrus"
+       "github.com/vbauerster/mpb"
+       "github.com/vbauerster/mpb/decor"
+       "golang.org/x/crypto/ssh/terminal"
        "golang.org/x/sync/semaphore"
-       pb "gopkg.in/cheggaaa/pb.v1"
 )
 
 type digestingReader struct {
@@ -84,6 +89,7 @@
        dest             types.ImageDestination
        rawSource        types.ImageSource
        reportWriter     io.Writer
+       progressOutput   io.Writer
        progressInterval time.Duration
        progress         chan types.ProgressProperties
        blobInfoCache    types.BlobInfoCache
@@ -152,11 +158,19 @@
                }
        }()
 
+       // If reportWriter is not a TTY (e.g., when piping to a file), do not
+       // print the progress bars to avoid long and hard to parse output.
+       // createProgressBar() will print a single line instead.
+       progressOutput := reportWriter
+       if !isTTY(reportWriter) {
+               progressOutput = ioutil.Discard
+       }
        copyInParallel := dest.HasThreadSafePutBlob() && 
rawSource.HasThreadSafeGetBlob()
        c := &copier{
                dest:             dest,
                rawSource:        rawSource,
                reportWriter:     reportWriter,
+               progressOutput:   progressOutput,
                progressInterval: options.ProgressInterval,
                progress:         options.Progress,
                copyInParallel:   copyInParallel,
@@ -201,7 +215,7 @@
 
 // Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
 // source image admissibility.
-func (c *copier) copyOneImage(ctx context.Context, policyContext 
*signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) 
(manifest []byte, retErr error) {
+func (c *copier) copyOneImage(ctx context.Context, policyContext 
*signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) 
(manifestBytes []byte, retErr error) {
        // The caller is handling manifest lists; this could happen only if a 
manifest list contains a manifest list.
        // Make sure we fail cleanly in such cases.
        multiImage, err := isMultiImage(ctx, unparsedImage)
@@ -224,6 +238,26 @@
                return nil, errors.Wrapf(err, "Error initializing image from 
source %s", transports.ImageName(c.rawSource.Reference()))
        }
 
+       // If the destination is a digested reference, make a note of that, 
determine what digest value we're
+       // expecting, and check that the source manifest matches it.
+       destIsDigestedReference := false
+       if named := c.dest.Reference().DockerReference(); named != nil {
+               if digested, ok := named.(reference.Digested); ok {
+                       destIsDigestedReference = true
+                       sourceManifest, _, err := src.Manifest(ctx)
+                       if err != nil {
+                               return nil, errors.Wrapf(err, "Error reading 
manifest from source image")
+                       }
+                       matches, err := manifest.MatchesDigest(sourceManifest, 
digested.Digest())
+                       if err != nil {
+                               return nil, errors.Wrapf(err, "Error computing 
digest of source image's manifest")
+                       }
+                       if !matches {
+                               return nil, errors.New("Digest of source 
image's manifest would not match destination reference")
+                       }
+               }
+       }
+
        if err := checkImageDestinationForCurrentRuntimeOS(ctx, 
options.DestinationCtx, src, c.dest); err != nil {
                return nil, err
        }
@@ -251,15 +285,15 @@
                manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: 
types.ManifestUpdateInformation{Destination: c.dest}},
                src:             src,
                // diffIDsAreNeeded is computed later
-               canModifyManifest: len(sigs) == 0,
-               // Ensure _this_ copy sees exactly the intended data when 
either processing a signed image or signing it.
-               // This may be too conservative, but for now, better safe than 
sorry, _especially_ on the SignBy path:
-               // The signature makes the content non-repudiable, so it very 
much matters that the signature is made over exactly what the user intended.
-               // We do intend the RecordDigestUncompressedPair calls to only 
work with reliable data, but at least there’s a risk
-               // that the compressed version coming from a third party may be 
designed to attack some other decompressor implementation,
-               // and we would reuse and sign it.
-               canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
+               canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
        }
+       // Ensure _this_ copy sees exactly the intended data when either 
processing a signed image or signing it.
+       // This may be too conservative, but for now, better safe than sorry, 
_especially_ on the SignBy path:
+       // The signature makes the content non-repudiable, so it very much 
matters that the signature is made over exactly what the user intended.
+       // We do intend the RecordDigestUncompressedPair calls to only work 
with reliable data, but at least there’s a risk
+       // that the compressed version coming from a third party may be 
designed to attack some other decompressor implementation,
+       // and we would reuse and sign it.
+       ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""
 
        if err := ic.updateEmbeddedDockerReference(); err != nil {
                return nil, err
@@ -283,7 +317,7 @@
        // and at least with the OpenShift registry "acceptschema2" option, 
there is no way to detect the support
        // without actually trying to upload something and getting a 
types.ManifestTypeRejectedError.
        // So, try the preferred manifest MIME type. If the process succeeds, 
fine…
-       manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
+       manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
        if err != nil {
                logrus.Debugf("Writing manifest using preferred type %s failed: 
%v", preferredManifestMIMEType, err)
                // … if it fails, _and_ the failure is because the manifest is 
rejected, we may have other options.
@@ -314,7 +348,7 @@
                        }
 
                        // We have successfully uploaded a manifest.
-                       manifest = attemptedManifest
+                       manifestBytes = attemptedManifest
                        errs = nil // Mark this as a success so that we don't 
abort below.
                        break
                }
@@ -324,7 +358,7 @@
        }
 
        if options.SignBy != "" {
-               newSig, err := c.createSignature(manifest, options.SignBy)
+               newSig, err := c.createSignature(manifestBytes, options.SignBy)
                if err != nil {
                        return nil, err
                }
@@ -336,7 +370,7 @@
                return nil, errors.Wrap(err, "Error writing signatures")
        }
 
-       return manifest, nil
+       return manifestBytes, nil
 }
 
 // Printf writes a formatted string to c.reportWriter.
@@ -389,20 +423,12 @@
        return nil
 }
 
-// shortDigest returns the first 12 characters of the digest.
-func shortDigest(d digest.Digest) string {
-       return d.Encoded()[:12]
-}
-
-// createProgressBar creates a pb.ProgressBar.
-func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) 
*pb.ProgressBar {
-       bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
-       bar.SetMaxWidth(80)
-       bar.ShowTimeLeft = false
-       bar.ShowPercent = false
-       bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, 
shortDigest(srcInfo.Digest)))
-       bar.Output = writer
-       return bar
+// isTTY returns true if the io.Writer is a file and a tty.
+func isTTY(w io.Writer) bool {
+       if f, ok := w.(*os.File); ok {
+               return terminal.IsTerminal(int(f.Fd()))
+       }
+       return false
 }
 
 // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and 
updating ic.manifestUpdates if necessary and ic.canModifyManifest.
@@ -431,6 +457,7 @@
        // copyGroup is used to determine if all layers are copied
        copyGroup := sync.WaitGroup{}
        copyGroup.Add(numLayers)
+
        // copySemaphore is used to limit the number of parallel downloads to
        // avoid malicious images causing troubles and to be nice to servers.
        var copySemaphore *semaphore.Weighted
@@ -441,8 +468,7 @@
        }
 
        data := make([]copyLayerData, numLayers)
-       copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar 
*pb.ProgressBar) {
-               defer bar.Finish()
+       copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar 
*mpb.Bar) {
                defer copySemaphore.Release(1)
                defer copyGroup.Done()
                cld := copyLayerData{}
@@ -452,44 +478,37 @@
                        // does not support them.
                        if ic.diffIDsAreNeeded {
                                cld.err = errors.New("getting DiffID for 
foreign layers is unimplemented")
-                               bar.Prefix(fmt.Sprintf("Skipping blob %s 
(DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
-                               bar.Finish()
                        } else {
                                cld.destInfo = srcLayer
-                               logrus.Debugf("Skipping foreign layer %q copy 
to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
-                               bar.Prefix(fmt.Sprintf("Skipping blob %s 
(foreign layer):", shortDigest(srcLayer.Digest)))
-                               bar.Add64(bar.Total)
-                               bar.Finish()
+                               logrus.Debugf("Skipping foreign layer %q copy 
to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
                        }
                } else {
                        cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, 
srcLayer, bar)
                }
                data[index] = cld
+               bar.SetTotal(srcLayer.Size, true)
        }
 
-       progressBars := make([]*pb.ProgressBar, numLayers)
-       for i, srcInfo := range srcInfos {
-               bar := createProgressBar(srcInfo, "blob", nil)
-               progressBars[i] = bar
-       }
+       func() { // A scope for defer
+               progressPool, progressCleanup := ic.c.newProgressPool(ctx)
+               defer progressCleanup()
 
-       progressPool := pb.NewPool(progressBars...)
-       progressPool.Output = ic.c.reportWriter
-       if err := progressPool.Start(); err != nil {
-               return errors.Wrapf(err, "error creating progress-bar pool")
-       }
+               progressBars := make([]*mpb.Bar, numLayers)
+               for i, srcInfo := range srcInfos {
+                       progressBars[i] = ic.c.createProgressBar(progressPool, 
srcInfo, "blob")
+               }
 
-       for i, srcLayer := range srcInfos {
-               copySemaphore.Acquire(ctx, 1)
-               go copyLayerHelper(i, srcLayer, progressBars[i])
-       }
+               for i, srcLayer := range srcInfos {
+                       copySemaphore.Acquire(ctx, 1)
+                       go copyLayerHelper(i, srcLayer, progressBars[i])
+               }
+
+               // Wait for all layers to be copied
+               copyGroup.Wait()
+       }()
 
        destInfos := make([]types.BlobInfo, numLayers)
        diffIDs := make([]digest.Digest, numLayers)
-
-       copyGroup.Wait()
-       progressPool.Stop()
-
        for i, cld := range data {
                if cld.err != nil {
                        return cld.err
@@ -560,6 +579,44 @@
        return manifest, nil
 }
 
+// newProgressPool creates a *mpb.Progress and a cleanup function.
+// The caller must eventually call the returned cleanup function after the 
pool will no longer be updated.
+func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
+       ctx, cancel := context.WithCancel(ctx)
+       pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), 
mpb.WithContext(ctx))
+       return pool, func() {
+               cancel()
+               pool.Wait()
+       }
+}
+
+// createProgressBar creates a mpb.Bar in pool.  Note that if the copier's 
reportWriter
+// is ioutil.Discard, the progress bar's output will be discarded
+func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, 
kind string) *mpb.Bar {
+       // shortDigestLen is the length of the digest used for blobs.
+       const shortDigestLen = 12
+
+       prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
+       // Truncate the prefix (chopping off part of the digest) to make all progress bars aligned in a column.
+       maxPrefixLen := len("Copying blob ") + shortDigestLen
+       if len(prefix) > maxPrefixLen {
+               prefix = prefix[:maxPrefixLen]
+       }
+
+       bar := pool.AddBar(info.Size,
+               mpb.PrependDecorators(
+                       decor.Name(prefix),
+               ),
+               mpb.AppendDecorators(
+                       decor.CountersKibiByte("%.1f / %.1f"),
+               ),
+       )
+       if c.progressOutput == ioutil.Discard {
+               c.Printf("Copying %s %s\n", kind, info.Digest)
+       }
+       return bar
+}
+
 // copyConfig copies config.json, if any, from src to dest.
 func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
        srcInfo := src.ConfigInfo()
@@ -568,12 +625,20 @@
                if err != nil {
                        return errors.Wrapf(err, "Error reading config blob 
%s", srcInfo.Digest)
                }
-               bar := createProgressBar(srcInfo, "config", c.reportWriter)
-               defer bar.Finish()
-               bar.Start()
-               destInfo, err := c.copyBlobFromStream(ctx, 
bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+
+               destInfo, err := func() (types.BlobInfo, error) { // A scope 
for defer
+                       progressPool, progressCleanup := c.newProgressPool(ctx)
+                       defer progressCleanup()
+                       bar := c.createProgressBar(progressPool, srcInfo, 
"config")
+                       destInfo, err := c.copyBlobFromStream(ctx, 
bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
+                       if err != nil {
+                               return types.BlobInfo{}, err
+                       }
+                       bar.SetTotal(int64(len(configBlob)), true)
+                       return destInfo, nil
+               }()
                if err != nil {
-                       return err
+                       return nil
                }
                if destInfo.Digest != srcInfo.Digest {
                        return errors.Errorf("Internal error: copying 
uncompressed config blob %s changed digest to %s", srcInfo.Digest, 
destInfo.Digest)
@@ -591,7 +656,7 @@
 
 // copyLayer copies a layer with srcInfo (with known Digest and possibly known 
Size) in src to dest, perhaps compressing it if canCompress,
 // and returns a complete blobInfo of the copied layer, and a value for 
LayerDiffIDs if diffIDIsNeeded
-func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, 
bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
+func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, 
bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) {
        cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) 
// May be ""
        diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
 
@@ -602,9 +667,7 @@
                        return types.BlobInfo{}, "", errors.Wrapf(err, "Error 
trying to reuse blob %s at destination", srcInfo.Digest)
                }
                if reused {
-                       bar.Prefix(fmt.Sprintf("Skipping blob %s (already 
present):", shortDigest(srcInfo.Digest)))
-                       bar.Add64(bar.Total)
-                       bar.Finish()
+                       logrus.Debugf("Skipping blob %s (already present):", 
srcInfo.Digest)
                        return blobInfo, cachedDiffID, nil
                }
        }
@@ -616,8 +679,7 @@
        }
        defer srcStream.Close()
 
-       blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, 
types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
-               diffIDIsNeeded, bar)
+       blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, 
types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
        if err != nil {
                return types.BlobInfo{}, "", err
        }
@@ -645,7 +707,7 @@
 // perhaps compressing the stream if canCompress,
 // and returns a complete blobInfo of the copied blob and perhaps a <-chan 
diffIDResult if diffIDIsNeeded, to be read by the caller.
 func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream 
io.Reader, srcInfo types.BlobInfo,
-       diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan 
diffIDResult, error) {
+       diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan 
diffIDResult, error) {
        var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = 
nil
        var diffIDChan chan diffIDResult
 
@@ -706,7 +768,7 @@
 // and returns a complete blobInfo of the copied blob.
 func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, 
srcInfo types.BlobInfo,
        getOriginalLayerCopyWriter func(decompressor 
compression.DecompressorFunc) io.Writer,
-       canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) 
(types.BlobInfo, error) {
+       canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, 
error) {
        // The copying happens through a pipeline of connected io.Readers.
        // === Input: srcStream
 
@@ -729,7 +791,7 @@
                return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob 
%s", srcInfo.Digest)
        }
        isCompressed := decompressor != nil
-       destStream = bar.NewProxyReader(destStream)
+       destStream = bar.ProxyReader(destStream)
 
        // === Send a copy of the original, uncompressed, stream, to a separate 
path if necessary.
        var originalLayerReader io.Reader // DO NOT USE this other than to 
drain the input if no other consumer in the pipeline has done so.
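
The copy.go hunks above replace cheggaaa/pb with vbauerster/mpb and only
render progress bars when output goes to an interactive terminal. A
minimal, self-contained sketch of that TTY-detection pattern (stand-alone
code, not the actual containers/image source):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    // isTTY mirrors the helper added in copy.go: true only for *os.File
    // writers attached to a terminal.
    func isTTY(w io.Writer) bool {
        if f, ok := w.(*os.File); ok {
            return terminal.IsTerminal(int(f.Fd()))
        }
        return false
    }

    func main() {
        reportWriter := io.Writer(os.Stdout)
        progressOutput := reportWriter
        if !isTTY(reportWriter) {
            // When stdout is piped to a file, suppress the bars, as the
            // ioutil.Discard branch in the diff does.
            progressOutput = ioutil.Discard
        }
        fmt.Fprintln(progressOutput, "progress bars would render here")
    }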
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/docker/docker_client.go new/image-1.5/docker/docker_client.go
--- old/image-1.3/docker/docker_client.go       2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/docker/docker_client.go       2019-02-26 15:39:32.000000000 +0100
@@ -91,7 +91,6 @@
        password      string
        signatureBase signatureStorageBase
        scope         authScope
-       extraScope    *authScope // If non-nil, a temporary extra token scope 
(necessary for mounting from another repo)
        // The following members are detected registry properties:
        // They are set after a successful detectProperties(), and never change 
afterwards.
        scheme             string // Empty value also used to indicate 
detectProperties() has not yet succeeded.
@@ -282,7 +281,7 @@
        client.username = username
        client.password = password
 
-       resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
+       resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, 
nil)
        if err != nil {
                return err
        }
@@ -362,8 +361,8 @@
                q.Set("n", strconv.Itoa(limit))
                u.RawQuery = q.Encode()
 
-               logrus.Debugf("trying to talk to v1 search endpoint\n")
-               resp, err := client.makeRequest(ctx, "GET", u.String(), nil, 
nil, noAuth)
+               logrus.Debugf("trying to talk to v1 search endpoint")
+               resp, err := client.makeRequest(ctx, "GET", u.String(), nil, 
nil, noAuth, nil)
                if err != nil {
                        logrus.Debugf("error getting search results from v1 
endpoint %q: %v", registry, err)
                } else {
@@ -379,8 +378,8 @@
                }
        }
 
-       logrus.Debugf("trying to talk to v2 search endpoint\n")
-       resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, 
v2Auth)
+       logrus.Debugf("trying to talk to v2 search endpoint")
+       resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, 
v2Auth, nil)
        if err != nil {
                logrus.Debugf("error getting search results from v2 endpoint 
%q: %v", registry, err)
        } else {
@@ -409,20 +408,20 @@
 
 // makeRequest creates and executes a http.Request with the specified 
parameters, adding authentication and TLS options for the Docker client.
 // The host name and schema is taken from the client or autodetected, and the 
path is relative to it, i.e. the path usually starts with /v2/.
-func (c *dockerClient) makeRequest(ctx context.Context, method, path string, 
headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, 
error) {
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, 
headers map[string][]string, stream io.Reader, auth sendAuth, extraScope 
*authScope) (*http.Response, error) {
        if err := c.detectProperties(ctx); err != nil {
                return nil, err
        }
 
        url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
-       return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, 
-1, auth)
+       return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, 
-1, auth, extraScope)
 }
 
 // makeRequestToResolvedURL creates and executes a http.Request with the 
specified parameters, adding authentication and TLS options for the Docker 
client.
 // streamLen, if not -1, specifies the length of the data expected on stream.
 // makeRequest should generally be preferred.
 // TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, 
url string, headers map[string][]string, stream io.Reader, streamLen int64, 
auth sendAuth) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, 
url string, headers map[string][]string, stream io.Reader, streamLen int64, 
auth sendAuth, extraScope *authScope) (*http.Response, error) {
        req, err := http.NewRequest(method, url, stream)
        if err != nil {
                return nil, err
@@ -441,7 +440,7 @@
                req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
        }
        if auth == v2Auth {
-               if err := c.setupRequestAuth(req); err != nil {
+               if err := c.setupRequestAuth(req, extraScope); err != nil {
                        return nil, err
                }
        }
@@ -460,7 +459,7 @@
 // 2) gcr.io is sending 401 without a WWW-Authenticate header in the real 
request
 //
 // debugging: 
https://github.com/containers/image/pull/211#issuecomment-273426236 and follows 
up
-func (c *dockerClient) setupRequestAuth(req *http.Request) error {
+func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope 
*authScope) error {
        if len(c.challenges) == 0 {
                return nil
        }
@@ -474,10 +473,10 @@
                case "bearer":
                        cacheKey := ""
                        scopes := []authScope{c.scope}
-                       if c.extraScope != nil {
+                       if extraScope != nil {
                                // Using ':' as a separator here is unambiguous 
because getBearerToken below uses the same separator when formatting a remote 
request (and because repository names can't contain colons).
-                               cacheKey = fmt.Sprintf("%s:%s", 
c.extraScope.remoteName, c.extraScope.actions)
-                               scopes = append(scopes, *c.extraScope)
+                               cacheKey = fmt.Sprintf("%s:%s", 
extraScope.remoteName, extraScope.actions)
+                               scopes = append(scopes, *extraScope)
                        }
                        var token bearerToken
                        t, inCache := c.tokenCache.Load(cacheKey)
@@ -564,7 +563,7 @@
 
        ping := func(scheme string) error {
                url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
-               resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, 
nil, -1, noAuth)
+               resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, 
nil, -1, noAuth, nil)
                if err != nil {
                        logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), 
err)
                        return err
@@ -591,7 +590,7 @@
                // best effort to understand if we're talking to a V1 registry
                pingV1 := func(scheme string) bool {
                        url := fmt.Sprintf(resolvedPingV1URL, scheme, 
c.registry)
-                       resp, err := c.makeRequestToResolvedURL(ctx, "GET", 
url, nil, nil, -1, noAuth)
+                       resp, err := c.makeRequestToResolvedURL(ctx, "GET", 
url, nil, nil, -1, noAuth, nil)
                        if err != nil {
                                logrus.Debugf("Ping %s err %s (%#v)", url, 
err.Error(), err)
                                return false
@@ -625,7 +624,7 @@
 // using the original data structures.
 func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref 
dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) 
{
        path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), 
manifestDigest)
-       res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
+       res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
        if err != nil {
                return nil, err
        }
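
The docker_client.go change above removes the temporary c.extraScope
field and instead threads the extra token scope through every
makeRequest call. With PutBlob now allowed to run concurrently, a
per-call argument avoids the data race that a field on the shared client
would invite. A toy sketch of the shape of the refactor (names are
illustrative, not the real containers/image types):

    package main

    import "fmt"

    type authScope struct{ remoteName, actions string }

    // After the refactor: the scope is an explicit argument, so two
    // goroutines mounting blobs from different repos cannot clobber
    // each other's scope.
    func makeRequest(path string, extraScope *authScope) {
        scopes := []authScope{{remoteName: "myrepo", actions: "pull,push"}}
        if extraScope != nil {
            scopes = append(scopes, *extraScope)
        }
        fmt.Println(path, scopes)
    }

    func main() {
        makeRequest("/v2/", nil)
        makeRequest("/v2/", &authScope{remoteName: "other/repo", actions: "pull"})
    }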
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/docker/docker_image.go new/image-1.5/docker/docker_image.go
--- old/image-1.3/docker/docker_image.go        2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/docker/docker_image.go        2019-02-26 15:39:32.000000000 +0100
@@ -66,7 +66,7 @@
        tags := make([]string, 0)
 
        for {
-               res, err := client.makeRequest(ctx, "GET", path, nil, nil, 
v2Auth)
+               res, err := client.makeRequest(ctx, "GET", path, nil, nil, 
v2Auth, nil)
                if err != nil {
                        return nil, err
                }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/docker/docker_image_dest.go new/image-1.5/docker/docker_image_dest.go
--- old/image-1.3/docker/docker_image_dest.go   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/docker/docker_image_dest.go   2019-02-26 15:39:32.000000000 +0100
@@ -12,6 +12,7 @@
        "net/url"
        "os"
        "path/filepath"
+       "strings"
 
        "github.com/containers/image/docker/reference"
        "github.com/containers/image/manifest"
@@ -113,7 +114,7 @@
 
 // HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
 func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
-       return false
+       return true
 }
 
 // PutBlob writes contents of stream and returns data representing the result 
(with all data filled in).
@@ -140,7 +141,7 @@
        // FIXME? Chunked upload, progress reporting, etc.
        uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
        logrus.Debugf("Uploading %s", uploadPath)
-       res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
+       res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, 
nil)
        if err != nil {
                return types.BlobInfo{}, err
        }
@@ -157,7 +158,7 @@
        digester := digest.Canonical.Digester()
        sizeCounter := &sizeCounter{}
        tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), 
sizeCounter))
-       res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", 
uploadLocation.String(), map[string][]string{"Content-Type": 
{"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
+       res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", 
uploadLocation.String(), map[string][]string{"Content-Type": 
{"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
        if err != nil {
                logrus.Debugf("Error uploading layer chunked, response %#v", 
res)
                return types.BlobInfo{}, err
@@ -176,7 +177,7 @@
        // TODO: check inputInfo.Digest == computedDigest 
https://github.com/containers/image/pull/70#discussion_r77646717
        locationQuery.Set("digest", computedDigest.String())
        uploadLocation.RawQuery = locationQuery.Encode()
-       res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", 
uploadLocation.String(), map[string][]string{"Content-Type": 
{"application/octet-stream"}}, nil, -1, v2Auth)
+       res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", 
uploadLocation.String(), map[string][]string{"Content-Type": 
{"application/octet-stream"}}, nil, -1, v2Auth, nil)
        if err != nil {
                return types.BlobInfo{}, err
        }
@@ -194,10 +195,10 @@
 // blobExists returns true iff repo contains a blob with digest, and if so, 
also its size.
 // If the destination does not contain the blob, or it is unknown, blobExists 
ordinarily returns (false, -1, nil);
 // it returns a non-nil error only on an unexpected failure.
-func (d *dockerImageDestination) blobExists(ctx context.Context, repo 
reference.Named, digest digest.Digest) (bool, int64, error) {
+func (d *dockerImageDestination) blobExists(ctx context.Context, repo 
reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, 
error) {
        checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), 
digest.String())
        logrus.Debugf("Checking %s", checkPath)
-       res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
+       res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, 
extraScope)
        if err != nil {
                return false, -1, err
        }
@@ -218,7 +219,7 @@
 }
 
 // mountBlob tries to mount blob srcDigest from srcRepo to the current 
destination.
-func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo 
reference.Named, srcDigest digest.Digest) error {
+func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo 
reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
        u := url.URL{
                Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
                RawQuery: url.Values{
@@ -228,7 +229,7 @@
        }
        mountPath := u.String()
        logrus.Debugf("Trying to mount %s", mountPath)
-       res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
+       res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, 
extraScope)
        if err != nil {
                return err
        }
@@ -246,7 +247,7 @@
                        return errors.Wrap(err, "Error determining upload URL 
after a mount attempt")
                }
                logrus.Debugf("... started an upload instead of mounting, 
trying to cancel at %s", uploadLocation.String())
-               res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", 
uploadLocation.String(), nil, nil, -1, v2Auth)
+               res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", 
uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
                if err != nil {
                        logrus.Debugf("Error trying to cancel an inadvertent 
upload: %s", err)
                } else {
@@ -276,7 +277,7 @@
        }
 
        // First, check whether the blob happens to already exist at the 
destination.
-       exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
+       exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
        if err != nil {
                return false, types.BlobInfo{}, err
        }
@@ -286,15 +287,6 @@
        }
 
        // Then try reusing blobs from other locations.
-
-       // Checking candidateRepo, and mounting from it, requires an expanded 
token scope.
-       // We still want to reuse the ping information and other aspects of the 
client, so rather than make a fresh copy, there is this a bit ugly extraScope 
hack.
-       if d.c.extraScope != nil {
-               return false, types.BlobInfo{}, errors.New("Internal error: 
dockerClient.extraScope was set before TryReusingBlob")
-       }
-       defer func() {
-               d.c.extraScope = nil
-       }()
        for _, candidate := range cache.CandidateLocations(d.ref.Transport(), 
bicTransportScope(d.ref), info.Digest, canSubstitute) {
                candidateRepo, err := 
parseBICLocationReference(candidate.Location)
                if err != nil {
@@ -314,7 +306,10 @@
                }
 
                // Whatever happens here, don't abort the entire operation.  
It's likely we just don't have permissions, and if it is a critical network 
error, we will find out soon enough anyway.
-               d.c.extraScope = &authScope{
+
+               // Checking candidateRepo, and mounting from it, requires an
+               // expanded token scope.
+               extraScope := &authScope{
                        remoteName: reference.Path(candidateRepo),
                        actions:    "pull",
                }
@@ -325,7 +320,7 @@
                // Even worse, docker/distribution does not actually reasonably 
implement canceling uploads
                // (it would require a "delete" action in the token, and Quay 
does not give that to anyone, so we can't ask);
                // so, be a nice client and don't create unnecessary upload sessions on the server.
-               exists, size, err := d.blobExists(ctx, candidateRepo, 
candidate.Digest)
+               exists, size, err := d.blobExists(ctx, candidateRepo, 
candidate.Digest, extraScope)
                if err != nil {
                        logrus.Debugf("... Failed: %v", err)
                        continue
@@ -335,7 +330,7 @@
                        continue // logrus.Debug() already happened in 
blobExists
                }
                if candidateRepo.Name() != d.ref.ref.Name() {
-                       if err := d.mountBlob(ctx, candidateRepo, 
candidate.Digest); err != nil {
+                       if err := d.mountBlob(ctx, candidateRepo, 
candidate.Digest, extraScope); err != nil {
                                logrus.Debugf("... Mount failed: %v", err)
                                continue
                        }
@@ -369,7 +364,7 @@
        if mimeType != "" {
                headers["Content-Type"] = []string{mimeType}
        }
-       res, err := d.c.makeRequest(ctx, "PUT", path, headers, 
bytes.NewReader(m), v2Auth)
+       res, err := d.c.makeRequest(ctx, "PUT", path, headers, 
bytes.NewReader(m), v2Auth, nil)
        if err != nil {
                return err
        }
@@ -396,14 +391,29 @@
        if !ok || len(errors) == 0 {
                return false
        }
-       ec, ok := errors[0].(errcode.ErrorCoder)
+       err = errors[0]
+       ec, ok := err.(errcode.ErrorCoder)
        if !ok {
                return false
        }
+
+       switch ec.ErrorCode() {
        // ErrorCodeManifestInvalid is returned by OpenShift with 
acceptschema2=false.
+       case v2.ErrorCodeManifestInvalid:
+               return true
        // ErrorCodeTagInvalid is returned by docker/distribution (at least as 
of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
        // when uploading to a tag (because it can’t find a matching tag inside 
the manifest)
-       return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() 
== v2.ErrorCodeTagInvalid
+       case v2.ErrorCodeTagInvalid:
+               return true
+       // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS 
ECR when
+       // uploading an OCI manifest that is (correctly, according to the spec) 
missing
+       // a top-level media type. See libpod issue #1719
+       // FIXME: remove this case when ECR behavior is fixed
+       case errcode.ErrorCodeUnsupported:
+               return strings.Contains(err.Error(), "Invalid JSON syntax")
+       default:
+               return false
+       }
 }
 
 func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures 
[][]byte) error {
@@ -574,7 +584,7 @@
                }
 
                path := fmt.Sprintf(extensionsSignaturePath, 
reference.Path(d.ref.ref), d.manifestDigest.String())
-               res, err := d.c.makeRequest(ctx, "PUT", path, nil, 
bytes.NewReader(body), v2Auth)
+               res, err := d.c.makeRequest(ctx, "PUT", path, nil, 
bytes.NewReader(body), v2Auth, nil)
                if err != nil {
                        return err
                }
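
The isManifestInvalidError rewrite above turns a boolean expression into
a switch so the AWS ECR special case can also inspect the error text. A
compact stand-in for the pattern (simplified types; the real ones come
from docker/distribution's errcode package):

    package main

    import (
        "fmt"
        "strings"
    )

    type errorCode int

    const (
        codeManifestInvalid errorCode = iota
        codeTagInvalid
        codeUnsupported
    )

    type errorCoder interface {
        error
        ErrorCode() errorCode
    }

    type registryError struct {
        code errorCode
        msg  string
    }

    func (e registryError) Error() string        { return e.msg }
    func (e registryError) ErrorCode() errorCode { return e.code }

    func isManifestInvalid(err error) bool {
        ec, ok := err.(errorCoder)
        if !ok {
            return false
        }
        switch ec.ErrorCode() {
        case codeManifestInvalid, codeTagInvalid:
            return true
        case codeUnsupported:
            // ECR workaround: only this message variant counts.
            return strings.Contains(err.Error(), "Invalid JSON syntax")
        default:
            return false
        }
    }

    func main() {
        fmt.Println(isManifestInvalid(registryError{codeUnsupported, "Invalid JSON syntax"}))
    }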
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/docker/docker_image_src.go new/image-1.5/docker/docker_image_src.go
--- old/image-1.3/docker/docker_image_src.go    2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/docker/docker_image_src.go    2019-02-26 15:39:32.000000000 +0100
@@ -89,7 +89,7 @@
        path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), 
tagOrDigest)
        headers := make(map[string][]string)
        headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
-       res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
+       res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
        if err != nil {
                return nil, "", err
        }
@@ -137,7 +137,7 @@
                err  error
        )
        for _, url := range urls {
-               resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, 
nil, -1, noAuth)
+               resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, 
nil, -1, noAuth, nil)
                if err == nil {
                        if resp.StatusCode != http.StatusOK {
                                err = errors.Errorf("error fetching external 
blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -147,10 +147,10 @@
                        break
                }
        }
-       if resp.Body != nil && err == nil {
-               return resp.Body, getBlobSize(resp), nil
+       if err != nil {
+               return nil, 0, err
        }
-       return nil, 0, err
+       return resp.Body, getBlobSize(resp), nil
 }
 
 func getBlobSize(resp *http.Response) int64 {
@@ -176,7 +176,7 @@
 
        path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), 
info.Digest.String())
        logrus.Debugf("Downloading %s", path)
-       res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
+       res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
        if err != nil {
                return nil, 0, err
        }
@@ -340,7 +340,7 @@
                return err
        }
        getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
-       get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
+       get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, 
nil)
        if err != nil {
                return err
        }
@@ -362,7 +362,7 @@
 
        // When retrieving the digest from a registry >= 2.3 use the following 
header:
        //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
-       delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, 
v2Auth)
+       delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, 
v2Auth, nil)
        if err != nil {
                return err
        }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/docker/tarfile/src.go new/image-1.5/docker/tarfile/src.go
--- old/image-1.3/docker/tarfile/src.go 2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/docker/tarfile/src.go 2019-02-26 15:39:32.000000000 +0100
@@ -9,6 +9,7 @@
        "io/ioutil"
        "os"
        "path"
+       "sync"
 
        "github.com/containers/image/internal/tmpdir"
        "github.com/containers/image/manifest"
@@ -21,8 +22,10 @@
 // Source is a partial implementation of types.ImageSource for reading from 
tarPath.
 type Source struct {
        tarPath              string
-       removeTarPathOnClose bool // Remove temp file on close if true
+       removeTarPathOnClose bool      // Remove temp file on close if true
+       cacheDataLock        sync.Once // Atomic way to ensure that 
ensureCachedDataIsPresent is only invoked once
        // The following data is only available after 
ensureCachedDataIsPresent() succeeds
+       cacheDataResult   error         // The return value of 
ensureCachedDataIsPresent, since it should be as safe to cache as the side 
effects
        tarManifest       *ManifestItem // nil if not available yet.
        configBytes       []byte
        configDigest      digest.Digest
@@ -199,43 +202,46 @@
 
 // ensureCachedDataIsPresent loads data necessary for any of the public 
accessors.
 func (s *Source) ensureCachedDataIsPresent() error {
-       if s.tarManifest != nil {
-               return nil
-       }
-
-       // Read and parse manifest.json
-       tarManifest, err := s.loadTarManifest()
-       if err != nil {
-               return err
-       }
+       s.cacheDataLock.Do(func() {
+               // Read and parse manifest.json
+               tarManifest, err := s.loadTarManifest()
+               if err != nil {
+                       s.cacheDataResult = err
+                       return
+               }
 
-       // Check to make sure length is 1
-       if len(tarManifest) != 1 {
-               return errors.Errorf("Unexpected tar manifest.json: expected 1 
item, got %d", len(tarManifest))
-       }
+               // Check to make sure length is 1
+               if len(tarManifest) != 1 {
+                       s.cacheDataResult = errors.Errorf("Unexpected tar 
manifest.json: expected 1 item, got %d", len(tarManifest))
+                       return
+               }
 
-       // Read and parse config.
-       configBytes, err := s.readTarComponent(tarManifest[0].Config)
-       if err != nil {
-               return err
-       }
-       var parsedConfig manifest.Schema2Image // There's a lot of info there, 
but we only really care about layer DiffIDs.
-       if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-               return errors.Wrapf(err, "Error decoding tar config %s", 
tarManifest[0].Config)
-       }
+               // Read and parse config.
+               configBytes, err := s.readTarComponent(tarManifest[0].Config)
+               if err != nil {
+                       s.cacheDataResult = err
+                       return
+               }
+               var parsedConfig manifest.Schema2Image // There's a lot of info 
there, but we only really care about layer DiffIDs.
+               if err := json.Unmarshal(configBytes, &parsedConfig); err != 
nil {
+                       s.cacheDataResult = errors.Wrapf(err, "Error decoding 
tar config %s", tarManifest[0].Config)
+                       return
+               }
 
-       knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
-       if err != nil {
-               return err
-       }
+               knownLayers, err := s.prepareLayerData(&tarManifest[0], 
&parsedConfig)
+               if err != nil {
+                       s.cacheDataResult = err
+                       return
+               }
 
-       // Success; commit.
-       s.tarManifest = &tarManifest[0]
-       s.configBytes = configBytes
-       s.configDigest = digest.FromBytes(configBytes)
-       s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
-       s.knownLayers = knownLayers
-       return nil
+               // Success; commit.
+               s.tarManifest = &tarManifest[0]
+               s.configBytes = configBytes
+               s.configDigest = digest.FromBytes(configBytes)
+               s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+               s.knownLayers = knownLayers
+       })
+       return s.cacheDataResult
 }
 
 // loadTarManifest loads and decodes the manifest.json.
@@ -399,7 +405,7 @@
 
 // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
 func (s *Source) HasThreadSafeGetBlob() bool {
-       return false
+       return true
 }
 
 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 
if unknown).
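
The tarfile change above converts ensureCachedDataIsPresent to the
classic sync.Once idiom: run the expensive load exactly once and cache
its error alongside the parsed data, which is what makes GetBlob safe to
call concurrently. A stripped-down sketch of the same pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    type source struct {
        once   sync.Once
        result error  // cached return value of the one-time load
        data   string // populated only when result == nil
    }

    func (s *source) ensureLoaded() error {
        s.once.Do(func() {
            // Expensive parse; any failure is remembered and returned
            // to every caller, including concurrent ones.
            s.data = "parsed manifest"
            s.result = nil
        })
        return s.result
    }

    func main() {
        var s source
        fmt.Println(s.ensureLoaded(), s.data)
        fmt.Println(s.ensureLoaded(), s.data) // second call: no re-parse
    }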
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/ostree/ostree_src.go new/image-1.5/ostree/ostree_src.go
--- old/image-1.3/ostree/ostree_src.go  2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/ostree/ostree_src.go  2019-02-26 15:39:32.000000000 +0100
@@ -17,7 +17,7 @@
        "github.com/containers/image/types"
        "github.com/containers/storage/pkg/ioutils"
        "github.com/klauspost/pgzip"
-       "github.com/opencontainers/go-digest"
+       digest "github.com/opencontainers/go-digest"
        glib "github.com/ostreedev/ostree-go/pkg/glibobject"
        "github.com/pkg/errors"
        "github.com/vbatts/tar-split/tar/asm"
@@ -313,24 +313,19 @@
        if err != nil {
                return nil, 0, err
        }
-       defer mfz.Close()
        metaUnpacker := storage.NewJSONUnpacker(mfz)
 
        getter, err := newOSTreePathFileGetter(s.repo, branch)
        if err != nil {
+               mfz.Close()
                return nil, 0, err
        }
 
        ots := asm.NewOutputTarStream(getter, metaUnpacker)
 
-       pipeReader, pipeWriter := io.Pipe()
-       go func() {
-               io.Copy(pipeWriter, ots)
-               pipeWriter.Close()
-       }()
-
-       rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
+       rc := ioutils.NewReadCloserWrapper(ots, func() error {
                getter.Close()
+               mfz.Close()
                return ots.Close()
        })
        return rc, layerSize, nil
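
The ostree change above drops an io.Pipe relay goroutine in favor of
wrapping the tar stream once and folding all cleanup into a single
Close. A sketch of the same pattern using only hand-rolled types
(NewReadCloserWrapper itself lives in containers/storage/pkg/ioutils):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    type readCloserWrapper struct {
        io.Reader
        closer func() error
    }

    func (w *readCloserWrapper) Close() error { return w.closer() }

    func main() {
        closed := false
        var rc io.ReadCloser = &readCloserWrapper{
            Reader: strings.NewReader("layer bytes"),
            closer: func() error {
                closed = true // the real code closes getter, mfz and ots here
                return nil
            },
        }
        b, _ := ioutil.ReadAll(rc)
        rc.Close()
        fmt.Println(string(b), closed)
    }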
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/pkg/blobinfocache/memory.go new/image-1.5/pkg/blobinfocache/memory.go
--- old/image-1.3/pkg/blobinfocache/memory.go   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/pkg/blobinfocache/memory.go   2019-02-26 15:39:32.000000000 +0100
@@ -1,6 +1,7 @@
 package blobinfocache
 
 import (
+       "sync"
        "time"
 
        "github.com/containers/image/types"
@@ -17,6 +18,7 @@
 
 // memoryCache implements an in-memory-only BlobInfoCache
 type memoryCache struct {
+       mutex                 *sync.Mutex // synchronizes concurrent accesses
        uncompressedDigests   map[digest.Digest]digest.Digest
        digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}      
       // stores a set of digests for each uncompressed digest
        knownLocations        
map[locationKey]map[types.BICLocationReference]time.Time // stores last known 
existence time for each location reference
@@ -28,6 +30,7 @@
 // Manual users of types.{ImageSource,ImageDestination} might also use this 
instead of a persistent cache.
 func NewMemoryCache() types.BlobInfoCache {
        return &memoryCache{
+               mutex:                 new(sync.Mutex),
                uncompressedDigests:   map[digest.Digest]digest.Digest{},
                digestsByUncompressed: 
map[digest.Digest]map[digest.Digest]struct{}{},
                knownLocations:        
map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -38,6 +41,15 @@
 // May return anyDigest if it is known to be uncompressed.
 // Returns "" if nothing is known about the digest (it may be compressed or 
uncompressed).
 func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) 
digest.Digest {
+       mem.mutex.Lock()
+       defer mem.mutex.Unlock()
+       return mem.uncompressedDigest(anyDigest)
+}
+
+// uncompressedDigest returns an uncompressed digest corresponding to 
anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or 
uncompressed).
+func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) 
digest.Digest {
        if d, ok := mem.uncompressedDigests[anyDigest]; ok {
                return d
        }
@@ -56,6 +68,8 @@
 // because a manifest/config pair exists); otherwise the cache could be 
poisoned and allow substituting unexpected blobs.
 // (Eventually, the DiffIDs in image config could detect the substitution, but 
that may be too late, and not all image formats contain that data.)
 func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, 
uncompressed digest.Digest) {
+       mem.mutex.Lock()
+       defer mem.mutex.Unlock()
        if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous 
!= uncompressed {
                logrus.Warnf("Uncompressed digest for blob %s previously 
recorded as %s, now %s", anyDigest, previous, uncompressed)
        }
@@ -72,6 +86,8 @@
 // RecordKnownLocation records that a blob with the specified digest exists 
within the specified (transport, scope) scope,
 // and can be reused given the opaque location data.
 func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, 
scope types.BICTransportScope, blobDigest digest.Digest, location 
types.BICLocationReference) {
+       mem.mutex.Lock()
+       defer mem.mutex.Unlock()
        key := locationKey{transport: transport.Name(), scope: scope, 
blobDigest: blobDigest}
        locationScope, ok := mem.knownLocations[key]
        if !ok {
@@ -103,11 +119,13 @@
 // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
 // uncompressed digest.
 func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
+       mem.mutex.Lock()
+       defer mem.mutex.Unlock()
        res := []candidateWithTime{}
        res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
        var uncompressedDigest digest.Digest // = ""
        if canSubstitute {
-               if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
+               if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
                        otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
                        for d := range otherDigests {
                                if d != primaryDigest && d != uncompressedDigest {
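
The hunks above split UncompressedDigest into an exported, locking wrapper
and an unexported, lock-free helper because CandidateLocations already
holds the mutex when it needs the lookup, and Go's sync.Mutex is not
reentrant. A minimal self-contained sketch of that pattern follows; the
cache/Get/GetBoth names are illustrative stand-ins, not part of
containers/image:

    package main

    import (
        "fmt"
        "sync"
    )

    type cache struct {
        mutex *sync.Mutex // guards data, as in memoryCache above
        data  map[string]string
    }

    // Get is the exported, thread-safe entry point: take the lock, delegate.
    func (c *cache) Get(key string) string {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        return c.get(key)
    }

    // get assumes the caller already holds c.mutex.
    func (c *cache) get(key string) string {
        return c.data[key]
    }

    // GetBoth also takes the lock, so it must call get, not Get;
    // calling Get here would self-deadlock on the non-reentrant mutex.
    func (c *cache) GetBoth(k1, k2 string) (string, string) {
        c.mutex.Lock()
        defer c.mutex.Unlock()
        return c.get(k1), c.get(k2)
    }

    func main() {
        c := &cache{mutex: new(sync.Mutex), data: map[string]string{"a": "1", "b": "2"}}
        fmt.Println(c.GetBoth("a", "b")) // prints: 1 2
    }
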
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/pkg/sysregistriesv2/system_registries_v2.go new/image-1.5/pkg/sysregistriesv2/system_registries_v2.go
--- old/image-1.3/pkg/sysregistriesv2/system_registries_v2.go   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/pkg/sysregistriesv2/system_registries_v2.go   2019-02-26 15:39:32.000000000 +0100
@@ -53,20 +53,23 @@
        Prefix string `toml:"prefix"`
 }
 
-// backwards compatability to sysregistries v1
-type v1TOMLregistries struct {
+// V1TOMLregistries is for backwards compatibility to sysregistries v1
+type V1TOMLregistries struct {
        Registries []string `toml:"registries"`
 }
 
+// V1TOMLConfig is for backwards compatibility to sysregistries v1
+type V1TOMLConfig struct {
+       Search   V1TOMLregistries `toml:"search"`
+       Insecure V1TOMLregistries `toml:"insecure"`
+       Block    V1TOMLregistries `toml:"block"`
+}
+
 // tomlConfig is the data type used to unmarshal the toml config.
 type tomlConfig struct {
        Registries []Registry `toml:"registry"`
        // backwards compatability to sysregistries v1
-       V1Registries struct {
-               Search   v1TOMLregistries `toml:"search"`
-               Insecure v1TOMLregistries `toml:"insecure"`
-               Block    v1TOMLregistries `toml:"block"`
-       } `toml:"registries"`
+       V1TOMLConfig `toml:"registries"`
 }
 
 // InvalidRegistries represents an invalid registry configurations.  An example
@@ -129,21 +132,21 @@
 
        // Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
        // if one of the search registries is also in one of the other lists.
-       for _, search := range config.V1Registries.Search.Registries {
+       for _, search := range config.V1TOMLConfig.Search.Registries {
                reg, err := getRegistry(search)
                if err != nil {
                        return nil, err
                }
                reg.Search = true
        }
-       for _, blocked := range config.V1Registries.Block.Registries {
+       for _, blocked := range config.V1TOMLConfig.Block.Registries {
                reg, err := getRegistry(blocked)
                if err != nil {
                        return nil, err
                }
                reg.Blocked = true
        }
-       for _, insecure := range config.V1Registries.Insecure.Registries {
+       for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
                reg, err := getRegistry(insecure)
                if err != nil {
                        return nil, err
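
The sysregistriesv2 change above replaces an anonymous nested struct with
the exported, embedded V1TOMLConfig. Because the embedded field keeps the
`toml:"registries"` tag, existing v1 configuration files unmarshal exactly
as before, while the type becomes reusable by other packages; the fields
are then addressed through the type name, as in config.V1TOMLConfig.Search
(plain config.Search also works, since embedding promotes the fields).
A small sketch, assuming the github.com/BurntSushi/toml decoder that
containers/image uses:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    type V1TOMLregistries struct {
        Registries []string `toml:"registries"`
    }

    type V1TOMLConfig struct {
        Search   V1TOMLregistries `toml:"search"`
        Insecure V1TOMLregistries `toml:"insecure"`
    }

    type tomlConfig struct {
        V1TOMLConfig `toml:"registries"` // embedded, tagged: decodes like the old anonymous struct
    }

    func main() {
        data := `
    [registries.search]
    registries = ["registry.example.com"]
    [registries.insecure]
    registries = ["insecure.example.com"]
    `
        var config tomlConfig
        if _, err := toml.Decode(data, &config); err != nil {
            panic(err)
        }
        fmt.Println(config.V1TOMLConfig.Search.Registries) // [registry.example.com]
    }
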
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/storage/storage_image.go new/image-1.5/storage/storage_image.go
--- old/image-1.3/storage/storage_image.go      2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/storage/storage_image.go      2019-02-26 15:39:32.000000000 +0100
@@ -14,6 +14,7 @@
        "sync"
        "sync/atomic"
 
+       "github.com/containers/image/docker/reference"
        "github.com/containers/image/image"
        "github.com/containers/image/internal/tmpdir"
        "github.com/containers/image/manifest"
@@ -70,6 +71,13 @@
        size int64
 }
 
+// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
+// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
+// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
+func manifestBigDataKey(digest digest.Digest) string {
+       return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+}
+
 // newImageSource sets up an image for reading.
 func newImageSource(imageRef storageReference) (*storageImageSource, error) {
        // First, locate the image.
@@ -177,12 +185,29 @@
                return nil, "", ErrNoManifestLists
        }
        if len(s.cachedManifest) == 0 {
-               // We stored the manifest as an item named after storage.ImageDigestBigDataKey.
-               cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
-               if err != nil {
-                       return nil, "", err
+               // The manifest is stored as a big data item.
+               // Prefer the manifest corresponding to the user-specified digest, if available.
+               if s.imageRef.named != nil {
+                       if digested, ok := s.imageRef.named.(reference.Digested); ok {
+                               key := manifestBigDataKey(digested.Digest())
+                               blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
+                               if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
+                                       return nil, "", err
+                               }
+                               if err == nil {
+                                       s.cachedManifest = blob
+                               }
+                       }
+               }
+               // If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
+               // Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
+               if len(s.cachedManifest) == 0 {
+                       cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
+                       if err != nil {
+                               return nil, "", err
+                       }
+                       s.cachedManifest = cachedBlob
                }
-               s.cachedManifest = cachedBlob
        }
        return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
 }
@@ -660,6 +685,7 @@
                }
                lastLayer = layer.ID
        }
+
        // If one of those blobs was a configuration blob, then we can try to dig out the date when the image
        // was originally created, in case we're just copying it.  If not, no harm done.
        options := &storage.ImageOptions{}
@@ -667,9 +693,6 @@
                logrus.Debugf("setting image creation date to %s", 
inspect.Created)
                options.CreationDate = *inspect.Created
        }
-       if manifestDigest, err := manifest.Digest(s.manifest); err == nil {
-               options.Digest = manifestDigest
-       }
        // Create the image record, pointing to the most-recently added layer.
        intendedID := s.imageRef.id
        if intendedID == "" {
@@ -735,8 +758,20 @@
                }
                logrus.Debugf("set names of image %q to %v", img.ID, names)
        }
-       // Save the manifest.  Use storage.ImageDigestBigDataKey as the item's
-       // name, so that its digest can be used to locate the image in the Store.
+       // Save the manifest.  Allow looking it up by digest by using the key convention defined by the Store.
+       // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
+       // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
+       manifestDigest, err := manifest.Digest(s.manifest)
+       if err != nil {
+               return errors.Wrapf(err, "error computing manifest digest")
+       }
+       if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil {
+               if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
+                       logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
+               }
+               logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
+               return err
+       }
        if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
                if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
                        logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
@@ -788,9 +823,21 @@
 }
 
 // PutManifest writes the manifest to the destination.
-func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
-       s.manifest = make([]byte, len(manifest))
-       copy(s.manifest, manifest)
+func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
+       if s.imageRef.named != nil {
+               if digested, ok := s.imageRef.named.(reference.Digested); ok {
+                       matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest())
+                       if err != nil {
+                               return err
+                       }
+                       if !matches {
+                               return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest())
+                       }
+               }
+       }
+
+       s.manifest = make([]byte, len(manifestBlob))
+       copy(s.manifest, manifestBlob)
        return nil
 }
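
Taken together, the storage_image.go hunks above record each manifest
under a digest-specific big-data key in addition to the legacy
storage.ImageDigestBigDataKey, and make PutManifest reject a manifest
whose digest does not match a digest-pinned reference. A simplified
sketch of both ideas using github.com/opencontainers/go-digest, which
containers/image builds on; manifestKeyPrefix and the helper names are
illustrative, and the real manifest.MatchesDigest additionally copes
with Docker schema 1 signatures:

    package main

    import (
        "fmt"

        "github.com/opencontainers/go-digest"
    )

    // Illustrative stand-in for storage.ImageDigestManifestBigDataNamePrefix.
    const manifestKeyPrefix = "manifest"

    // manifestKey mirrors manifestBigDataKey: one storage key per manifest digest.
    func manifestKey(d digest.Digest) string {
        return manifestKeyPrefix + "-" + d.String()
    }

    // verifyManifest mirrors the PutManifest check, simplified: refuse a
    // blob whose actual digest differs from the digest the user asked for.
    func verifyManifest(blob []byte, expected digest.Digest) error {
        if actual := digest.FromBytes(blob); actual != expected {
            return fmt.Errorf("manifest does not match expected digest %s (got %s)", expected, actual)
        }
        return nil
    }

    func main() {
        blob := []byte(`{"schemaVersion": 2}`)
        d := digest.FromBytes(blob)
        fmt.Println("store under:", manifestKey(d))
        fmt.Println("verified:", verifyManifest(blob, d) == nil) // true
    }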
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/storage/storage_reference.go new/image-1.5/storage/storage_reference.go
--- old/image-1.3/storage/storage_reference.go  2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/storage/storage_reference.go  2019-02-26 15:39:32.000000000 +0100
@@ -55,7 +55,7 @@
 // one present with the same name or ID, and return the image.
 func (s *storageReference) resolveImage() (*storage.Image, error) {
        var loadedImage *storage.Image
-       if s.id == "" {
+       if s.id == "" && s.named != nil {
                // Look for an image that has the expanded reference name as an explicit Name value.
                image, err := s.transport.store.Image(s.named.String())
                if image != nil && err == nil {
@@ -69,7 +69,7 @@
                        // though possibly with a different tag or digest, as a Name value, so
                        // that the canonical reference can be implicitly resolved to the image.
                        images, err := s.transport.store.ImagesByDigest(digested.Digest())
-                       if images != nil && err == nil {
+                       if err == nil && len(images) > 0 {
                                for _, image := range images {
                                        if imageMatchesRepo(image, s.named) {
                                                loadedImage = image
@@ -97,6 +97,24 @@
                        return nil, ErrNoSuchImage
                }
        }
+       // Default to having the image digest that we hand back match the most recently
+       // added manifest...
+       if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
+               loadedImage.Digest = digest
+       }
+       // ... unless the named reference says otherwise, and it matches one of the digests
+       // in the image.  For those cases, set the Digest field to that value, for the
+       // sake of older consumers that don't know there's a whole list in there now.
+       if s.named != nil {
+               if digested, ok := s.named.(reference.Digested); ok {
+                       for _, digest := range loadedImage.Digests {
+                               if digest == digested.Digest() {
+                                       loadedImage.Digest = digest
+                                       break
+                               }
+                       }
+               }
+       }
        return loadedImage, nil
 }
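
The resolveImage change above does two things: every use of s.named is now
guarded by a nil check, since a storageReference may carry only an image ID
and no name at all, and the returned image's Digest field is backfilled for
older consumers that predate multi-manifest images. A pared-down sketch of
the nil-check-then-type-assert idiom; Named and Digested here are tiny
stand-ins for the docker/reference interfaces, not the real types:

    package main

    import "fmt"

    type Named interface{ Name() string }
    type Digested interface{ Digest() string }

    type tagged struct{ name string }

    func (t tagged) Name() string { return t.name }

    type canonical struct{ name, dgst string }

    func (c canonical) Name() string   { return c.name }
    func (c canonical) Digest() string { return c.dgst }

    // wantedDigest returns the digest the user pinned, if any.
    func wantedDigest(named Named) (string, bool) {
        if named == nil { // ID-only reference: nothing to assert on
            return "", false
        }
        digested, ok := named.(Digested) // only canonical references carry a digest
        if !ok {
            return "", false
        }
        return digested.Digest(), true
    }

    func main() {
        fmt.Println(wantedDigest(nil))                                        // "" false
        fmt.Println(wantedDigest(tagged{"example.com/app:latest"}))           // "" false
        fmt.Println(wantedDigest(canonical{"example.com/app", "sha256:abc"})) // sha256:abc true
    }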
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/storage/storage_transport.go new/image-1.5/storage/storage_transport.go
--- old/image-1.3/storage/storage_transport.go  2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/storage/storage_transport.go  2019-02-26 15:39:32.000000000 +0100
@@ -284,11 +284,6 @@
                }
        }
        if sref, ok := ref.(*storageReference); ok {
-               if sref.id != "" {
-                       if img, err := store.Image(sref.id); err == nil {
-                               return img, nil
-                       }
-               }
                tmpRef := *sref
                if img, err := tmpRef.resolveImage(); err == nil {
                        return img, nil
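
The storage_transport.go hunk removes a now-redundant fast path: since
resolveImage (above) handles ID-only references itself, GetStoreImage can
funnel every reference through the one resolver. A tiny sketch of that
collapse-the-duplicate-lookup refactor, with hypothetical resolver names
rather than the real c/image signatures:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("no such image")

    type ref struct{ id, name string }

    // resolve is the single lookup path, handling both ID and name,
    // the role resolveImage plays after this change.
    func resolve(r ref, byID, byName map[string]string) (string, error) {
        if r.id != "" {
            if img, ok := byID[r.id]; ok {
                return img, nil
            }
            return "", errNotFound
        }
        if img, ok := byName[r.name]; ok {
            return img, nil
        }
        return "", errNotFound
    }

    // getStoreImage previously duplicated the byID lookup before delegating;
    // now it only delegates, mirroring the removed hunk.
    func getStoreImage(r ref, byID, byName map[string]string) (string, error) {
        return resolve(r, byID, byName)
    }

    func main() {
        byID := map[string]string{"abc123": "image-by-id"}
        byName := map[string]string{"example.com/app:latest": "image-by-name"}
        fmt.Println(getStoreImage(ref{id: "abc123"}, byID, byName))
        fmt.Println(getStoreImage(ref{name: "example.com/app:latest"}, byID, byName))
    }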
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/vendor.conf new/image-1.5/vendor.conf
--- old/image-1.3/vendor.conf   2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/vendor.conf   2019-02-26 15:39:32.000000000 +0100
@@ -28,7 +28,6 @@
 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
-gopkg.in/cheggaaa/pb.v1 v1.0.27
 gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
 k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
 github.com/xeipuuv/gojsonschema master
@@ -48,3 +47,6 @@
 github.com/klauspost/pgzip v1.2.1
 github.com/klauspost/compress v1.4.1
 github.com/klauspost/cpuid v1.2.0
+github.com/vbauerster/mpb v3.3.4
+github.com/mattn/go-isatty v0.0.4
+github.com/VividCortex/ewma v1.1.1
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/image-1.3/version/version.go new/image-1.5/version/version.go
--- old/image-1.3/version/version.go    2018-12-21 15:36:20.000000000 +0100
+++ new/image-1.5/version/version.go    2019-02-26 15:39:32.000000000 +0100
@@ -8,10 +8,10 @@
        // VersionMinor is for functionality in a backwards-compatible manner
        VersionMinor = 1
        // VersionPatch is for backwards-compatible bug fixes
-       VersionPatch = 0
+       VersionPatch = 5
 
        // VersionDev indicates development branch. Releases will be empty string.
-       VersionDev = "-dev"
+       VersionDev = ""
 )
 
 // Version is the specification version that the package types support.

++++++ libpod-1.1.0.tar.xz -> libpod-1.2.0.tar.xz ++++++
++++ 52866 lines of diff (skipped)

++++++ storage-1.10.tar.xz -> storage-1.12.1.tar.xz ++++++
++++ 9629 lines of diff (skipped)

