Hello community,

here is the log from the commit of package docker for openSUSE:Factory checked in at 2019-06-24 21:47:17
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/docker (Old)
 and      /work/SRC/openSUSE:Factory/.docker.new.4615 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "docker"

Mon Jun 24 21:47:17 2019 rev:90 rq:708941 version:18.09.6_ce

Changes:
--------
--- /work/SRC/openSUSE:Factory/docker/docker.changes    2019-05-06 21:14:36.256648084 +0200
+++ /work/SRC/openSUSE:Factory/.docker.new.4615/docker.changes  2019-06-24 21:47:19.635825748 +0200
@@ -1,0 +2,6 @@
+Fri Jun  7 08:36:17 UTC 2019 - Aleksa Sarai <[email protected]>
+
+- Add patch for CVE-2018-15664. bsc#1096726
+  + CVE-2018-15664.patch
+
+-------------------------------------------------------------------

New:
----
  CVE-2018-15664.patch

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ docker.spec ++++++
--- /var/tmp/diff_new_pack.u4ZNQI/_old  2019-06-24 21:47:22.591827979 +0200
+++ /var/tmp/diff_new_pack.u4ZNQI/_new  2019-06-24 21:47:22.627828006 +0200
@@ -87,6 +87,8 @@
 Patch404:       bsc1001161-0001-oci-include-the-domainname-in-kernel.domainname.patch
 # SUSE-BACKPORT: Backport of https://github.com/docker/cli/pull/1130. bsc#1001161
 Patch405:       bsc1001161-0002-cli-add-a-separate-domainname-flag.patch
+# SUSE-BACKPORT: Backport of https://github.com/moby/moby/pull/39292. CVE-2018-15664 bsc#1096726
+Patch406:       CVE-2018-15664.patch
 # SUSE-FEATURE: Add support to mirror inofficial/private registries
 #               (https://github.com/docker/docker/pull/34319)
 Patch500:       private-registry-0001-Add-private-registry-mirror-support.patch
@@ -273,6 +275,8 @@
 # bsc#1001161
 %patch404 -p1
 %patch405 -p1
+#  CVE-2018-15664 bsc#1096726
+%patch406 -p1
 %if "%flavour" == "kubic"
 # PATCH-SUSE: Mirror patch.
 %patch500 -p1

++++++ CVE-2018-15664.patch ++++++
From 33df3c9fb60ed22422c101e3fd618d5eb284c199 Mon Sep 17 00:00:00 2001
From: Brian Goff <[email protected]>
Date: Thu, 30 May 2019 11:15:09 -0700
Subject: [PATCH] CVE-2018-15664

This is a backported fix of the following upstream commits:

 * d089b639372a ("Pass root to chroot to for chroot Untar")
 * 3029e765e241 ("Add chroot for tar packing operations")

Signed-off-by: Brian Goff <[email protected]>
Signed-off-by: Aleksa Sarai <[email protected]>
---
 components/engine/daemon/archive.go           |  15 +-
 components/engine/daemon/export.go            |   2 +-
 .../engine/pkg/chrootarchive/archive.go       |  32 +++-
 .../engine/pkg/chrootarchive/archive_unix.go  | 130 ++++++++++++-
 .../pkg/chrootarchive/archive_windows.go      |   9 +-
 .../engine/pkg/chrootarchive/init_unix.go     |   1 +
 pkg/chrootarchive/archive_unix_test.go        | 171 ++++++++++++++++++
 7 files changed, 342 insertions(+), 18 deletions(-)
 create mode 100644 pkg/chrootarchive/archive_unix_test.go
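
For context, CVE-2018-15664 is a time-of-check/time-of-use problem in `docker cp`: the daemon resolved symlinks inside the container's filesystem first and only then archived or extracted the already-resolved path, so a container could swap a path component for a symlink in that window and redirect reads or writes onto the host. The standalone sketch below (illustration only, with hypothetical paths, not code from this patch) shows the symlink-following escape that chrooting into the container root is designed to prevent.

package main

// Demonstrates the core of the problem: if a destination path is joined or
// resolved inside an attacker-controlled tree, a symlink can redirect the
// write outside of that tree.

import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
)

func main() {
        host, _ := ioutil.TempDir("", "host")     // stands in for the host filesystem
        rootfs, _ := ioutil.TempDir("", "rootfs") // stands in for the container rootfs
        defer os.RemoveAll(host)
        defer os.RemoveAll(rootfs)

        // The "container" makes rootfs/etc a symlink pointing at a host directory.
        os.Symlink(host, filepath.Join(rootfs, "etc"))

        // A naive copy into the container follows the symlink onto the host.
        target := filepath.Join(rootfs, "etc", "payload")
        ioutil.WriteFile(target, []byte("pwn3d"), 0644)

        entries, _ := ioutil.ReadDir(host)
        for _, e := range entries {
                fmt.Println("escaped onto host:", e.Name()) // prints "payload"
        }
}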

diff --git a/components/engine/daemon/archive.go b/components/engine/daemon/archive.go
index 9c7971b56ea3..109376b4b566 100644
--- a/components/engine/daemon/archive.go
+++ b/components/engine/daemon/archive.go
@@ -31,18 +31,19 @@ type archiver interface {
 }
 
 // helper functions to extract or archive
-func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions, root string) error {
        if ea, ok := i.(extractor); ok {
                return ea.ExtractArchive(src, dst, opts)
        }
-       return chrootarchive.Untar(src, dst, opts)
+
+       return chrootarchive.UntarWithRoot(src, dst, opts, root)
 }
 
-func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+func archivePath(i interface{}, src string, opts *archive.TarOptions, root string) (io.ReadCloser, error) {
        if ap, ok := i.(archiver); ok {
                return ap.ArchivePath(src, opts)
        }
-       return archive.TarWithOptions(src, opts)
+       return chrootarchive.Tar(src, opts, root)
 }
 
 // ContainerCopy performs a deprecated operation of archiving the resource at
@@ -238,7 +239,7 @@ func (daemon *Daemon) containerArchivePath(container *container.Container, path
        sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
        opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
 
-       data, err := archivePath(driver, sourceDir, opts)
+       data, err := archivePath(driver, sourceDir, opts, container.BaseFS.Path())
        if err != nil {
                return nil, nil, err
        }
@@ -367,7 +368,7 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
                }
        }
 
-       if err := extractArchive(driver, content, resolvedPath, options); err != nil {
+       if err := extractArchive(driver, content, resolvedPath, options, container.BaseFS.Path()); err != nil {
                return err
        }
 
@@ -432,7 +433,7 @@ func (daemon *Daemon) containerCopy(container *container.Container, resource str
        archive, err := archivePath(driver, basePath, &archive.TarOptions{
                Compression:  archive.Uncompressed,
                IncludeFiles: filter,
-       })
+       }, container.BaseFS.Path())
        if err != nil {
                return nil, err
        }
diff --git a/components/engine/daemon/export.go b/components/engine/daemon/export.go
index 27bc35967d22..01593f4e8a4f 100644
--- a/components/engine/daemon/export.go
+++ b/components/engine/daemon/export.go
@@ -70,7 +70,7 @@ func (daemon *Daemon) containerExport(container *container.Container) (arch io.R
                Compression: archive.Uncompressed,
                UIDMaps:     daemon.idMapping.UIDs(),
                GIDMaps:     daemon.idMapping.GIDs(),
-       })
+       }, basefs.Path())
        if err != nil {
                rwlayer.Unmount()
                return nil, err
diff --git a/components/engine/pkg/chrootarchive/archive.go b/components/engine/pkg/chrootarchive/archive.go
index 2d9d662830b7..6ff61e6a767a 100644
--- a/components/engine/pkg/chrootarchive/archive.go
+++ b/components/engine/pkg/chrootarchive/archive.go
@@ -27,18 +27,34 @@ func NewArchiver(idMapping *idtools.IdentityMapping) *archive.Archiver {
 // The archive may be compressed with one of the following algorithms:
 //  identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-       return untarHandler(tarArchive, dest, options, true)
+       return untarHandler(tarArchive, dest, options, true, dest)
+}
+
+// UntarWithRoot is the same as `Untar`, but allows you to pass in a root directory.
+// The root directory is the directory that will be chrooted to.
+// `dest` must be a path within `root`; if it is not, an error will be returned.
+//
+// `root` should be set to a directory which is not controlled by any potentially
+// malicious process.
+//
+// This should be used to prevent a potential attacker from manipulating `dest`
+// such that it would provide access to files outside of `dest` through things
+// like symlinks. Normally `ResolveSymlinksInScope` would handle this; however,
+// sanitizing symlinks in this manner is inherently racy:
+// ref: CVE-2018-15664
+func UntarWithRoot(tarArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+       return untarHandler(tarArchive, dest, options, true, root)
 }
 
 // UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive must be an uncompressed stream.
 func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-       return untarHandler(tarArchive, dest, options, false)
+       return untarHandler(tarArchive, dest, options, false, dest)
 }
 
 // Handler for teasing out the automatic decompression
-func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool, root string) error {
        if tarArchive == nil {
                return fmt.Errorf("Empty archive")
        }
@@ -69,5 +85,13 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
                r = decompressedArchive
        }
 
-       return invokeUnpack(r, dest, options)
+       return invokeUnpack(r, dest, options, root)
+}
+
+// Tar tars the requested path while chrooted to the specified root.
+func Tar(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+       if options == nil {
+               options = &archive.TarOptions{}
+       }
+       return invokePack(srcPath, options, root)
 }
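
With the hunk above, `UntarWithRoot` and the new `Tar` take the directory to chroot to as an explicit argument, and `dest`/`srcPath` must live underneath it. A minimal sketch of the intended call pattern (hypothetical wrapper functions, assuming only the signatures added above; the real call sites are the daemon/archive.go and daemon/export.go changes earlier in this patch):

package daemonexample // hypothetical package, for illustration only

import (
        "io"

        "github.com/docker/docker/pkg/chrootarchive"
)

// copyIntoContainer: the caller supplies the container's rootfs as root, and
// symlink resolution for dest happens only after chroot(root), so it cannot
// escape onto the host. nil options fall back to defaults.
func copyIntoContainer(content io.Reader, dest, root string) error {
        return chrootarchive.UntarWithRoot(content, dest, nil, root)
}

// copyOutOfContainer: the tar re-exec likewise chroots to root before it
// reads src, so host files cannot leak into the returned archive.
func copyOutOfContainer(src, root string) (io.ReadCloser, error) {
        return chrootarchive.Tar(src, nil, root)
}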
diff --git a/components/engine/pkg/chrootarchive/archive_unix.go b/components/engine/pkg/chrootarchive/archive_unix.go
index 5df8afd66205..ea2879dc002f 100644
--- a/components/engine/pkg/chrootarchive/archive_unix.go
+++ b/components/engine/pkg/chrootarchive/archive_unix.go
@@ -10,10 +10,13 @@ import (
        "io"
        "io/ioutil"
        "os"
+       "path/filepath"
        "runtime"
+       "strings"
 
        "github.com/docker/docker/pkg/archive"
        "github.com/docker/docker/pkg/reexec"
+       "github.com/pkg/errors"
 )
 
 // untar is the entry-point for docker-untar on re-exec. This is not used on
@@ -23,18 +26,28 @@ func untar() {
        runtime.LockOSThread()
        flag.Parse()
 
-       var options *archive.TarOptions
+       var options archive.TarOptions
 
        //read the options from the pipe "ExtraFiles"
        if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
                fatal(err)
        }
 
-       if err := chroot(flag.Arg(0)); err != nil {
+       dst := flag.Arg(0)
+       var root string
+       if len(flag.Args()) > 1 {
+               root = flag.Arg(1)
+       }
+
+       if root == "" {
+               root = dst
+       }
+
+       if err := chroot(root); err != nil {
                fatal(err)
        }
 
-       if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+       if err := archive.Unpack(os.Stdin, dst, &options); err != nil {
                fatal(err)
        }
        // fully consume stdin in case it is zero padded
@@ -45,7 +58,10 @@ func untar() {
        os.Exit(0)
 }
 
-func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions, root string) error {
+       if root == "" {
+               return errors.New("must specify a root to chroot to")
+       }
 
        // We can't pass a potentially large exclude list directly via cmd line
        // because we easily overrun the kernel's max argument/environment size
@@ -57,7 +73,21 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
                return fmt.Errorf("Untar pipe failure: %v", err)
        }
 
-       cmd := reexec.Command("docker-untar", dest)
+       if root != "" {
+               relDest, err := filepath.Rel(root, dest)
+               if err != nil {
+                       return err
+               }
+               if relDest == "." {
+                       relDest = "/"
+               }
+               if relDest[0] != '/' {
+                       relDest = "/" + relDest
+               }
+               dest = relDest
+       }
+
+       cmd := reexec.Command("docker-untar", dest, root)
        cmd.Stdin = decompressedArchive
 
        cmd.ExtraFiles = append(cmd.ExtraFiles, r)
@@ -69,6 +99,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
                w.Close()
                return fmt.Errorf("Untar error on re-exec cmd: %v", err)
        }
+
        //write the options to the pipe for the untar exec to read
        if err := json.NewEncoder(w).Encode(options); err != nil {
                w.Close()
@@ -86,3 +117,92 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
        }
        return nil
 }
+
+func tar() {
+       runtime.LockOSThread()
+       flag.Parse()
+
+       src := flag.Arg(0)
+       var root string
+       if len(flag.Args()) > 1 {
+               root = flag.Arg(1)
+       }
+
+       if root == "" {
+               root = src
+       }
+
+       if err := realChroot(root); err != nil {
+               fatal(err)
+       }
+
+       var options archive.TarOptions
+       if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
+               fatal(err)
+       }
+
+       rdr, err := archive.TarWithOptions(src, &options)
+       if err != nil {
+               fatal(err)
+       }
+       defer rdr.Close()
+
+       if _, err := io.Copy(os.Stdout, rdr); err != nil {
+               fatal(err)
+       }
+
+       os.Exit(0)
+}
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+       if root == "" {
+               return nil, errors.New("root path must not be empty")
+       }
+
+       relSrc, err := filepath.Rel(root, srcPath)
+       if err != nil {
+               return nil, err
+       }
+       if relSrc == "." {
+               relSrc = "/"
+       }
+       if relSrc[0] != '/' {
+               relSrc = "/" + relSrc
+       }
+
+       // make sure we didn't trim a trailing slash with the call to `Rel`
+       if strings.HasSuffix(srcPath, "/") && !strings.HasSuffix(relSrc, "/") {
+               relSrc += "/"
+       }
+
+       cmd := reexec.Command("docker-tar", relSrc, root)
+
+       errBuff := bytes.NewBuffer(nil)
+       cmd.Stderr = errBuff
+
+       tarR, tarW := io.Pipe()
+       cmd.Stdout = tarW
+
+       stdin, err := cmd.StdinPipe()
+       if err != nil {
+               return nil, errors.Wrap(err, "error getting options pipe for tar process")
+       }
+
+       if err := cmd.Start(); err != nil {
+               return nil, errors.Wrap(err, "tar error on re-exec cmd")
+       }
+
+       go func() {
+               err := cmd.Wait()
+               err = errors.Wrapf(err, "error processing tar file: %s", errBuff)
+               tarW.CloseWithError(err)
+       }()
+
+       if err := json.NewEncoder(stdin).Encode(options); err != nil {
+               stdin.Close()
+               return nil, errors.Wrap(err, "tar json encode to pipe failed")
+       }
+       stdin.Close()
+
+       return tarR, nil
+}
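
invokePack above wires the child's stdout into an io.Pipe and propagates any re-exec failure through CloseWithError, so the consumer simply sees the error on a later Read instead of a silently truncated archive. A small self-contained sketch of that pattern (generic example, not the docker code; it reuses the github.com/pkg/errors package the patch already imports):

package main

import (
        "fmt"
        "io"
        "io/ioutil"
        "os/exec"

        "github.com/pkg/errors"
)

// streamCommand returns the command's stdout as a stream; if the command
// fails, the wrapped error is delivered to the reader via CloseWithError.
func streamCommand(name string, args ...string) io.ReadCloser {
        cmd := exec.Command(name, args...)
        pr, pw := io.Pipe()
        cmd.Stdout = pw

        go func() {
                // errors.Wrap(nil, ...) returns nil, so a clean exit closes
                // the pipe normally and the reader just sees io.EOF.
                pw.CloseWithError(errors.Wrap(cmd.Run(), "re-exec failed"))
        }()
        return pr
}

func main() {
        out, err := ioutil.ReadAll(streamCommand("echo", "hello"))
        fmt.Printf("%q %v\n", out, err) // "hello\n" <nil>
}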
diff --git a/components/engine/pkg/chrootarchive/archive_windows.go b/components/engine/pkg/chrootarchive/archive_windows.go
index f2973132a391..de87113e9544 100644
--- a/components/engine/pkg/chrootarchive/archive_windows.go
+++ b/components/engine/pkg/chrootarchive/archive_windows.go
@@ -14,9 +14,16 @@ func chroot(path string) error {
 
 func invokeUnpack(decompressedArchive io.ReadCloser,
        dest string,
-       options *archive.TarOptions) error {
+       options *archive.TarOptions, root string) error {
        // Windows is different to Linux here because Windows does not support
        // chroot. Hence there is no point sandboxing a chrooted process to
        // do the unpack. We call inline instead within the daemon process.
        return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
 }
+
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) {
+       // Windows is different to Linux here because Windows does not support
+       // chroot. Hence there is no point sandboxing a chrooted process to
+       // do the pack. We call inline instead within the daemon process.
+       return archive.TarWithOptions(srcPath, options)
+}
diff --git a/components/engine/pkg/chrootarchive/init_unix.go b/components/engine/pkg/chrootarchive/init_unix.go
index a15e4bb83c40..c24fea7d9c13 100644
--- a/components/engine/pkg/chrootarchive/init_unix.go
+++ b/components/engine/pkg/chrootarchive/init_unix.go
@@ -14,6 +14,7 @@ import (
 func init() {
        reexec.Register("docker-applyLayer", applyLayer)
        reexec.Register("docker-untar", untar)
+       reexec.Register("docker-tar", tar)
 }
 
 func fatal(err error) {
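
The "docker-tar" name registered above only takes effect because the process calls reexec.Init() at startup and then re-invokes its own binary via reexec.Command (as seen in invokePack). A minimal generic sketch of that pattern, assuming the pkg/reexec API as used in this patch:

package main

import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/reexec"
)

func init() {
        // Registered handlers run instead of main() when the process is
        // started under this name; the real handlers chroot before working.
        reexec.Register("example-child", func() {
                fmt.Println("running as re-exec'd child")
                os.Exit(0)
        })
}

func main() {
        if reexec.Init() {
                // We were invoked under a registered name; the handler ran.
                return
        }

        // Parent path: re-invoke this same binary under the registered name.
        cmd := reexec.Command("example-child")
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
                fmt.Fprintln(os.Stderr, "child failed:", err)
        }
}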
diff --git a/pkg/chrootarchive/archive_unix_test.go b/pkg/chrootarchive/archive_unix_test.go
new file mode 100644
index 000000000000..f39a88ad3814
--- /dev/null
+++ b/pkg/chrootarchive/archive_unix_test.go
@@ -0,0 +1,171 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+       gotar "archive/tar"
+       "bytes"
+       "io"
+       "io/ioutil"
+       "os"
+       "path"
+       "path/filepath"
+       "strings"
+       "testing"
+
+       "github.com/docker/docker/pkg/archive"
+       "golang.org/x/sys/unix"
+       "gotest.tools/assert"
+)
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not copy data to a
+// container path that will actually overwrite data on the host
+func TestUntarWithMaliciousSymlinks(t *testing.T) {
+       dir, err := ioutil.TempDir("", t.Name())
+       assert.NilError(t, err)
+       defer os.RemoveAll(dir)
+
+       root := filepath.Join(dir, "root")
+
+       err = os.MkdirAll(root, 0755)
+       assert.NilError(t, err)
+
+       // Add a file into a directory above root
+       // Ensure that we can't access this file while tarring.
+       err = ioutil.WriteFile(filepath.Join(dir, "host-file"), []byte("I am a host file"), 0644)
+       assert.NilError(t, err)
+
+       // Create some data which will be copied into the "container" root into
+       // the symlinked path.
+       // Before this change, the copy would overwrite the "host" content.
+       // With this change it should not.
+       data := filepath.Join(dir, "data")
+       err = os.MkdirAll(data, 0755)
+       assert.NilError(t, err)
+       err = ioutil.WriteFile(filepath.Join(data, "local-file"), []byte("pwn3d"), 0644)
+       assert.NilError(t, err)
+
+       safe := filepath.Join(root, "safe")
+       err = unix.Symlink(dir, safe)
+       assert.NilError(t, err)
+
+       rdr, err := archive.TarWithOptions(data, &archive.TarOptions{IncludeFiles: []string{"local-file"}, RebaseNames: map[string]string{"local-file": "host-file"}})
+       assert.NilError(t, err)
+
+       // Use tee to test both the good case and the bad case w/o recreating the archive
+       bufRdr := bytes.NewBuffer(nil)
+       tee := io.TeeReader(rdr, bufRdr)
+
+       err = UntarWithRoot(tee, safe, nil, root)
+       assert.Assert(t, err != nil)
+       assert.ErrorContains(t, err, "open /safe/host-file: no such file or directory")
+
+       // Make sure the "host" file is still intact
+       // Before the fix the host file would be overwritten
+       hostData, err := ioutil.ReadFile(filepath.Join(dir, "host-file"))
+       assert.NilError(t, err)
+       assert.Equal(t, string(hostData), "I am a host file")
+
+       // Now test by chrooting to an attacker controlled path
+       // This should succeed as is and overwrite a "host" file
+       // Note that this would be a mis-use of this function.
+       err = UntarWithRoot(bufRdr, safe, nil, safe)
+       assert.NilError(t, err)
+
+       hostData, err = ioutil.ReadFile(filepath.Join(dir, "host-file"))
+       assert.NilError(t, err)
+       assert.Equal(t, string(hostData), "pwn3d")
+}
+
+// Test for CVE-2018-15664
+// Assures that in the case where an "attacker" controlled path is a symlink to
+// some path outside of a container's rootfs that we do not unwittingly leak
+// host data into the archive.
+func TestTarWithMaliciousSymlinks(t *testing.T) {
+       dir, err := ioutil.TempDir("", t.Name())
+       assert.NilError(t, err)
+       // defer os.RemoveAll(dir)
+       t.Log(dir)
+
+       root := filepath.Join(dir, "root")
+
+       err = os.MkdirAll(root, 0755)
+       assert.NilError(t, err)
+
+       hostFileData := []byte("I am a host file")
+
+       // Add a file into a directory above root
+       // Ensure that we can't access this file while tarring.
+       err = ioutil.WriteFile(filepath.Join(dir, "host-file"), hostFileData, 0644)
+       assert.NilError(t, err)
+
+       safe := filepath.Join(root, "safe")
+       err = unix.Symlink(dir, safe)
+       assert.NilError(t, err)
+
+       data := filepath.Join(dir, "data")
+       err = os.MkdirAll(data, 0755)
+       assert.NilError(t, err)
+
+       type testCase struct {
+               p        string
+               includes []string
+       }
+
+       cases := []testCase{
+               {p: safe, includes: []string{"host-file"}},
+               {p: safe + "/", includes: []string{"host-file"}},
+               {p: safe, includes: nil},
+               {p: safe + "/", includes: nil},
+               {p: root, includes: []string{"safe/host-file"}},
+               {p: root, includes: []string{"/safe/host-file"}},
+               {p: root, includes: nil},
+       }
+
+       maxBytes := len(hostFileData)
+
+       for _, tc := range cases {
+               t.Run(path.Join(tc.p+"_"+strings.Join(tc.includes, "_")), func(t *testing.T) {
+                       // Here if we use archive.TarWithOptions directly or change the "root" parameter
+                       // to be the same as "safe", data from the host will be leaked into the archive
+                       var opts *archive.TarOptions
+                       if tc.includes != nil {
+                               opts = &archive.TarOptions{
+                                       IncludeFiles: tc.includes,
+                               }
+                       }
+                       rdr, err := Tar(tc.p, opts, root)
+                       assert.NilError(t, err)
+                       defer rdr.Close()
+
+                       tr := gotar.NewReader(rdr)
+                       assert.Assert(t, !isDataInTar(t, tr, hostFileData, int64(maxBytes)), "host data leaked to archive")
+               })
+       }
+}
+
+func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+       for {
+               h, err := tr.Next()
+               if err == io.EOF {
+                       break
+               }
+               assert.NilError(t, err)
+
+               if h.Size == 0 {
+                       continue
+               }
+               assert.Assert(t, h.Size <= maxBytes, "%s: file size exceeds max expected size %d: %d", h.Name, maxBytes, h.Size)
+
+               data := make([]byte, int(h.Size))
+               _, err = io.ReadFull(tr, data)
+               assert.NilError(t, err)
+               if bytes.Contains(data, compare) {
+                       return true
+               }
+       }
+
+       return false
+}
-- 
2.21.0

