Hello community,
here is the log from the commit of package docker-distribution for
openSUSE:Factory checked in at 2016-05-29 03:12:40
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/docker-distribution (Old)
and /work/SRC/openSUSE:Factory/.docker-distribution.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "docker-distribution"
Changes:
--------
--- /work/SRC/openSUSE:Factory/docker-distribution/docker-distribution.changes
2016-04-28 16:59:04.000000000 +0200
+++
/work/SRC/openSUSE:Factory/.docker-distribution.new/docker-distribution.changes
2016-05-29 03:13:38.000000000 +0200
@@ -1,0 +2,20 @@
+Wed May 18 20:13:04 UTC 2016 - [email protected]
+
+- Updated to v2.4.1. It contains the following fixes:
+
+93d7624 Preserve author information in schema1 manifests
+ba672e8 When a blob upload is committed prevent writing out hashstate in the subsequent close.
+96230de Add a test with a missing _manifests directory
+c0d3813 Move garbage collect code into storage package
+011b7e4 Ensure GC continues marking if _manifests is nonexistent
+0a1fcf9 Fix wording for dry-run flag in usage message for garbage collector.
+ed02e88 Sorting completed parts by part number for a better accordance with the S3 spec
+fd5a404 Add blobWriter.Close() call into blobWriter.Commit()
+3f538ca add cn-north-1 to valid check
+3330cc5 wait for DLO segments to show up when Close()ing the writer
+775d096 Use correct media type for config blob in schema2 manifest
+64a9727 Only check validity of S3 region if not using custom endpoint
+dafb59f Ensure we log io.Copy errors and bytes copied/total in uploads
+431e46a GCS: FileWriter.Size: return offset + buffer size for Writers that are not closed
+
+-------------------------------------------------------------------
Old:
----
distribution-2.4.0.tar.xz
New:
----
distribution-2.4.1.tar.xz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ docker-distribution.spec ++++++
--- /var/tmp/diff_new_pack.S2iMgg/_old 2016-05-29 03:13:39.000000000 +0200
+++ /var/tmp/diff_new_pack.S2iMgg/_new 2016-05-29 03:13:39.000000000 +0200
@@ -17,7 +17,7 @@
Name: docker-distribution
-Version: 2.4.0
+Version: 2.4.1
Release: 0
Summary: The Docker toolset to pack, ship, store, and deliver content
License: Apache-2.0
++++++ _service ++++++
--- /var/tmp/diff_new_pack.S2iMgg/_old 2016-05-29 03:13:39.000000000 +0200
+++ /var/tmp/diff_new_pack.S2iMgg/_new 2016-05-29 03:13:39.000000000 +0200
@@ -3,8 +3,8 @@
<param name="url">https://github.com/docker/distribution.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
- <param name="versionformat">2.4.0</param>
- <param name="revision">v2.4.0</param>
+ <param name="versionformat">2.4.1</param>
+ <param name="revision">v2.4.1</param>
</service>
<service name="recompress" mode="disabled">
<param name="file">distribution-*.tar</param>
++++++ distribution-2.4.0.tar.xz -> distribution-2.4.1.tar.xz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/manifest/schema1/config_builder.go
new/distribution-2.4.1/manifest/schema1/config_builder.go
--- old/distribution-2.4.0/manifest/schema1/config_builder.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/manifest/schema1/config_builder.go 2016-05-18
22:10:44.000000000 +0200
@@ -110,7 +110,8 @@
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
- ThrowAway bool `json:"throwaway,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
}
fsLayerList := make([]FSLayer, len(img.History))
@@ -145,6 +146,7 @@
Parent: parent,
Comment: h.Comment,
Created: h.Created,
+ Author: h.Author,
}
v1Compatibility.ContainerConfig.Cmd =
[]string{img.History[i].CreatedBy}
if h.EmptyLayer {
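
This hunk is the fix for 93d7624: the builder now copies h.Author into each generated v1Compatibility entry, so author information survives conversion into a schema1 manifest. A minimal sketch of the marshalling behaviour, using a cut-down illustrative struct rather than the package's real types:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // v1Compat is a trimmed-down stand-in for the v1Compatibility
    // structure above. With omitempty, "author" only appears in the
    // output when the history entry actually carries one.
    type v1Compat struct {
        ID     string `json:"id"`
        Parent string `json:"parent,omitempty"`
        Author string `json:"author,omitempty"`
    }

    func main() {
        c := v1Compat{
            ID:     "0850bfdeb7b0", // illustrative value
            Author: "Alyssa P. Hacker <alyssa@example.com>",
        }
        b, _ := json.Marshal(c)
        fmt.Println(string(b)) // the author field is preserved
    }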
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/manifest/schema1/config_builder_test.go
new/distribution-2.4.1/manifest/schema1/config_builder_test.go
--- old/distribution-2.4.0/manifest/schema1/config_builder_test.go
2016-04-14 15:11:00.000000000 +0200
+++ new/distribution-2.4.1/manifest/schema1/config_builder_test.go
2016-05-18 22:10:44.000000000 +0200
@@ -163,6 +163,7 @@
"empty_layer": true
},
{
+ "author": "Alyssa P. Hacker \[email protected]\u003e",
"created": "2015-11-04T23:06:32.083868454Z",
"created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024
count=1024"
},
@@ -252,8 +253,8 @@
}
expectedV1Compatibility := []string{
-
`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo
hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop)
CMD [\"/bin/sh\" \"-c\" \"echo
hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"0850bfdeb7b060b1004a09099846c2f023a3f2ecbf33f56b4774384b00ce0323","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`,
-
`{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh
-c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
+
`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo
hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop)
CMD [\"/bin/sh\" \"-c\" \"echo
hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"69e5c1bfadad697fdb6db59f6326648fa119e0c031a0eda33b8cfadcab54ba7f","os":"linux","parent":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","throwaway":true}`,
+
`{"id":"74cf9c92699240efdba1903c2748ef57105d5bedc588084c4e88f3bb1c3ef0b0","parent":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh
-c dd if=/dev/zero of=/file bs=1024 count=1024"]},"author":"Alyssa P. Hacker
\[email protected]\u003e"}`,
`{"id":"178be37afc7c49e951abd75525dbe0871b62ad49402f037164ee6314f754599d","parent":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh
-c #(nop) ENV asdf=true"]},"throwaway":true}`,
`{"id":"b449305a55a283538c4574856a8b701f2a3d5ec08ef8aec47f385f20339a4866","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh
-c #(nop) ENV derived=true"]},"throwaway":true}`,
`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh
-c #(nop) CMD [\"sh\"]"]}}`,
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/manifest/schema2/builder.go
new/distribution-2.4.1/manifest/schema2/builder.go
--- old/distribution-2.4.0/manifest/schema2/builder.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/manifest/schema2/builder.go 2016-05-18
22:10:44.000000000 +0200
@@ -55,6 +55,9 @@
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
+ // Override MediaType, since Put always replaces the specified media
+ // type with application/octet-stream in the descriptor it returns.
+ m.Config.MediaType = MediaTypeConfig
if err != nil {
return nil, err
}
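
The comment added in the hunk carries the reasoning behind 775d096: bs.Put always stamps application/octet-stream on the descriptor it returns, so the builder must restore MediaTypeConfig afterwards, otherwise the manifest would reference its config blob with the wrong media type. A toy reproduction of the pattern, with local stand-ins for the distribution types:

    package main

    import "fmt"

    type Descriptor struct {
        MediaType string
        Size      int64
    }

    // put mimics a blob store whose Put ignores the requested media type
    // and returns application/octet-stream in the descriptor.
    func put(mediaType string, p []byte) (Descriptor, error) {
        return Descriptor{MediaType: "application/octet-stream", Size: int64(len(p))}, nil
    }

    func main() {
        const mediaTypeConfig = "application/vnd.docker.container.image.v1+json"
        desc, err := put(mediaTypeConfig, []byte(`{"os":"linux"}`))
        if err != nil {
            panic(err)
        }
        // Override, as the builder now does, so the manifest references
        // the config blob with its real media type.
        desc.MediaType = mediaTypeConfig
        fmt.Println(desc.MediaType)
    }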
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/manifest/schema2/builder_test.go
new/distribution-2.4.1/manifest/schema2/builder_test.go
--- old/distribution-2.4.0/manifest/schema2/builder_test.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/manifest/schema2/builder_test.go 2016-05-18
22:10:44.000000000 +0200
@@ -32,7 +32,7 @@
d := distribution.Descriptor{
Digest: digest.FromBytes(p),
Size: int64(len(p)),
- MediaType: mediaType,
+ MediaType: "application/octet-stream",
}
bs.descriptors[d.Digest] = d
return d, nil
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/garbagecollect.go
new/distribution-2.4.1/registry/garbagecollect.go
--- old/distribution-2.4.0/registry/garbagecollect.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/garbagecollect.go 1970-01-01
01:00:00.000000000 +0100
@@ -1,187 +0,0 @@
-package registry
-
-import (
- "fmt"
- "os"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
- "github.com/docker/distribution/manifest/schema1"
- "github.com/docker/distribution/manifest/schema2"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage"
- "github.com/docker/distribution/registry/storage/driver"
- "github.com/docker/distribution/registry/storage/driver/factory"
- "github.com/docker/libtrust"
- "github.com/spf13/cobra"
-)
-
-func emit(format string, a ...interface{}) {
- if dryRun {
- fmt.Printf(format+"\n", a...)
- }
-}
-
-func markAndSweep(ctx context.Context, storageDriver driver.StorageDriver,
registry distribution.Namespace) error {
-
- repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
- if !ok {
- return fmt.Errorf("unable to convert Namespace to
RepositoryEnumerator")
- }
-
- // mark
- markSet := make(map[digest.Digest]struct{})
- err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
- emit(repoName)
-
- var err error
- named, err := reference.ParseNamed(repoName)
- if err != nil {
- return fmt.Errorf("failed to parse repo name %s: %v",
repoName, err)
- }
- repository, err := registry.Repository(ctx, named)
- if err != nil {
- return fmt.Errorf("failed to construct repository: %v",
err)
- }
-
- manifestService, err := repository.Manifests(ctx)
- if err != nil {
- return fmt.Errorf("failed to construct manifest
service: %v", err)
- }
-
- manifestEnumerator, ok :=
manifestService.(distribution.ManifestEnumerator)
- if !ok {
- return fmt.Errorf("unable to convert ManifestService
into ManifestEnumerator")
- }
-
- err = manifestEnumerator.Enumerate(ctx, func(dgst
digest.Digest) error {
- // Mark the manifest's blob
- emit("%s: marking manifest %s ", repoName, dgst)
- markSet[dgst] = struct{}{}
-
- manifest, err := manifestService.Get(ctx, dgst)
- if err != nil {
- return fmt.Errorf("failed to retrieve manifest
for digest %v: %v", dgst, err)
- }
-
- descriptors := manifest.References()
- for _, descriptor := range descriptors {
- markSet[descriptor.Digest] = struct{}{}
- emit("%s: marking blob %s", repoName,
descriptor.Digest)
- }
-
- switch manifest.(type) {
- case *schema1.SignedManifest:
- signaturesGetter, ok :=
manifestService.(distribution.SignaturesGetter)
- if !ok {
- return fmt.Errorf("unable to convert
ManifestService into SignaturesGetter")
- }
- signatures, err :=
signaturesGetter.GetSignatures(ctx, dgst)
- if err != nil {
- return fmt.Errorf("failed to get
signatures for signed manifest: %v", err)
- }
- for _, signatureDigest := range signatures {
- emit("%s: marking signature %s",
repoName, signatureDigest)
- markSet[signatureDigest] = struct{}{}
- }
- break
- case *schema2.DeserializedManifest:
- config :=
manifest.(*schema2.DeserializedManifest).Config
- emit("%s: marking configuration %s", repoName,
config.Digest)
- markSet[config.Digest] = struct{}{}
- break
- }
-
- return nil
- })
-
- return err
- })
-
- if err != nil {
- return fmt.Errorf("failed to mark: %v\n", err)
- }
-
- // sweep
- blobService := registry.Blobs()
- deleteSet := make(map[digest.Digest]struct{})
- err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
- // check if digest is in markSet. If not, delete it!
- if _, ok := markSet[dgst]; !ok {
- deleteSet[dgst] = struct{}{}
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("error enumerating blobs: %v", err)
- }
-
- emit("\n%d blobs marked, %d blobs eligible for deletion", len(markSet),
len(deleteSet))
- // Construct vacuum
- vacuum := storage.NewVacuum(ctx, storageDriver)
- for dgst := range deleteSet {
- emit("blob eligible for deletion: %s", dgst)
- if dryRun {
- continue
- }
- err = vacuum.RemoveBlob(string(dgst))
- if err != nil {
- return fmt.Errorf("failed to delete blob %s: %v\n",
dgst, err)
- }
- }
-
- return err
-}
-
-func init() {
- GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything
expect remove the blobs")
-}
-
-var dryRun bool
-
-// GCCmd is the cobra command that corresponds to the garbage-collect
subcommand
-var GCCmd = &cobra.Command{
- Use: "garbage-collect <config>",
- Short: "`garbage-collect` deletes layers not referenced by any
manifests",
- Long: "`garbage-collect` deletes layers not referenced by any
manifests",
- Run: func(cmd *cobra.Command, args []string) {
- config, err := resolveConfiguration(args)
- if err != nil {
- fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
- cmd.Usage()
- os.Exit(1)
- }
-
- driver, err := factory.Create(config.Storage.Type(),
config.Storage.Parameters())
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to construct %s driver:
%v", config.Storage.Type(), err)
- os.Exit(1)
- }
-
- ctx := context.Background()
- ctx, err = configureLogging(ctx, config)
- if err != nil {
- fmt.Fprintf(os.Stderr, "unable to configure logging
with config: %s", err)
- os.Exit(1)
- }
-
- k, err := libtrust.GenerateECP256PrivateKey()
- if err != nil {
- fmt.Fprint(os.Stderr, err)
- os.Exit(1)
- }
-
- registry, err := storage.NewRegistry(ctx, driver,
storage.DisableSchema1Signatures, storage.Schema1SigningKey(k))
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to construct registry:
%v", err)
- os.Exit(1)
- }
-
- err = markAndSweep(ctx, driver, registry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "failed to garbage collect: %v",
err)
- os.Exit(1)
- }
- },
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/garbagecollect_test.go
new/distribution-2.4.1/registry/garbagecollect_test.go
--- old/distribution-2.4.0/registry/garbagecollect_test.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/garbagecollect_test.go 1970-01-01
01:00:00.000000000 +0100
@@ -1,343 +0,0 @@
-package registry
-
-import (
- "io"
- "testing"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage"
- "github.com/docker/distribution/registry/storage/driver"
- "github.com/docker/distribution/registry/storage/driver/inmemory"
- "github.com/docker/distribution/testutil"
-)
-
-type image struct {
- manifest distribution.Manifest
- manifestDigest digest.Digest
- layers map[digest.Digest]io.ReadSeeker
-}
-
-func createRegistry(t *testing.T, driver driver.StorageDriver)
distribution.Namespace {
- ctx := context.Background()
- registry, err := storage.NewRegistry(ctx, driver, storage.EnableDelete)
- if err != nil {
- t.Fatalf("Failed to construct namespace")
- }
- return registry
-}
-
-func makeRepository(t *testing.T, registry distribution.Namespace, name
string) distribution.Repository {
- ctx := context.Background()
-
- // Initialize a dummy repository
- named, err := reference.ParseNamed(name)
- if err != nil {
- t.Fatalf("Failed to parse name %s: %v", name, err)
- }
-
- repo, err := registry.Repository(ctx, named)
- if err != nil {
- t.Fatalf("Failed to construct repository: %v", err)
- }
- return repo
-}
-
-func makeManifestService(t *testing.T, repository distribution.Repository)
distribution.ManifestService {
- ctx := context.Background()
-
- manifestService, err := repository.Manifests(ctx)
- if err != nil {
- t.Fatalf("Failed to construct manifest store: %v", err)
- }
- return manifestService
-}
-
-func allBlobs(t *testing.T, registry distribution.Namespace)
map[digest.Digest]struct{} {
- ctx := context.Background()
- blobService := registry.Blobs()
- allBlobsMap := make(map[digest.Digest]struct{})
- err := blobService.Enumerate(ctx, func(dgst digest.Digest) error {
- allBlobsMap[dgst] = struct{}{}
- return nil
- })
- if err != nil {
- t.Fatalf("Error getting all blobs: %v", err)
- }
- return allBlobsMap
-}
-
-func uploadImage(t *testing.T, repository distribution.Repository, im image)
digest.Digest {
- // upload layers
- err := testutil.UploadBlobs(repository, im.layers)
- if err != nil {
- t.Fatalf("layer upload failed: %v", err)
- }
-
- // upload manifest
- ctx := context.Background()
- manifestService := makeManifestService(t, repository)
- manifestDigest, err := manifestService.Put(ctx, im.manifest)
- if err != nil {
- t.Fatalf("manifest upload failed: %v", err)
- }
-
- return manifestDigest
-}
-
-func uploadRandomSchema1Image(t *testing.T, repository
distribution.Repository) image {
- randomLayers, err := testutil.CreateRandomLayers(2)
- if err != nil {
- t.Fatalf("%v", err)
- }
-
- digests := []digest.Digest{}
- for digest := range randomLayers {
- digests = append(digests, digest)
- }
-
- manifest, err := testutil.MakeSchema1Manifest(digests)
- if err != nil {
- t.Fatalf("%v", err)
- }
-
- manifestDigest := uploadImage(t, repository, image{manifest: manifest,
layers: randomLayers})
- return image{
- manifest: manifest,
- manifestDigest: manifestDigest,
- layers: randomLayers,
- }
-}
-
-func uploadRandomSchema2Image(t *testing.T, repository
distribution.Repository) image {
- randomLayers, err := testutil.CreateRandomLayers(2)
- if err != nil {
- t.Fatalf("%v", err)
- }
-
- digests := []digest.Digest{}
- for digest := range randomLayers {
- digests = append(digests, digest)
- }
-
- manifest, err := testutil.MakeSchema2Manifest(repository, digests)
- if err != nil {
- t.Fatalf("%v", err)
- }
-
- manifestDigest := uploadImage(t, repository, image{manifest: manifest,
layers: randomLayers})
- return image{
- manifest: manifest,
- manifestDigest: manifestDigest,
- layers: randomLayers,
- }
-}
-
-func TestNoDeletionNoEffect(t *testing.T) {
- ctx := context.Background()
- inmemoryDriver := inmemory.New()
-
- registry := createRegistry(t, inmemoryDriver)
- repo := makeRepository(t, registry, "palailogos")
- manifestService, err := repo.Manifests(ctx)
-
- image1 := uploadRandomSchema1Image(t, repo)
- image2 := uploadRandomSchema1Image(t, repo)
- image3 := uploadRandomSchema2Image(t, repo)
-
- // construct manifestlist for fun.
- blobstatter := registry.BlobStatter()
- manifestList, err := testutil.MakeManifestList(blobstatter,
[]digest.Digest{
- image1.manifestDigest, image2.manifestDigest})
- if err != nil {
- t.Fatalf("Failed to make manifest list: %v", err)
- }
-
- _, err = manifestService.Put(ctx, manifestList)
- if err != nil {
- t.Fatalf("Failed to add manifest list: %v", err)
- }
-
- // Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
- if err != nil {
- t.Fatalf("Failed mark and sweep: %v", err)
- }
-
- blobs := allBlobs(t, registry)
-
- // the +1 at the end is for the manifestList
- // the first +3 at the end for each manifest's blob
- // the second +3 at the end for each manifest's signature/config layer
- totalBlobCount := len(image1.layers) + len(image2.layers) +
len(image3.layers) + 1 + 3 + 3
- if len(blobs) != totalBlobCount {
- t.Fatalf("Garbage collection affected storage")
- }
-}
-
-func TestDeletionHasEffect(t *testing.T) {
- ctx := context.Background()
- inmemoryDriver := inmemory.New()
-
- registry := createRegistry(t, inmemoryDriver)
- repo := makeRepository(t, registry, "komnenos")
- manifests, err := repo.Manifests(ctx)
-
- image1 := uploadRandomSchema1Image(t, repo)
- image2 := uploadRandomSchema1Image(t, repo)
- image3 := uploadRandomSchema2Image(t, repo)
-
- manifests.Delete(ctx, image2.manifestDigest)
- manifests.Delete(ctx, image3.manifestDigest)
-
- // Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
- if err != nil {
- t.Fatalf("Failed mark and sweep: %v", err)
- }
-
- blobs := allBlobs(t, registry)
-
- // check that the image1 manifest and all the layers are still in blobs
- if _, ok := blobs[image1.manifestDigest]; !ok {
- t.Fatalf("First manifest is missing")
- }
-
- for layer := range image1.layers {
- if _, ok := blobs[layer]; !ok {
- t.Fatalf("manifest 1 layer is missing: %v", layer)
- }
- }
-
- // check that image2 and image3 layers are not still around
- for layer := range image2.layers {
- if _, ok := blobs[layer]; ok {
- t.Fatalf("manifest 2 layer is present: %v", layer)
- }
- }
-
- for layer := range image3.layers {
- if _, ok := blobs[layer]; ok {
- t.Fatalf("manifest 3 layer is present: %v", layer)
- }
- }
-}
-
-func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) {
- for d = range digests {
- break
- }
- return
-}
-
-func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) {
- for d := range digests {
- ds = append(ds, d)
- }
- return
-}
-
-func TestDeletionWithSharedLayer(t *testing.T) {
- ctx := context.Background()
- inmemoryDriver := inmemory.New()
-
- registry := createRegistry(t, inmemoryDriver)
- repo := makeRepository(t, registry, "tzimiskes")
-
- // Create random layers
- randomLayers1, err := testutil.CreateRandomLayers(3)
- if err != nil {
- t.Fatalf("failed to make layers: %v", err)
- }
-
- randomLayers2, err := testutil.CreateRandomLayers(3)
- if err != nil {
- t.Fatalf("failed to make layers: %v", err)
- }
-
- // Upload all layers
- err = testutil.UploadBlobs(repo, randomLayers1)
- if err != nil {
- t.Fatalf("failed to upload layers: %v", err)
- }
-
- err = testutil.UploadBlobs(repo, randomLayers2)
- if err != nil {
- t.Fatalf("failed to upload layers: %v", err)
- }
-
- // Construct manifests
- manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1))
- if err != nil {
- t.Fatalf("failed to make manifest: %v", err)
- }
-
- sharedKey := getAnyKey(randomLayers1)
- manifest2, err := testutil.MakeSchema2Manifest(repo,
append(getKeys(randomLayers2), sharedKey))
- if err != nil {
- t.Fatalf("failed to make manifest: %v", err)
- }
-
- manifestService := makeManifestService(t, repo)
-
- // Upload manifests
- _, err = manifestService.Put(ctx, manifest1)
- if err != nil {
- t.Fatalf("manifest upload failed: %v", err)
- }
-
- manifestDigest2, err := manifestService.Put(ctx, manifest2)
- if err != nil {
- t.Fatalf("manifest upload failed: %v", err)
- }
-
- // delete
- err = manifestService.Delete(ctx, manifestDigest2)
- if err != nil {
- t.Fatalf("manifest deletion failed: %v", err)
- }
-
- // check that all of the layers in layer 1 are still there
- blobs := allBlobs(t, registry)
- for dgst := range randomLayers1 {
- if _, ok := blobs[dgst]; !ok {
- t.Fatalf("random layer 1 blob missing: %v", dgst)
- }
- }
-}
-
-func TestOrphanBlobDeleted(t *testing.T) {
- inmemoryDriver := inmemory.New()
-
- registry := createRegistry(t, inmemoryDriver)
- repo := makeRepository(t, registry, "michael_z_doukas")
-
- digests, err := testutil.CreateRandomLayers(1)
- if err != nil {
- t.Fatalf("Failed to create random digest: %v", err)
- }
-
- if err = testutil.UploadBlobs(repo, digests); err != nil {
- t.Fatalf("Failed to upload blob: %v", err)
- }
-
- // formality to create the necessary directories
- uploadRandomSchema2Image(t, repo)
-
- // Run GC
- err = markAndSweep(context.Background(), inmemoryDriver, registry)
- if err != nil {
- t.Fatalf("Failed mark and sweep: %v", err)
- }
-
- blobs := allBlobs(t, registry)
-
- // check that orphan blob layers are not still around
- for dgst := range digests {
- if _, ok := blobs[dgst]; ok {
- t.Fatalf("Orphan layer is present: %v", dgst)
- }
- }
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/handlers/helpers.go
new/distribution-2.4.1/registry/handlers/helpers.go
--- old/distribution-2.4.0/registry/handlers/helpers.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/handlers/helpers.go 2016-05-18
22:10:44.000000000 +0200
@@ -46,7 +46,11 @@
// instead of showing 0 for the HTTP status.
responseWriter.WriteHeader(499)
- ctxu.GetLogger(context).Error("client disconnected
during " + action)
+ ctxu.GetLoggerWithFields(context,
map[interface{}]interface{}{
+ "error": err,
+ "copied": copied,
+ "contentLength": r.ContentLength,
+ }, "error", "copied", "contentLength").Error("client
disconnected during " + action)
return errors.New("client disconnected")
default:
}
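
This is dafb59f: when a client disconnects mid-upload, the handler now logs the underlying error together with the bytes copied and the expected content length, instead of a bare message. The real code uses ctxu.GetLoggerWithFields from the distribution context package; a rough standard-library approximation of the same idea:

    package main

    import (
        "errors"
        "log"
    )

    // logDisconnect is an illustrative stand-in for the handler code: it
    // records the error, how many bytes were copied, and how many were
    // expected, so truncated uploads can be diagnosed from the logs.
    func logDisconnect(action string, err error, copied, contentLength int64) {
        log.Printf("client disconnected during %s: error=%v copied=%d contentLength=%d",
            action, err, copied, contentLength)
    }

    func main() {
        logDisconnect("blob PATCH", errors.New("connection reset"), 4096, 1048576)
    }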
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/root.go
new/distribution-2.4.1/registry/root.go
--- old/distribution-2.4.0/registry/root.go 2016-04-14 15:11:00.000000000
+0200
+++ new/distribution-2.4.1/registry/root.go 2016-05-18 22:10:44.000000000
+0200
@@ -1,7 +1,14 @@
package registry
import (
+ "fmt"
+ "os"
+
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/registry/storage"
+ "github.com/docker/distribution/registry/storage/driver/factory"
"github.com/docker/distribution/version"
+ "github.com/docker/libtrust"
"github.com/spf13/cobra"
)
@@ -10,6 +17,7 @@
func init() {
RootCmd.AddCommand(ServeCmd)
RootCmd.AddCommand(GCCmd)
+ GCCmd.Flags().BoolVarP(&dryRun, "dry-run", "d", false, "do everything
except remove the blobs")
RootCmd.Flags().BoolVarP(&showVersion, "version", "v", false, "show the
version and exit")
}
@@ -26,3 +34,51 @@
cmd.Usage()
},
}
+
+var dryRun bool
+
+// GCCmd is the cobra command that corresponds to the garbage-collect
subcommand
+var GCCmd = &cobra.Command{
+ Use: "garbage-collect <config>",
+ Short: "`garbage-collect` deletes layers not referenced by any
manifests",
+ Long: "`garbage-collect` deletes layers not referenced by any
manifests",
+ Run: func(cmd *cobra.Command, args []string) {
+ config, err := resolveConfiguration(args)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
+ cmd.Usage()
+ os.Exit(1)
+ }
+
+ driver, err := factory.Create(config.Storage.Type(),
config.Storage.Parameters())
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to construct %s driver:
%v", config.Storage.Type(), err)
+ os.Exit(1)
+ }
+
+ ctx := context.Background()
+ ctx, err = configureLogging(ctx, config)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unable to configure logging
with config: %s", err)
+ os.Exit(1)
+ }
+
+ k, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ registry, err := storage.NewRegistry(ctx, driver,
storage.DisableSchema1Signatures, storage.Schema1SigningKey(k))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to construct registry:
%v", err)
+ os.Exit(1)
+ }
+
+ err = storage.MarkAndSweep(ctx, driver, registry, dryRun)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "failed to garbage collect: %v",
err)
+ os.Exit(1)
+ }
+ },
+}
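
Apart from the relocated dry-run flag, the command body is unchanged except for its final call: the mark-and-sweep logic now lives in the exported storage.MarkAndSweep (c0d3813), with dryRun passed as a parameter rather than read from a package variable, which also makes the collector usable as a library. A minimal caller assembled only from APIs visible in this diff, here running a dry run against an empty in-memory registry:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/distribution/context"
        "github.com/docker/distribution/registry/storage"
        "github.com/docker/distribution/registry/storage/driver/inmemory"
    )

    func main() {
        ctx := context.Background()
        driver := inmemory.New()

        registry, err := storage.NewRegistry(ctx, driver)
        if err != nil {
            fmt.Fprintf(os.Stderr, "failed to construct registry: %v\n", err)
            os.Exit(1)
        }

        // dryRun=true: eligible blobs are only reported, never removed.
        if err := storage.MarkAndSweep(ctx, driver, registry, true); err != nil {
            fmt.Fprintf(os.Stderr, "failed to garbage collect: %v\n", err)
            os.Exit(1)
        }
    }

From the command line the same behaviour is reached through the -d/--dry-run flag registered in init() above.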
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/storage/blob_test.go
new/distribution-2.4.1/registry/storage/blob_test.go
--- old/distribution-2.4.0/registry/storage/blob_test.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/storage/blob_test.go 2016-05-18
22:10:44.000000000 +0200
@@ -16,6 +16,7 @@
"github.com/docker/distribution/registry/storage/cache/memory"
"github.com/docker/distribution/registry/storage/driver/inmemory"
"github.com/docker/distribution/testutil"
+ "path"
)
// TestWriteSeek tests that the current file size can be
@@ -83,6 +84,15 @@
t.Fatalf("unexpected error during upload cancellation: %v", err)
}
+ // get the enclosing directory
+ uploadPath := path.Dir(blobUpload.(*blobWriter).path)
+
+ // ensure state was cleaned up
+ _, err = driver.List(ctx, uploadPath)
+ if err == nil {
+ t.Fatal("files in upload path after cleanup")
+ }
+
// Do a resume, get unknown upload
blobUpload, err = bs.Resume(ctx, blobUpload.ID())
if err != distribution.ErrBlobUploadUnknown {
@@ -128,6 +138,13 @@
t.Fatalf("unexpected error finishing layer upload: %v", err)
}
+ // ensure state was cleaned up
+ uploadPath = path.Dir(blobUpload.(*blobWriter).path)
+ _, err = driver.List(ctx, uploadPath)
+ if err == nil {
+ t.Fatal("files in upload path after commit")
+ }
+
// After finishing an upload, it should no longer exist.
if _, err := bs.Resume(ctx, blobUpload.ID()); err !=
distribution.ErrBlobUploadUnknown {
t.Fatalf("expected layer upload to be unknown, got %v", err)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore' old/distribution-2.4.0/registry/storage/blobwriter.go
new/distribution-2.4.1/registry/storage/blobwriter.go
--- old/distribution-2.4.0/registry/storage/blobwriter.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/storage/blobwriter.go 2016-05-18
22:10:44.000000000 +0200
@@ -18,8 +18,8 @@
errResumableDigestNotAvailable = errors.New("resumable digest not
available")
)
-// layerWriter is used to control the various aspects of resumable
-// layer upload. It implements the LayerUpload interface.
+// blobWriter is used to control the various aspects of resumable
+// blob upload.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
@@ -34,6 +34,7 @@
path string
resumableDigestEnabled bool
+ committed bool
}
var _ distribution.BlobWriter = &blobWriter{}
@@ -56,6 +57,8 @@
return distribution.Descriptor{}, err
}
+ bw.Close()
+
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return distribution.Descriptor{}, err
@@ -78,6 +81,7 @@
return distribution.Descriptor{}, err
}
+ bw.committed = true
return canonical, nil
}
@@ -89,11 +93,14 @@
return err
}
+ if err := bw.Close(); err != nil {
+ context.GetLogger(ctx).Errorf("error closing blobwriter: %s",
err)
+ }
+
if err := bw.removeResources(ctx); err != nil {
return err
}
- bw.Close()
return nil
}
@@ -130,6 +137,10 @@
}
func (bw *blobWriter) Close() error {
+ if bw.committed {
+ return errors.New("blobwriter close after commit")
+ }
+
if err := bw.storeHashState(bw.blobStore.ctx); err != nil {
return err
}
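
Taken together these hunks implement ba672e8 and fd5a404: Commit now closes the writer itself, and the new committed flag turns any later (typically deferred) Close into an error instead of a write that would drop stale hashstate files back into the upload directory Commit just cleaned up. The guard reduced to a runnable sketch, with writer as a stand-in for the real blobWriter:

    package main

    import (
        "errors"
        "fmt"
    )

    type writer struct {
        committed bool
    }

    // storeHashState stands in for persisting resumable-digest state.
    func (w *writer) storeHashState() error {
        fmt.Println("hashstate written")
        return nil
    }

    // Close refuses to persist state once Commit has run, so a deferred
    // Close cannot resurrect files that Commit already removed.
    func (w *writer) Close() error {
        if w.committed {
            return errors.New("blobwriter close after commit")
        }
        return w.storeHashState()
    }

    func (w *writer) Commit() error {
        if err := w.Close(); err != nil {
            return err
        }
        // ... validate the blob and move it into place ...
        w.committed = true
        return nil
    }

    func main() {
        w := &writer{}
        if err := w.Commit(); err != nil {
            panic(err)
        }
        fmt.Println("second close:", w.Close()) // blobwriter close after commit
    }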
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/registry/storage/driver/gcs/gcs.go
new/distribution-2.4.1/registry/storage/driver/gcs/gcs.go
--- old/distribution-2.4.0/registry/storage/driver/gcs/gcs.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/storage/driver/gcs/gcs.go 2016-05-18
22:10:44.000000000 +0200
@@ -493,6 +493,9 @@
// Size returns the number of bytes written to this FileWriter.
func (w *writer) Size() int64 {
+ if !w.closed {
+ return w.offset + int64(w.buffSize)
+ }
return w.size
}
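
This is 431e46a: while a GCS writer is still open, w.size only reflects what has been committed to the backend, so Size must also count the bytes sitting in the unflushed buffer. The same accounting in isolation; field names mirror the diff, but the type is a stand-in:

    package main

    import "fmt"

    type writer struct {
        closed   bool
        size     int64 // object size as committed to the backend
        offset   int64 // backend offset where the current buffer starts
        buffSize int64 // bytes buffered but not yet flushed
    }

    // Size reports everything written through this FileWriter: while the
    // writer is open, that includes buffered-but-unflushed bytes.
    func (w *writer) Size() int64 {
        if !w.closed {
            return w.offset + w.buffSize
        }
        return w.size
    }

    func main() {
        w := &writer{offset: 1 << 20, buffSize: 4096}
        fmt.Println(w.Size()) // 1052672 while still open
    }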
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/registry/storage/driver/s3-aws/s3.go
new/distribution-2.4.1/registry/storage/driver/s3-aws/s3.go
--- old/distribution-2.4.0/registry/storage/driver/s3-aws/s3.go 2016-04-14
15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/storage/driver/s3-aws/s3.go 2016-05-18
22:10:44.000000000 +0200
@@ -18,6 +18,7 @@
"io/ioutil"
"net/http"
"reflect"
+ "sort"
"strconv"
"strings"
"time"
@@ -80,6 +81,7 @@
"ap-northeast-1",
"ap-northeast-2",
"sa-east-1",
+ "cn-north-1",
} {
validRegions[region] = struct{}{}
}
@@ -136,14 +138,21 @@
secretKey = ""
}
+ regionEndpoint := parameters["regionendpoint"]
+ if regionEndpoint == nil {
+ regionEndpoint = ""
+ }
+
regionName, ok := parameters["region"]
if regionName == nil || fmt.Sprint(regionName) == "" {
return nil, fmt.Errorf("No region parameter provided")
}
region := fmt.Sprint(regionName)
- _, ok = validRegions[region]
- if !ok {
- return nil, fmt.Errorf("Invalid region provided: %v", region)
+ // Don't check the region value if a custom endpoint is provided.
+ if regionEndpoint == "" {
+ if _, ok = validRegions[region]; !ok {
+ return nil, fmt.Errorf("Invalid region provided: %v",
region)
+ }
}
bucket := parameters["bucket"]
@@ -151,11 +160,6 @@
return nil, fmt.Errorf("No bucket parameter provided")
}
- regionEndpoint := parameters["regionendpoint"]
- if regionEndpoint == nil {
- regionEndpoint = ""
- }
-
encryptBool := false
encrypt := parameters["encrypt"]
switch encrypt := encrypt.(type) {
@@ -716,6 +720,12 @@
}
}
+type completedParts []*s3.CompletedPart
+
+func (a completedParts) Len() int { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber <
*a[j].PartNumber }
+
func (w *writer) Write(p []byte) (int, error) {
if w.closed {
return 0, fmt.Errorf("already closed")
@@ -728,19 +738,22 @@
// If the last written part is smaller than minChunkSize, we need to
make a
// new multipart upload :sadface:
if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) <
minChunkSize {
- var completedParts []*s3.CompletedPart
+ var completedUploadedParts completedParts
for _, part := range w.parts {
- completedParts = append(completedParts,
&s3.CompletedPart{
+ completedUploadedParts = append(completedUploadedParts,
&s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
+
+ sort.Sort(completedUploadedParts)
+
_, err :=
w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
- Parts: completedParts,
+ Parts: completedUploadedParts,
},
})
if err != nil {
@@ -880,19 +893,23 @@
return err
}
w.committed = true
- var completedParts []*s3.CompletedPart
+
+ var completedUploadedParts completedParts
for _, part := range w.parts {
- completedParts = append(completedParts, &s3.CompletedPart{
+ completedUploadedParts = append(completedUploadedParts,
&s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
+
+ sort.Sort(completedUploadedParts)
+
_, err =
w.driver.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
- Parts: completedParts,
+ Parts: completedUploadedParts,
},
})
if err != nil {
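
ed02e88: parts of a multipart upload can finish out of order, but S3 expects CompleteMultipartUpload to receive the part list sorted by part number, so both completion paths now sort.Sort a completedParts slice first. The sort.Interface implementation from the hunk, exercised in a self-contained sketch where CompletedPart is a local stand-in for the aws-sdk-go type:

    package main

    import (
        "fmt"
        "sort"
    )

    type CompletedPart struct {
        ETag       *string
        PartNumber *int64
    }

    // completedParts orders parts by part number, the order the S3
    // CompleteMultipartUpload API requires.
    type completedParts []*CompletedPart

    func (a completedParts) Len() int           { return len(a) }
    func (a completedParts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
    func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }

    func main() {
        n := func(v int64) *int64 { return &v }
        parts := completedParts{{PartNumber: n(3)}, {PartNumber: n(1)}, {PartNumber: n(2)}}
        sort.Sort(parts)
        for _, p := range parts {
            fmt.Println(*p.PartNumber) // prints 1, 2, 3
        }
    }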
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/registry/storage/driver/swift/swift.go
new/distribution-2.4.1/registry/storage/driver/swift/swift.go
--- old/distribution-2.4.0/registry/storage/driver/swift/swift.go
2016-04-14 15:11:00.000000000 +0200
+++ new/distribution-2.4.1/registry/storage/driver/swift/swift.go
2016-05-18 22:10:44.000000000 +0200
@@ -698,6 +698,9 @@
if err := w.driver.createManifest(w.path,
w.driver.Container+"/"+w.segmentsPath); err != nil {
return err
}
+ if err := w.waitForSegmentsToShowUp(); err != nil {
+ return err
+ }
}
w.closed = true
@@ -732,10 +735,14 @@
}
w.committed = true
+ return w.waitForSegmentsToShowUp()
+}
+func (w *writer) waitForSegmentsToShowUp() error {
var err error
waitingTime := readAfterWriteWait
endTime := time.Now().Add(readAfterWriteTimeout)
+
for {
var info swift.Object
if info, _, err = w.driver.Conn.Object(w.driver.Container,
w.driver.swiftPath(w.path)); err == nil {
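
3330cc5: Swift is eventually consistent, so right after the DLO manifest is created the segments may not be visible yet; Close and Commit now share waitForSegmentsToShowUp, which polls until the object can be stat'ed or readAfterWriteTimeout expires. The general poll-with-growing-backoff shape as an illustrative sketch; the probe function and durations here are assumptions, not the driver's values:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // waitFor polls probe with an increasing delay until it succeeds or
    // the deadline passes, mirroring waitForSegmentsToShowUp's loop.
    func waitFor(probe func() error, wait, timeout time.Duration) error {
        endTime := time.Now().Add(timeout)
        for {
            err := probe()
            if err == nil {
                return nil
            }
            if time.Now().Add(wait).After(endTime) {
                return err
            }
            time.Sleep(wait)
            wait *= 2
        }
    }

    func main() {
        tries := 0
        err := waitFor(func() error {
            tries++
            if tries < 3 {
                return errors.New("object not visible yet")
            }
            return nil
        }, 10*time.Millisecond, time.Second)
        fmt.Println(err, "after", tries, "tries")
    }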
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/registry/storage/garbagecollect.go
new/distribution-2.4.1/registry/storage/garbagecollect.go
--- old/distribution-2.4.0/registry/storage/garbagecollect.go 1970-01-01
01:00:00.000000000 +0100
+++ new/distribution-2.4.1/registry/storage/garbagecollect.go 2016-05-18
22:10:44.000000000 +0200
@@ -0,0 +1,150 @@
+package storage
+
+import (
+ "fmt"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/manifest/schema1"
+ "github.com/docker/distribution/manifest/schema2"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/driver"
+)
+
+func emit(format string, a ...interface{}) {
+ fmt.Printf(format+"\n", a...)
+}
+
+// MarkAndSweep performs a mark and sweep of registry data
+func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver,
registry distribution.Namespace, dryRun bool) error {
+ repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
+ if !ok {
+ return fmt.Errorf("unable to convert Namespace to
RepositoryEnumerator")
+ }
+
+ // mark
+ markSet := make(map[digest.Digest]struct{})
+ err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
+ if dryRun {
+ emit(repoName)
+ }
+
+ var err error
+ named, err := reference.ParseNamed(repoName)
+ if err != nil {
+ return fmt.Errorf("failed to parse repo name %s: %v",
repoName, err)
+ }
+ repository, err := registry.Repository(ctx, named)
+ if err != nil {
+ return fmt.Errorf("failed to construct repository: %v",
err)
+ }
+
+ manifestService, err := repository.Manifests(ctx)
+ if err != nil {
+ return fmt.Errorf("failed to construct manifest
service: %v", err)
+ }
+
+ manifestEnumerator, ok :=
manifestService.(distribution.ManifestEnumerator)
+ if !ok {
+ return fmt.Errorf("unable to convert ManifestService
into ManifestEnumerator")
+ }
+
+ err = manifestEnumerator.Enumerate(ctx, func(dgst
digest.Digest) error {
+ // Mark the manifest's blob
+ if dryRun {
+ emit("%s: marking manifest %s ", repoName, dgst)
+ }
+ markSet[dgst] = struct{}{}
+
+ manifest, err := manifestService.Get(ctx, dgst)
+ if err != nil {
+ return fmt.Errorf("failed to retrieve manifest
for digest %v: %v", dgst, err)
+ }
+
+ descriptors := manifest.References()
+ for _, descriptor := range descriptors {
+ markSet[descriptor.Digest] = struct{}{}
+ if dryRun {
+ emit("%s: marking blob %s", repoName,
descriptor.Digest)
+ }
+ }
+
+ switch manifest.(type) {
+ case *schema1.SignedManifest:
+ signaturesGetter, ok :=
manifestService.(distribution.SignaturesGetter)
+ if !ok {
+ return fmt.Errorf("unable to convert
ManifestService into SignaturesGetter")
+ }
+ signatures, err :=
signaturesGetter.GetSignatures(ctx, dgst)
+ if err != nil {
+ return fmt.Errorf("failed to get
signatures for signed manifest: %v", err)
+ }
+ for _, signatureDigest := range signatures {
+ if dryRun {
+ emit("%s: marking signature
%s", repoName, signatureDigest)
+ }
+ markSet[signatureDigest] = struct{}{}
+ }
+ break
+ case *schema2.DeserializedManifest:
+ config :=
manifest.(*schema2.DeserializedManifest).Config
+ if dryRun {
+ emit("%s: marking configuration %s",
repoName, config.Digest)
+ }
+ markSet[config.Digest] = struct{}{}
+ break
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ // In certain situations such as unfinished uploads,
deleting all
+ // tags in S3 or removing the _manifests folder
manually, this
+ // error may be of type PathNotFound.
+ //
+ // In these cases we can continue marking other
manifests safely.
+ if _, ok := err.(driver.PathNotFoundError); ok {
+ return nil
+ }
+ }
+
+ return err
+ })
+
+ if err != nil {
+ return fmt.Errorf("failed to mark: %v\n", err)
+ }
+
+ // sweep
+ blobService := registry.Blobs()
+ deleteSet := make(map[digest.Digest]struct{})
+ err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
+ // check if digest is in markSet. If not, delete it!
+ if _, ok := markSet[dgst]; !ok {
+ deleteSet[dgst] = struct{}{}
+ }
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("error enumerating blobs: %v", err)
+ }
+ if dryRun {
+ emit("\n%d blobs marked, %d blobs eligible for deletion",
len(markSet), len(deleteSet))
+ }
+ // Construct vacuum
+ vacuum := NewVacuum(ctx, storageDriver)
+ for dgst := range deleteSet {
+ if dryRun {
+ emit("blob eligible for deletion: %s", dgst)
+ continue
+ }
+ err = vacuum.RemoveBlob(string(dgst))
+ if err != nil {
+ return fmt.Errorf("failed to delete blob %s: %v\n",
dgst, err)
+ }
+ }
+
+ return err
+}
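
Besides the relocation and the dryRun parameter, this file carries 011b7e4: if enumerating a repository's manifests fails with driver.PathNotFoundError (unfinished uploads, all tags deleted on S3, a manually removed _manifests directory), the mark phase skips that repository and continues instead of aborting the whole collection. The error-tolerance pattern on its own, with pathNotFoundError standing in for the driver's type:

    package main

    import (
        "errors"
        "fmt"
    )

    // pathNotFoundError stands in for driver.PathNotFoundError.
    type pathNotFoundError struct{ path string }

    func (e pathNotFoundError) Error() string { return "path not found: " + e.path }

    // markRepo mirrors the mark phase's error handling: a missing
    // _manifests directory is tolerated so remaining repositories still
    // get marked, while any other error aborts the walk.
    func markRepo(enumerate func() error) error {
        if err := enumerate(); err != nil {
            if _, ok := err.(pathNotFoundError); ok {
                return nil // keep going; this repo just has no manifests
            }
            return err
        }
        return nil
    }

    func main() {
        fmt.Println(markRepo(func() error { return pathNotFoundError{"_manifests"} })) // <nil>
        fmt.Println(markRepo(func() error { return errors.New("i/o failure") }))      // i/o failure
    }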
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn'
'--exclude=.svnignore'
old/distribution-2.4.0/registry/storage/garbagecollect_test.go
new/distribution-2.4.1/registry/storage/garbagecollect_test.go
--- old/distribution-2.4.0/registry/storage/garbagecollect_test.go
1970-01-01 01:00:00.000000000 +0100
+++ new/distribution-2.4.1/registry/storage/garbagecollect_test.go
2016-05-18 22:10:44.000000000 +0200
@@ -0,0 +1,374 @@
+package storage
+
+import (
+ "io"
+ "path"
+ "testing"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/context"
+ "github.com/docker/distribution/digest"
+ "github.com/docker/distribution/reference"
+ "github.com/docker/distribution/registry/storage/driver"
+ "github.com/docker/distribution/registry/storage/driver/inmemory"
+ "github.com/docker/distribution/testutil"
+)
+
+type image struct {
+ manifest distribution.Manifest
+ manifestDigest digest.Digest
+ layers map[digest.Digest]io.ReadSeeker
+}
+
+func createRegistry(t *testing.T, driver driver.StorageDriver)
distribution.Namespace {
+ ctx := context.Background()
+ registry, err := NewRegistry(ctx, driver, EnableDelete)
+ if err != nil {
+ t.Fatalf("Failed to construct namespace")
+ }
+ return registry
+}
+
+func makeRepository(t *testing.T, registry distribution.Namespace, name
string) distribution.Repository {
+ ctx := context.Background()
+
+ // Initialize a dummy repository
+ named, err := reference.ParseNamed(name)
+ if err != nil {
+ t.Fatalf("Failed to parse name %s: %v", name, err)
+ }
+
+ repo, err := registry.Repository(ctx, named)
+ if err != nil {
+ t.Fatalf("Failed to construct repository: %v", err)
+ }
+ return repo
+}
+
+func makeManifestService(t *testing.T, repository distribution.Repository)
distribution.ManifestService {
+ ctx := context.Background()
+
+ manifestService, err := repository.Manifests(ctx)
+ if err != nil {
+ t.Fatalf("Failed to construct manifest store: %v", err)
+ }
+ return manifestService
+}
+
+func allBlobs(t *testing.T, registry distribution.Namespace)
map[digest.Digest]struct{} {
+ ctx := context.Background()
+ blobService := registry.Blobs()
+ allBlobsMap := make(map[digest.Digest]struct{})
+ err := blobService.Enumerate(ctx, func(dgst digest.Digest) error {
+ allBlobsMap[dgst] = struct{}{}
+ return nil
+ })
+ if err != nil {
+ t.Fatalf("Error getting all blobs: %v", err)
+ }
+ return allBlobsMap
+}
+
+func uploadImage(t *testing.T, repository distribution.Repository, im image)
digest.Digest {
+ // upload layers
+ err := testutil.UploadBlobs(repository, im.layers)
+ if err != nil {
+ t.Fatalf("layer upload failed: %v", err)
+ }
+
+ // upload manifest
+ ctx := context.Background()
+ manifestService := makeManifestService(t, repository)
+ manifestDigest, err := manifestService.Put(ctx, im.manifest)
+ if err != nil {
+ t.Fatalf("manifest upload failed: %v", err)
+ }
+
+ return manifestDigest
+}
+
+func uploadRandomSchema1Image(t *testing.T, repository
distribution.Repository) image {
+ randomLayers, err := testutil.CreateRandomLayers(2)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+
+ digests := []digest.Digest{}
+ for digest := range randomLayers {
+ digests = append(digests, digest)
+ }
+
+ manifest, err := testutil.MakeSchema1Manifest(digests)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+
+ manifestDigest := uploadImage(t, repository, image{manifest: manifest,
layers: randomLayers})
+ return image{
+ manifest: manifest,
+ manifestDigest: manifestDigest,
+ layers: randomLayers,
+ }
+}
+
+func uploadRandomSchema2Image(t *testing.T, repository
distribution.Repository) image {
+ randomLayers, err := testutil.CreateRandomLayers(2)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+
+ digests := []digest.Digest{}
+ for digest := range randomLayers {
+ digests = append(digests, digest)
+ }
+
+ manifest, err := testutil.MakeSchema2Manifest(repository, digests)
+ if err != nil {
+ t.Fatalf("%v", err)
+ }
+
+ manifestDigest := uploadImage(t, repository, image{manifest: manifest,
layers: randomLayers})
+ return image{
+ manifest: manifest,
+ manifestDigest: manifestDigest,
+ layers: randomLayers,
+ }
+}
+
+func TestNoDeletionNoEffect(t *testing.T) {
+ ctx := context.Background()
+ inmemoryDriver := inmemory.New()
+
+ registry := createRegistry(t, inmemoryDriver)
+ repo := makeRepository(t, registry, "palailogos")
+ manifestService, err := repo.Manifests(ctx)
+
+ image1 := uploadRandomSchema1Image(t, repo)
+ image2 := uploadRandomSchema1Image(t, repo)
+ image3 := uploadRandomSchema2Image(t, repo)
+
+ // construct manifestlist for fun.
+ blobstatter := registry.BlobStatter()
+ manifestList, err := testutil.MakeManifestList(blobstatter,
[]digest.Digest{
+ image1.manifestDigest, image2.manifestDigest})
+ if err != nil {
+ t.Fatalf("Failed to make manifest list: %v", err)
+ }
+
+ _, err = manifestService.Put(ctx, manifestList)
+ if err != nil {
+ t.Fatalf("Failed to add manifest list: %v", err)
+ }
+
+ // Run GC
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry,
false)
+ if err != nil {
+ t.Fatalf("Failed mark and sweep: %v", err)
+ }
+
+ blobs := allBlobs(t, registry)
+
+ // the +1 at the end is for the manifestList
+ // the first +3 at the end for each manifest's blob
+ // the second +3 at the end for each manifest's signature/config layer
+ totalBlobCount := len(image1.layers) + len(image2.layers) +
len(image3.layers) + 1 + 3 + 3
+ if len(blobs) != totalBlobCount {
+ t.Fatalf("Garbage collection affected storage")
+ }
+}
+
+func TestGCWithMissingManifests(t *testing.T) {
+ ctx := context.Background()
+ d := inmemory.New()
+
+ registry := createRegistry(t, d)
+ repo := makeRepository(t, registry, "testrepo")
+ uploadRandomSchema1Image(t, repo)
+
+ // Simulate a missing _manifests directory
+ revPath, err := pathFor(manifestRevisionsPathSpec{"testrepo"})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _manifestsPath := path.Dir(revPath)
+ err = d.Delete(ctx, _manifestsPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = MarkAndSweep(context.Background(), d, registry, false)
+ if err != nil {
+ t.Fatalf("Failed mark and sweep: %v", err)
+ }
+
+ blobs := allBlobs(t, registry)
+ if len(blobs) > 0 {
+ t.Errorf("unexpected blobs after gc")
+ }
+}
+
+func TestDeletionHasEffect(t *testing.T) {
+ ctx := context.Background()
+ inmemoryDriver := inmemory.New()
+
+ registry := createRegistry(t, inmemoryDriver)
+ repo := makeRepository(t, registry, "komnenos")
+ manifests, err := repo.Manifests(ctx)
+
+ image1 := uploadRandomSchema1Image(t, repo)
+ image2 := uploadRandomSchema1Image(t, repo)
+ image3 := uploadRandomSchema2Image(t, repo)
+
+ manifests.Delete(ctx, image2.manifestDigest)
+ manifests.Delete(ctx, image3.manifestDigest)
+
+ // Run GC
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry,
false)
+ if err != nil {
+ t.Fatalf("Failed mark and sweep: %v", err)
+ }
+
+ blobs := allBlobs(t, registry)
+
+ // check that the image1 manifest and all the layers are still in blobs
+ if _, ok := blobs[image1.manifestDigest]; !ok {
+ t.Fatalf("First manifest is missing")
+ }
+
+ for layer := range image1.layers {
+ if _, ok := blobs[layer]; !ok {
+ t.Fatalf("manifest 1 layer is missing: %v", layer)
+ }
+ }
+
+ // check that image2 and image3 layers are not still around
+ for layer := range image2.layers {
+ if _, ok := blobs[layer]; ok {
+ t.Fatalf("manifest 2 layer is present: %v", layer)
+ }
+ }
+
+ for layer := range image3.layers {
+ if _, ok := blobs[layer]; ok {
+ t.Fatalf("manifest 3 layer is present: %v", layer)
+ }
+ }
+}
+
+func getAnyKey(digests map[digest.Digest]io.ReadSeeker) (d digest.Digest) {
+ for d = range digests {
+ break
+ }
+ return
+}
+
+func getKeys(digests map[digest.Digest]io.ReadSeeker) (ds []digest.Digest) {
+ for d := range digests {
+ ds = append(ds, d)
+ }
+ return
+}
+
+func TestDeletionWithSharedLayer(t *testing.T) {
+ ctx := context.Background()
+ inmemoryDriver := inmemory.New()
+
+ registry := createRegistry(t, inmemoryDriver)
+ repo := makeRepository(t, registry, "tzimiskes")
+
+ // Create random layers
+ randomLayers1, err := testutil.CreateRandomLayers(3)
+ if err != nil {
+ t.Fatalf("failed to make layers: %v", err)
+ }
+
+ randomLayers2, err := testutil.CreateRandomLayers(3)
+ if err != nil {
+ t.Fatalf("failed to make layers: %v", err)
+ }
+
+ // Upload all layers
+ err = testutil.UploadBlobs(repo, randomLayers1)
+ if err != nil {
+ t.Fatalf("failed to upload layers: %v", err)
+ }
+
+ err = testutil.UploadBlobs(repo, randomLayers2)
+ if err != nil {
+ t.Fatalf("failed to upload layers: %v", err)
+ }
+
+ // Construct manifests
+ manifest1, err := testutil.MakeSchema1Manifest(getKeys(randomLayers1))
+ if err != nil {
+ t.Fatalf("failed to make manifest: %v", err)
+ }
+
+ sharedKey := getAnyKey(randomLayers1)
+ manifest2, err := testutil.MakeSchema2Manifest(repo,
append(getKeys(randomLayers2), sharedKey))
+ if err != nil {
+ t.Fatalf("failed to make manifest: %v", err)
+ }
+
+ manifestService := makeManifestService(t, repo)
+
+ // Upload manifests
+ _, err = manifestService.Put(ctx, manifest1)
+ if err != nil {
+ t.Fatalf("manifest upload failed: %v", err)
+ }
+
+ manifestDigest2, err := manifestService.Put(ctx, manifest2)
+ if err != nil {
+ t.Fatalf("manifest upload failed: %v", err)
+ }
+
+ // delete
+ err = manifestService.Delete(ctx, manifestDigest2)
+ if err != nil {
+ t.Fatalf("manifest deletion failed: %v", err)
+ }
+
+ // check that all of the layers in layer 1 are still there
+ blobs := allBlobs(t, registry)
+ for dgst := range randomLayers1 {
+ if _, ok := blobs[dgst]; !ok {
+ t.Fatalf("random layer 1 blob missing: %v", dgst)
+ }
+ }
+}
+
+func TestOrphanBlobDeleted(t *testing.T) {
+ inmemoryDriver := inmemory.New()
+
+ registry := createRegistry(t, inmemoryDriver)
+ repo := makeRepository(t, registry, "michael_z_doukas")
+
+ digests, err := testutil.CreateRandomLayers(1)
+ if err != nil {
+ t.Fatalf("Failed to create random digest: %v", err)
+ }
+
+ if err = testutil.UploadBlobs(repo, digests); err != nil {
+ t.Fatalf("Failed to upload blob: %v", err)
+ }
+
+ // formality to create the necessary directories
+ uploadRandomSchema2Image(t, repo)
+
+ // Run GC
+ err = MarkAndSweep(context.Background(), inmemoryDriver, registry,
false)
+ if err != nil {
+ t.Fatalf("Failed mark and sweep: %v", err)
+ }
+
+ blobs := allBlobs(t, registry)
+
+ // check that orphan blob layers are not still around
+ for dgst := range digests {
+ if _, ok := blobs[dgst]; ok {
+ t.Fatalf("Orphan layer is present: %v", dgst)
+ }
+ }
+}