Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package velero for openSUSE:Factory checked in at 2021-04-08 21:32:21
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/velero (Old)
 and      /work/SRC/openSUSE:Factory/.velero.new.2401 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "velero"

Thu Apr 8 21:32:21 2021 rev:6 rq:883713 version:1.5.4

Changes:
--------
--- /work/SRC/openSUSE:Factory/velero/velero.changes    2021-01-19 16:02:43.263427731 +0100
+++ /work/SRC/openSUSE:Factory/.velero.new.2401/velero.changes  2021-04-08 21:32:31.479830773 +0200
@@ -1,0 +2,6 @@
+Thu Apr 08 02:27:02 UTC 2021 - jenting.hs...@suse.com
+
+- Update to version 1.5.4:
+  * Add cherry-pick commits and changelog for v1.5.4 (#3651)
+
+-------------------------------------------------------------------

Old:
----
  velero-1.5.3.tar.gz

New:
----
  velero-1.5.4.tar.gz

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ velero.spec ++++++
--- /var/tmp/diff_new_pack.X5vRmE/_old  2021-04-08 21:32:32.331831708 +0200
+++ /var/tmp/diff_new_pack.X5vRmE/_new  2021-04-08 21:32:32.331831708 +0200
@@ -17,11 +17,11 @@
 
 %define goipath github.com/vmware-tanzu/velero
-%define commit 123109a3bcac11dbb6783d2758207bac0d0817cb
+%define commit 525705bceb8895b9da2cf2a1d1a79b99d74723cb
 %define gitstate clean
 
 Name:           velero
-Version:        1.5.3
+Version:        1.5.4
 Release:        0
 Summary:        Backup program with deduplication and encryption
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.X5vRmE/_old  2021-04-08 21:32:32.371831752 +0200
+++ /var/tmp/diff_new_pack.X5vRmE/_new  2021-04-08 21:32:32.371831752 +0200
@@ -5,7 +5,7 @@
     <param name="exclude">.git</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
-    <param name="revision">v1.5.3</param>
+    <param name="revision">v1.5.4</param>
     <param name="changesgenerate">enable</param>
   </service>
   <service name="recompress" mode="disabled">

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.X5vRmE/_old  2021-04-08 21:32:32.391831774 +0200
+++ /var/tmp/diff_new_pack.X5vRmE/_new  2021-04-08 21:32:32.391831774 +0200
@@ -1,4 +1,4 @@
 <servicedata>
   <service name="tar_scm">
     <param name="url">https://github.com/vmware-tanzu/velero</param>
-    <param name="changesrevision">123109a3bcac11dbb6783d2758207bac0d0817cb</param></service></servicedata>
\ No newline at end of file
+    <param name="changesrevision">525705bceb8895b9da2cf2a1d1a79b99d74723cb</param></service></servicedata>
\ No newline at end of file

++++++ velero-1.5.3.tar.gz -> velero-1.5.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-16-9.yaml new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-16-9.yaml
--- old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-16-9.yaml      2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-16-9.yaml      1970-01-01 01:00:00.000000000 +0100
@@ -1,20 +0,0 @@
-name: "Verify Velero CRDs on k8s 1.16.9"
-on: [pull_request]
-
-jobs:
-  kind:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - uses: engineerd/setup-kind@v0.4.0
-        with:
-          image: "kindest/node:v1.16.9"
-      - name: Testing
-        run: |
-          kubectl cluster-info
-          kubectl get pods -n kube-system
-          kubectl version
-          echo "current-context:" $(kubectl config current-context)
-          echo "environment-kubeconfig:" ${KUBECONFIG}
-          make local
-          ./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-17-0.yaml new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-17-0.yaml
--- old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-17-0.yaml      2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-17-0.yaml      1970-01-01 01:00:00.000000000 +0100
@@ -1,20 +0,0 @@
-name: "Verify Velero CRDs on k8s 1.17"
-on: [pull_request]
-
-jobs:
-  kind:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - uses: engineerd/setup-kind@v0.4.0
-        with:
-          image: "kindest/node:v1.17.0"
-      - name: Testing
-        run: |
-          kubectl cluster-info
-          kubectl get pods -n kube-system
-          kubectl version
-          echo "current-context:" $(kubectl config current-context)
-          echo "environment-kubeconfig:" ${KUBECONFIG}
-          make local
-          ./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-18-4.yaml new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-18-4.yaml
--- old/velero-1.5.3/.github/workflows/crds-verify-k8s-1-18-4.yaml      2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/.github/workflows/crds-verify-k8s-1-18-4.yaml      1970-01-01 01:00:00.000000000 +0100
@@ -1,20 +0,0 @@
-name: "Verify Velero CRDs on k8s 1.18.4"
-on: [pull_request]
-
-jobs:
-  kind:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@master
-      - uses: engineerd/setup-kind@v0.4.0
-        with:
-          image: "kindest/node:v1.18.4"
-      - name: Testing
-        run: |
-          kubectl cluster-info
-          kubectl get pods -n kube-system
-          kubectl version
-          echo "current-context:" $(kubectl config current-context)
-          echo "environment-kubeconfig:" ${KUBECONFIG}
-          make local
-          ./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/.github/workflows/crds-verify-kind.yaml new/velero-1.5.4/.github/workflows/crds-verify-kind.yaml
--- old/velero-1.5.3/.github/workflows/crds-verify-kind.yaml    1970-01-01 01:00:00.000000000 +0100
+++ new/velero-1.5.4/.github/workflows/crds-verify-kind.yaml    2021-04-01 20:32:03.000000000 +0200
@@ -0,0 +1,86 @@
+name: "Verify Velero CRDs across k8s versions"
+on:
+  pull_request:
+    # Do not run when the change only includes these directories.
+    paths-ignore:
+      - "site/**"
+      - "design/**"
+
+jobs:
+  # Build the Velero CLI once for all Kubernetes versions, and cache it so the fan-out workers can get it.
+  build-cli:
+    runs-on: ubuntu-latest
+    steps:
+      # Look for a CLI that's made for this PR
+      - name: Fetch built CLI
+        id: cache
+        uses: actions/cache@v2
+        env:
+          cache-name: cache-velero-cli
+        with:
+          path: ./_output/bin/linux/amd64/velero
+          # The cache key a combination of the current PR number, and a SHA256 hash of the Velero binary
+          key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
+          # This key controls the prefixes that we'll look at in the cache to restore from
+          restore-keys: |
+            velero-${{ github.event.pull_request.number }}-
+
+      - name: Fetch cached go modules
+        uses: actions/cache@v2
+        if: steps.cache.outputs.cache-hit != 'true'
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+
+      - name: Check out the code
+        uses: actions/checkout@v2
+        if: steps.cache.outputs.cache-hit != 'true'
+
+      # If no binaries were built for this PR, build it now.
+      - name: Build Velero CLI
+        if: steps.cache.outputs.cache-hit != 'true'
+        run: |
+          make local
+
+  # Check the common CLI against all kubernetes versions
+  crd-check:
+    needs: build-cli
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        # Latest k8s versions. There's no series-based tag, nor is there a latest tag.
+        k8s:
+          - 1.15.12
+          - 1.16.15
+          - 1.17.17
+          - 1.18.15
+          - 1.19.7
+          - 1.20.2
+    # All steps run in parallel unless otherwise specified.
+    # See https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#creating-dependent-jobs
+    steps:
+      - name: Fetch built CLI
+        id: cache
+        uses: actions/cache@v2
+        env:
+          cache-name: cache-velero-cli
+        with:
+          path: ./_output/bin/linux/amd64/velero
+          # The cache key a combination of the current PR number, and a SHA256 hash of the Velero binary
+          key: velero-${{ github.event.pull_request.number }}-${{ hashFiles('./_output/bin/linux/amd64/velero') }}
+          # This key controls the prefixes that we'll look at in the cache to restore from
+          restore-keys: |
+            velero-${{ github.event.pull_request.number }}-
+      - uses: engineerd/setup-kind@v0.5.0
+        with:
+          image: "kindest/node:v${{ matrix.k8s }}"
+      - name: Install CRDs
+        run: |
+          kubectl cluster-info
+          kubectl get pods -n kube-system
+          kubectl version
+          echo "current-context:" $(kubectl config current-context)
+          echo "environment-kubeconfig:" ${KUBECONFIG}
+          ./_output/bin/linux/amd64/velero install --crds-only --dry-run -oyaml | kubectl apply -f -
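[Editor's note on the new workflow above, based on the documented actions/cache key/restore-keys behavior; the workflow itself only states the intent in its comments: the exact key velero-<PR number>-<binary hash> can only hit when the same binary is already cached, so on a fresh push build-cli falls through to the velero-<PR number>- prefix, builds the CLI once on a miss, and saves it at the end of the job; the six kind-based crd-check jobs then restore that one binary by prefix instead of each rebuilding it.]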
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/.github/workflows/pr-ci-check.yml new/velero-1.5.4/.github/workflows/pr-ci-check.yml
--- old/velero-1.5.3/.github/workflows/pr-ci-check.yml  2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/.github/workflows/pr-ci-check.yml  2021-04-01 20:32:03.000000000 +0200
@@ -1,14 +1,20 @@
 name: Pull Request CI Check
 on: [pull_request]
 jobs:
-
   build:
     name: Run CI
     runs-on: ubuntu-latest
     steps:
+      - name: Check out the code
+        uses: actions/checkout@v2
 
-      - name: Check out the code
-        uses: actions/checkout@v2
+      - name: Fetch cached go modules
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
 
-      - name: Make ci
-        run: make ci
+      - name: Make ci
+        run: make ci
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/Makefile new/velero-1.5.4/Makefile
--- old/velero-1.5.3/Makefile   2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/Makefile   2021-04-01 20:32:03.000000000 +0200
@@ -26,13 +26,29 @@
 # Image name
 IMAGE ?= $(REGISTRY)/$(BIN)
 
-# Build image handling. We push a build image for every changed version of
+# We allow the Dockerfile to be configurable to enable the use of custom Dockerfiles
+# that pull base images from different registries.
+VELERO_DOCKERFILE ?= Dockerfile
+BUILDER_IMAGE_DOCKERFILE ?= hack/build-image/Dockerfile
+
+# Calculate the realpath of the build-image Dockerfile as we `cd` into the hack/build
+# directory before this Dockerfile is used and any relative path will not be valid.
+BUILDER_IMAGE_DOCKERFILE_REALPATH := $(shell realpath $(BUILDER_IMAGE_DOCKERFILE))
+
+# Build image handling. We push a build image for every changed version of
 # /hack/build-image/Dockerfile. We tag the dockerfile with the short commit hash
 # of the commit that changed it. When determining if there is a build image in
 # the registry to use we look for one that matches the current "commit" for the
 # Dockerfile else we make one.
+# In the case where the Dockerfile for the build image has been overridden using
+# the BUILDER_IMAGE_DOCKERFILE variable, we always force a build.
+
+ifneq "$(origin BUILDER_IMAGE_DOCKERFILE)" "file"
+    BUILDER_IMAGE_TAG := "custom"
+else
+    BUILDER_IMAGE_TAG := $(shell git log -1 --pretty=%h $(BUILDER_IMAGE_DOCKERFILE))
+endif
 
-BUILDER_IMAGE_TAG := $(shell git log -1 --pretty=%h hack/build-image/Dockerfile)
 BUILDER_IMAGE := $(REGISTRY)/build-image:$(BUILDER_IMAGE_TAG)
 BUILDER_IMAGE_CACHED := $(shell docker images -q ${BUILDER_IMAGE} 2>/dev/null )
@@ -170,7 +186,7 @@
 		--build-arg=VERSION=$(VERSION) \
 		--build-arg=GIT_SHA=$(GIT_SHA) \
 		--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
-		-f Dockerfile .
+		-f $(VELERO_DOCKERFILE) .
 
 container:
 ifneq ($(BUILDX_ENABLED), true)
@@ -186,7 +202,7 @@
 		--build-arg=GIT_SHA=$(GIT_SHA) \
 		--build-arg=GIT_TREE_STATE=$(GIT_TREE_STATE) \
 		--build-arg=RESTIC_VERSION=$(RESTIC_VERSION) \
-		-f Dockerfile .
+		-f $(VELERO_DOCKERFILE) .
 	@echo "container: $(IMAGE):$(VERSION)"
 
 SKIP_TESTS ?=
@@ -233,11 +249,17 @@
 	@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH) .go/go-build .go/golangci-lint
 
 build-env:
-	@# if we detect changes in dockerfile force a new build-image
+	@# if we have overridden the value for the build-image Dockerfile,
+	@# force a build using that Dockerfile
+	@# if we detect changes in dockerfile force a new build-image
 	@# else if we dont have a cached image make one
 	@# finally use the cached image
-ifneq ($(shell git diff --quiet HEAD -- hack/build-image/Dockerfile; echo $$?), 0)
-	@echo "Local changes detected in hack/build-image/Dockerfile"
+ifneq "$(origin BUILDER_IMAGE_DOCKERFILE)" "file"
+	@echo "Dockerfile for builder image has been overridden to $(BUILDER_IMAGE_DOCKERFILE)"
+	@echo "Preparing a new builder-image"
+	$(MAKE) build-image
+else ifneq ($(shell git diff --quiet HEAD -- $(BUILDER_IMAGE_DOCKERFILE); echo $$?), 0)
+	@echo "Local changes detected in $(BUILDER_IMAGE_DOCKERFILE)"
 	@echo "Preparing a new builder-image"
 	$(MAKE) build-image
 else ifneq ($(BUILDER_IMAGE_CACHED),)
@@ -252,9 +274,9 @@
 	@# This makes sure we don't leave the orphaned image behind.
 	$(eval old_id=$(shell docker image inspect --format '{{ .ID }}' ${BUILDER_IMAGE} 2>/dev/null))
ifeq ($(BUILDX_ENABLED), true)
-	@cd hack/build-image && docker buildx build --build-arg=GOPROXY=$(GOPROXY) --output=type=docker --pull -t $(BUILDER_IMAGE) .
+	@cd hack/build-image && docker buildx build --build-arg=GOPROXY=$(GOPROXY) --output=type=docker --pull -t $(BUILDER_IMAGE) -f $(BUILDER_IMAGE_DOCKERFILE_REALPATH) .
 else
-	@cd hack/build-image && docker build --build-arg=GOPROXY=$(GOPROXY) --pull -t $(BUILDER_IMAGE) .
+	@cd hack/build-image && docker build --build-arg=GOPROXY=$(GOPROXY) --pull -t $(BUILDER_IMAGE) -f $(BUILDER_IMAGE_DOCKERFILE_REALPATH) .
 endif
 	$(eval new_id=$(shell docker image inspect --format '{{ .ID }}' ${BUILDER_IMAGE} 2>/dev/null))
 	@if [ "$(old_id)" != "" ] && [ "$(old_id)" != "$(new_id)" ]; then \
@@ -264,7 +286,13 @@
 push-build-image:
 	@# this target will push the build-image it assumes you already have docker
 	@# credentials needed to accomplish this.
-	docker push $(BUILDER_IMAGE)
+	@# Pushing will be skipped if a custom Dockerfile was used to build the image.
+ifneq "$(origin BUILDER_IMAGE_DOCKERFILE)" "file"
+	@echo "Dockerfile for builder image has been overridden"
+	@echo "Skipping push of custom image"
+else
+	docker push $(BUILDER_IMAGE)
+endif
 
 build-image-hugo:
 	cd site && docker build --pull -t $(HUGO_IMAGE) .
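[Editor's note on the Makefile changes above: in practical terms both images become buildable from out-of-tree Dockerfiles without editing the Makefile, e.g. (file paths illustrative) `make container VELERO_DOCKERFILE=Dockerfile.custom` or `make build-env BUILDER_IMAGE_DOCKERFILE=/tmp/Dockerfile.builder`. Because a variable set on the command line has a GNU make origin of "command line" rather than "file", the `$(origin BUILDER_IMAGE_DOCKERFILE)` checks then tag the builder image "custom", always rebuild it, and skip pushing it.]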
+ ifneq "$(origin BUILDER_IMAGE_DOCKERFILE)" "file" + @echo "Dockerfile for builder image has been overridden" + @echo "Skipping push of custom image" + else + docker push $(BUILDER_IMAGE) + endif build-image-hugo: cd site && docker build --pull -t $(HUGO_IMAGE) . diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/changelogs/CHANGELOG-1.5.md new/velero-1.5.4/changelogs/CHANGELOG-1.5.md --- old/velero-1.5.3/changelogs/CHANGELOG-1.5.md 2021-01-14 16:13:14.000000000 +0100 +++ new/velero-1.5.4/changelogs/CHANGELOG-1.5.md 2021-04-01 20:32:03.000000000 +0200 @@ -1,3 +1,20 @@ +## v1.5.4 +### 2021-03-31 +### Download +https://github.com/vmware-tanzu/velero/releases/tag/v1.5.4 + +### Container Image +`velero/velero:v1.5.4` + +### Documentation +https://velero.io/docs/v1.5/ + +### Upgrading +https://velero.io/docs/v1.5/upgrade-to-1.5/ + + * Fixed a bug where restic volumes would not be restored when using a namespace mapping. (#3475, @zubron) + * Add CAPI Cluster and ClusterResourceSets to default restore priorities so that the capi-controller-manager does not panic on restores. (#3446, @nrb) + ## v1.5.3 ### 2021-01-14 ### Download diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/cmd/server/server.go new/velero-1.5.4/pkg/cmd/server/server.go --- old/velero-1.5.3/pkg/cmd/server/server.go 2021-01-14 16:13:14.000000000 +0100 +++ new/velero-1.5.4/pkg/cmd/server/server.go 2021-04-01 20:32:03.000000000 +0200 @@ -468,6 +468,9 @@ // have restic restores run before controllers adopt the pods. // - Replica sets go before deployments/other controllers so they can be explicitly // restored and be adopted by controllers. +// - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic. +// Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters. +// See https://github.com/kubernetes-sigs/cluster-api/issues/4105 var defaultRestorePriorities = []string{ "customresourcedefinitions", "namespaces", @@ -487,6 +490,8 @@ // to ensure that we prioritize restoring from "apps" too, since this is how they're stored // in the backup. "replicasets.apps", + "clusters.cluster.x-k8s.io", + "clusterresourcesets.addons.cluster.x-k8s.io", } func (s *server) initRestic() error { diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restic/common.go new/velero-1.5.4/pkg/restic/common.go --- old/velero-1.5.3/pkg/restic/common.go 2021-01-14 16:13:14.000000000 +0100 +++ new/velero-1.5.4/pkg/restic/common.go 2021-04-01 20:32:03.000000000 +0200 @@ -95,17 +95,17 @@ return res } -func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, pod metav1.Object) bool { - return pod.GetName() == pvb.Spec.Pod.Name && pod.GetNamespace() == pvb.Spec.Pod.Namespace +func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool { + return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace } // GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, // of the PodVolumeBackups that exist for the provided pod. 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restic/common.go new/velero-1.5.4/pkg/restic/common.go
--- old/velero-1.5.3/pkg/restic/common.go       2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restic/common.go       2021-04-01 20:32:03.000000000 +0200
@@ -95,17 +95,17 @@
     return res
 }
 
-func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, pod metav1.Object) bool {
-    return pod.GetName() == pvb.Spec.Pod.Name && pod.GetNamespace() == pvb.Spec.Pod.Namespace
+func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool {
+    return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace
 }
 
 // GetVolumeBackupsForPod returns a map, of volume name -> snapshot id,
 // of the PodVolumeBackups that exist for the provided pod.
-func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod metav1.Object) map[string]string {
+func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod metav1.Object, sourcePodNs string) map[string]string {
     volumes := make(map[string]string)
     for _, pvb := range podVolumeBackups {
-        if !isPVBMatchPod(pvb, pod) {
+        if !isPVBMatchPod(pvb, pod.GetName(), sourcePodNs) {
             continue
         }
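[Editor's note: the extra parameter is the heart of the #3475 fix — a PodVolumeBackup records the pod's original namespace, while the pod object handed in during a restore may already sit in a remapped namespace. A trimmed-down sketch of the corrected matching, using a local stand-in type; the real code compares against pvb.Spec.Pod as shown above:

  package main

  import "fmt"

  // podVolumeBackup is a local stand-in for velerov1api.PodVolumeBackup.
  type podVolumeBackup struct {
      PodName      string
      PodNamespace string
  }

  // isPVBMatchPod mirrors the fixed helper: it takes the pod name and the
  // *source* namespace explicitly instead of reading both off the pod object.
  func isPVBMatchPod(pvb podVolumeBackup, podName, namespace string) bool {
      return podName == pvb.PodName && namespace == pvb.PodNamespace
  }

  func main() {
      pvb := podVolumeBackup{PodName: "my-pod", PodNamespace: "original-ns"}

      // With a namespace mapping original-ns -> new-ns, the restored pod lives
      // in new-ns, so matching on the pod's current namespace misses the PVB
      // (the pre-1.5.4 behavior)...
      fmt.Println(isPVBMatchPod(pvb, "my-pod", "new-ns")) // false

      // ...while matching on the source namespace taken from the backup finds it.
      fmt.Println(isPVBMatchPod(pvb, "my-pod", "original-ns")) // true
  }
]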
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restic/common_test.go new/velero-1.5.4/pkg/restic/common_test.go
--- old/velero-1.5.3/pkg/restic/common_test.go  2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restic/common_test.go  2021-04-01 20:32:03.000000000 +0200
@@ -47,78 +47,93 @@
     podVolumeBackups []*velerov1api.PodVolumeBackup
     podAnnotations   map[string]string
     podName          string
+    sourcePodNs      string
     expected         map[string]string
 }{
     {
-        name:           "nil annotations",
+        name:           "nil annotations results in no volume backups returned",
         podAnnotations: nil,
         expected:       nil,
     },
     {
-        name:           "empty annotations",
+        name:           "empty annotations results in no volume backups returned",
         podAnnotations: make(map[string]string),
         expected:       nil,
     },
     {
-        name:           "non-empty map, no snapshot annotation",
+        name:           "pod annotations with no snapshot annotation prefix results in no volume backups returned",
         podAnnotations: map[string]string{"foo": "bar"},
         expected:       nil,
     },
     {
-        name:           "has snapshot annotation only, no suffix",
-        podAnnotations: map[string]string{podAnnotationPrefix: "bar"},
-        expected:       map[string]string{"": "bar"},
+        name:           "pod annotation with only snapshot annotation prefix, results in volume backup with empty volume key",
+        podAnnotations: map[string]string{podAnnotationPrefix: "snapshotID"},
+        expected:       map[string]string{"": "snapshotID"},
     },
     {
-        name:           "has snapshot annotation only, with suffix",
-        podAnnotations: map[string]string{podAnnotationPrefix + "foo": "bar"},
-        expected:       map[string]string{"foo": "bar"},
+        name:           "pod annotation with snapshot annotation prefix results in volume backup with volume name and snapshot ID",
+        podAnnotations: map[string]string{podAnnotationPrefix + "volume": "snapshotID"},
+        expected:       map[string]string{"volume": "snapshotID"},
     },
     {
-        name:           "has snapshot annotation, with suffix",
-        podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"},
-        expected:       map[string]string{"foo": "bar", "abc": "123"},
+        name:           "only pod annotations with snapshot annotation prefix are considered",
+        podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "volume1": "snapshot1", podAnnotationPrefix + "volume2": "snapshot2"},
+        expected:       map[string]string{"volume1": "snapshot1", "volume2": "snapshot2"},
     },
     {
-        name: "has snapshot annotation, with suffix, and also PVBs",
+        name: "pod annotations are not considered if PVBs are provided",
         podVolumeBackups: []*velerov1api.PodVolumeBackup{
-            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
         },
         podName:        "TestPod",
+        sourcePodNs:    "TestNS",
         podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"},
-        expected:       map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"},
+        expected:       map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
     },
     {
-        name: "no snapshot annotation, but with PVBs",
+        name: "volume backups are returned even if no pod annotations are present",
        podVolumeBackups: []*velerov1api.PodVolumeBackup{
-            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
         },
-        podName:  "TestPod",
-        expected: map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"},
+        podName:     "TestPod",
+        sourcePodNs: "TestNS",
+        expected:    map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
     },
     {
-        name: "no snapshot annotation, but with PVBs, some of which have snapshot IDs and some of which don't",
+        name: "only volumes from PVBs with snapshot IDs are returned",
         podVolumeBackups: []*velerov1api.PodVolumeBackup{
-            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").Volume("pvbtest3-foo").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-4").PodName("TestPod").Volume("pvbtest4-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest3-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-4").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest4-abc").Result(),
         },
-        podName:  "TestPod",
-        expected: map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"},
+        podName:     "TestPod",
+        sourcePodNs: "TestNS",
+        expected:    map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
     },
     {
-        name: "has snapshot annotation, with suffix, and with PVBs from current pod and a PVB from another pod",
+        name: "only volumes from PVBs for the given pod are returned",
         podVolumeBackups: []*velerov1api.PodVolumeBackup{
-            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(),
-            builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("xyz").Volume("pvbtest3-xyz").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(),
         },
-        podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"},
-        podName:        "TestPod",
-        expected:       map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"},
+        podName:     "TestPod",
+        sourcePodNs: "TestNS",
+        expected:    map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"},
+    },
+    {
+        name: "only volumes from PVBs which match the pod name and source pod namespace are returned",
+        podVolumeBackups: []*velerov1api.PodVolumeBackup{
+            builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestAnotherPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(),
+            builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestAnotherNS").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(),
+        },
+        podName:     "TestPod",
+        sourcePodNs: "TestNS",
+        expected:    map[string]string{"pvbtest1-foo": "snapshot1"},
     },
 }
@@ -128,7 +143,7 @@
         pod.Annotations = test.podAnnotations
         pod.Name = test.podName
 
-        res := GetVolumeBackupsForPod(test.podVolumeBackups, pod)
+        res := GetVolumeBackupsForPod(test.podVolumeBackups, pod, test.sourcePodNs)
         assert.Equal(t, test.expected, res)
     })
 }
@@ -566,17 +581,14 @@
 
 func TestIsPVBMatchPod(t *testing.T) {
     testCases := []struct {
-        name     string
-        pod      metav1.Object
-        pvb      velerov1api.PodVolumeBackup
-        expected bool
+        name        string
+        pvb         velerov1api.PodVolumeBackup
+        podName     string
+        sourcePodNs string
+        expected    bool
     }{
         {
             name: "should match PVB and pod",
-            pod: &metav1.ObjectMeta{
-                Name:      "matching-pod",
-                Namespace: "matching-namespace",
-            },
             pvb: velerov1api.PodVolumeBackup{
                 Spec: velerov1api.PodVolumeBackupSpec{
                     Pod: corev1api.ObjectReference{
@@ -585,14 +597,12 @@
                 },
             },
         },
-        expected: true,
+        podName:     "matching-pod",
+        sourcePodNs: "matching-namespace",
+        expected:    true,
     },
     {
         name: "should not match PVB and pod, pod name mismatch",
-        pod: &metav1.ObjectMeta{
-            Name:      "not-matching-pod",
-            Namespace: "matching-namespace",
-        },
         pvb: velerov1api.PodVolumeBackup{
             Spec: velerov1api.PodVolumeBackupSpec{
                 Pod: corev1api.ObjectReference{
@@ -601,14 +611,12 @@
                 },
             },
         },
-        expected: false,
+        podName:     "not-matching-pod",
+        sourcePodNs: "matching-namespace",
+        expected:    false,
     },
     {
         name: "should not match PVB and pod, pod namespace mismatch",
-        pod: &metav1.ObjectMeta{
-            Name:      "matching-pod",
-            Namespace: "not-matching-namespace",
-        },
         pvb: velerov1api.PodVolumeBackup{
             Spec: velerov1api.PodVolumeBackupSpec{
                 Pod: corev1api.ObjectReference{
@@ -617,14 +625,12 @@
                 },
             },
         },
-        expected: false,
+        podName:     "matching-pod",
+        sourcePodNs: "not-matching-namespace",
+        expected:    false,
     },
     {
         name: "should not match PVB and pod, pod name and namespace mismatch",
-        pod: &metav1.ObjectMeta{
-            Name:      "not-matching-pod",
-            Namespace: "not-matching-namespace",
-        },
         pvb: velerov1api.PodVolumeBackup{
             Spec: velerov1api.PodVolumeBackupSpec{
                 Pod: corev1api.ObjectReference{
@@ -633,13 +639,15 @@
                 },
             },
         },
-        expected: false,
+        podName:     "not-matching-pod",
+        sourcePodNs: "not-matching-namespace",
+        expected:    false,
     },
 }
 
 for _, tc := range testCases {
     t.Run(tc.name, func(t *testing.T) {
-        actual := isPVBMatchPod(&tc.pvb, tc.pod)
+        actual := isPVBMatchPod(&tc.pvb, tc.podName, tc.sourcePodNs)
         assert.Equal(t, tc.expected, actual)
     })
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restic/restorer.go new/velero-1.5.4/pkg/restic/restorer.go
--- old/velero-1.5.3/pkg/restic/restorer.go     2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restic/restorer.go     2021-04-01 20:32:03.000000000 +0200
@@ -92,7 +92,7 @@
 }
 
 func (r *restorer) RestorePodVolumes(data RestoreData) []error {
-    volumesToRestore := GetVolumeBackupsForPod(data.PodVolumeBackups, data.Pod)
+    volumesToRestore := GetVolumeBackupsForPod(data.PodVolumeBackups, data.Pod, data.SourceNamespace)
     if len(volumesToRestore) == 0 {
         return nil
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restore/restic_restore_action.go new/velero-1.5.4/pkg/restore/restic_restore_action.go
--- old/velero-1.5.3/pkg/restore/restic_restore_action.go       2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restore/restic_restore_action.go       2021-04-01 20:32:03.000000000 +0200
@@ -76,6 +76,15 @@
         return nil, errors.Wrap(err, "unable to convert pod from runtime.Unstructured")
     }
 
+    // At the point when this function is called, the namespace mapping for the restore
+    // has not yet been applied to `input.Item` so we can't perform a reverse-lookup in
+    // the namespace mapping in the restore spec. Instead, use the pod from the backup
+    // so that if the mapping is applied earlier, we still use the correct namespace.
+    var podFromBackup corev1.Pod
+    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.ItemFromBackup.UnstructuredContent(), &podFromBackup); err != nil {
+        return nil, errors.Wrap(err, "unable to convert source pod from runtime.Unstructured")
+    }
+
     log := a.logger.WithField("pod", kube.NamespaceAndName(&pod))
 
     opts := label.NewListOptionsForBackup(input.Restore.Spec.BackupName)
@@ -88,7 +97,7 @@
     for i := range podVolumeBackupList.Items {
         podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i])
     }
-    volumeSnapshots := restic.GetVolumeBackupsForPod(podVolumeBackups, &pod)
+    volumeSnapshots := restic.GetVolumeBackupsForPod(podVolumeBackups, &pod, podFromBackup.Namespace)
     if len(volumeSnapshots) == 0 {
         log.Debug("No restic backups found for pod")
         return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
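[Editor's note, working through the change above with a concrete example: for a restore that maps original-ns -> new-ns, the pod decoded from input.Item may already carry namespace new-ns, so looking up PodVolumeBackups by that value would find nothing; the pod decoded from input.ItemFromBackup still carries original-ns, which is why podFromBackup.Namespace is what gets passed to restic.GetVolumeBackupsForPod in the second hunk.]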
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restore/restic_restore_action_test.go new/velero-1.5.4/pkg/restore/restic_restore_action_test.go
--- old/velero-1.5.3/pkg/restore/restic_restore_action_test.go  2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restore/restic_restore_action_test.go  2021-04-01 20:32:03.000000000 +0200
@@ -122,6 +122,7 @@
     tests := []struct {
         name             string
         pod              *corev1api.Pod
+        podFromBackup    *corev1api.Pod
         podVolumeBackups []*velerov1api.PodVolumeBackup
         want             *corev1api.Pod
     }{
@@ -202,6 +203,49 @@
             builder.ForContainer("first-container", "").Result()).
             Result(),
     },
+    {
+        name: "Restoring pod in another namespace adds the restic initContainer and uses the namespace of the backup pod for matching PVBs",
+        pod: builder.ForPod("new-ns", "my-pod").
+            Volumes(
+                builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(),
+                builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(),
+            ).
+            Result(),
+        podFromBackup: builder.ForPod("original-ns", "my-pod").
+            Volumes(
+                builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(),
+                builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(),
+            ).
+            Result(),
+        podVolumeBackups: []*velerov1api.PodVolumeBackup{
+            builder.ForPodVolumeBackup(veleroNs, "pvb-1").
+                PodName("my-pod").
+                PodNamespace("original-ns").
+                Volume("vol-1").
+                ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, backupName)).
+                SnapshotID("foo").
+                Result(),
+            builder.ForPodVolumeBackup(veleroNs, "pvb-2").
+                PodName("my-pod").
+                PodNamespace("original-ns").
+                Volume("vol-2").
+                ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, backupName)).
+                SnapshotID("foo").
+                Result(),
+        },
+        want: builder.ForPod("new-ns", "my-pod").
+            Volumes(
+                builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(),
+                builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(),
+            ).
+            InitContainers(
+                newResticInitContainerBuilder(initContainerImage(defaultImageBase), "").
+                    Resources(&resourceReqs).
+                    SecurityContext(&securityContext).
+                    VolumeMounts(builder.ForVolumeMount("vol-1", "/restores/vol-1").Result(), builder.ForVolumeMount("vol-2", "/restores/vol-2").Result()).
+                    Command([]string{"/velero-restic-restore-helper"}).Result()).
+            Result(),
+    },
 }
 
 for _, tc := range tests {
@@ -214,12 +258,24 @@
         require.NoError(t, err)
     }
 
-    unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pod)
+    unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pod)
     require.NoError(t, err)
 
+    // Default to using the same pod for both Item and ItemFromBackup if podFromBackup not provided
+    var unstructuredPodFromBackup map[string]interface{}
+    if tc.podFromBackup != nil {
+        unstructuredPodFromBackup, err = runtime.DefaultUnstructuredConverter.ToUnstructured(tc.podFromBackup)
+        require.NoError(t, err)
+    } else {
+        unstructuredPodFromBackup = unstructuredPod
+    }
+
     input := &velero.RestoreItemActionExecuteInput{
         Item: &unstructured.Unstructured{
-            Object: unstructuredMap,
+            Object: unstructuredPod,
         },
+        ItemFromBackup: &unstructured.Unstructured{
+            Object: unstructuredPodFromBackup,
+        },
         Restore: builder.ForRestore(veleroNs, restoreName).
             Backup(backupName).
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/velero-1.5.3/pkg/restore/restore.go new/velero-1.5.4/pkg/restore/restore.go
--- old/velero-1.5.3/pkg/restore/restore.go     2021-01-14 16:13:14.000000000 +0100
+++ new/velero-1.5.4/pkg/restore/restore.go     2021-04-01 20:32:03.000000000 +0200
@@ -1172,7 +1172,7 @@
         return warnings, errs
     }
 
-    if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj)) > 0 {
+    if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj, originalNamespace)) > 0 {
         restorePodVolumeBackups(ctx, createdObj, originalNamespace)
     }

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/velero/vendor.tar.gz /work/SRC/openSUSE:Factory/.velero.new.2401/vendor.tar.gz differ: char 5, line 1