Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package helm for openSUSE:Factory checked in at 2026-02-11 18:48:53
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/helm (Old)
 and      /work/SRC/openSUSE:Factory/.helm.new.1670 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "helm"

Wed Feb 11 18:48:53 2026 rev:96 rq:1332393 version:4.1.1

Changes:
--------
--- /work/SRC/openSUSE:Factory/helm/helm.changes        2026-01-27 16:08:43.197071786 +0100
+++ /work/SRC/openSUSE:Factory/.helm.new.1670/helm.changes      2026-02-11 18:50:08.384633357 +0100
@@ -1,0 +2,11 @@
+Tue Feb 10 05:56:17 UTC 2026 - Johannes Kastl <[email protected]>
+
+- Update to version 4.1.1:
+  * Notable Changes
+    - fix: fine-grained context options for waiting #31735
+    - fix: kstatus do not wait forever on failed resources #31730
+    - fix: Revert "Consider GroupVersionKind when matching
+      resources" #31772
+    - fix: handle nil elements in slice copying #31751
+
+-------------------------------------------------------------------

Old:
----
  helm-4.1.0.obscpio

New:
----
  helm-4.1.1.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ helm.spec ++++++
--- /var/tmp/diff_new_pack.dOH9BW/_old  2026-02-11 18:50:09.788692320 +0100
+++ /var/tmp/diff_new_pack.dOH9BW/_new  2026-02-11 18:50:09.800692824 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           helm
-Version:        4.1.0
+Version:        4.1.1
 Release:        0
 Summary:        The Kubernetes Package Manager
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.dOH9BW/_old  2026-02-11 18:50:10.004701392 +0100
+++ /var/tmp/diff_new_pack.dOH9BW/_new  2026-02-11 18:50:10.028702400 +0100
@@ -5,7 +5,7 @@
     <param name="exclude">.git</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="versionrewrite-pattern">v(.*)</param>
-    <param name="revision">v4.1.0</param>
+    <param name="revision">v4.1.1</param>
     <param name="changesgenerate">enable</param>
   </service>
   <service name="set_version" mode="manual">

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.dOH9BW/_old  2026-02-11 18:50:10.132706767 +0100
+++ /var/tmp/diff_new_pack.dOH9BW/_new  2026-02-11 18:50:10.168708279 +0100
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
                 <param name="url">https://github.com/helm/helm.git</param>
-              <param name="changesrevision">4553a0a96e5205595079b6757236cc6f969ed1b9</param></service></servicedata>
+              <param name="changesrevision">5caf0044d4ef3d62a955440272999e139aafbbed</param></service></servicedata>
 (No newline at EOF)
 

++++++ helm-4.1.0.obscpio -> helm-4.1.1.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/internal/copystructure/copystructure.go new/helm-4.1.1/internal/copystructure/copystructure.go
--- old/helm-4.1.0/internal/copystructure/copystructure.go      2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/internal/copystructure/copystructure.go      2026-01-29 19:23:46.000000000 +0100
@@ -89,7 +89,15 @@
                }
                copied := reflect.MakeSlice(original.Type(), original.Len(), original.Cap())
                for i := 0; i < original.Len(); i++ {
-                       val, err := copyValue(original.Index(i))
+                       elem := original.Index(i)
+
+                       // Handle nil values in slices (e.g., interface{} elements that are nil)
+                       if elem.Kind() == reflect.Interface && elem.IsNil() {
+                               copied.Index(i).Set(elem)
+                               continue
+                       }
+
+                       val, err := copyValue(elem)
                        if err != nil {
                                return nil, err
                        }
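
For context, a self-contained sketch of the failure mode this hunk guards against (the helper name copySlice is hypothetical, not Helm's API): reflect.ValueOf(elem.Interface()) yields the invalid zero Value when an element is a nil interface, and Set panics on it.

package main

import (
    "fmt"
    "reflect"
)

func copySlice(in []any) []any {
    original := reflect.ValueOf(in)
    copied := reflect.MakeSlice(original.Type(), original.Len(), original.Cap())
    for i := 0; i < original.Len(); i++ {
        elem := original.Index(i)
        // Without this guard, reflect.ValueOf(elem.Interface()) below would
        // be the invalid zero Value for a nil element, and Set would panic
        // with "reflect: Set using zero Value argument".
        if elem.Kind() == reflect.Interface && elem.IsNil() {
            copied.Index(i).Set(elem) // carry the nil over unchanged
            continue
        }
        copied.Index(i).Set(reflect.ValueOf(elem.Interface()))
    }
    return copied.Interface().([]any)
}

func main() {
    fmt.Println(copySlice([]any{"value1", nil, "value2"})) // [value1 <nil> value2]
}
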
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/internal/copystructure/copystructure_test.go new/helm-4.1.1/internal/copystructure/copystructure_test.go
--- old/helm-4.1.0/internal/copystructure/copystructure_test.go 2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/internal/copystructure/copystructure_test.go 2026-01-29 19:23:46.000000000 +0100
@@ -113,6 +113,21 @@
                input[0]["key1"] = "modified"
                assert.Equal(t, "value1", resultSlice[0]["key1"])
        })
+
+       t.Run("slice with nil elements", func(t *testing.T) {
+               input := []any{
+                       "value1",
+                       nil,
+                       "value2",
+               }
+               result, err := Copy(input)
+               require.NoError(t, err)
+
+               resultSlice, ok := result.([]any)
+               require.True(t, ok)
+               assert.Equal(t, input, resultSlice)
+               assert.Nil(t, resultSlice[1])
+       })
 }
 
 func TestCopy_Map(t *testing.T) {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/client.go new/helm-4.1.1/pkg/kube/client.go
--- old/helm-4.1.0/pkg/kube/client.go   2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/client.go   2026-01-29 19:23:46.000000000 +0100
@@ -167,10 +167,14 @@
                waitContext = c.WaitContext
        }
        return &statusWaiter{
-               restMapper: restMapper,
-               client:     dynamicClient,
-               ctx:        waitContext,
-               readers:    o.statusReaders,
+               restMapper:         restMapper,
+               client:             dynamicClient,
+               ctx:                waitContext,
+               watchUntilReadyCtx: o.watchUntilReadyCtx,
+               waitCtx:            o.waitCtx,
+               waitWithJobsCtx:    o.waitWithJobsCtx,
+               waitForDeleteCtx:   o.waitForDeleteCtx,
+               readers:            o.statusReaders,
        }, nil
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/client_test.go new/helm-4.1.1/pkg/kube/client_test.go
--- old/helm-4.1.0/pkg/kube/client_test.go      2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/client_test.go      2026-01-29 19:23:46.000000000 +0100
@@ -32,9 +32,11 @@
        "github.com/fluxcd/cli-utils/pkg/kstatus/polling/event"
        "github.com/fluxcd/cli-utils/pkg/kstatus/status"
        "github.com/fluxcd/cli-utils/pkg/object"
+       "github.com/fluxcd/cli-utils/pkg/testutil"
        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
 
+       batchv1 "k8s.io/api/batch/v1"
        v1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/api/meta"
@@ -44,8 +46,10 @@
        "k8s.io/apimachinery/pkg/runtime/schema"
        jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
        "k8s.io/apimachinery/pkg/types"
+       "k8s.io/apimachinery/pkg/util/yaml"
        "k8s.io/cli-runtime/pkg/genericclioptions"
        "k8s.io/cli-runtime/pkg/resource"
+       dynamicfake "k8s.io/client-go/dynamic/fake"
        "k8s.io/client-go/kubernetes"
        k8sfake "k8s.io/client-go/kubernetes/fake"
        "k8s.io/client-go/kubernetes/scheme"
@@ -1205,7 +1209,7 @@
        }
 }
 
-func newTestCustomResourceData(metadata map[string]string, spec map[string]interface{}) *unstructured.Unstructured {
+func newTestCustomResourceData(metadata map[string]string, spec map[string]any) *unstructured.Unstructured {
        if metadata == nil {
                metadata = make(map[string]string)
        }
@@ -1215,7 +1219,7 @@
        if _, ok := metadata["namespace"]; !ok {
                metadata["namespace"] = "default"
        }
-       o := map[string]interface{}{
+       o := map[string]any{
                "apiVersion": "crd.com/v1",
                "kind":       "Data",
                "metadata":   metadata,
@@ -1238,7 +1242,7 @@
                name:     "take ownership of resource",
                target:   target,
                original: target,
-               actual: newTestCustomResourceData(nil, map[string]interface{}{
+               actual: newTestCustomResourceData(nil, map[string]any{
                        "color": "red",
                }),
                threeWayMergeForUnstructured: true,
@@ -1254,7 +1258,7 @@
 }
 
 func TestCreatePatchCustomResourceSpec(t *testing.T) {
-       target := newTestCustomResourceData(nil, map[string]interface{}{
+       target := newTestCustomResourceData(nil, map[string]any{
                "color": "red",
                "size":  "large",
        })
@@ -1262,7 +1266,7 @@
                name:     "merge with spec of existing custom resource",
                target:   target,
                original: target,
-               actual: newTestCustomResourceData(nil, map[string]interface{}{
+               actual: newTestCustomResourceData(nil, map[string]any{
                        "color":  "red",
                        "weight": "heavy",
                }),
@@ -2239,9 +2243,19 @@
                },
        }
 
-       var err error
-       c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
-       require.NoError(t, err)
+       // Create a fake dynamic client with the pod resource
+       fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+       fakeMapper := testutil.NewFakeRESTMapper(v1.SchemeGroupVersion.WithKind("Pod"))
+
+       // Create the pod in the fake client
+       createManifest(t, podManifest, fakeMapper, fakeClient)
+
+       // Set up the waiter with the fake client and custom status readers
+       c.Waiter = &statusWaiter{
+               client:     fakeClient,
+               restMapper: fakeMapper,
+               readers:    statusReaders,
+       }
 
        resources, err := c.Build(strings.NewReader(podManifest), false)
        require.NoError(t, err)
@@ -2271,9 +2285,19 @@
                },
        }
 
-       var err error
-       c.Waiter, err = c.GetWaiterWithOptions(StatusWatcherStrategy, WithKStatusReaders(statusReaders...))
-       require.NoError(t, err)
+       // Create a fake dynamic client with the job resource
+       fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+       fakeMapper := testutil.NewFakeRESTMapper(batchv1.SchemeGroupVersion.WithKind("Job"))
+
+       // Create the job in the fake client
+       createManifest(t, jobManifest, fakeMapper, fakeClient)
+
+       // Set up the waiter with the fake client and custom status readers
+       c.Waiter = &statusWaiter{
+               client:     fakeClient,
+               restMapper: fakeMapper,
+               readers:    statusReaders,
+       }
 
        resources, err := c.Build(strings.NewReader(jobManifest), false)
        require.NoError(t, err)
@@ -2283,3 +2307,18 @@
        err = c.WaitWithJobs(resources, time.Second*3)
        require.NoError(t, err)
 }
+
+func createManifest(t *testing.T, manifest string,
+       fakeMapper meta.RESTMapper, fakeClient *dynamicfake.FakeDynamicClient) {
+       t.Helper()
+
+       m := make(map[string]any)
+       err := yaml.Unmarshal([]byte(manifest), &m)
+       require.NoError(t, err)
+       obj := &unstructured.Unstructured{Object: m}
+       gvk := obj.GroupVersionKind()
+       mapping, err := fakeMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+       require.NoError(t, err)
+       err = fakeClient.Tracker().Create(mapping.Resource, obj, obj.GetNamespace())
+       require.NoError(t, err)
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/options.go new/helm-4.1.1/pkg/kube/options.go
--- old/helm-4.1.0/pkg/kube/options.go  2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/options.go  2026-01-29 19:23:46.000000000 +0100
@@ -26,12 +26,45 @@
 type WaitOption func(*waitOptions)
 
 // WithWaitContext sets the context for waiting on resources.
+// If unset, context.Background() will be used.
 func WithWaitContext(ctx context.Context) WaitOption {
        return func(wo *waitOptions) {
                wo.ctx = ctx
        }
 }
 
+// WithWatchUntilReadyMethodContext sets the context specifically for the WatchUntilReady method.
+// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
+func WithWatchUntilReadyMethodContext(ctx context.Context) WaitOption {
+       return func(wo *waitOptions) {
+               wo.watchUntilReadyCtx = ctx
+       }
+}
+
+// WithWaitMethodContext sets the context specifically for the Wait method.
+// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
+func WithWaitMethodContext(ctx context.Context) WaitOption {
+       return func(wo *waitOptions) {
+               wo.waitCtx = ctx
+       }
+}
+
+// WithWaitWithJobsMethodContext sets the context specifically for the WaitWithJobs method.
+// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
+func WithWaitWithJobsMethodContext(ctx context.Context) WaitOption {
+       return func(wo *waitOptions) {
+               wo.waitWithJobsCtx = ctx
+       }
+}
+
+// WithWaitForDeleteMethodContext sets the context specifically for the WaitForDelete method.
+// If unset, the context set by `WithWaitContext` will be used (falling back to `context.Background()`).
+func WithWaitForDeleteMethodContext(ctx context.Context) WaitOption {
+       return func(wo *waitOptions) {
+               wo.waitForDeleteCtx = ctx
+       }
+}
+
 // WithKStatusReaders sets the status readers to be used while waiting on resources.
 func WithKStatusReaders(readers ...engine.StatusReader) WaitOption {
        return func(wo *waitOptions) {
@@ -40,6 +73,10 @@
 }
 
 type waitOptions struct {
-       ctx           context.Context
-       statusReaders []engine.StatusReader
+       ctx                context.Context
+       watchUntilReadyCtx context.Context
+       waitCtx            context.Context
+       waitWithJobsCtx    context.Context
+       waitForDeleteCtx   context.Context
+       statusReaders      []engine.StatusReader
 }
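
A hedged usage sketch for these new options (illustrative only, not code from the release; it assumes a *kube.Client value named c, and reuses the GetWaiterWithOptions / StatusWatcherStrategy names that appear elsewhere in this diff): the context passed to WithWaitContext applies to every wait method unless a method-specific option overrides it.

ctx := context.Background()
deleteCtx, cancelDelete := context.WithCancel(ctx)
defer cancelDelete()

waiter, err := c.GetWaiterWithOptions(
    kube.StatusWatcherStrategy,
    kube.WithWaitContext(ctx),                      // default for all methods
    kube.WithWaitForDeleteMethodContext(deleteCtx), // overrides WaitForDelete only
)
if err != nil {
    return err
}
c.Waiter = waiter
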
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/resource.go new/helm-4.1.1/pkg/kube/resource.go
--- old/helm-4.1.0/pkg/kube/resource.go 2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/resource.go 2026-01-29 19:23:46.000000000 +0100
@@ -79,7 +79,14 @@
        return r.Filter(rs.Contains)
 }
 
-// isMatchingInfo returns true if infos match on Name and GroupVersionKind.
+// isMatchingInfo returns true if infos match on Name, Namespace, Group and Kind.
+//
+// IMPORTANT: Version is intentionally excluded from the comparison. Resources
+// served by the same CRD at different API versions (e.g. v2beta1 vs v2beta2)
+// share the same underlying storage in the Kubernetes API server. Comparing
+// the full GroupVersionKind causes Difference() to treat a version change as
+// a resource removal + addition, which makes Helm delete the resource it just
+// created during upgrades. See https://github.com/helm/helm/issues/31768
 func isMatchingInfo(a, b *resource.Info) bool {
-       return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind == b.Mapping.GroupVersionKind
+       return a.Name == b.Name && a.Namespace == b.Namespace && a.Mapping.GroupVersionKind.GroupKind() == b.Mapping.GroupVersionKind.GroupKind()
 }
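
A minimal standalone illustration of the behavioral difference (constructed for this note, not taken from the Helm tree):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
    oldGVK := schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta1", Kind: "HorizontalPodAutoscaler"}
    newGVK := schema.GroupVersionKind{Group: "autoscaling", Version: "v2beta2", Kind: "HorizontalPodAutoscaler"}

    // Comparing full GVKs treats a version bump as a different resource,
    // which made Difference() schedule a delete during upgrades.
    fmt.Println(oldGVK == newGVK) // false

    // Comparing only GroupKind recognizes the same underlying stored object.
    fmt.Println(oldGVK.GroupKind() == newGVK.GroupKind()) // true
}
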
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/resource_test.go new/helm-4.1.1/pkg/kube/resource_test.go
--- old/helm-4.1.0/pkg/kube/resource_test.go    2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/resource_test.go    2026-01-29 19:23:46.000000000 +0100
@@ -72,8 +72,8 @@
 
        gvkDiffVersion := schema.GroupVersionKind{Group: "group1", Version: "diff", Kind: "pod"}
        resourceInfoDiffVersion := resource.Info{Name: "name1", Namespace: "namespace1", Mapping: &meta.RESTMapping{GroupVersionKind: gvkDiffVersion}}
-       if isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) {
-               t.Error("expected resources not equal")
+       if !isMatchingInfo(&resourceInfo, &resourceInfoDiffVersion) {
+               t.Error("expected resources with different versions but same group and kind to be equal")
        }
 
        gvkDiffKind := schema.GroupVersionKind{Group: "group1", Version: "version1", Kind: "deployment"}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/statuswait.go new/helm-4.1.1/pkg/kube/statuswait.go
--- old/helm-4.1.0/pkg/kube/statuswait.go       2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/statuswait.go       2026-01-29 19:23:46.000000000 +0100
@@ -42,10 +42,14 @@
 )
 
 type statusWaiter struct {
-       client     dynamic.Interface
-       restMapper meta.RESTMapper
-       ctx        context.Context
-       readers    []engine.StatusReader
+       client             dynamic.Interface
+       restMapper         meta.RESTMapper
+       ctx                context.Context
+       watchUntilReadyCtx context.Context
+       waitCtx            context.Context
+       waitWithJobsCtx    context.Context
+       waitForDeleteCtx   context.Context
+       readers            []engine.StatusReader
 }
 
 // DefaultStatusWatcherTimeout is the timeout used by the status waiter when a
@@ -66,7 +70,7 @@
        if timeout == 0 {
                timeout = DefaultStatusWatcherTimeout
        }
-       ctx, cancel := w.contextWithTimeout(timeout)
+       ctx, cancel := w.contextWithTimeout(w.watchUntilReadyCtx, timeout)
        defer cancel()
        slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
        sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
@@ -88,7 +92,7 @@
        if timeout == 0 {
                timeout = DefaultStatusWatcherTimeout
        }
-       ctx, cancel := w.contextWithTimeout(timeout)
+       ctx, cancel := w.contextWithTimeout(w.waitCtx, timeout)
        defer cancel()
        slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
        sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
@@ -100,7 +104,7 @@
        if timeout == 0 {
                timeout = DefaultStatusWatcherTimeout
        }
-       ctx, cancel := w.contextWithTimeout(timeout)
+       ctx, cancel := w.contextWithTimeout(w.waitWithJobsCtx, timeout)
        defer cancel()
        slog.Debug("waiting for resources", "count", len(resourceList), "timeout", timeout)
        sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
@@ -116,7 +120,7 @@
        if timeout == 0 {
                timeout = DefaultStatusWatcherTimeout
        }
-       ctx, cancel := w.contextWithTimeout(timeout)
+       ctx, cancel := w.contextWithTimeout(w.waitForDeleteCtx, timeout)
        defer cancel()
        slog.Debug("waiting for resources to be deleted", "count", len(resourceList), "timeout", timeout)
        sw := watcher.NewDefaultStatusWatcher(w.client, w.restMapper)
@@ -145,17 +149,19 @@
                return statusCollector.Error
        }
 
-       // Only check parent context error, otherwise we would error when desired status is achieved.
-       if ctx.Err() != nil {
-               errs := []error{}
-               for _, id := range resources {
-                       rs := statusCollector.ResourceStatuses[id]
-                       if rs.Status == status.NotFoundStatus {
-                               continue
-                       }
-                       errs = append(errs, fmt.Errorf("resource still exists, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
+       errs := []error{}
+       for _, id := range resources {
+               rs := statusCollector.ResourceStatuses[id]
+               if rs.Status == status.NotFoundStatus || rs.Status == status.UnknownStatus {
+                       continue
                }
-               errs = append(errs, ctx.Err())
+               errs = append(errs, fmt.Errorf("resource %s/%s/%s still exists. status: %s, message: %s",
+                       rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
+       }
+       if err := ctx.Err(); err != nil {
+               errs = append(errs, err)
+       }
+       if len(errs) > 0 {
                return errors.Join(errs...)
        }
        return nil
@@ -190,24 +196,29 @@
                return statusCollector.Error
        }
 
-       // Only check parent context error, otherwise we would error when desired status is achieved.
-       if ctx.Err() != nil {
-               errs := []error{}
-               for _, id := range resources {
-                       rs := statusCollector.ResourceStatuses[id]
-                       if rs.Status == status.CurrentStatus {
-                               continue
-                       }
-                       errs = append(errs, fmt.Errorf("resource not ready, name: %s, kind: %s, status: %s", rs.Identifier.Name, rs.Identifier.GroupKind.Kind, rs.Status))
+       errs := []error{}
+       for _, id := range resources {
+               rs := statusCollector.ResourceStatuses[id]
+               if rs.Status == status.CurrentStatus {
+                       continue
                }
-               errs = append(errs, ctx.Err())
+               errs = append(errs, fmt.Errorf("resource %s/%s/%s not ready. status: %s, message: %s",
+                       rs.Identifier.GroupKind.Kind, rs.Identifier.Namespace, rs.Identifier.Name, rs.Status, rs.Message))
+       }
+       if err := ctx.Err(); err != nil {
+               errs = append(errs, err)
+       }
+       if len(errs) > 0 {
                return errors.Join(errs...)
        }
        return nil
 }
 
-func (w *statusWaiter) contextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
-       return contextWithTimeout(w.ctx, timeout)
+func (w *statusWaiter) contextWithTimeout(methodCtx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+       if methodCtx == nil {
+               methodCtx = w.ctx
+       }
+       return contextWithTimeout(methodCtx, timeout)
 }
 
 func contextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
@@ -225,11 +236,16 @@
                        if rs == nil {
                                continue
                        }
-                       // If a resource is already deleted before waiting has started, it will show as unknown
-                       // this check ensures we don't wait forever for a resource that is already deleted
+                       // If a resource is already deleted before waiting has started, it will show as unknown.
+                       // This check ensures we don't wait forever for a resource that is already deleted.
                        if rs.Status == status.UnknownStatus && desired == status.NotFoundStatus {
                                continue
                        }
+                       // Failed is a terminal state. This check ensures we don't wait forever for a resource
+                       // that has already failed, as intervention is required to resolve the failure.
+                       if rs.Status == status.FailedStatus && desired == status.CurrentStatus {
+                               continue
+                       }
                        rss = append(rss, rs)
                        if rs.Status != desired {
                                nonDesiredResources = append(nonDesiredResources, rs)
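
The nil check added to contextWithTimeout above is the usual fallback pattern for optional contexts; a standalone sketch of that precedence (the effectiveContext helper is hypothetical, not Helm's code):

package main

import (
    "context"
    "fmt"
    "time"
)

// effectiveContext mirrors the precedence used above: the method-specific
// context wins, then the general context, then context.Background().
func effectiveContext(methodCtx, generalCtx context.Context) context.Context {
    if methodCtx != nil {
        return methodCtx
    }
    if generalCtx != nil {
        return generalCtx
    }
    return context.Background()
}

func main() {
    general, cancel := context.WithCancel(context.Background())
    cancel() // the general context is already cancelled

    // No method context: the derived context inherits the cancellation.
    ctx, stop := context.WithTimeout(effectiveContext(nil, general), time.Second)
    defer stop()
    fmt.Println(ctx.Err()) // context canceled

    // A method-specific context overrides the cancelled general one.
    ctx2, stop2 := context.WithTimeout(effectiveContext(context.Background(), general), time.Second)
    defer stop2()
    fmt.Println(ctx2.Err()) // <nil>
}
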
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/helm-4.1.0/pkg/kube/statuswait_test.go new/helm-4.1.1/pkg/kube/statuswait_test.go
--- old/helm-4.1.0/pkg/kube/statuswait_test.go  2026-01-12 19:39:57.000000000 +0100
+++ new/helm-4.1.1/pkg/kube/statuswait_test.go  2026-01-29 19:23:46.000000000 +0100
@@ -101,10 +101,27 @@
    succeeded: 1
    active: 0
    conditions:
-    - type: Complete 
+    - type: Complete
       status: "True"
 `
 
+var jobFailedManifest = `
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: failed-job
+  namespace: default
+  generation: 1
+status:
+  failed: 1
+  active: 0
+  conditions:
+  - type: Failed
+    status: "True"
+    reason: BackoffLimitExceeded
+    message: "Job has reached the specified backoff limit"
+`
+
 var podCompleteManifest = `
 apiVersion: v1
 kind: Pod
@@ -279,7 +296,7 @@
                name              string
                manifestsToCreate []string
                manifestsToDelete []string
-               expectErrs        []error
+               expectErrs        []string
        }{
                {
                        name:              "wait for pod to be deleted",
@@ -291,7 +308,7 @@
                        name:              "error when not all objects are 
deleted",
                        manifestsToCreate: []string{jobCompleteManifest, 
podCurrentManifest},
                        manifestsToDelete: []string{jobCompleteManifest},
-                       expectErrs:        []error{errors.New("resource still 
exists, name: current-pod, kind: Pod, status: Current"), errors.New("context 
deadline exceeded")},
+                       expectErrs:        []string{"resource 
Pod/ns/current-pod still exists. status: Current", "context deadline exceeded"},
                },
        }
        for _, tt := range tests {
@@ -329,7 +346,10 @@
                        resourceList := getResourceListFromRuntimeObjs(t, c, objsToCreate)
                        err := statusWaiter.WaitForDelete(resourceList, timeout)
                        if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -359,37 +379,35 @@
 func TestStatusWait(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               expectErrs   []error
-               waitForJobs  bool
+               name          string
+               objManifests  []string
+               expectErrStrs []string
+               waitForJobs   bool
        }{
                {
-                       name:         "Job is not complete",
-                       objManifests: []string{jobNoStatusManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: test, kind: Job, status: InProgress"), errors.New("context deadline 
exceeded")},
-                       waitForJobs:  true,
+                       name:          "Job is not complete",
+                       objManifests:  []string{jobNoStatusManifest},
+                       expectErrStrs: []string{"resource Job/qual/test not 
ready. status: InProgress", "context deadline exceeded"},
+                       waitForJobs:   true,
                },
                {
-                       name:         "Job is ready but not complete",
-                       objManifests: []string{jobReadyManifest},
-                       expectErrs:   nil,
-                       waitForJobs:  false,
+                       name:          "Job is ready but not complete",
+                       objManifests:  []string{jobReadyManifest},
+                       expectErrStrs: nil,
+                       waitForJobs:   false,
                },
                {
                        name:         "Pod is ready",
                        objManifests: []string{podCurrentManifest},
-                       expectErrs:   nil,
                },
                {
-                       name:         "one of the pods never becomes ready",
-                       objManifests: []string{podNoStatusManifest, 
podCurrentManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: in-progress-pod, kind: Pod, status: InProgress"), errors.New("context 
deadline exceeded")},
+                       name:          "one of the pods never becomes ready",
+                       objManifests:  []string{podNoStatusManifest, 
podCurrentManifest},
+                       expectErrStrs: []string{"resource 
Pod/ns/in-progress-pod not ready. status: InProgress", "context deadline 
exceeded"},
                },
                {
                        name:         "paused deployment passes",
                        objManifests: []string{pausedDeploymentManifest},
-                       expectErrs:   nil,
                },
        }
 
@@ -416,8 +434,11 @@
                        }
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := statusWaiter.Wait(resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -428,23 +449,23 @@
 func TestWaitForJobComplete(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               expectErrs   []error
+               name          string
+               objManifests  []string
+               expectErrStrs []string
        }{
                {
                        name:         "Job is complete",
                        objManifests: []string{jobCompleteManifest},
                },
                {
-                       name:         "Job is not ready",
-                       objManifests: []string{jobNoStatusManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: test, kind: Job, status: InProgress"), errors.New("context deadline 
exceeded")},
+                       name:          "Job is not ready",
+                       objManifests:  []string{jobNoStatusManifest},
+                       expectErrStrs: []string{"resource Job/qual/test not 
ready. status: InProgress", "context deadline exceeded"},
                },
                {
-                       name:         "Job is ready but not complete",
-                       objManifests: []string{jobReadyManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context 
deadline exceeded")},
+                       name:          "Job is ready but not complete",
+                       objManifests:  []string{jobReadyManifest},
+                       expectErrStrs: []string{"resource 
Job/default/ready-not-complete not ready. status: InProgress", "context 
deadline exceeded"},
                },
        }
 
@@ -469,8 +490,11 @@
                        }
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := statusWaiter.WaitWithJobs(resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -481,9 +505,9 @@
 func TestWatchForReady(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               expectErrs   []error
+               name          string
+               objManifests  []string
+               expectErrStrs []string
        }{
                {
                        name:         "succeeds if pod and job are complete",
@@ -494,14 +518,14 @@
                        objManifests: []string{notReadyDeploymentManifest},
                },
                {
-                       name:         "Fails if job is not complete",
-                       objManifests: []string{jobReadyManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: ready-not-complete, kind: Job, status: InProgress"), errors.New("context 
deadline exceeded")},
+                       name:          "Fails if job is not complete",
+                       objManifests:  []string{jobReadyManifest},
+                       expectErrStrs: []string{"resource 
Job/default/ready-not-complete not ready. status: InProgress", "context 
deadline exceeded"},
                },
                {
-                       name:         "Fails if pod is not complete",
-                       objManifests: []string{podCurrentManifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: current-pod, kind: Pod, status: InProgress"), errors.New("context 
deadline exceeded")},
+                       name:          "Fails if pod is not complete",
+                       objManifests:  []string{podCurrentManifest},
+                       expectErrStrs: []string{"resource Pod/ns/current-pod 
not ready. status: InProgress", "context deadline exceeded"},
                },
        }
 
@@ -528,8 +552,11 @@
                        }
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -540,10 +567,10 @@
 func TestStatusWaitMultipleNamespaces(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               expectErrs   []error
-               testFunc     func(statusWaiter, ResourceList, time.Duration) error
+               name          string
+               objManifests  []string
+               expectErrStrs []string
+               testFunc      func(statusWaiter, ResourceList, time.Duration) error
        }{
                {
                        name:         "pods in multiple namespaces",
@@ -560,9 +587,9 @@
                        },
                },
                {
-                       name:         "error when resource not ready in one 
namespace",
-                       objManifests: []string{podNamespace1NoStatusManifest, 
podNamespace2Manifest},
-                       expectErrs:   []error{errors.New("resource not ready, 
name: pod-ns1, kind: Pod, status: InProgress"), errors.New("context deadline 
exceeded")},
+                       name:          "error when resource not ready in one 
namespace",
+                       objManifests:  []string{podNamespace1NoStatusManifest, 
podNamespace2Manifest},
+                       expectErrStrs: []string{"resource 
Pod/namespace-1/pod-ns1 not ready. status: InProgress", "context deadline 
exceeded"},
                        testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
                                return sw.Wait(rl, timeout)
                        },
@@ -642,8 +669,11 @@
 
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := tt.testFunc(sw, resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -978,10 +1008,10 @@
 func TestStatusWaitWithCustomReaders(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               customReader *mockStatusReader
-               expectErrs   []error
+               name          string
+               objManifests  []string
+               customReader  *mockStatusReader
+               expectErrStrs []string
        }{
                {
                        name:         "custom reader makes pod immediately 
current",
@@ -990,7 +1020,6 @@
                                supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
                                status:      status.CurrentStatus,
                        },
-                       expectErrs: nil,
                },
                {
                        name:         "custom reader returns in-progress 
status",
@@ -999,7 +1028,7 @@
                                supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: []error{errors.New("resource not ready, 
name: current-pod, kind: Pod, status: InProgress"), errors.New("context 
deadline exceeded")},
+                       expectErrStrs: []string{"resource Pod/ns/current-pod 
not ready. status: InProgress", "context deadline exceeded"},
                },
                {
                        name:         "custom reader for different resource 
type is not used",
@@ -1008,7 +1037,6 @@
                                supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: nil,
                },
        }
 
@@ -1035,8 +1063,11 @@
                        }
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := statusWaiter.Wait(resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
@@ -1113,13 +1144,584 @@
        }
 }
 
+func TestStatusWaitWithFailedResources(t *testing.T) {
+       t.Parallel()
+       tests := []struct {
+               name          string
+               objManifests  []string
+               customReader  *mockStatusReader
+               expectErrStrs []string
+               testFunc      func(statusWaiter, ResourceList, time.Duration) error
+       }{
+               {
+                       name:         "Wait returns error when resource has 
failed",
+                       objManifests: []string{podNoStatusManifest},
+                       customReader: &mockStatusReader{
+                               supportedGK: 
v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+                               status:      status.FailedStatus,
+                       },
+                       expectErrStrs: []string{"resource 
Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"},
+                       testFunc: func(sw statusWaiter, rl ResourceList, 
timeout time.Duration) error {
+                               return sw.Wait(rl, timeout)
+                       },
+               },
+               {
+                       name:         "WaitWithJobs returns error when job has 
failed",
+                       objManifests: []string{jobFailedManifest},
+                       customReader: nil, // Use the built-in job status reader
+                       expectErrStrs: []string{
+                               "resource Job/default/failed-job not ready. 
status: Failed",
+                       },
+                       testFunc: func(sw statusWaiter, rl ResourceList, timeout time.Duration) error {
+                               return sw.WaitWithJobs(rl, timeout)
+                       },
+               },
+               {
+                       name:         "Wait returns errors when multiple 
resources fail",
+                       objManifests: []string{podNoStatusManifest, 
podCurrentManifest},
+                       customReader: &mockStatusReader{
+                               supportedGK: 
v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+                               status:      status.FailedStatus,
+                       },
+                       // The mock reader will make both pods return 
FailedStatus
+                       expectErrStrs: []string{
+                               "resource Pod/ns/in-progress-pod not ready. 
status: Failed, message: mock status reader",
+                               "resource Pod/ns/current-pod not ready. status: 
Failed, message: mock status reader",
+                       },
+                       testFunc: func(sw statusWaiter, rl ResourceList, 
timeout time.Duration) error {
+                               return sw.Wait(rl, timeout)
+                       },
+               },
+               {
+                       name:         "WatchUntilReady returns error when 
resource has failed",
+                       objManifests: []string{podNoStatusManifest},
+                       customReader: &mockStatusReader{
+                               supportedGK: 
v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
+                               status:      status.FailedStatus,
+                       },
+                       // WatchUntilReady also waits for CurrentStatus, so 
failed resources should return error
+                       expectErrStrs: []string{"resource 
Pod/ns/in-progress-pod not ready. status: Failed, message: mock status reader"},
+                       testFunc: func(sw statusWaiter, rl ResourceList, 
timeout time.Duration) error {
+                               return sw.WatchUntilReady(rl, timeout)
+                       },
+               },
+       }
+
+       for _, tt := range tests {
+               t.Run(tt.name, func(t *testing.T) {
+                       t.Parallel()
+                       c := newTestClient(t)
+                       fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+                       fakeMapper := testutil.NewFakeRESTMapper(
+                               v1.SchemeGroupVersion.WithKind("Pod"),
+                               batchv1.SchemeGroupVersion.WithKind("Job"),
+                       )
+                       var readers []engine.StatusReader
+                       if tt.customReader != nil {
+                               readers = []engine.StatusReader{tt.customReader}
+                       }
+                       sw := statusWaiter{
+                               client:     fakeClient,
+                               restMapper: fakeMapper,
+                               readers:    readers,
+                       }
+                       objs := getRuntimeObjFromManifests(t, tt.objManifests)
+                       for _, obj := range objs {
+                               u := obj.(*unstructured.Unstructured)
+                               gvr := getGVR(t, fakeMapper, u)
+                               err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                               assert.NoError(t, err)
+                       }
+                       resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+                       err := tt.testFunc(sw, resourceList, time.Second*3)
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
+                               return
+                       }
+                       assert.NoError(t, err)
+               })
+       }
+}
+
+func TestWaitOptionFunctions(t *testing.T) {
+       t.Parallel()
+
+       t.Run("WithWatchUntilReadyMethodContext sets watchUntilReadyCtx", 
func(t *testing.T) {
+               t.Parallel()
+               type contextKey struct{}
+               ctx := context.WithValue(context.Background(), contextKey{}, 
"test")
+               opts := &waitOptions{}
+               WithWatchUntilReadyMethodContext(ctx)(opts)
+               assert.Equal(t, ctx, opts.watchUntilReadyCtx)
+       })
+
+       t.Run("WithWaitMethodContext sets waitCtx", func(t *testing.T) {
+               t.Parallel()
+               type contextKey struct{}
+               ctx := context.WithValue(context.Background(), contextKey{}, "test")
+               opts := &waitOptions{}
+               WithWaitMethodContext(ctx)(opts)
+               assert.Equal(t, ctx, opts.waitCtx)
+       })
+
+       t.Run("WithWaitWithJobsMethodContext sets waitWithJobsCtx", func(t 
*testing.T) {
+               t.Parallel()
+               type contextKey struct{}
+               ctx := context.WithValue(context.Background(), contextKey{}, 
"test")
+               opts := &waitOptions{}
+               WithWaitWithJobsMethodContext(ctx)(opts)
+               assert.Equal(t, ctx, opts.waitWithJobsCtx)
+       })
+
+       t.Run("WithWaitForDeleteMethodContext sets waitForDeleteCtx", func(t 
*testing.T) {
+               t.Parallel()
+               type contextKey struct{}
+               ctx := context.WithValue(context.Background(), contextKey{}, 
"test")
+               opts := &waitOptions{}
+               WithWaitForDeleteMethodContext(ctx)(opts)
+               assert.Equal(t, ctx, opts.waitForDeleteCtx)
+       })
+}
+
+func TestMethodSpecificContextCancellation(t *testing.T) {
+       t.Parallel()
+
+       t.Run("WatchUntilReady uses method-specific context", func(t 
*testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled method-specific context
+               methodCtx, methodCancel := context.WithCancel(context.Background())
+               methodCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:             fakeClient,
+                       restMapper:         fakeMapper,
+                       ctx:                context.Background(), // General context is not cancelled
+                       watchUntilReadyCtx: methodCtx,            // Method context is cancelled
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WatchUntilReady(resourceList, time.Second*3)
+               // Should fail due to cancelled method context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("Wait uses method-specific context", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled method-specific context
+               methodCtx, methodCancel := context.WithCancel(context.Background())
+               methodCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:     fakeClient,
+                       restMapper: fakeMapper,
+                       ctx:        context.Background(), // General context is not cancelled
+                       waitCtx:    methodCtx,            // Method context is cancelled
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.Wait(resourceList, time.Second*3)
+               // Should fail due to cancelled method context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("WaitWithJobs uses method-specific context", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       batchv1.SchemeGroupVersion.WithKind("Job"),
+               )
+
+               // Create a cancelled method-specific context
+               methodCtx, methodCancel := context.WithCancel(context.Background())
+               methodCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:          fakeClient,
+                       restMapper:      fakeMapper,
+                       ctx:             context.Background(), // General context is not cancelled
+                       waitWithJobsCtx: methodCtx,            // Method context is cancelled
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WaitWithJobs(resourceList, time.Second*3)
+               // Should fail due to cancelled method context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("WaitForDelete uses method-specific context", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled method-specific context
+               methodCtx, methodCancel := context.WithCancel(context.Background())
+               methodCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:           fakeClient,
+                       restMapper:       fakeMapper,
+                       ctx:              context.Background(), // General context is not cancelled
+                       waitForDeleteCtx: methodCtx,            // Method context is cancelled
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WaitForDelete(resourceList, time.Second*3)
+               // Should fail due to cancelled method context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+}
+
+func TestMethodContextFallbackToGeneralContext(t *testing.T) {
+       t.Parallel()
+
+       t.Run("WatchUntilReady falls back to general context when method 
context is nil", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled general context
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:             fakeClient,
+                       restMapper:         fakeMapper,
+                       ctx:                generalCtx, // General context is cancelled
+                       watchUntilReadyCtx: nil,        // Method context is nil, should fall back
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WatchUntilReady(resourceList, time.Second*3)
+               // Should fail due to cancelled general context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("Wait falls back to general context when method context is nil", 
func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled general context
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:     fakeClient,
+                       restMapper: fakeMapper,
+                       ctx:        generalCtx, // General context is cancelled
+                       waitCtx:    nil,        // Method context is nil, 
should fall back
+               }
+
+               objs := getRuntimeObjFromManifests(t, 
[]string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, 
u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.Wait(resourceList, time.Second*3)
+               // Should fail due to cancelled general context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("WaitWithJobs falls back to general context when method context 
is nil", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       batchv1.SchemeGroupVersion.WithKind("Job"),
+               )
+
+               // Create a cancelled general context
+               generalCtx, generalCancel := 
context.WithCancel(context.Background())
+               generalCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:          fakeClient,
+                       restMapper:      fakeMapper,
+                       ctx:             generalCtx, // General context is 
cancelled
+                       waitWithJobsCtx: nil,        // Method context is nil, 
should fall back
+               }
+
+               objs := getRuntimeObjFromManifests(t, 
[]string{jobCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, 
u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WaitWithJobs(resourceList, time.Second*3)
+               // Should fail due to cancelled general context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+
+       t.Run("WaitForDelete falls back to general context when method context 
is nil", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // Create a cancelled general context
+               generalCtx, generalCancel := 
context.WithCancel(context.Background())
+               generalCancel() // Cancel immediately
+
+               sw := statusWaiter{
+                       client:           fakeClient,
+                       restMapper:       fakeMapper,
+                       ctx:              generalCtx, // General context is 
cancelled
+                       waitForDeleteCtx: nil,        // Method context is nil, 
should fall back
+               }
+
+               objs := getRuntimeObjFromManifests(t, 
[]string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, 
u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WaitForDelete(resourceList, time.Second*3)
+               // Should fail due to cancelled general context
+               require.Error(t, err)
+               assert.Contains(t, err.Error(), "context canceled")
+       })
+}
+
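
Given the fallback behaviour verified above, a caller only needs to set
the contexts that should differ. A hypothetical usage sketch (dynClient
and mapper are placeholders; the statusWaiter fields come from the
diff):

    // Bound only deletion waits with their own deadline; Wait,
    // WaitWithJobs and WatchUntilReady keep using the general context.
    deleteCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    sw := statusWaiter{
            client:           dynClient, // a dynamic.Interface
            restMapper:       mapper,    // a meta.RESTMapper
            ctx:              context.Background(),
            waitForDeleteCtx: deleteCtx,
    }
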
+func TestMethodContextOverridesGeneralContext(t *testing.T) {
+       t.Parallel()
+
+       t.Run("method-specific context overrides general context for WatchUntilReady", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // General context is cancelled, but method context is not
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel()
+
+               sw := statusWaiter{
+                       client:             fakeClient,
+                       restMapper:         fakeMapper,
+                       ctx:                generalCtx,           // Cancelled
+                       watchUntilReadyCtx: context.Background(), // Not cancelled - should be used
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WatchUntilReady(resourceList, time.Second*3)
+               // Should succeed because method context is used and it's not cancelled
+               assert.NoError(t, err)
+       })
+
+       t.Run("method-specific context overrides general context for Wait", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // General context is cancelled, but method context is not
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel()
+
+               sw := statusWaiter{
+                       client:     fakeClient,
+                       restMapper: fakeMapper,
+                       ctx:        generalCtx,           // Cancelled
+                       waitCtx:    context.Background(), // Not cancelled - should be used
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.Wait(resourceList, time.Second*3)
+               // Should succeed because method context is used and it's not cancelled
+               assert.NoError(t, err)
+       })
+
+       t.Run("method-specific context overrides general context for WaitWithJobs", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       batchv1.SchemeGroupVersion.WithKind("Job"),
+               )
+
+               // General context is cancelled, but method context is not
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel()
+
+               sw := statusWaiter{
+                       client:          fakeClient,
+                       restMapper:      fakeMapper,
+                       ctx:             generalCtx,           // Cancelled
+                       waitWithJobsCtx: context.Background(), // Not cancelled - should be used
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{jobCompleteManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+
+               err := sw.WaitWithJobs(resourceList, time.Second*3)
+               // Should succeed because method context is used and it's not cancelled
+               assert.NoError(t, err)
+       })
+
+       t.Run("method-specific context overrides general context for WaitForDelete", func(t *testing.T) {
+               t.Parallel()
+               c := newTestClient(t)
+               timeout := time.Second
+               timeUntilPodDelete := time.Millisecond * 500
+               fakeClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme)
+               fakeMapper := testutil.NewFakeRESTMapper(
+                       v1.SchemeGroupVersion.WithKind("Pod"),
+               )
+
+               // General context is cancelled, but method context is not
+               generalCtx, generalCancel := context.WithCancel(context.Background())
+               generalCancel()
+
+               sw := statusWaiter{
+                       client:           fakeClient,
+                       restMapper:       fakeMapper,
+                       ctx:              generalCtx,           // Cancelled
+                       waitForDeleteCtx: context.Background(), // Not cancelled - should be used
+               }
+
+               objs := getRuntimeObjFromManifests(t, []string{podCurrentManifest})
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       err := fakeClient.Tracker().Create(gvr, u, u.GetNamespace())
+                       require.NoError(t, err)
+               }
+
+               // Schedule deletion
+               for _, obj := range objs {
+                       u := obj.(*unstructured.Unstructured)
+                       gvr := getGVR(t, fakeMapper, u)
+                       go func(gvr schema.GroupVersionResource, u *unstructured.Unstructured) {
+                               time.Sleep(timeUntilPodDelete)
+                               err := fakeClient.Tracker().Delete(gvr, u.GetNamespace(), u.GetName())
+                               assert.NoError(t, err)
+                       }(gvr, u)
+               }
+
+               resourceList := getResourceListFromRuntimeObjs(t, c, objs)
+               err := sw.WaitForDelete(resourceList, timeout)
+               // Should succeed because method context is used and it's not cancelled
+               assert.NoError(t, err)
+       })
+}
+
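
The last subtest above interleaves a timed delete with the wait. The
same pattern, factored into a hypothetical helper (deleteAfter does not
appear in the diff; the tracker calls mirror the ones used above, and
the same imports as the test file are assumed):

    // deleteAfter removes an object from the fake tracker after a
    // delay, so a concurrent WaitForDelete can observe the deletion.
    func deleteAfter(t *testing.T, c *dynamicfake.FakeDynamicClient,
            gvr schema.GroupVersionResource, u *unstructured.Unstructured, d time.Duration) {
            t.Helper()
            go func() {
                    time.Sleep(d)
                    assert.NoError(t, c.Tracker().Delete(gvr, u.GetNamespace(), u.GetName()))
            }()
    }
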
 func TestWatchUntilReadyWithCustomReaders(t *testing.T) {
        t.Parallel()
        tests := []struct {
-               name         string
-               objManifests []string
-               customReader *mockStatusReader
-               expectErrs   []error
+               name          string
+               objManifests  []string
+               customReader  *mockStatusReader
+               expectErrStrs []string
        }{
                {
                        name:         "custom reader makes job immediately current for hooks",
@@ -1128,7 +1730,6 @@
                                supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
                                status:      status.CurrentStatus,
                        },
-                       expectErrs: nil,
                },
                {
                        name:         "custom reader makes pod immediately current for hooks",
@@ -1137,7 +1738,6 @@
                                supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
                                status:      status.CurrentStatus,
                        },
-                       expectErrs: nil,
                },
                {
                        name:         "custom reader takes precedence over built-in pod reader",
@@ -1146,7 +1746,7 @@
                                supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: []error{errors.New("resource not ready, name: good-pod, kind: Pod, status: InProgress"), errors.New("context deadline exceeded")},
+                       expectErrStrs: []string{"resource Pod/ns/good-pod not ready. status: InProgress", "context deadline exceeded"},
                },
                {
                        name:         "custom reader takes precedence over built-in job reader",
@@ -1155,7 +1755,7 @@
                                supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: []error{errors.New("resource not ready, name: test, kind: Job, status: InProgress"), errors.New("context deadline exceeded")},
+                       expectErrStrs: []string{"resource Job/qual/test not ready. status: InProgress", "context deadline exceeded"},
                },
                {
                        name:         "custom reader for different resource type does not affect pods",
@@ -1164,7 +1764,6 @@
                                supportedGK: batchv1.SchemeGroupVersion.WithKind("Job").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: nil,
                },
                {
                        name:         "built-in readers still work when custom reader does not match",
@@ -1173,7 +1772,6 @@
                                supportedGK: v1.SchemeGroupVersion.WithKind("Pod").GroupKind(),
                                status:      status.InProgressStatus,
                        },
-                       expectErrs: nil,
                },
        }

@@ -1200,8 +1798,11 @@
                        }
                        resourceList := getResourceListFromRuntimeObjs(t, c, objs)
                        err := statusWaiter.WatchUntilReady(resourceList, time.Second*3)
-                       if tt.expectErrs != nil {
-                               assert.EqualError(t, err, errors.Join(tt.expectErrs...).Error())
+                       if tt.expectErrStrs != nil {
+                               require.Error(t, err)
+                               for _, expectedErrStr := range tt.expectErrStrs {
+                                       assert.Contains(t, err.Error(), expectedErrStr)
+                               }
                                return
                        }
                        assert.NoError(t, err)
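
The assertion change in this last hunk swaps an exact match on the
joined error for substring checks, which stay stable if the wording or
ordering of the aggregated errors changes. A minimal illustration in
the same test context (error texts taken from the table above; t and
the testify imports are assumed):

    err := errors.Join(
            errors.New("resource Pod/ns/good-pod not ready. status: InProgress"),
            errors.New("context deadline exceeded"),
    )
    for _, want := range []string{
            "resource Pod/ns/good-pod not ready. status: InProgress",
            "context deadline exceeded",
    } {
            assert.Contains(t, err.Error(), want)
    }
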

++++++ helm.obsinfo ++++++
--- /var/tmp/diff_new_pack.dOH9BW/_old  2026-02-11 18:50:13.484847539 +0100
+++ /var/tmp/diff_new_pack.dOH9BW/_new  2026-02-11 18:50:13.504848379 +0100
@@ -1,5 +1,5 @@
 name: helm
-version: 4.1.0
-mtime: 1768243197
-commit: 4553a0a96e5205595079b6757236cc6f969ed1b9
+version: 4.1.1
+mtime: 1769711026
+commit: 5caf0044d4ef3d62a955440272999e139aafbbed
 

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/helm/vendor.tar.gz /work/SRC/openSUSE:Factory/.helm.new.1670/vendor.tar.gz differ: char 100, line 1
