Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package istioctl for openSUSE:Factory 
checked in at 2026-01-21 14:15:16
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/istioctl (Old)
 and      /work/SRC/openSUSE:Factory/.istioctl.new.1928 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "istioctl"

Wed Jan 21 14:15:16 2026 rev:44 rq:1328290 version:1.28.3

Changes:
--------
--- /work/SRC/openSUSE:Factory/istioctl/istioctl.changes        2026-01-12 
10:34:27.665935793 +0100
+++ /work/SRC/openSUSE:Factory/.istioctl.new.1928/istioctl.changes      
2026-01-21 14:15:26.710099522 +0100
@@ -1,0 +2,7 @@
+Tue Jan 20 14:27:08 UTC 2026 - Johannes Kastl 
<[email protected]>
+
+- update to 1.28.3:
+  https://istio.io/latest/news/releases/1.28.x/announcing-1.28.3/
+  No istioctl-related changes in the changelog
+
+-------------------------------------------------------------------

Old:
----
  istioctl-1.28.2.obscpio

New:
----
  istioctl-1.28.3.obscpio

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ istioctl.spec ++++++
--- /var/tmp/diff_new_pack.HvRtS2/_old  2026-01-21 14:15:29.674223148 +0100
+++ /var/tmp/diff_new_pack.HvRtS2/_new  2026-01-21 14:15:29.674223148 +0100
@@ -17,7 +17,7 @@
 
 
 Name:           istioctl
-Version:        1.28.2
+Version:        1.28.3
 Release:        0
 Summary:        CLI for the Istio service mesh in Kubernetes
 License:        Apache-2.0

++++++ _service ++++++
--- /var/tmp/diff_new_pack.HvRtS2/_old  2026-01-21 14:15:29.718224983 +0100
+++ /var/tmp/diff_new_pack.HvRtS2/_new  2026-01-21 14:15:29.722225150 +0100
@@ -3,7 +3,7 @@
     <param name="url">https://github.com/istio/istio</param>
     <param name="scm">git</param>
     <param name="exclude">.git</param>
-    <param name="revision">1.28.2</param>
+    <param name="revision">1.28.3</param>
     <param name="versionformat">@PARENT_TAG@</param>
     <param name="changesgenerate">disable</param>
     <param name="filename">istioctl</param>

++++++ istioctl-1.28.2.obscpio -> istioctl-1.28.3.obscpio ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/Makefile.core.mk 
new/istioctl-1.28.3/Makefile.core.mk
--- old/istioctl-1.28.2/Makefile.core.mk        2025-12-17 21:08:26.000000000 
+0100
+++ new/istioctl-1.28.3/Makefile.core.mk        2026-01-14 11:04:34.000000000 
+0100
@@ -49,7 +49,7 @@
 export VERSION
 
 # Base version of Istio image to use
-BASE_VERSION ?= 1.28-2025-11-05T19-01-12
+BASE_VERSION ?= 1.28-2026-01-13T19-03-16
 ISTIO_BASE_REGISTRY ?= gcr.io/istio-release
 
 export GO111MODULE ?= on
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/docker/iptables.yaml 
new/istioctl-1.28.3/docker/iptables.yaml
--- old/istioctl-1.28.2/docker/iptables.yaml    2025-12-17 21:08:26.000000000 
+0100
+++ new/istioctl-1.28.3/docker/iptables.yaml    2026-01-14 11:04:34.000000000 
+0100
@@ -12,7 +12,7 @@
     - libnfnetlink
     - libmnl
     - libgcc
-    - nftables-slim
+    - nftables=1.1.1-r40
 archs:
   - x86_64
   - aarch64
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/istio.deps 
new/istioctl-1.28.3/istio.deps
--- old/istioctl-1.28.2/istio.deps      2025-12-17 21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/istio.deps      2026-01-14 11:04:34.000000000 +0100
@@ -4,13 +4,13 @@
     "name": "PROXY_REPO_SHA",
     "repoName": "proxy",
     "file": "",
-    "lastStableSHA": "0879e0055d1da524a89415acd456e230b27fba70"
+    "lastStableSHA": "af293e34216afc2d902062b596d313fa9e1d6804"
   },
   {
     "_comment": "",
     "name": "ZTUNNEL_REPO_SHA",
     "repoName": "ztunnel",
     "file": "",
-    "lastStableSHA": "6fb0ce72daf624bfda04dbe6f9585ad24cc54820"
+    "lastStableSHA": "8bda303b0cfac76d3ab99b7e1ed3def71082fc9f"
   }
 ]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/manifests/charts/gateway/templates/service.yaml 
new/istioctl-1.28.3/manifests/charts/gateway/templates/service.yaml
--- old/istioctl-1.28.2/manifests/charts/gateway/templates/service.yaml 
2025-12-17 21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/manifests/charts/gateway/templates/service.yaml 
2026-01-14 11:04:34.000000000 +0100
@@ -72,4 +72,7 @@
 {{- end }}
   selector:
     {{- include "gateway.selectorLabels" . | nindent 4 }}
+    {{- with .Values.service.selectorLabels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
 {{- end }}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/manifests/charts/gateway/values.schema.json 
new/istioctl-1.28.3/manifests/charts/gateway/values.schema.json
--- old/istioctl-1.28.2/manifests/charts/gateway/values.schema.json     
2025-12-17 21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/manifests/charts/gateway/values.schema.json     
2026-01-14 11:04:34.000000000 +0100
@@ -178,6 +178,12 @@
             "annotations": {
               "type": "object"
             },
+            "selectorLabels": {
+              "type": "object",
+              "additionalProperties": {
+                "type": "string"
+              }
+            },
             "externalTrafficPolicy": {
               "type": "string"
             },
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/manifests/charts/gateway/values.yaml 
new/istioctl-1.28.3/manifests/charts/gateway/values.yaml
--- old/istioctl-1.28.2/manifests/charts/gateway/values.yaml    2025-12-17 
21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/manifests/charts/gateway/values.yaml    2026-01-14 
11:04:34.000000000 +0100
@@ -44,6 +44,8 @@
     type: LoadBalancer
     # Set to a specific ClusterIP, or "" for automatic assignment
     clusterIP: ""
+    # Additional labels to add to the service selector
+    selectorLabels: {}
     ports:
     - name: status-port
       port: 15021
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/operator/pkg/helm/helm_test.go 
new/istioctl-1.28.3/operator/pkg/helm/helm_test.go
--- old/istioctl-1.28.2/operator/pkg/helm/helm_test.go  2025-12-17 
21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/operator/pkg/helm/helm_test.go  2026-01-14 
11:04:34.000000000 +0100
@@ -68,6 +68,13 @@
                        diffSelect:  "Deployment:*:istio-ingress",
                },
                {
+                       desc:        "gateway-service-selector-labels",
+                       releaseName: "istio-ingress",
+                       namespace:   "istio-ingress",
+                       chartName:   "gateway",
+                       diffSelect:  "Service:*:istio-ingress",
+               },
+               {
                        desc:        "istiod-traffic-distribution",
                        releaseName: "istiod",
                        namespace:   "istio-system",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/operator/pkg/helm/testdata/input/gateway-service-selector-labels.yaml
 
new/istioctl-1.28.3/operator/pkg/helm/testdata/input/gateway-service-selector-labels.yaml
--- 
old/istioctl-1.28.2/operator/pkg/helm/testdata/input/gateway-service-selector-labels.yaml
   1970-01-01 01:00:00.000000000 +0100
+++ 
new/istioctl-1.28.3/operator/pkg/helm/testdata/input/gateway-service-selector-labels.yaml
   2026-01-14 11:04:34.000000000 +0100
@@ -0,0 +1,6 @@
+spec:
+  values:
+    service:
+      selectorLabels:
+        istio.io/rev: canary
+        custom-label: custom-value
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/operator/pkg/helm/testdata/output/gateway-service-selector-labels.golden.yaml
 
new/istioctl-1.28.3/operator/pkg/helm/testdata/output/gateway-service-selector-labels.golden.yaml
--- 
old/istioctl-1.28.2/operator/pkg/helm/testdata/output/gateway-service-selector-labels.golden.yaml
   1970-01-01 01:00:00.000000000 +0100
+++ 
new/istioctl-1.28.3/operator/pkg/helm/testdata/output/gateway-service-selector-labels.golden.yaml
   2026-01-14 11:04:34.000000000 +0100
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: istio-ingress
+  namespace: istio-ingress
+  labels:
+    app.kubernetes.io/name: istio-ingress
+    app.kubernetes.io/managed-by: "Helm"
+    app.kubernetes.io/instance: "istio-ingress"
+    app.kubernetes.io/part-of: "istio"
+    app.kubernetes.io/version: "1.0.0"
+    helm.sh/chart: gateway-1.0.0
+    app: istio-ingress
+    istio: ingress
+    "istio.io/dataplane-mode": "none"
+  annotations:
+    {}
+spec:
+  type: LoadBalancer
+  ports:
+    - name: status-port
+      port: 15021
+      protocol: TCP
+      targetPort: 15021
+    - name: http2
+      port: 80
+      protocol: TCP
+      targetPort: 80
+    - name: https
+      port: 443
+      protocol: TCP
+      targetPort: 443
+  selector:
+    app: istio-ingress
+    istio: ingress
+    custom-label: custom-value
+    istio.io/rev: canary
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/ambientindex.go
 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/ambientindex.go
--- 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/ambientindex.go
       2025-12-17 21:08:26.000000000 +0100
+++ 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/ambientindex.go
       2026-01-14 11:04:34.000000000 +0100
@@ -186,13 +186,19 @@
        )...)
        authzPolicies := 
kclient.NewDelayedInformer[*securityclient.AuthorizationPolicy](options.Client,
                gvr.AuthorizationPolicy, kubetypes.StandardInformer, 
configFilter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       authzPolicies.Start(a.stop)
        AuthzPolicies := 
krt.WrapClient[*securityclient.AuthorizationPolicy](authzPolicies, 
opts.WithName("informer/AuthorizationPolicies")...)
 
        peerAuths := 
kclient.NewDelayedInformer[*securityclient.PeerAuthentication](options.Client,
                gvr.PeerAuthentication, kubetypes.StandardInformer, 
configFilter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       peerAuths.Start(a.stop)
        PeerAuths := 
krt.WrapClient[*securityclient.PeerAuthentication](peerAuths, 
opts.WithName("informer/PeerAuthentications")...)
 
        gatewayClient := 
kclient.NewDelayedInformer[*v1beta1.Gateway](options.Client, 
gvr.KubernetesGateway, kubetypes.StandardInformer, filter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       gatewayClient.Start(a.stop)
        Gateways := krt.WrapClient[*v1beta1.Gateway](gatewayClient, opts.With(
                krt.WithName("informer/Gateways"),
                krt.WithMetadata(krt.Metadata{
@@ -201,6 +207,8 @@
        )...)
 
        gatewayClassClient := 
kclient.NewDelayedInformer[*v1beta1.GatewayClass](options.Client, 
gvr.GatewayClass, kubetypes.StandardInformer, filter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       gatewayClassClient.Start(a.stop)
        GatewayClasses := 
krt.WrapClient[*v1beta1.GatewayClass](gatewayClassClient, 
opts.WithName("informer/GatewayClasses")...)
        Pods := krt.NewInformerFiltered[*corev1.Pod](options.Client, 
kclient.Filter{
                ObjectFilter:    options.Client.ObjectFilter(),
@@ -214,10 +222,14 @@
 
        serviceEntries := 
kclient.NewDelayedInformer[*networkingclient.ServiceEntry](options.Client,
                gvr.ServiceEntry, kubetypes.StandardInformer, configFilter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       serviceEntries.Start(a.stop)
        ServiceEntries := 
krt.WrapClient[*networkingclient.ServiceEntry](serviceEntries, 
opts.WithName("informer/ServiceEntries")...)
 
        workloadEntries := 
kclient.NewDelayedInformer[*networkingclient.WorkloadEntry](options.Client,
                gvr.WorkloadEntry, kubetypes.StandardInformer, configFilter)
+       // Start with a.stop to ensure the informer respects the index's stop 
channel
+       workloadEntries.Start(a.stop)
        WorkloadEntries := 
krt.WrapClient[*networkingclient.WorkloadEntry](workloadEntries, 
opts.WithName("informer/WorkloadEntries")...)
 
        servicesClient := kclient.NewFiltered[*corev1.Service](options.Client, 
filter)
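
Each hunk above applies the same change: the delayed informer is explicitly started with the ambient index's stop channel (a.stop) before being wrapped into a krt collection, so stopping the index also stops the informer. A minimal, self-contained sketch of that lifecycle coupling using plain client-go rather than Istio's kclient/krt wrappers (fake clientset, illustrative names only):

package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset(&corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "cm", Namespace: "default"},
	})
	stop := make(chan struct{}) // the owning component's stop channel

	factory := informers.NewSharedInformerFactory(client, 0)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	// Start the informer with the component's own stop channel before
	// anything consumes it, so stopping the component stops the informer too.
	go cmInformer.Run(stop)
	cache.WaitForCacheSync(stop, cmInformer.HasSynced)

	fmt.Println("synced:", cmInformer.HasSynced())
	close(stop) // shutting down the component also shuts down the informer
	time.Sleep(100 * time.Millisecond)
}
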
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/cluster.go
 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/cluster.go
--- 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/cluster.go
       2025-12-17 21:08:26.000000000 +0100
+++ 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/cluster.go
       2026-01-14 11:04:34.000000000 +0100
@@ -66,9 +66,9 @@
        // initialSync is marked when RunAndWait completes
        initialSync *atomic.Bool
        // initialSyncTimeout is set when RunAndWait timed out
-       initialSyncTimeout *atomic.Bool
-       stop               chan struct{}
-       *RemoteClusterCollections
+       initialSyncTimeout       *atomic.Bool
+       stop                     chan struct{}
+       RemoteClusterCollections *atomic.Pointer[RemoteClusterCollections]
 }
 
 type RemoteClusterCollections struct {
@@ -81,33 +81,33 @@
 }
 
 // Namespaces returns the namespaces collection.
-func (r *RemoteClusterCollections) Namespaces() 
krt.Collection[*corev1.Namespace] {
-       return r.namespaces
+func (c *Cluster) Namespaces() krt.Collection[*corev1.Namespace] {
+       return c.RemoteClusterCollections.Load().namespaces
 }
 
 // Pods returns the pods collection.
-func (r *RemoteClusterCollections) Pods() krt.Collection[*corev1.Pod] {
-       return r.pods
+func (c *Cluster) Pods() krt.Collection[*corev1.Pod] {
+       return c.RemoteClusterCollections.Load().pods
 }
 
 // Services returns the services collection.
-func (r *RemoteClusterCollections) Services() krt.Collection[*corev1.Service] {
-       return r.services
+func (c *Cluster) Services() krt.Collection[*corev1.Service] {
+       return c.RemoteClusterCollections.Load().services
 }
 
 // EndpointSlices returns the endpointSlices collection.
-func (r *RemoteClusterCollections) EndpointSlices() 
krt.Collection[*discovery.EndpointSlice] {
-       return r.endpointSlices
+func (c *Cluster) EndpointSlices() krt.Collection[*discovery.EndpointSlice] {
+       return c.RemoteClusterCollections.Load().endpointSlices
 }
 
 // Nodes returns the nodes collection.
-func (r *RemoteClusterCollections) Nodes() krt.Collection[*corev1.Node] {
-       return r.nodes
+func (c *Cluster) Nodes() krt.Collection[*corev1.Node] {
+       return c.RemoteClusterCollections.Load().nodes
 }
 
 // Gateways returns the gateways collection.
-func (r *RemoteClusterCollections) Gateways() krt.Collection[*v1beta1.Gateway] 
{
-       return r.gateways
+func (c *Cluster) Gateways() krt.Collection[*v1beta1.Gateway] {
+       return c.RemoteClusterCollections.Load().gateways
 }
 
 func NewRemoteClusterCollections(
@@ -136,15 +136,16 @@
        collections *RemoteClusterCollections,
 ) *Cluster {
        c := &Cluster{
-               ID:                 id,
-               Client:             client,
-               stop:               make(chan struct{}),
-               initialSync:        atomic.NewBool(false),
-               initialSyncTimeout: atomic.NewBool(false),
+               ID:                       id,
+               Client:                   client,
+               stop:                     make(chan struct{}),
+               initialSync:              atomic.NewBool(false),
+               initialSyncTimeout:       atomic.NewBool(false),
+               RemoteClusterCollections: 
atomic.NewPointer[RemoteClusterCollections](nil),
        }
 
        if collections != nil {
-               c.RemoteClusterCollections = collections
+               c.RemoteClusterCollections.Store(collections)
        }
 
        if source != nil {
@@ -168,15 +169,15 @@
 
 func (c *Cluster) Run(localMeshConfig meshwatcher.WatcherCollection, debugger 
*krt.DebugHandler) {
        // Check and see if this is a local cluster or not
-       if c.RemoteClusterCollections != nil {
+       if c.RemoteClusterCollections.Load() != nil {
                log.Infof("Configuring cluster %s with existing informers", 
c.ID)
                syncers := []krt.Syncer{
-                       c.namespaces,
-                       c.gateways,
-                       c.services,
-                       c.nodes,
-                       c.endpointSlices,
-                       c.pods,
+                       c.Namespaces(),
+                       c.Gateways(),
+                       c.Services(),
+                       c.Nodes(),
+                       c.EndpointSlices(),
+                       c.Pods(),
                }
                // Just wait for all syncers to be synced
                for _, syncer := range syncers {
@@ -202,16 +203,22 @@
 
        opts := krt.NewOptionsBuilder(c.stop, 
fmt.Sprintf("ambient/cluster[%s]", c.ID), debugger)
        namespaces := kclient.New[*corev1.Namespace](c.Client)
-       // This will start a namespace informer and wait for it to be ready. So 
we must start it in a go routine to avoid blocking.
-       filter := filter.NewDiscoveryNamespacesFilter(namespaces, 
localMeshConfig, c.stop)
+       // When this cluster stops, clean up the namespace watcher
+       go func() {
+               <-c.stop
+               namespaces.ShutdownHandlers()
+       }()
+       // This will start a namespace informer but DON'T wait for it to be 
ready because that will block
+       // assignment of all of the collection fields (leading to races and 
panics).
+       filter, syncWaiter := 
filter.NewNonBlockingDiscoveryNamespacesFilter(namespaces, localMeshConfig, 
c.stop)
        kube.SetObjectFilter(c.Client, filter)
        // Register all of the informers before starting the client
        defaultFilter := kclient.Filter{
                ObjectFilter: c.Client.ObjectFilter(),
        }
 
-       Namespaces := krt.WrapClient(namespaces, opts.With(
-               krt.WithName("informer/Namespaces"),
+       Namespaces := krt.WrapClient(namespaces, append(
+               opts.WithName("informer/Namespaces"),
                krt.WithMetadata(krt.Metadata{
                        ClusterKRTMetadataKey: c.ID,
                }),
@@ -234,8 +241,8 @@
                }),
        )...)
        servicesClient := kclient.NewFiltered[*corev1.Service](c.Client, 
defaultFilter)
-       Services := krt.WrapClient[*corev1.Service](servicesClient, opts.With(
-               krt.WithName("informer/Services"),
+       Services := krt.WrapClient(servicesClient, append(
+               opts.WithName("informer/Services"),
                krt.WithMetadata(krt.Metadata{
                        ClusterKRTMetadataKey: c.ID,
                }),
@@ -244,8 +251,8 @@
        Nodes := krt.NewInformerFiltered[*corev1.Node](c.Client, kclient.Filter{
                ObjectFilter:    c.Client.ObjectFilter(),
                ObjectTransform: kube.StripNodeUnusedFields,
-       }, opts.With(
-               krt.WithName("informer/Nodes"),
+       }, append(
+               opts.WithName("informer/Nodes"),
                krt.WithMetadata(krt.Metadata{
                        ClusterKRTMetadataKey: c.ID,
                }),
@@ -253,35 +260,37 @@
 
        EndpointSlices := 
krt.NewInformerFiltered[*discovery.EndpointSlice](c.Client, kclient.Filter{
                ObjectFilter: c.Client.ObjectFilter(),
-       }, opts.With(
-               krt.WithName("informer/EndpointSlices"),
+       }, append(
+               opts.WithName("informer/EndpointSlices"),
                krt.WithMetadata(krt.Metadata{
                        ClusterKRTMetadataKey: c.ID,
                }),
        )...)
 
-       c.RemoteClusterCollections = &RemoteClusterCollections{
+       c.RemoteClusterCollections.Store(&RemoteClusterCollections{
                namespaces:     Namespaces,
                pods:           Pods,
                services:       Services,
                endpointSlices: EndpointSlices,
                nodes:          Nodes,
                gateways:       Gateways,
-       }
+       })
 
        go func() {
+               log.Debugf("Waiting for discovery filter sync for cluster %s 
before starting all the other informers", c.ID)
+               syncWaiter(c.stop)
                if !c.Client.RunAndWait(c.stop) {
                        log.Warnf("remote cluster %s failed to sync", c.ID)
                        return
                }
 
                syncers := []krt.Syncer{
-                       c.namespaces,
-                       c.gateways,
-                       c.services,
-                       c.nodes,
-                       c.endpointSlices,
-                       c.pods,
+                       c.Namespaces(),
+                       c.Gateways(),
+                       c.Services(),
+                       c.Nodes(),
+                       c.EndpointSlices(),
+                       c.Pods(),
                }
 
                for _, syncer := range syncers {
@@ -296,9 +305,10 @@
 }
 
 func (c *Cluster) HasSynced() bool {
-       // It could happen when a wrong credential provide, this cluster has no 
chance to run.
-       // In this case, the `initialSyncTimeout` will never be set
-       // In order not block istiod start up, check close as well.
+       // It could happen that, when a wrong credential is provided,
+       // this cluster has no chance to run fully and gets prematurely closed.
+       // In this case, the `initialSyncTimeout` will never be set.
+       // In order not to block istiod startup, check Closed() as well.
        if c.Closed() {
                return true
        }
@@ -325,22 +335,36 @@
                return
        default:
                close(c.stop)
+               c.Client.Shutdown()
        }
 }
 
+func (c *Cluster) hasInitialCollections() bool {
+       return c.RemoteClusterCollections.Load() != nil &&
+               c.Namespaces() != nil &&
+               c.Gateways() != nil &&
+               c.Services() != nil &&
+               c.Nodes() != nil &&
+               c.EndpointSlices() != nil &&
+               c.Pods() != nil
+}
+
 func (c *Cluster) WaitUntilSynced(stop <-chan struct{}) bool {
        if c.HasSynced() {
                return true
        }
 
+       // Wait for all the syncers to be populated
+       kube.WaitForCacheSync(fmt.Sprintf("cluster[%s] remote collections 
init", c.ID), stop, c.hasInitialCollections)
+
        // Wait for all syncers to be synced
        for _, syncer := range []krt.Syncer{
-               c.namespaces,
-               c.gateways,
-               c.services,
-               c.nodes,
-               c.endpointSlices,
-               c.pods,
+               c.Namespaces(),
+               c.Gateways(),
+               c.Services(),
+               c.Nodes(),
+               c.EndpointSlices(),
+               c.Pods(),
        } {
                if !syncer.WaitUntilSynced(stop) {
                        log.Errorf("Timed out waiting for cluster %s to sync 
%v", c.ID, syncer)
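
The cluster.go changes replace the embedded RemoteClusterCollections with an atomic pointer plus accessor methods, so the collections built on another goroutine are published in a single store and readers either see a complete set or nil, never a half-initialized struct. A self-contained sketch of that accessor pattern with the standard library's sync/atomic.Pointer (the types here are illustrative, not Istio's):

package main

import (
	"fmt"
	"sync/atomic"
)

type collections struct {
	namespaces []string
	services   []string
}

type clusterState struct {
	cols atomic.Pointer[collections]
}

// Namespaces returns nil until the collections have been published atomically.
func (c *clusterState) Namespaces() []string {
	if cur := c.cols.Load(); cur != nil {
		return cur.namespaces
	}
	return nil
}

func main() {
	c := &clusterState{}
	fmt.Println(c.Namespaces() == nil) // true: nothing published yet

	// Build everything first, then publish in one atomic store.
	c.cols.Store(&collections{
		namespaces: []string{"default"},
		services:   []string{"kubernetes"},
	})
	fmt.Println(c.Namespaces()) // [default]
}
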
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/clusterstore.go
 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/clusterstore.go
--- 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/clusterstore.go
  2025-12-17 21:08:26.000000000 +0100
+++ 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/multicluster/clusterstore.go
  2026-01-14 11:04:34.000000000 +0100
@@ -27,17 +27,19 @@
 type ClusterStore struct {
        sync.RWMutex
        // keyed by secret key(ns/name)->clusterID
-       remoteClusters map[string]map[cluster.ID]*Cluster
-       clusters       sets.String
+       remoteClusters       map[string]map[cluster.ID]*Cluster
+       clusters             sets.String
+       clustersAwaitingSync sets.Set[cluster.ID]
        *krt.RecomputeTrigger
 }
 
 // NewClustersStore initializes data struct to store clusters information
 func NewClustersStore() *ClusterStore {
        return &ClusterStore{
-               remoteClusters:   make(map[string]map[cluster.ID]*Cluster),
-               clusters:         sets.New[string](),
-               RecomputeTrigger: krt.NewRecomputeTrigger(false),
+               remoteClusters:       make(map[string]map[cluster.ID]*Cluster),
+               clusters:             sets.New[string](),
+               RecomputeTrigger:     krt.NewRecomputeTrigger(false),
+               clustersAwaitingSync: sets.New[cluster.ID](),
        }
 }
 
@@ -48,7 +50,12 @@
                c.remoteClusters[secretKey] = make(map[cluster.ID]*Cluster)
        }
        c.remoteClusters[secretKey][clusterID] = value
-       c.clusters.Insert(string(clusterID))
+       exists := c.clusters.InsertContains(string(clusterID))
+       if exists && c.clustersAwaitingSync.Contains(clusterID) {
+               // If there was an old version of this cluster that existed and 
was waiting for sync,
+               // we can remove it from the awaiting set since we have a new 
version now.
+               c.clustersAwaitingSync.Delete(clusterID)
+       }
        c.TriggerRecomputation()
 }
 
@@ -57,6 +64,9 @@
        defer c.Unlock()
        delete(c.remoteClusters[secretKey], clusterID)
        c.clusters.Delete(string(clusterID))
+       if c.clustersAwaitingSync.Contains(clusterID) {
+               c.clustersAwaitingSync.Delete(clusterID)
+       }
        if len(c.remoteClusters[secretKey]) == 0 {
                delete(c.remoteClusters, secretKey)
        }
@@ -91,6 +101,36 @@
 }
 
 // All returns a copy of the current remote clusters.
+func (c *ClusterStore) AllReady() map[string]map[cluster.ID]*Cluster {
+       if c == nil {
+               return nil
+       }
+       c.RLock()
+       defer c.RUnlock()
+       out := make(map[string]map[cluster.ID]*Cluster)
+       for secret, clusters := range c.remoteClusters {
+               for cid, cl := range clusters {
+                       if cl.Closed() || cl.SyncDidTimeout() {
+                               log.Warnf("remote cluster %s is closed or timed 
out, omitting it from the clusters collection", cl.ID)
+                               continue
+                       }
+                       if !cl.HasSynced() {
+                               log.Debugf("remote cluster %s registered 
informers have not been synced up yet. Skipping and will recompute on sync", 
cl.ID)
+                               c.triggerRecomputeOnSyncLocked(cl.ID)
+                               continue
+                       }
+                       outCluster := *cl
+                       if _, ok := out[secret]; !ok {
+                               out[secret] = make(map[cluster.ID]*Cluster)
+                       }
+                       out[secret][cid] = &outCluster
+               }
+       }
+       return out
+}
+
+// All returns a copy of the current remote clusters, including those that may 
not
+// be ready for use. In most cases outside of this package, you should use 
AllReady().
 func (c *ClusterStore) All() map[string]map[cluster.ID]*Cluster {
        if c == nil {
                return nil
@@ -143,3 +183,34 @@
 
        return true
 }
+
+// triggerRecomputeOnSyncLocked sets up a goroutine to wait for the cluster to 
be synced,
+// and then triggers a recompute when it is. Ensure you hold the lock before 
calling this.
+func (c *ClusterStore) triggerRecomputeOnSyncLocked(id cluster.ID) {
+       cluster := c.GetByID(id)
+       if cluster == nil {
+               log.Debugf("cluster %s not found in store to trigger 
recompute", id)
+               return
+       }
+       exists := c.clustersAwaitingSync.InsertContains(id)
+       if exists {
+               // Already waiting for sync
+               return
+       }
+
+       go func() {
+               // Wait until the cluster is synced. If it's deleted from the 
store before
+               // it's fully synced, this will return because of the stop.
+               // Double check to make sure this cluster is still in the store
+               // and that it wasn't closed/timed out (we don't want to send 
an event for bad clusters)
+               if cluster.WaitUntilSynced(cluster.stop) && !cluster.Closed() 
&& !cluster.SyncDidTimeout() && c.GetByID(id) != nil {
+                       // Let dependent krt collections know that this cluster 
is ready to use
+                       c.TriggerRecomputation()
+                       // And clean up our tracking set
+                       c.Lock()
+                       c.clustersAwaitingSync.Delete(id)
+                       c.Unlock()
+                       log.Debugf("remote cluster %s informers synced, 
triggering recompute", id)
+               }
+       }()
+}
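
The new AllReady() and triggerRecomputeOnSyncLocked() pair let consumers see only clusters whose informers have synced, while each unsynced cluster gets a one-shot background waiter that triggers a recompute once it catches up. A simplified, self-contained sketch of that shape, with plain channels standing in for krt triggers (all names illustrative):

package main

import (
	"fmt"
	"sync"
)

type cluster struct {
	id     string
	synced chan struct{} // closed once the cluster's informers have synced
}

type store struct {
	mu        sync.Mutex
	clusters  map[string]*cluster
	awaiting  map[string]bool
	recompute chan struct{} // stands in for krt's recompute trigger
}

// allReady returns only synced clusters; for the rest it registers a one-shot
// waiter that signals a recompute once the cluster finishes syncing.
func (s *store) allReady() []*cluster {
	s.mu.Lock()
	defer s.mu.Unlock()
	var out []*cluster
	for id, c := range s.clusters {
		select {
		case <-c.synced:
			out = append(out, c)
		default:
			if !s.awaiting[id] {
				s.awaiting[id] = true
				go func(id string, c *cluster) {
					<-c.synced
					s.mu.Lock()
					delete(s.awaiting, id)
					s.mu.Unlock()
					s.recompute <- struct{}{} // nudge dependents exactly once
				}(id, c)
			}
		}
	}
	return out
}

func main() {
	c := &cluster{id: "remote", synced: make(chan struct{})}
	s := &store{
		clusters:  map[string]*cluster{c.id: c},
		awaiting:  map[string]bool{},
		recompute: make(chan struct{}, 1),
	}
	fmt.Println(len(s.allReady())) // 0: the remote cluster has not synced yet
	close(c.synced)                // informers finish syncing
	<-s.recompute                  // the waiter fires, dependents recompute
	fmt.Println(len(s.allReady())) // 1
}
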
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets.go
 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets.go
--- 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets.go
      2025-12-17 21:08:26.000000000 +0100
+++ 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets.go
      2026-01-14 11:04:34.000000000 +0100
@@ -115,6 +115,7 @@
                        }
                        // stop previous remote cluster
                        prev.Stop()
+                       log.Debugf("Shutdown previous remote cluster %s for 
secret %s due to update", clusterID, secretKey)
                } else if a.cs.Contains(cluster.ID(clusterID)) {
                        // if the cluster has been registered before by another 
secret, ignore the new one.
                        logger.Warnf("cluster has already been registered")
@@ -129,10 +130,13 @@
                        continue
                }
 
+               // Store before we run so that a bad secret doesn't block 
adding the cluster to the store.
+               // This is necessary so that changes to the bad secret are 
processed as an update and the bad
+               // cluster is stopped and shutdown.
+               a.cs.Store(secretKey, remoteCluster.ID, remoteCluster)
                // Run returns after initializing the cluster's fields; it runs 
all of the expensive operations
-               // in a goroutine, so we can safely call it synchronously here.
+               // in a goroutine (including discovery filter sync), so we can 
safely call it synchronously here.
                remoteCluster.Run(a.meshConfig, debugger)
-               a.cs.Store(secretKey, remoteCluster.ID, remoteCluster)
                addedClusters = append(addedClusters, remoteCluster)
        }
 
@@ -240,11 +244,7 @@
 
        Clusters := krt.NewManyFromNothing(func(ctx krt.HandlerContext) 
[]*multicluster.Cluster {
                a.cs.MarkDependant(ctx) // Subscribe to updates from the 
clusterStore
-               // Wait for the clusterStore to be synced
-               if !kube.WaitForCacheSync("multicluster remote secrets", 
a.stop, a.cs.HasSynced) {
-                       log.Warnf("remote cluster cache sync failed")
-               }
-               remoteClustersBySecretThenID := a.cs.All()
+               remoteClustersBySecretThenID := a.cs.AllReady()
                var remoteClusters []*multicluster.Cluster
                for _, clusters := range remoteClustersBySecretThenID {
                        for _, cluster := range clusters {
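
The remotesecrets.go change moves the Store() call ahead of Run(), for the reason the added comment gives: a cluster built from a bad secret must still land in the store so that a later change to that secret is processed as an update and the broken cluster gets stopped. A toy sketch of that ordering (illustrative types only, not Istio's API):

package main

import "fmt"

type remoteCluster struct {
	id      string
	stopped bool
}

func (c *remoteCluster) run()  { /* expensive work would happen in a goroutine */ }
func (c *remoteCluster) stop() { c.stopped = true }

type store map[string]*remoteCluster

// apply processes a secret add/update for one cluster ID.
func (s store) apply(id, kubeconfig string) {
	if prev, ok := s[id]; ok {
		prev.stop() // an update stops the previous cluster, even a never-healthy one
	}
	c := &remoteCluster{id: id}
	s[id] = c // store first ...
	c.run()   // ... then run, so a bad kubeconfig cannot keep c out of the store
}

func main() {
	s := store{}
	s.apply("c0", "bad-kubeconfig")
	s.apply("c0", "good-kubeconfig") // the update finds and stops the bad cluster
	fmt.Println(len(s))              // 1
}
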
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets_test.go
 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets_test.go
--- 
old/istioctl-1.28.2/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets_test.go
 2025-12-17 21:08:26.000000000 +0100
+++ 
new/istioctl-1.28.3/pilot/pkg/serviceregistry/kube/controller/ambient/remotesecrets_test.go
 2026-01-14 11:04:34.000000000 +0100
@@ -17,6 +17,7 @@
 import (
        "errors"
        "fmt"
+       "strings"
        "sync"
        "testing"
        "time"
@@ -84,6 +85,7 @@
 
 func TestBuildRemoteClustersCollection(t *testing.T) {
        test.SetForTest(t, &features.EnableAmbientMultiNetwork, true)
+       test.SetForTest(t, &features.RemoteClusterTimeout, 1*time.Second)
        secret := &corev1.Secret{
                ObjectMeta: metav1.ObjectMeta{
                        Name:      "remote-cluster-secret",
@@ -160,6 +162,8 @@
 
                        if tt.expectedError {
                                assert.Equal(t, listClusters(), 0)
+                               // Wait for a little after the cluster timeout 
to confirm no segfaults or panics
+                               time.Sleep(features.RemoteClusterTimeout + 
500*time.Millisecond)
                        } else {
                                assert.EventuallyEqual(t, listClusters, 1)
                        }
@@ -550,10 +554,12 @@
 
 func TestSecretController(t *testing.T) {
        test.SetForTest(t, &features.EnableAmbientMultiNetwork, true)
+       test.SetForTest(t, &features.RemoteClusterTimeout, 1*time.Second)
        client := kube.NewFakeClient()
 
        var (
-               secret0 = makeSecret(secretNamespace, "s0",
+               secret0Bad = makeSecret(secretNamespace, "s0", 
clusterCredential{"c0", []byte("bad-kubeconfig")})
+               secret0    = makeSecret(secretNamespace, "s0",
                        clusterCredential{"c0", []byte("kubeconfig0-0")})
                secret0UpdateKubeconfigChanged = makeSecret(secretNamespace, 
"s0",
                        clusterCredential{"c0", []byte("kubeconfig0-1")})
@@ -585,16 +591,23 @@
        steps := []struct {
                name string
                // only set one of these per step. The others should be nil.
-               add    *corev1.Secret
-               update *corev1.Secret
-               delete *corev1.Secret
-
-               want []result
+               add             *corev1.Secret
+               update          *corev1.Secret
+               delete          *corev1.Secret
+               want            []result
+               wantClientError bool
+               afterTestDelay  time.Duration
        }{
                {
-                       name: "Create secret s0 and add kubeconfig for cluster 
c0, which will add remote cluster c0",
-                       add:  secret0,
-                       want: []result{{"config", 1}, {"c0", 2}},
+                       name:            "Create secret s0 with bad kubeconfig 
for cluster c0, which will lead to the cluster object being unsynced and timing 
out",
+                       add:             secret0Bad,
+                       want:            []result{{"config", 1}}, // We don't 
expect to see the failed cluster here because it was never healthy
+                       wantClientError: true,
+               },
+               {
+                       name:   "Create secret s0 and add kubeconfig for 
cluster c0, which will add remote cluster c0",
+                       update: secret0,
+                       want:   []result{{"config", 1}, {"c0", 2}},
                },
                {
                        name:   "Update secret s0 and update the kubeconfig of 
cluster c0, which will update remote cluster c0",
@@ -685,6 +698,27 @@
                DomainSuffix:    "company.com",
                MeshConfig:      watcher,
        }
+       // Mostly the same as testingBuildClientsFromConfig(), but we will make 
the client return an error
+       // if the test case asks
+       options.ClientBuilder = func(kubeConfig []byte, c cluster.ID, 
configOverrides ...func(*rest.Config)) (kube.Client, error) {
+               client := kube.NewFakeClient()
+
+               if strings.Contains(string(kubeConfig), "bad-kubeconfig") {
+                       // Create a bad fake client instead
+                       client = kube.NewErroringFakeClient()
+               }
+               for _, crd := range []schema.GroupVersionResource{
+                       gvr.AuthorizationPolicy,
+                       gvr.PeerAuthentication,
+                       gvr.KubernetesGateway,
+                       gvr.GatewayClass,
+                       gvr.WorkloadEntry,
+                       gvr.ServiceEntry,
+               } {
+                       clienttest.MakeCRD(t, client, crd)
+               }
+               return client, nil
+       }
        t.Cleanup(options.Client.Shutdown)
        // The creation of the stop has to be after the cleanup is registered 
because cleanup is LIFO
        stopCh := test.NewStop(t)
@@ -722,7 +756,6 @@
                retry.UntilOrFail(t, tc.clusters.HasSynced, 
retry.Timeout(2*time.Second))
        })
        kube.WaitForCacheSync("test", stopCh, tc.clusters.HasSynced, 
handlers.HasSynced)
-
        for _, step := range steps {
                t.Run(step.name, func(t *testing.T) {
                        switch {
@@ -733,13 +766,16 @@
                        case step.delete != nil:
                                secrets.Delete(step.delete.Name, 
step.delete.Namespace)
                        }
-
                        assert.EventuallyEqual(t, func() []result {
                                res := slices.Map(handlers.List(), func(e 
testHandler) result {
                                        return result{e.ID, e.Iter}
                                })
                                return res
                        }, step.want)
+                       if step.afterTestDelay > 0 {
+                               // Wait for the cluster to time out to confirm 
we don't segfault
+                               time.Sleep(features.RemoteClusterTimeout + 
500*time.Millisecond)
+                       }
                })
        }
 }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/pkg/kube/client.go 
new/istioctl-1.28.3/pkg/kube/client.go
--- old/istioctl-1.28.2/pkg/kube/client.go      2025-12-17 21:08:26.000000000 
+0100
+++ new/istioctl-1.28.3/pkg/kube/client.go      2026-01-14 11:04:34.000000000 
+0100
@@ -293,6 +293,76 @@
        return fc
 }
 
+// NewErroringFakeClient creates a new fake client that always returns errors
+// on lists and watches to simulate errors from the API server.
+func NewErroringFakeClient(objects ...runtime.Object) CLIClient {
+       c := &client{
+               informerWatchesPending: atomic.NewInt32(0),
+               clusterID:              "fake",
+       }
+
+       c.kube = setupFakeClient(fake.NewClientset(), "kube", objects)
+
+       c.config = &rest.Config{
+               Host: "server",
+       }
+
+       c.informerFactory = informerfactory.NewSharedInformerFactory()
+       s := FakeIstioScheme
+
+       c.metadata = metadatafake.NewSimpleMetadataClient(s)
+       c.dynamic = dynamicfake.NewSimpleDynamicClient(s)
+       c.istio = setupFakeClient(istiofake.NewSimpleClientset(), "istio", 
objects)
+       c.gatewayapi = setupFakeClient(gatewayapifake.NewSimpleClientset(), 
"gateway", objects)
+       c.gatewayapiinference = 
setupFakeClient(gatewayapiinferencefake.NewSimpleClientset(), "inference", 
objects)
+       c.extSet = extfake.NewClientset()
+
+       listReactor := func(action clienttesting.Action) (handled bool, ret 
runtime.Object, err error) {
+               return true, nil, errors.New("fake client list error")
+       }
+       watchReactor := func(tracker clienttesting.ObjectTracker) func(action 
clienttesting.Action) (handled bool, ret watch.Interface, err error) {
+               return func(action clienttesting.Action) (handled bool, ret 
watch.Interface, err error) {
+                       return true, nil, errors.New("fake client watch error")
+               }
+       }
+       // https://github.com/kubernetes/client-go/issues/439
+       createReactor := func(action clienttesting.Action) (handled bool, ret 
runtime.Object, err error) {
+               ret = action.(clienttesting.CreateAction).GetObject()
+               meta, ok := ret.(metav1.Object)
+               if !ok {
+                       return handled, ret, err
+               }
+
+               if meta.GetName() == "" && meta.GetGenerateName() != "" {
+                       
meta.SetName(names.SimpleNameGenerator.GenerateName(meta.GetGenerateName()))
+               }
+
+               return handled, ret, err
+       }
+       for _, fc := range []fakeClient{
+               c.kube.(*fake.Clientset),
+               c.istio.(*istiofake.Clientset),
+               c.gatewayapi.(*gatewayapifake.Clientset),
+               c.gatewayapiinference.(*gatewayapiinferencefake.Clientset),
+               c.dynamic.(*dynamicfake.FakeDynamicClient),
+               c.metadata.(*metadatafake.FakeMetadataClient),
+       } {
+               fc.PrependReactor("list", "*", listReactor)
+               fc.PrependWatchReactor("*", watchReactor(fc.Tracker()))
+               fc.PrependReactor("create", "*", createReactor)
+       }
+
+       c.fastSync = true
+
+       c.version = lazy.NewWithRetry(c.kube.Discovery().ServerVersion)
+
+       if NewCrdWatcher != nil {
+               c.crdWatcher = NewCrdWatcher(c)
+       }
+
+       return c
+}
+
 // NewFakeClient creates a new, fake, client
 func NewFakeClient(objects ...runtime.Object) CLIClient {
        c := &client{
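
NewErroringFakeClient builds on client-go's fake reactor mechanism: reactors prepended for list and watch make every such call fail, which is how tests simulate an unreachable API server. A self-contained sketch of that mechanism against a bare fake clientset (not Istio's CLIClient wrapper):

package main

import (
	"context"
	"errors"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clienttesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()
	// Prepend a reactor so every List call is handled with an error.
	client.PrependReactor("list", "*", func(action clienttesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("fake client list error")
	})

	_, err := client.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{})
	fmt.Println(err) // fake client list error
}
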
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/pkg/kube/multicluster/cluster.go 
new/istioctl-1.28.3/pkg/kube/multicluster/cluster.go
--- old/istioctl-1.28.2/pkg/kube/multicluster/cluster.go        2025-12-17 
21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/pkg/kube/multicluster/cluster.go        2026-01-14 
11:04:34.000000000 +0100
@@ -68,6 +68,9 @@
 func (c *Cluster) Run(mesh mesh.Watcher, handlers []handler, action ACTION) {
        if features.RemoteClusterTimeout > 0 {
                time.AfterFunc(features.RemoteClusterTimeout, func() {
+                       if c.Closed() {
+                               log.Debugf("remote cluster %s was stopped 
before hitting the sync timeout", c.ID)
+                       }
                        if !c.initialSync.Load() {
                                log.Errorf("remote cluster %s failed to sync 
after %v", c.ID, features.RemoteClusterTimeout)
                                
timeouts.With(clusterLabel.Value(string(c.ID))).Increment()
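
The added check runs inside the existing sync-timeout timer: if the cluster was already stopped, that is logged at debug level so an ordinary shutdown is distinguishable from a genuine sync failure. A tiny self-contained sketch of that pattern (standard library only, illustrative names):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	stop := make(chan struct{})
	var initialSync atomic.Bool

	closed := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	time.AfterFunc(100*time.Millisecond, func() {
		if closed() {
			fmt.Println("remote cluster was stopped before hitting the sync timeout")
		}
		if !initialSync.Load() {
			fmt.Println("remote cluster failed to sync before the timeout")
		}
	})

	close(stop) // the cluster shuts down before the timer fires
	time.Sleep(200 * time.Millisecond)
}
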
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/pkg/kube/namespace/filter.go 
new/istioctl-1.28.3/pkg/kube/namespace/filter.go
--- old/istioctl-1.28.2/pkg/kube/namespace/filter.go    2025-12-17 
21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/pkg/kube/namespace/filter.go    2026-01-14 
11:04:34.000000000 +0100
@@ -36,6 +36,8 @@
 
 type DiscoveryFilter func(obj any) bool
 
+type ManualSyncWaiter func(stop <-chan struct{})
+
 type discoveryNamespacesFilter struct {
        lock                sync.RWMutex
        namespaces          kclient.Client[*corev1.Namespace]
@@ -44,11 +46,12 @@
        handlers            []func(added, removed sets.String)
 }
 
-func NewDiscoveryNamespacesFilter(
+func newDiscoveryNamespacesFilter(
        namespaces kclient.Client[*corev1.Namespace],
        mesh mesh.Watcher,
        stop <-chan struct{},
-) kubetypes.DynamicObjectFilter {
+       wait bool,
+) *discoveryNamespacesFilter {
        // convert LabelSelectors to Selectors
        f := &discoveryNamespacesFilter{
                namespaces:          namespaces,
@@ -101,11 +104,35 @@
        })
        // Start namespaces and wait for it to be ready now. This is required 
for subsequent users, so we want to block
        namespaces.Start(stop)
-       kube.WaitForCacheSync("discovery filter", stop, namespaces.HasSynced)
-       f.selectorsChanged(mesh.Mesh().GetDiscoverySelectors(), false)
+       if wait {
+               kube.WaitForCacheSync("discovery filter", stop, 
namespaces.HasSynced)
+               f.selectorsChanged(mesh.Mesh().GetDiscoverySelectors(), false)
+       }
        return f
 }
 
+func NewDiscoveryNamespacesFilter(
+       namespaces kclient.Client[*corev1.Namespace],
+       mesh mesh.Watcher,
+       stop <-chan struct{},
+) kubetypes.DynamicObjectFilter {
+       return newDiscoveryNamespacesFilter(namespaces, mesh, stop, true)
+}
+
+// NewNonBlockingDiscoveryNamespacesFilter creates the filter without blocking 
to wait on the initial sync.
+// Use the returned ManualSyncWaiter to wait for the initial sync when desired.
+func NewNonBlockingDiscoveryNamespacesFilter(
+       namespaces kclient.Client[*corev1.Namespace],
+       mesh mesh.Watcher,
+       stop <-chan struct{},
+) (kubetypes.DynamicObjectFilter, ManualSyncWaiter) {
+       f := newDiscoveryNamespacesFilter(namespaces, mesh, stop, false)
+       return f, func(stop <-chan struct{}) {
+               kube.WaitForCacheSync("discovery filter", stop, 
namespaces.HasSynced)
+               f.selectorsChanged(mesh.Mesh().GetDiscoverySelectors(), false)
+       }
+}
+
 func (d *discoveryNamespacesFilter) notifyHandlers(added sets.Set[string], 
removed sets.String) {
        // Clone handlers; we handle dynamic handlers so they can change after 
the filter has started.
        // Important: handlers are not called under the lock. If they are, then 
handlers which eventually call discoveryNamespacesFilter.Filter
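
The filter.go change splits construction from the blocking cache sync: NewNonBlockingDiscoveryNamespacesFilter returns the filter immediately plus a ManualSyncWaiter that the caller invokes when it is ready to block. A self-contained sketch of that constructor-plus-waiter shape (plain channels, illustrative names, not Istio's API):

package main

import (
	"fmt"
	"time"
)

type filter struct{ ready chan struct{} }

// syncWaiter blocks until the filter is synced or stop is closed.
type syncWaiter func(stop <-chan struct{}) bool

func newNonBlockingFilter() (*filter, syncWaiter) {
	f := &filter{ready: make(chan struct{})}
	go func() { // simulate the informer catching up in the background
		time.Sleep(50 * time.Millisecond)
		close(f.ready)
	}()
	return f, func(stop <-chan struct{}) bool {
		select {
		case <-f.ready:
			return true
		case <-stop:
			return false
		}
	}
}

func main() {
	stop := make(chan struct{})
	f, wait := newNonBlockingFilter()
	_ = f // the filter can already be registered with clients here
	fmt.Println("synced:", wait(stop))
}
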
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/releasenotes/notes/58047.yaml 
new/istioctl-1.28.3/releasenotes/notes/58047.yaml
--- old/istioctl-1.28.2/releasenotes/notes/58047.yaml   1970-01-01 
01:00:00.000000000 +0100
+++ new/istioctl-1.28.3/releasenotes/notes/58047.yaml   2026-01-14 
11:04:34.000000000 +0100
@@ -0,0 +1,8 @@
+apiVersion: release-notes/v2
+kind: bug-fix
+area: traffic-management
+issue:
+- 58047
+releaseNotes:
+- |
+  **Fixed** an issue in ambient multicluster where informer failures for 
remote clusters wouldn't be fixed until an istiod restart.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/istioctl-1.28.2/releasenotes/notes/gateway-service-selector-labels.yaml 
new/istioctl-1.28.3/releasenotes/notes/gateway-service-selector-labels.yaml
--- old/istioctl-1.28.2/releasenotes/notes/gateway-service-selector-labels.yaml 
1970-01-01 01:00:00.000000000 +0100
+++ new/istioctl-1.28.3/releasenotes/notes/gateway-service-selector-labels.yaml 
2026-01-14 11:04:34.000000000 +0100
@@ -0,0 +1,9 @@
+apiVersion: release-notes/v2
+
+kind: feature
+
+area: installation
+
+releaseNotes:
+- |
+  **Added** `service.selectorLabels` field to gateway Helm chart for custom 
service selector labels during revision-based migrations.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' old/istioctl-1.28.2/tools/build-base-images.sh 
new/istioctl-1.28.3/tools/build-base-images.sh
--- old/istioctl-1.28.2/tools/build-base-images.sh      2025-12-17 
21:08:26.000000000 +0100
+++ new/istioctl-1.28.3/tools/build-base-images.sh      2026-01-14 
11:04:34.000000000 +0100
@@ -77,7 +77,15 @@
     grep -v '^usr/bin/nft' | \
     grep -v '^usr/share/doc/nftables/examples/.*.nft' | \
     grep -v '^etc/apk/commit_hooks.d/ldconfig-commit.sh$' | \
-    grep -v '.*\.so[0-9\.]*' || true
+    grep -v '.*\.so[0-9\.]*' | \
+    # TODO: Remove the following test files when getting a nftables-slim=1.1.1 
package from packages.wolfi.dev/os
+    grep -v '^usr/bin/clear' | \
+    grep -v '^usr/bin/infocmp' | \
+    grep -v '^usr/bin/tabs' | \
+    grep -v '^usr/bin/tic' | \
+    grep -v '^usr/bin/toe' | \
+    grep -v '^usr/bin/tput' | \
+    grep -v '^usr/bin/tset' || true
 )"
 expectedFiles=(
   "usr/bin/xtables-legacy-multi"

++++++ istioctl.obsinfo ++++++
--- /var/tmp/diff_new_pack.HvRtS2/_old  2026-01-21 14:15:34.050405666 +0100
+++ /var/tmp/diff_new_pack.HvRtS2/_new  2026-01-21 14:15:34.054405833 +0100
@@ -1,5 +1,5 @@
 name: istioctl
-version: 1.28.2
-mtime: 1766002106
-commit: ab413ac6c1f40b2f7c69d97e0db4e712e4ef1ecc
+version: 1.28.3
+mtime: 1768385074
+commit: 4c1f845d839e9086ee85ad9337f2647492322eb4
 

++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/istioctl/vendor.tar.gz 
/work/SRC/openSUSE:Factory/.istioctl.new.1928/vendor.tar.gz differ: char 13, 
line 1
