This is an automated email from the ASF dual-hosted git repository.

zhongxjian pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/dubbo-kubernetes.git


The following commit(s) were added to refs/heads/master by this push:
     new fb11b277 Update dubbod and samples context (#831)
fb11b277 is described below

commit fb11b2772ff41efeb6a58afe4bc219ef9974cf7f
Author: Southern Cross <[email protected]>
AuthorDate: Mon Dec 1 18:28:41 2025 +0800

    Update dubbod and samples context (#831)
---
 NOTICE                                             | 10 ++-
 dubbod/planet/pkg/bootstrap/certcontroller.go      |  4 +-
 dubbod/planet/pkg/bootstrap/server.go              | 10 +--
 dubbod/planet/pkg/bootstrap/webhook.go             |  1 -
 dubbod/planet/pkg/config/aggregate/config.go       |  6 +-
 dubbod/planet/pkg/config/kube/crdclient/client.go  | 44 +++++------
 dubbod/planet/pkg/features/experimental.go         |  2 +-
 dubbod/planet/pkg/grpc/tls.go                      |  2 +-
 dubbod/planet/pkg/model/push_context.go            | 36 ++++-----
 .../kube/controller/endpointslice.go               |  4 +-
 dubbod/planet/pkg/xds/discovery.go                 |  4 +-
 .../security/pkg/credentialfetcher/plugin/token.go |  3 +-
 dubbod/security/pkg/nodeagent/cache/secretcache.go |  6 +-
 .../nodeagent/caclient/providers/aegis/client.go   |  6 +-
 .../dubbo-discovery/templates/mutatingwebhook.yaml |  2 -
 .../templates/validatingwebhookconfiguration.yaml  |  5 +-
 .../dubbo-control/dubbo-discovery/values.yaml      |  4 +-
 operator/pkg/manifest/manifest.go                  |  3 +-
 operator/pkg/render/postprocess.go                 | 42 ++++++++++
 pkg/adsc/adsc.go                                   |  5 --
 pkg/dubbo-agent/agent.go                           |  6 +-
 pkg/dubbo-agent/xds_proxy.go                       | 89 +++++++++++-----------
 pkg/dubbo-agent/xds_proxy_delta.go                 |  2 +-
 pkg/filewatcher/filewatcher.go                     |  3 +
 pkg/kube/kclient/client.go                         | 12 +--
 pkg/kube/multicluster/cluster.go                   |  2 +-
 pkg/webhooks/server/server.go                      |  2 -
 pkg/webhooks/webhookpatch.go                       |  2 -
 pkg/xds/server.go                                  | 14 ++--
 samples/grpc-app/README.md                         | 60 +++++++--------
 samples/grpc-app/grpc-app.yaml                     | 22 +++---
 31 files changed, 226 insertions(+), 187 deletions(-)

diff --git a/NOTICE b/NOTICE
index 61aeb212..4e94f695 100644
--- a/NOTICE
+++ b/NOTICE
@@ -8,4 +8,12 @@ For additional information about the Apache Software Foundation, please see
 <http://www.apache.org/>.
 
 Portions of this file are based on code from the Istio project
-(https://github.com/istio/istio), licensed under the Apache License, Version 2.0.
\ No newline at end of file
+(https://github.com/istio/istio), licensed under the Apache License, Version 2.0.
+
+The following files contain code derived from Istio:
+  - pkg/env/var.go (derived from istio/pkg/env/var.go)
+  - pkg/features/security.go (derived from istio/pkg/features/security.go)
+  - pkg/filewatcher/filewatcher.go (derived from istio/pkg/filewatcher/filewatcher.go)
+  - pkg/filewatcher/worker.go (derived from istio/pkg/filewatcher/worker.go)
+  - pkg/h2c/wrapper.go (derived from istio/pkg/h2c/wrapper.go)
+  - pkg/keepalive/options.go (derived from istio/pkg/keepalive/options.go)
\ No newline at end of file
diff --git a/dubbod/planet/pkg/bootstrap/certcontroller.go b/dubbod/planet/pkg/bootstrap/certcontroller.go
index f94f330b..8895c7cd 100644
--- a/dubbod/planet/pkg/bootstrap/certcontroller.go
+++ b/dubbod/planet/pkg/bootstrap/certcontroller.go
@@ -125,13 +125,13 @@ func (s *Server) initDNSCertsK8SRA() error {
                        newCertChain, newKeyPEM, _, err := chiron.GenKeyCertK8sCA(s.kubeClient.Kube(),
                                strings.Join(s.dnsNames, ","), "", signerName, true, SelfSignedCACertTTL.Get())
                        if err != nil {
-                               log.Errorf("failed regenerating key and cert for istiod by kubernetes: %v", err)
+                               log.Errorf("failed regenerating key and cert for dubbod by kubernetes: %v", err)
                        }
                        s.dubbodCertBundleWatcher.SetAndNotify(newKeyPEM, newCertChain, newCaBundle)
                }
        })
 
-       s.addStartFunc("istiod server certificate rotation", func(stop <-chan struct{}) error {
+       s.addStartFunc("dubbod server certificate rotation", func(stop <-chan struct{}) error {
                go func() {
                        // Track TTL of DNS cert and renew cert in accordance to grace period.
                        s.RotateDNSCertForK8sCA(stop, "", signerName, true, SelfSignedCACertTTL.Get())
diff --git a/dubbod/planet/pkg/bootstrap/server.go b/dubbod/planet/pkg/bootstrap/server.go
index 1c9ff2c0..e558b711 100644
--- a/dubbod/planet/pkg/bootstrap/server.go
+++ b/dubbod/planet/pkg/bootstrap/server.go
@@ -433,7 +433,7 @@ func (s *Server) initRegistryEventHandlers() {
                return
        }
 
-       log.Infof("initRegistryEventHandlers: configController is available, registering event handlers")
+       log.Debugf("initRegistryEventHandlers: configController is available, registering event handlers")
 
        configHandler := func(prev config.Config, curr config.Config, event model.Event) {
                // Log ALL events at INFO level to ensure visibility
@@ -496,17 +496,17 @@ func (s *Server) initRegistryEventHandlers() {
                })
        }
        schemas := collections.Planet.All()
-       log.Infof("initRegistryEventHandlers: found %d schemas to register", len(schemas))
+       log.Debugf("initRegistryEventHandlers: found %d schemas to register", len(schemas))
        registeredCount := 0
        for _, schema := range schemas {
                gvk := schema.GroupVersionKind()
                schemaID := schema.Identifier()
-               log.Infof("initRegistryEventHandlers: registering event handler for %s (GVK: %v)", schemaID, gvk)
+               log.Debugf("initRegistryEventHandlers: registering event handler for %s (GVK: %v)", schemaID, gvk)
                s.configController.RegisterEventHandler(gvk, configHandler)
                registeredCount++
-               log.Infof("initRegistryEventHandlers: successfully registered event handler for %s (GVK: %v)", schemaID, gvk)
+               log.Debugf("initRegistryEventHandlers: successfully registered event handler for %s (GVK: %v)", schemaID, gvk)
        }
-       log.Infof("initRegistryEventHandlers: successfully registered event handlers for %d schemas", registeredCount)
+       log.Debugf("initRegistryEventHandlers: successfully registered event handlers for %d schemas", registeredCount)
 }
 
 func (s *Server) addReadinessProbe(name string, fn readinessProbe) {
diff --git a/dubbod/planet/pkg/bootstrap/webhook.go b/dubbod/planet/pkg/bootstrap/webhook.go
index 19b030ba..14c2ee0c 100644
--- a/dubbod/planet/pkg/bootstrap/webhook.go
+++ b/dubbod/planet/pkg/bootstrap/webhook.go
@@ -64,6 +64,5 @@ func (s *Server) initSecureWebhookServer(args *PlanetArgs) {
                TLSConfig: tlsConfig,
        }
 
-       // register istiodReadyHandler on the httpsMux so that readiness can also be checked remotely
        s.httpsMux.HandleFunc("/ready", s.dubbodReadyHandler)
 }
diff --git a/dubbod/planet/pkg/config/aggregate/config.go b/dubbod/planet/pkg/config/aggregate/config.go
index f90702a8..7d3af34b 100644
--- a/dubbod/planet/pkg/config/aggregate/config.go
+++ b/dubbod/planet/pkg/config/aggregate/config.go
@@ -160,18 +160,18 @@ func makeStore(stores []model.ConfigStore, writer model.ConfigStore) (model.Conf
 
 func (cr *storeCache) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
        log := log.RegisterScope("aggregate", "aggregate config controller")
-       log.Infof("RegisterEventHandler: registering handler for %v across %d caches", kind, len(cr.caches))
+       log.Debugf("RegisterEventHandler: registering handler for %v across %d caches", kind, len(cr.caches))
        registeredCount := 0
        for i, cache := range cr.caches {
                if _, exists := cache.Schemas().FindByGroupVersionKind(kind); exists {
-                       log.Infof("RegisterEventHandler: registering handler for %v on cache[%d] (type=%T)", kind, i, cache)
+                       log.Debugf("RegisterEventHandler: registering handler for %v on cache[%d] (type=%T)", kind, i, cache)
                        cache.RegisterEventHandler(kind, handler)
                        registeredCount++
                } else {
                        log.Debugf("RegisterEventHandler: cache[%d] does not support %v, skipping", i, kind)
                }
        }
-       log.Infof("RegisterEventHandler: successfully registered handler for %v on %d caches", kind, registeredCount)
+       log.Debugf("RegisterEventHandler: successfully registered handler for %v on %d caches", kind, registeredCount)
 }
 
 func (cr *storeCache) Run(stop <-chan struct{}) {
diff --git a/dubbod/planet/pkg/config/kube/crdclient/client.go b/dubbod/planet/pkg/config/kube/crdclient/client.go
index e4cab973..f76b4f44 100644
--- a/dubbod/planet/pkg/config/kube/crdclient/client.go
+++ b/dubbod/planet/pkg/config/kube/crdclient/client.go
@@ -153,15 +153,15 @@ func (cl *Client) allKinds() map[config.GroupVersionKind]nsStore {
 }
 
 func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
-       cl.logger.Infof("addCRD: adding CRD %q", name)
+       cl.logger.Debugf("addCRD: adding CRD %q", name)
        s, f := cl.schemasByCRDName[name]
        if !f {
-               cl.logger.Warnf("addCRD: added resource that we are not 
watching: %v", name)
+               cl.logger.Debugf("addCRD: added resource that we are not 
watching: %v", name)
                return
        }
        resourceGVK := s.GroupVersionKind()
        gvr := s.GroupVersionResource()
-       cl.logger.Infof("addCRD: CRD %q maps to GVK %v, GVR %v", name, 
resourceGVK, gvr)
+       cl.logger.Debugf("addCRD: CRD %q maps to GVK %v, GVR %v", name, 
resourceGVK, gvr)
 
        cl.kindsMu.Lock()
        defer cl.kindsMu.Unlock()
@@ -191,9 +191,9 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
        var namespaceFilter kubetypes.DynamicObjectFilter
        if !s.IsClusterScoped() {
                namespaceFilter = cl.client.ObjectFilter()
-               cl.logger.Infof("addCRD: using namespace filter for %v (not 
cluster-scoped)", resourceGVK)
+               cl.logger.Debugf("addCRD: using namespace filter for %v (not 
cluster-scoped)", resourceGVK)
        } else {
-               cl.logger.Infof("addCRD: no namespace filter for %v 
(cluster-scoped)", resourceGVK)
+               cl.logger.Debugf("addCRD: no namespace filter for %v 
(cluster-scoped)", resourceGVK)
        }
 
        filter := kubetypes.Filter{
@@ -201,7 +201,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
                ObjectTransform: transform,
                FieldSelector:   fieldSelector,
        }
-       cl.logger.Infof("addCRD: created filter for %v (namespaceFilter=%v, extraFilter=%v, fieldSelector=%v)", resourceGVK, namespaceFilter != nil, extraFilter != nil, fieldSelector)
+       cl.logger.Debugf("addCRD: created filter for %v (namespaceFilter=%v, extraFilter=%v, fieldSelector=%v)", resourceGVK, namespaceFilter != nil, extraFilter != nil, fieldSelector)
 
        var kc kclient.Untyped
        if s.IsBuiltin() {
@@ -212,7 +212,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
                informerType := kubetypes.StandardInformer
                if resourceGVK == gvk.SubsetRule || resourceGVK == 
gvk.ServiceRoute || resourceGVK == gvk.PeerAuthentication {
                        informerType = kubetypes.DynamicInformer
-                       cl.logger.Infof("addCRD: using DynamicInformer for %v 
(uses Dynamic client)", resourceGVK)
+                       cl.logger.Debugf("addCRD: using DynamicInformer for %v 
(uses Dynamic client)", resourceGVK)
                }
                kc = kclient.NewDelayedInformer[controllers.Object](
                        cl.client,
@@ -235,7 +235,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
        // This helps diagnose if events are being filtered before reaching the 
collection
        wrappedClientDebugHandler := wrappedClient.RegisterBatch(func(o 
[]krt.Event[controllers.Object]) {
                if len(o) > 0 {
-                       cl.logger.Infof("addCRD: wrappedClient event detected 
for %v: %d events", resourceGVK, len(o))
+                       cl.logger.Debugf("addCRD: wrappedClient event detected 
for %v: %d events", resourceGVK, len(o))
                        for i, event := range o {
                                var nameStr, nsStr string
                                if event.New != nil {
@@ -247,7 +247,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
                                        nameStr = obj.GetName()
                                        nsStr = obj.GetNamespace()
                                }
-                               cl.logger.Infof("addCRD: wrappedClient 
event[%d] %s for %v (name=%s/%s)",
+                               cl.logger.Debugf("addCRD: wrappedClient 
event[%d] %s for %v (name=%s/%s)",
                                        i, event.Event, resourceGVK, nsStr, 
nameStr)
                        }
                }
@@ -257,7 +257,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
        // Use false to match Istio's implementation - only process future 
events, not initial sync
        debugHandler := collection.RegisterBatch(func(o 
[]krt.Event[config.Config]) {
                if len(o) > 0 {
-                       cl.logger.Infof("addCRD: collection event detected for 
%v: %d events", resourceGVK, len(o))
+                       cl.logger.Debugf("addCRD: collection event detected for 
%v: %d events", resourceGVK, len(o))
                        for i, event := range o {
                                var nameStr, nsStr string
                                if event.New != nil {
@@ -267,7 +267,7 @@ func (cl *Client) addCRD(name string, opts krt.OptionsBuilder) {
                                        nameStr = event.Old.Name
                                        nsStr = event.Old.Namespace
                                }
-                               cl.logger.Infof("addCRD: collection event[%d] 
%s for %v (name=%s/%s)",
+                               cl.logger.Debugf("addCRD: collection event[%d] 
%s for %v (name=%s/%s)",
                                        i, event.Event, resourceGVK, nsStr, 
nameStr)
                        }
                }
@@ -303,13 +303,13 @@ func (cl *Client) RegisterEventHandler(kind config.GroupVersionKind, handler mod
                return
        }
 
-       cl.logger.Infof("RegisterEventHandler: registering handler for %v", 
kind)
+       cl.logger.Debugf("RegisterEventHandler: registering handler for %v", 
kind)
        // Match Istio's implementation: RegisterBatch returns a 
HandlerRegistration that is already
        // registered with the collection, so we just need to append it to 
handlers to keep a reference
        // The handler will be called by the collection when events occur, 
regardless of whether we
        // update cl.kinds[kind] or not. However, we update it to keep the 
handlers slice in sync.
        handlerReg := c.collection.RegisterBatch(func(o 
[]krt.Event[config.Config]) {
-               cl.logger.Infof("RegisterEventHandler: batch handler triggered 
for %v with %d events", kind, len(o))
+               cl.logger.Debugf("RegisterEventHandler: batch handler triggered 
for %v with %d events", kind, len(o))
                for i, event := range o {
                        var nameStr, nsStr string
                        if event.New != nil {
@@ -319,7 +319,7 @@ func (cl *Client) RegisterEventHandler(kind config.GroupVersionKind, handler mod
                                nameStr = event.Old.Name
                                nsStr = event.Old.Namespace
                        }
-                       cl.logger.Infof("RegisterEventHandler: processing 
event[%d] %s for %v (name=%s/%s)",
+                       cl.logger.Debugf("RegisterEventHandler: processing 
event[%d] %s for %v (name=%s/%s)",
                                i, event.Event, kind, nsStr, nameStr)
                        switch event.Event {
                        case controllers.EventAdd:
@@ -346,7 +346,7 @@ func (cl *Client) RegisterEventHandler(kind config.GroupVersionKind, handler mod
        // Update handlers slice to keep reference (though not strictly 
necessary for functionality)
        c.handlers = append(c.handlers, handlerReg)
        cl.kinds[kind] = c
-       cl.logger.Infof("RegisterEventHandler: successfully registered handler 
for %v", kind)
+       cl.logger.Debugf("RegisterEventHandler: successfully registered handler 
for %v", kind)
 }
 
 func (cl *Client) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
@@ -438,26 +438,26 @@ func (cl *Client) List(kind config.GroupVersionKind, namespace string) []config.
        if namespace == metav1.NamespaceAll {
                // Get all configs from collection
                configs = h.collection.List()
-               cl.logger.Infof("List: found %d configs for %v (namespace=all, 
synced=%v)",
+               cl.logger.Debugf("List: found %d configs for %v (namespace=all, 
synced=%v)",
                        len(configs), kind, h.collection.HasSynced())
                if len(configs) > 0 {
                        for i, cfg := range configs {
-                               cl.logger.Infof("List: config[%d] %s/%s for 
%v", i, cfg.Namespace, cfg.Name, kind)
+                               cl.logger.Debugf("List: config[%d] %s/%s for 
%v", i, cfg.Namespace, cfg.Name, kind)
                        }
                } else {
-                       cl.logger.Warnf("List: collection returned 0 configs for %v (synced=%v), this may indicate informer is not watching correctly or resources are being filtered", kind, h.collection.HasSynced())
+                       cl.logger.Debugf("List: collection returned 0 configs for %v (synced=%v), this may indicate informer is not watching correctly or resources are being filtered", kind, h.collection.HasSynced())
                }
                // Log collection type for diagnosis
-               cl.logger.Infof("List: collection type is %T, HasSynced=%v", 
h.collection, h.collection.HasSynced())
+               cl.logger.Debugf("List: collection type is %T, HasSynced=%v", 
h.collection, h.collection.HasSynced())
        } else {
                configs = h.index.Lookup(namespace)
-               cl.logger.Infof("List: found %d configs for %v in namespace %s 
(synced=%v)", len(configs), kind, namespace, h.collection.HasSynced())
+               cl.logger.Debugf("List: found %d configs for %v in namespace %s 
(synced=%v)", len(configs), kind, namespace, h.collection.HasSynced())
                if len(configs) > 0 {
                        for i, cfg := range configs {
-                               cl.logger.Infof("List: config[%d] %s/%s for 
%v", i, cfg.Namespace, cfg.Name, kind)
+                               cl.logger.Debugf("List: config[%d] %s/%s for 
%v", i, cfg.Namespace, cfg.Name, kind)
                        }
                } else {
-                       cl.logger.Warnf("List: found 0 configs for %v in namespace %s (synced=%v), checking if resources exist in cluster", kind, namespace, h.collection.HasSynced())
+                       cl.logger.Debugf("List: found 0 configs for %v in namespace %s (synced=%v), checking if resources exist in cluster", kind, namespace, h.collection.HasSynced())
                }
        }
 
diff --git a/dubbod/planet/pkg/features/experimental.go b/dubbod/planet/pkg/features/experimental.go
index d45339d7..2c08b320 100644
--- a/dubbod/planet/pkg/features/experimental.go
+++ b/dubbod/planet/pkg/features/experimental.go
@@ -24,7 +24,7 @@ var (
                "The name of the ConfigMap that stores the Root CA Certificate that is used by dubbod").Get()
        EnableLeaderElection = env.Register("ENABLE_LEADER_ELECTION", true,
                "If enabled (default), starts a leader election client and gains leadership before executing controllers. "+
-                       "If false, it assumes that only one instance of istiod is running and skips leader election.").Get()
+                       "If false, it assumes that only one instance of dubbod is running and skips leader election.").Get()
        EnableEnhancedSubsetRuleMerge = env.Register("ENABLE_ENHANCED_DESTINATIONRULE_MERGE", true,
                "If enabled, Dubbo merge subsetrules considering their exportTo fields,"+
                        " they will be kept as independent rules if the exportTos are not equal.").Get()
diff --git a/dubbod/planet/pkg/grpc/tls.go b/dubbod/planet/pkg/grpc/tls.go
index 9ffb8b81..83671ba2 100644
--- a/dubbod/planet/pkg/grpc/tls.go
+++ b/dubbod/planet/pkg/grpc/tls.go
@@ -80,7 +80,7 @@ func getTLSDialOption(opts *TLSOptions) (grpc.DialOption, error) {
        }
        // For debugging on localhost (with port forward)
        if strings.Contains(config.ServerName, "localhost") {
-               config.ServerName = "istiod.istio-system.svc"
+               config.ServerName = "dubbod.dubbo-system.svc"
        }
        if opts.SAN != "" {
                config.ServerName = opts.SAN
diff --git a/dubbod/planet/pkg/model/push_context.go b/dubbod/planet/pkg/model/push_context.go
index 625b12ab..e022de56 100644
--- a/dubbod/planet/pkg/model/push_context.go
+++ b/dubbod/planet/pkg/model/push_context.go
@@ -512,7 +512,7 @@ func (ps *PushContext) initServiceRegistry(env *Environment, configsUpdate sets.
 }
 
 func (ps *PushContext) createNewContext(env *Environment) {
-       log.Infof("createNewContext: creating new PushContext (full initialization)")
+       log.Debug("createNewContext: creating new PushContext (full initialization)")
        ps.initServiceRegistry(env, nil)
        ps.initServiceRoutes(env)
        ps.initSubsetRules(env)
@@ -541,7 +541,7 @@ func (ps *PushContext) updateContext(env *Environment, oldPushContext *PushConte
                        }
                }
                if serviceRouteCount > 0 {
-                       log.Infof("updateContext: detected %d ServiceRoute 
config changes", serviceRouteCount)
+                       log.Debugf("updateContext: detected %d ServiceRoute 
config changes", serviceRouteCount)
                }
        }
 
@@ -559,10 +559,10 @@ func (ps *PushContext) updateContext(env *Environment, oldPushContext *PushConte
                        }
                }
                if subsetRuleCount > 0 {
-                       log.Infof("updateContext: detected %d SubsetRule config 
changes", subsetRuleCount)
+                       log.Debugf("updateContext: detected %d SubsetRule 
config changes", subsetRuleCount)
                }
                if pushReq.Full {
-                       log.Infof("updateContext: Full push requested, will 
re-initialize SubsetRule and ServiceRoute indexes")
+                       log.Debugf("updateContext: Full push requested, will 
re-initialize SubsetRule and ServiceRoute indexes")
                }
                log.Debugf("updateContext: subsetRulesChanged=%v, 
serviceRoutesChanged=%v, pushReq.ConfigsUpdated size=%d, Full=%v",
                        subsetRulesChanged, serviceRoutesChanged, 
len(pushReq.ConfigsUpdated), pushReq != nil && pushReq.Full)
@@ -593,7 +593,7 @@ func (ps *PushContext) updateContext(env *Environment, oldPushContext *PushConte
        }
 
        if serviceRoutesChanged {
-               log.Infof("updateContext: ServiceRoutes changed, 
re-initializing ServiceRoute index")
+               log.Debugf("updateContext: ServiceRoutes changed, 
re-initializing ServiceRoute index")
                ps.initServiceRoutes(env)
        } else {
                log.Debugf("updateContext: ServiceRoutes unchanged, reusing old 
ServiceRoute index")
@@ -601,7 +601,7 @@ func (ps *PushContext) updateContext(env *Environment, oldPushContext *PushConte
        }
 
        if subsetRulesChanged {
-               log.Infof("updateContext: SubsetRules changed, re-initializing 
SubsetRule index")
+               log.Debugf("updateContext: SubsetRules changed, re-initializing 
SubsetRule index")
                ps.initSubsetRules(env)
        } else {
                log.Debugf("updateContext: SubsetRules unchanged, reusing old 
SubsetRule index")
@@ -610,7 +610,7 @@ func (ps *PushContext) updateContext(env *Environment, oldPushContext *PushConte
 
        authnPoliciesChanged := pushReq != nil && (pushReq.Full || 
HasConfigsOfKind(pushReq.ConfigsUpdated, kind.PeerAuthentication))
        if authnPoliciesChanged || oldPushContext == nil || 
oldPushContext.AuthenticationPolicies == nil {
-               log.Infof("updateContext: PeerAuthentication changed (full=%v, 
configsUpdatedContainingPeerAuth=%v), rebuilding authentication policies",
+               log.Debugf("updateContext: PeerAuthentication changed (full=%v, 
configsUpdatedContainingPeerAuth=%v), rebuilding authentication policies",
                        pushReq != nil && pushReq.Full, func() bool {
                                if pushReq == nil {
                                        return false
@@ -693,16 +693,16 @@ func (ps *PushContext) GetAllServices() []*Service {
 }
 
 func (ps *PushContext) initServiceRoutes(env *Environment) {
-       log.Infof("initServiceRoutes: starting ServiceRoute initialization")
+       log.Debugf("initServiceRoutes: starting ServiceRoute initialization")
        ps.serviceRouteIndex.referencedDestinations = map[string]sets.String{}
        serviceroutes := env.List(gvk.ServiceRoute, NamespaceAll)
-       log.Infof("initServiceRoutes: found %d ServiceRoute configs", 
len(serviceroutes))
+       log.Debugf("initServiceRoutes: found %d ServiceRoute configs", 
len(serviceroutes))
        sroutes := make([]config.Config, len(serviceroutes))
 
        for i, r := range serviceroutes {
                sroutes[i] = resolveServiceRouteShortnames(r)
                if vs, ok := r.Spec.(*networking.VirtualService); ok {
-                       log.Infof("initServiceRoutes: ServiceRoute %s/%s with 
hosts %v and %d HTTP routes",
+                       log.Debugf("initServiceRoutes: ServiceRoute %s/%s with 
hosts %v and %d HTTP routes",
                                r.Namespace, r.Name, vs.Hosts, len(vs.Http))
                }
        }
@@ -720,7 +720,7 @@ func (ps *PushContext) initServiceRoutes(env *Environment) {
                }
        }
        ps.serviceRouteIndex.hostToRoutes = hostToRoutes
-       log.Infof("initServiceRoutes: indexed ServiceRoutes for %d hostnames", 
len(hostToRoutes))
+       log.Debugf("initServiceRoutes: indexed ServiceRoutes for %d hostnames", 
len(hostToRoutes))
 }
 
 // sortConfigBySelectorAndCreationTime sorts the list of config objects based on priority and creation time.
@@ -810,7 +810,7 @@ func (ps *PushContext) setSubsetRules(configs []config.Config) {
        ps.subsetRuleIndex.rootNamespaceLocal = rootNamespaceLocalDestRules
 
        // Log indexing results
-       log.Infof("setSubsetRules: indexed %d namespaces with local rules", 
len(namespaceLocalSubRules))
+       log.Debugf("setSubsetRules: indexed %d namespaces with local rules", 
len(namespaceLocalSubRules))
        for ns, rules := range namespaceLocalSubRules {
                totalRules := 0
                for hostname, ruleList := range rules.specificSubRules {
@@ -823,26 +823,26 @@ func (ps *PushContext) setSubsetRules(configs []config.Config) {
                                        if hasTLS {
                                                tlsMode = 
dr.TrafficPolicy.Tls.Mode.String()
                                        }
-                                       log.Infof("setSubsetRules: namespace 
%s, hostname %s: DestinationRule has %d subsets, TLS mode: %s",
+                                       log.Debugf("setSubsetRules: namespace 
%s, hostname %s: DestinationRule has %d subsets, TLS mode: %s",
                                                ns, hostname, len(dr.Subsets), 
tlsMode)
                                }
                        }
                }
-               log.Infof("setSubsetRules: namespace %s has %d DestinationRules 
with %d specific hostnames", ns, totalRules, len(rules.specificSubRules))
+               log.Debugf("setSubsetRules: namespace %s has %d 
DestinationRules with %d specific hostnames", ns, totalRules, 
len(rules.specificSubRules))
        }
-       log.Infof("setSubsetRules: indexed %d namespaces with exported rules", 
len(exportedDestRulesByNamespace))
+       log.Debugf("setSubsetRules: indexed %d namespaces with exported rules", 
len(exportedDestRulesByNamespace))
        if rootNamespaceLocalDestRules != nil {
                totalRootRules := 0
                for _, ruleList := range 
rootNamespaceLocalDestRules.specificSubRules {
                        totalRootRules += len(ruleList)
                }
-               log.Infof("setSubsetRules: root namespace has %d 
DestinationRules with %d specific hostnames", totalRootRules, 
len(rootNamespaceLocalDestRules.specificSubRules))
+               log.Debugf("setSubsetRules: root namespace has %d 
DestinationRules with %d specific hostnames", totalRootRules, 
len(rootNamespaceLocalDestRules.specificSubRules))
        }
 }
 
 func (ps *PushContext) initSubsetRules(env *Environment) {
        configs := env.List(gvk.SubsetRule, NamespaceAll)
-       log.Infof("initSubsetRules: found %d SubsetRule configs", len(configs))
+       log.Debugf("initSubsetRules: found %d SubsetRule configs", len(configs))
 
        // values returned from ConfigStore.List are immutable.
        // Therefore, we make a copy
@@ -854,7 +854,7 @@ func (ps *PushContext) initSubsetRules(env *Environment) {
                        if dr.TrafficPolicy != nil && dr.TrafficPolicy.Tls != 
nil {
                                tlsMode = dr.TrafficPolicy.Tls.Mode.String()
                        }
-                       log.Infof("initSubsetRules: SubsetRule %s/%s for host 
%s with %d subsets, TLS mode: %s",
+                       log.Debugf("initSubsetRules: SubsetRule %s/%s for host 
%s with %d subsets, TLS mode: %s",
                                configs[i].Namespace, configs[i].Name, dr.Host, 
len(dr.Subsets), tlsMode)
                }
        }
diff --git a/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go b/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
index 059800e9..8373f0c3 100644
--- a/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
+++ b/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
@@ -544,13 +544,13 @@ func (esc *endpointSliceController) pushEDS(hostnames []host.Name, namespace str
 
        for _, hostname := range hostnames {
                endpoints := esc.endpointCache.get(hostname)
-               log.Infof("pushEDS: registering %d endpoints for service %s in 
namespace %s (shard=%v)",
+               log.Debugf("pushEDS: registering %d endpoints for service %s in 
namespace %s (shard=%v)",
                        len(endpoints), string(hostname), namespace, shard)
                if len(endpoints) > 0 {
                        // Log endpoint details for first few endpoints
                        for i, ep := range endpoints {
                                if i < 3 {
-                                       log.Infof("pushEDS: endpoint[%d] 
address=%s, port=%d, ServicePortName='%s', HealthStatus=%v",
+                                       log.Debugf("pushEDS: endpoint[%d] 
address=%s, port=%d, ServicePortName='%s', HealthStatus=%v",
                                                i, ep.FirstAddressOrNil(), 
ep.EndpointPort, ep.ServicePortName, ep.HealthStatus)
                                }
                        }
diff --git a/dubbod/planet/pkg/xds/discovery.go b/dubbod/planet/pkg/xds/discovery.go
index de867d96..84d3be99 100644
--- a/dubbod/planet/pkg/xds/discovery.go
+++ b/dubbod/planet/pkg/xds/discovery.go
@@ -393,7 +393,7 @@ func (s *DiscoveryServer) EDSUpdate(shard model.ShardKey, serviceName string, na
        // 2. Endpoints become unavailable (from non-empty to empty)
        // 3. Endpoint health status changes
        if pushType == model.IncrementalPush || pushType == model.FullPush {
-               log.Infof("EDSUpdate: service %s/%s triggering %v push 
(endpoints=%d)", namespace, serviceName, pushType, len(dubboEndpoints))
+               log.Debugf("EDSUpdate: service %s/%s triggering %v push 
(endpoints=%d)", namespace, serviceName, pushType, len(dubboEndpoints))
                s.ConfigUpdate(&model.PushRequest{
                        Full:           pushType == model.FullPush,
                        ConfigsUpdated: sets.New(model.ConfigKey{Kind: 
kind.ServiceEntry, Name: serviceName, Namespace: namespace}),
@@ -406,7 +406,7 @@ func (s *DiscoveryServer) EDSUpdate(shard model.ShardKey, serviceName string, na
                // 2. But we still need to notify clients about the current 
state
                // For proxyless gRPC, we should push even if endpoints are 
empty to ensure clients know the state
                if len(dubboEndpoints) == 0 {
-                       log.Infof("EDSUpdate: service %s/%s endpoints became 
empty (NoPush), forcing push to clear client cache", namespace, serviceName)
+                       log.Debugf("EDSUpdate: service %s/%s endpoints became 
empty (NoPush), forcing push to clear client cache", namespace, serviceName)
                        s.ConfigUpdate(&model.PushRequest{
                                Full:           false, // Incremental push
                                ConfigsUpdated: sets.New(model.ConfigKey{Kind: 
kind.ServiceEntry, Name: serviceName, Namespace: namespace}),
diff --git a/dubbod/security/pkg/credentialfetcher/plugin/token.go b/dubbod/security/pkg/credentialfetcher/plugin/token.go
index aca88274..d434116f 100644
--- a/dubbod/security/pkg/credentialfetcher/plugin/token.go
+++ b/dubbod/security/pkg/credentialfetcher/plugin/token.go
@@ -52,5 +52,4 @@ func (t KubernetesTokenPlugin) GetIdentityProvider() string {
        return ""
 }
 
-func (t KubernetesTokenPlugin) Stop() {
-}
+func (t KubernetesTokenPlugin) Stop() {}
diff --git a/dubbod/security/pkg/nodeagent/cache/secretcache.go b/dubbod/security/pkg/nodeagent/cache/secretcache.go
index 954b33d9..8ccf085a 100644
--- a/dubbod/security/pkg/nodeagent/cache/secretcache.go
+++ b/dubbod/security/pkg/nodeagent/cache/secretcache.go
@@ -244,7 +244,7 @@ func (sc *SecretManagerClient) getCachedSecret(resourceName string) (secret *sec
                                ResourceName: resourceName,
                                RootCert:     rootCertBundle,
                        }
-                       cacheLog.Infof("returned workload trust anchor from 
cache (ttl=%v)", time.Until(c.ExpireTime))
+                       cacheLog.Debugf("returned workload trust anchor from 
cache (ttl=%v)", time.Until(c.ExpireTime))
                } else {
                        ns = &security.SecretItem{
                                ResourceName:     resourceName,
@@ -253,7 +253,7 @@ func (sc *SecretManagerClient) getCachedSecret(resourceName string) (secret *sec
                                ExpireTime:       c.ExpireTime,
                                CreatedTime:      c.CreatedTime,
                        }
-                       cacheLog.Infof("returned workload certificate from 
cache (ttl=%v)", time.Until(c.ExpireTime))
+                       cacheLog.Debugf("returned workload certificate from 
cache (ttl=%v)", time.Until(c.ExpireTime))
                }
 
                return ns
@@ -314,7 +314,7 @@ func (sc *SecretManagerClient) generateNewSecret(resourceName string) (*security
        cacheLog.WithLabels("resourceName", resourceName,
                "latency", time.Since(t0),
                "ttl", time.Until(expireTime)).
-               Info("generated new workload certificate")
+               Debug("generated new workload certificate")
 
        if len(trustBundlePEM) > 0 {
                rootCertPEM = concatCerts(trustBundlePEM)
diff --git a/dubbod/security/pkg/nodeagent/caclient/providers/aegis/client.go b/dubbod/security/pkg/nodeagent/caclient/providers/aegis/client.go
index 71f34b19..cbf9ad25 100644
--- a/dubbod/security/pkg/nodeagent/caclient/providers/aegis/client.go
+++ b/dubbod/security/pkg/nodeagent/caclient/providers/aegis/client.go
@@ -81,11 +81,7 @@ func (c *AegisClient) CSRSign(csrPEM []byte, certValidTTLInSec int64) (res []str
                ValidityDuration: certValidTTLInSec,
                Metadata:         crMetaStruct,
        }
-       // TODO(hzxuzhonghu): notify caclient rebuilding only when root cert is updated.
-       // It can happen when the istiod dns certs is resigned after root cert is updated,
-       // in this case, the ca grpc client can not automatically connect to istiod after the underlying network connection closed.
-       // Becase that the grpc client still use the old tls configuration to reconnect to istiod.
-       // So here we need to rebuild the caClient in order to use the new root cert.
+
        defer func() {
                if err != nil {
                        aegisClientLog.Errorf("failed to sign CSR: %v", err)
diff --git a/manifests/charts/dubbo-control/dubbo-discovery/templates/mutatingwebhook.yaml b/manifests/charts/dubbo-control/dubbo-discovery/templates/mutatingwebhook.yaml
index 5ec98004..47541bbd 100644
--- a/manifests/charts/dubbo-control/dubbo-discovery/templates/mutatingwebhook.yaml
+++ b/manifests/charts/dubbo-control/dubbo-discovery/templates/mutatingwebhook.yaml
@@ -1,9 +1,7 @@
----
 apiVersion: admissionregistration.k8s.io/v1
 kind: MutatingWebhookConfiguration
 metadata:
   name: dubbo-grpcxds-injector
-  namespace: default
   labels:
     app: dubbod
     dubbo.apache.org/rev: default
diff --git a/manifests/charts/dubbo-control/dubbo-discovery/templates/validatingwebhookconfiguration.yaml b/manifests/charts/dubbo-control/dubbo-discovery/templates/validatingwebhookconfiguration.yaml
index 34c303bd..98e9eae1 100644
--- a/manifests/charts/dubbo-control/dubbo-discovery/templates/validatingwebhookconfiguration.yaml
+++ b/manifests/charts/dubbo-control/dubbo-discovery/templates/validatingwebhookconfiguration.yaml
@@ -1,9 +1,8 @@
----
+{{- if .Values.global.configValidation }}
 apiVersion: admissionregistration.k8s.io/v1
 kind: ValidatingWebhookConfiguration
 metadata:
   name: dubbo-validator-dubbo-system
-  namespace: default
   labels:
     app: dubbod
     dubbo.apache.org/rev: {{ .Values.revision | default "default" }}
@@ -37,4 +36,4 @@ webhooks:
       resources:
       - "*"
     sideEffects: None
-
+{{- end -}}
diff --git a/manifests/charts/dubbo-control/dubbo-discovery/values.yaml b/manifests/charts/dubbo-control/dubbo-discovery/values.yaml
index 0e67204f..9fa297d4 100644
--- a/manifests/charts/dubbo-control/dubbo-discovery/values.yaml
+++ b/manifests/charts/dubbo-control/dubbo-discovery/values.yaml
@@ -50,4 +50,6 @@ _internal_default_values_not_set:
 
     statusPort: 15020
 
-    caAddress: ""
\ No newline at end of file
+    caAddress: ""
+
+    configValidation: true
\ No newline at end of file
diff --git a/operator/pkg/manifest/manifest.go b/operator/pkg/manifest/manifest.go
index 606b1f77..5d020604 100644
--- a/operator/pkg/manifest/manifest.go
+++ b/operator/pkg/manifest/manifest.go
@@ -19,6 +19,7 @@ package manifest
 
 import (
        "encoding/json"
+
        "github.com/apache/dubbo-kubernetes/operator/pkg/component"
        "github.com/apache/dubbo-kubernetes/operator/pkg/util"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -75,7 +76,7 @@ func (m Manifest) Hash() string {
 func ObjectHash(o *unstructured.Unstructured) string {
        k := o.GroupVersionKind().Kind
        switch o.GroupVersionKind().Kind {
-       case "ClusterRole", "ClusterRoleBinding":
+       case "ClusterRole", "ClusterRoleBinding", "MutatingWebhookConfiguration", "ValidatingWebhookConfiguration":
                return k + ":" + o.GetName()
        }
        return k + ":" + o.GetNamespace() + ":" + o.GetName()
diff --git a/operator/pkg/render/postprocess.go b/operator/pkg/render/postprocess.go
index ae211a17..80aec532 100644
--- a/operator/pkg/render/postprocess.go
+++ b/operator/pkg/render/postprocess.go
@@ -18,7 +18,9 @@
 package render
 
 import (
+       "encoding/json"
        "fmt"
+
        "github.com/apache/dubbo-kubernetes/operator/pkg/component"
        "github.com/apache/dubbo-kubernetes/operator/pkg/manifest"
        "github.com/apache/dubbo-kubernetes/operator/pkg/values"
@@ -36,6 +38,46 @@ type patchContext struct {
 func postProcess(_ component.Component, manifests []manifest.Manifest, _ values.Map) ([]manifest.Manifest, error) {
        // needPatching builds a map of manifest index -> patch. This ensures we only do the full round-tripping once per object.
        needPatching := map[int][]patchContext{}
+
+       // Remove namespace field from cluster-scoped resources
+       clusterScopedKinds := map[string]bool{
+               "MutatingWebhookConfiguration":   true,
+               "ValidatingWebhookConfiguration": true,
+               "ClusterRole":                    true,
+               "ClusterRoleBinding":             true,
+               "CustomResourceDefinition":       true,
+       }
+
+       for idx := range manifests {
+               m := manifests[idx]
+               kind := m.GroupVersionKind().Kind
+               if clusterScopedKinds[kind] && m.GetNamespace() != "" {
+                       // Remove namespace field from cluster-scoped resources
+                       baseJSON, err := yaml.YAMLToJSON([]byte(m.Content))
+                       if err != nil {
+                               return nil, err
+                       }
+                       // Parse JSON to remove namespace field
+                       var obj map[string]interface{}
+                       if err := json.Unmarshal(baseJSON, &obj); err != nil {
+                               return nil, fmt.Errorf("failed to unmarshal JSON: %v", err)
+                       }
+                       if metadata, ok := obj["metadata"].(map[string]interface{}); ok {
+                               delete(metadata, "namespace")
+                       }
+                       // Convert back to JSON
+                       newJSON, err := json.Marshal(obj)
+                       if err != nil {
+                               return nil, fmt.Errorf("failed to marshal JSON: %v", err)
+                       }
+                       nm, err := manifest.FromJSON(newJSON)
+                       if err != nil {
+                               return nil, err
+                       }
+                       manifests[idx] = nm
+               }
+       }
+
        // For anything needing a patch, apply them.
        for idx, patches := range needPatching {
                m := manifests[idx]
diff --git a/pkg/adsc/adsc.go b/pkg/adsc/adsc.go
index 186a3ac5..8be689b5 100644
--- a/pkg/adsc/adsc.go
+++ b/pkg/adsc/adsc.go
@@ -86,17 +86,12 @@ type Config struct {
        XDSRootCAFile string
        // XDSSAN is the expected SAN of the XDS server. If not set, the ProxyConfig.DiscoveryAddress is used.
        XDSSAN string
-       // InsecureSkipVerify skips client verification the server's certificate chain and host name.
-       InsecureSkipVerify bool
        // Workload defaults to 'test'
        Workload string
        // Revision for this control plane instance. We will only read configs that match this revision.
        Revision string
        // Meta includes additional metadata for the node
        Meta *pstruct.Struct
-       // IP is currently the primary key used to locate inbound configs. It is sent by client,
-       // must match a known endpoint IP. Tests can use a ServiceEntry to register fake IPs.
-       IP string
        // BackoffPolicy determines the reconnect policy. Based on MCP client.
        BackoffPolicy backoff.BackOff
 }
diff --git a/pkg/dubbo-agent/agent.go b/pkg/dubbo-agent/agent.go
index 1fb56210..517ec0bf 100644
--- a/pkg/dubbo-agent/agent.go
+++ b/pkg/dubbo-agent/agent.go
@@ -159,7 +159,7 @@ func (a *Agent) Run(ctx context.Context) (func(), error) {
                if err != nil {
                        return nil, fmt.Errorf("failed generating gRPC XDS 
bootstrap: %v", err)
                }
-               // Prepare Node for preemptive connection, but don't set it yet
+               // Prepare Node for upstream connection, but don't set it yet
                // We'll set it after status port starts and certificates are generated
                if node != nil && a.xdsProxy != nil {
                        bootstrapNode = &core.Node{
@@ -220,8 +220,8 @@ func (a *Agent) Run(ctx context.Context) (func(), error) {
                }
        }()
 
-       // Now set bootstrap node to trigger preemptive connection.
-       // This ensures preemptive connection logs appear after certificate logs.
+       // Now set bootstrap node to trigger upstream connection.
+       // This ensures upstream connection logs appear after certificate logs.
        if bootstrapNode != nil && a.xdsProxy != nil {
                a.xdsProxy.SetBootstrapNode(bootstrapNode)
        }
diff --git a/pkg/dubbo-agent/xds_proxy.go b/pkg/dubbo-agent/xds_proxy.go
index 65027aac..ef98a95f 100644
--- a/pkg/dubbo-agent/xds_proxy.go
+++ b/pkg/dubbo-agent/xds_proxy.go
@@ -20,13 +20,14 @@ package dubboagent
 import (
        "context"
        "fmt"
-       "github.com/apache/dubbo-kubernetes/dubbod/security/pkg/pki/util"
        "math"
        "net"
        "path/filepath"
        "sync"
        "time"
 
+       "github.com/apache/dubbo-kubernetes/dubbod/security/pkg/pki/util"
+
        "github.com/apache/dubbo-kubernetes/pkg/log"
 
        dubbogrpc "github.com/apache/dubbo-kubernetes/dubbod/planet/pkg/grpc"
@@ -109,10 +110,10 @@ type XdsProxy struct {
        ecdsLastNonce             atomic.String
        initialHealthRequest      *discovery.DiscoveryRequest
        initialDeltaHealthRequest *discovery.DeltaDiscoveryRequest
-       // Preemptive connection for proxyless mode
-       bootstrapNode       *core.Node
-       preemptiveConnMutex sync.RWMutex
-       preemptiveConn      *ProxyConnection
+       // Upstream connection for proxyless mode
+       bootstrapNode      *core.Node
+       proxylessConnMutex sync.RWMutex
+       proxylessConn      *ProxyConnection
 }
 
 func initXdsProxy(ia *Agent) (*XdsProxy, error) {
@@ -170,7 +171,7 @@ func initXdsProxy(ia *Agent) (*XdsProxy, error) {
                proxyLog.Warnf("XDS proxy server stopped serving on UDS: %s", 
proxy.xdsUdsPath)
        }()
 
-       // For proxyless mode, establish a preemptive connection to upstream 
using bootstrap Node
+       // For proxyless mode, establish an upstream connection using bootstrap 
Node
        // This ensures the proxy is ready even before downstream clients 
connect
        // Use a retry loop with exponential backoff to automatically reconnect 
on failures
        ia.wg.Add(1)
@@ -179,9 +180,9 @@ func initXdsProxy(ia *Agent) (*XdsProxy, error) {
                // Wait for bootstrap Node to be set (with timeout)
                // The Node is set synchronously after bootstrap file 
generation in agent.Run()
                for i := 0; i < 50; i++ {
-                       proxy.preemptiveConnMutex.RLock()
+                       proxy.proxylessConnMutex.RLock()
                        nodeReady := proxy.bootstrapNode != nil
-                       proxy.preemptiveConnMutex.RUnlock()
+                       proxy.proxylessConnMutex.RUnlock()
                        if nodeReady {
                                break
                        }
@@ -191,9 +192,9 @@ func initXdsProxy(ia *Agent) (*XdsProxy, error) {
                        case <-time.After(100 * time.Millisecond):
                        }
                }
-               proxy.preemptiveConnMutex.RLock()
+               proxy.proxylessConnMutex.RLock()
                nodeReady := proxy.bootstrapNode != nil
-               proxy.preemptiveConnMutex.RUnlock()
+               proxy.proxylessConnMutex.RUnlock()
                if !nodeReady {
                        proxyLog.Warnf("Bootstrap Node not set after 5 seconds, 
proceeding anyway")
                }
@@ -209,10 +210,10 @@ func initXdsProxy(ia *Agent) (*XdsProxy, error) {
                        }
 
                        // Establish connection
-                       connDone, err := proxy.establishPreemptiveConnection(ia)
+                       connDone, err := proxy.establishProxylessConnection(ia)
                        if err != nil {
                                // Connection failed, log and retry with 
exponential backoff
-                               proxyLog.Warnf("Failed to establish preemptive 
upstream connection: %v, retrying in %v", err, backoff)
+                               proxyLog.Warnf("Failed to establish proxyless 
upstream connection: %v, retrying in %v", err, backoff)
 
                                select {
                                case <-proxy.stopChan:
@@ -228,13 +229,13 @@ func initXdsProxy(ia *Agent) (*XdsProxy, error) {
 
                        // Connection successful, reset backoff
                        backoff = time.Second
-                       proxyLog.Infof("Preemptive upstream connection 
established successfully")
+                       proxyLog.Infof("Proxyless upstream connection connected 
successfully")
                        // Wait for connection to terminate (connDone will be 
closed when connection dies)
                        select {
                        case <-proxy.stopChan:
                                return
                        case <-connDone:
-                               proxyLog.Warnf("Preemptive connection 
terminated, will retry")
+                               proxyLog.Warnf("Proxyless connection 
terminated, will retry")
                        }
                }
        }()
@@ -421,7 +422,7 @@ func (p *XdsProxy) handleUpstreamRequest(con *ProxyConnection) {
                                continue
                        }
 
-                       // forward to istiod
+                       // forward to dubbod
                        con.sendRequest(req)
                        if !initialRequestsSent.Load() && req.TypeUrl == 
model.ListenerType {
                                // fire off an initial NDS request
@@ -724,26 +725,26 @@ func (p *XdsProxy) getTLSOptions(agent *Agent) (*dubbogrpc.TLSOptions, error) {
 }
 
 func (p *XdsProxy) SetBootstrapNode(node *core.Node) {
-       p.preemptiveConnMutex.Lock()
+       p.proxylessConnMutex.Lock()
        p.bootstrapNode = node
-       p.preemptiveConnMutex.Unlock()
+       p.proxylessConnMutex.Unlock()
 }
 
-func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, error) {
-       p.preemptiveConnMutex.Lock()
+func (p *XdsProxy) establishProxylessConnection(ia *Agent) (<-chan struct{}, error) {
+       p.proxylessConnMutex.Lock()
        node := p.bootstrapNode
        // Clean up old connection if it exists
-       if p.preemptiveConn != nil {
-               close(p.preemptiveConn.stopChan)
-               p.preemptiveConn = nil
+       if p.proxylessConn != nil {
+               close(p.proxylessConn.stopChan)
+               p.proxylessConn = nil
        }
-       p.preemptiveConnMutex.Unlock()
+       p.proxylessConnMutex.Unlock()
 
        if node == nil {
                return nil, fmt.Errorf("bootstrap node not available")
        }
 
-       proxyLog.Infof("Establishing preemptive upstream connection for 
proxyless mode with Node: %s", node.Id)
+       proxyLog.Infof("Connecting proxyless upstream connection with Node: 
%s", node.Id)
 
        ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
        defer cancel()
@@ -769,7 +770,7 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                _ = upstreamConn.Close()
                return nil, fmt.Errorf("failed to create upstream stream: %w", 
err)
        }
-       proxyLog.Infof("connected to upstream XDS server (preemptive): %s", 
p.dubbodAddress)
+       proxyLog.Infof("connected to upstream XDS server (proxyless): %s", 
p.dubbodAddress)
 
        conID := connectionNumber.Inc()
        con := &ProxyConnection{
@@ -782,9 +783,9 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                node:          node,
        }
 
-       p.preemptiveConnMutex.Lock()
-       p.preemptiveConn = con
-       p.preemptiveConnMutex.Unlock()
+       p.proxylessConnMutex.Lock()
+       p.proxylessConn = con
+       p.proxylessConnMutex.Unlock()
 
        // Close upstream connection when connection terminates and signal done 
channel
        go func() {
@@ -794,11 +795,11 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                case <-p.stopChan:
                }
                _ = upstreamConn.Close()
-               p.preemptiveConnMutex.Lock()
-               if p.preemptiveConn == con {
-                       p.preemptiveConn = nil
+               p.proxylessConnMutex.Lock()
+               if p.proxylessConn == con {
+                       p.proxylessConn = nil
                }
-               p.preemptiveConnMutex.Unlock()
+               p.proxylessConnMutex.Unlock()
                close(connDone)
        }()
 
@@ -807,14 +808,14 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                TypeUrl: model.ListenerType,
                Node:    node,
        }
-       proxyLog.Infof("preemptive connection sending initial LDS request with 
Node: %s", node.Id)
+       proxyLog.Infof("proxyless connection sending initial LDS request with 
Node: %s", node.Id)
        if err := upstream.Send(ldsReq); err != nil {
                _ = upstreamConn.Close()
-               p.preemptiveConnMutex.Lock()
-               if p.preemptiveConn == con {
-                       p.preemptiveConn = nil
+               p.proxylessConnMutex.Lock()
+               if p.proxylessConn == con {
+                       p.proxylessConn = nil
                }
-               p.preemptiveConnMutex.Unlock()
+               p.proxylessConnMutex.Unlock()
                close(connDone)
                return nil, fmt.Errorf("failed to send initial LDS request: 
%w", err)
        }
@@ -842,7 +843,7 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                                upstreamErr(con, err)
                                return
                        }
-                       proxyLog.Debugf("preemptive connection received 
response: TypeUrl=%s, Resources=%d",
+                       proxyLog.Debugf("proxyless connection received 
response: TypeUrl=%s, Resources=%d",
                                model.GetShortType(resp.TypeUrl), 
len(resp.Resources))
                        // Send ACK
                        ackReq := &discovery.DiscoveryRequest{
@@ -883,19 +884,19 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
                }
        }()
 
-       // Return immediately - connection is established and running in 
background
+       // Return immediately - connection is connected and running in 
background
        // The connDone channel will be closed when connection terminates
        return connDone, nil
 }
 
 func (p *XdsProxy) close() {
        close(p.stopChan)
-       p.preemptiveConnMutex.Lock()
-       if p.preemptiveConn != nil {
-               close(p.preemptiveConn.stopChan)
-               p.preemptiveConn = nil
+       p.proxylessConnMutex.Lock()
+       if p.proxylessConn != nil {
+               close(p.proxylessConn.stopChan)
+               p.proxylessConn = nil
        }
-       p.preemptiveConnMutex.Unlock()
+       p.proxylessConnMutex.Unlock()
        if p.downstreamGrpcServer != nil {
                p.downstreamGrpcServer.Stop()
        }
diff --git a/pkg/dubbo-agent/xds_proxy_delta.go 
b/pkg/dubbo-agent/xds_proxy_delta.go
index e5baa5d8..78964ef2 100644
--- a/pkg/dubbo-agent/xds_proxy_delta.go
+++ b/pkg/dubbo-agent/xds_proxy_delta.go
@@ -175,7 +175,7 @@ func (p *XdsProxy) handleUpstreamDeltaRequest(con 
*ProxyConnection) {
                                continue
                        }
 
-                       // forward to istiod
+                       // forward to dubbod
                        con.sendDeltaRequest(req)
                        if !initialRequestsSent.Load() && req.TypeUrl == 
model.ListenerType {
                                // fire off an initial NDS request
diff --git a/pkg/filewatcher/filewatcher.go b/pkg/filewatcher/filewatcher.go
index 8d4d51c6..9471f4bb 100644
--- a/pkg/filewatcher/filewatcher.go
+++ b/pkg/filewatcher/filewatcher.go
@@ -1,4 +1,7 @@
 /*
+ * Portions of this file are derived from the Istio project:
+ *   https://github.com/istio/istio/blob/master/pkg/filewatcher/filewatcher.go
+ *
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
  * this work for additional information regarding copyright ownership.
diff --git a/pkg/kube/kclient/client.go b/pkg/kube/kclient/client.go
index 50363027..3d83e3d4 100644
--- a/pkg/kube/kclient/client.go
+++ b/pkg/kube/kclient/client.go
@@ -210,10 +210,10 @@ func (n *informerClient[T]) List(namespace string, 
selector klabels.Selector) []
        })
 
        if err != nil {
-               log.Warnf("informerClient.List: lister returned err for 
namespace=%s: %v", namespace, err)
+               log.Debugf("informerClient.List: lister returned err for 
namespace=%s: %v", namespace, err)
        }
        if namespace == metav1.NamespaceAll {
-               log.Infof("informerClient.List: namespace=%s, total=%d, 
filtered=%d, result=%d", namespace, totalCount, filteredCount, len(res))
+               log.Debugf("informerClient.List: namespace=%s, total=%d, 
filtered=%d, result=%d", namespace, totalCount, filteredCount, len(res))
        } else if filteredCount > 0 {
                log.Debugf("informerClient.List: filtered out %d items for 
namespace=%s (total=%d, result=%d)", filteredCount, namespace, totalCount, 
len(res))
        }
@@ -311,7 +311,7 @@ func (n *informerClient[T]) AddEventHandler(h 
cache.ResourceEventHandler) cache.
                        allowed := n.filter(cast)
                        if !allowed {
                                // Log when objects are filtered out to help 
diagnose missing events
-                               log.Infof("informerClient.AddEventHandler: 
FilterFunc filtered out object %s/%s", nsStr, nameStr)
+                               log.Debugf("informerClient.AddEventHandler: 
FilterFunc filtered out object %s/%s", nsStr, nameStr)
                        } else {
                                log.Debugf("informerClient.AddEventHandler: 
FilterFunc allowing object %s/%s", nsStr, nameStr)
                        }
@@ -327,7 +327,7 @@ func (n *informerClient[T]) AddEventHandler(h 
cache.ResourceEventHandler) cache.
                                        nsStr = objWithNs.GetNamespace()
                                        nameStr = objWithNs.GetName()
                                }
-                               log.Infof("informerClient.AddEventHandler: 
OnAdd called for %s/%s", nsStr, nameStr)
+                               log.Debugf("informerClient.AddEventHandler: 
OnAdd called for %s/%s", nsStr, nameStr)
                                h.OnAdd(obj, false)
                        },
                        UpdateFunc: func(oldObj, newObj interface{}) {
@@ -339,7 +339,7 @@ func (n *informerClient[T]) AddEventHandler(h 
cache.ResourceEventHandler) cache.
                                        nsStr = objWithNs.GetNamespace()
                                        nameStr = objWithNs.GetName()
                                }
-                               log.Infof("informerClient.AddEventHandler: 
OnUpdate called for %s/%s", nsStr, nameStr)
+                               log.Debugf("informerClient.AddEventHandler: 
OnUpdate called for %s/%s", nsStr, nameStr)
                                h.OnUpdate(oldObj, newObj)
                        },
                        DeleteFunc: func(obj interface{}) {
@@ -351,7 +351,7 @@ func (n *informerClient[T]) AddEventHandler(h 
cache.ResourceEventHandler) cache.
                                        nsStr = objWithNs.GetNamespace()
                                        nameStr = objWithNs.GetName()
                                }
-                               log.Infof("informerClient.AddEventHandler: 
OnDelete called for %s/%s", nsStr, nameStr)
+                               log.Debugf("informerClient.AddEventHandler: 
OnDelete called for %s/%s", nsStr, nameStr)
                                h.OnDelete(obj)
                        },
                },
diff --git a/pkg/kube/multicluster/cluster.go b/pkg/kube/multicluster/cluster.go
index 72790127..d9cf660b 100644
--- a/pkg/kube/multicluster/cluster.go
+++ b/pkg/kube/multicluster/cluster.go
@@ -40,7 +40,7 @@ type Cluster struct {
 func (c *Cluster) HasSynced() bool {
        // It could happen when a wrong credential provide, this cluster has no 
chance to run.
        // In this case, the `initialSyncTimeout` will never be set
-       // In order not block istiod start up, check close as well.
+       // In order not to block dubbod startup, check close as well.
        if c.Closed() {
                return true
        }
diff --git a/pkg/webhooks/server/server.go b/pkg/webhooks/server/server.go
index 98faa5bd..83784c73 100644
--- a/pkg/webhooks/server/server.go
+++ b/pkg/webhooks/server/server.go
@@ -124,8 +124,6 @@ func (wh *Webhook) validate(request *kube.AdmissionRequest) 
*kube.AdmissionRespo
 
        gvk := obj.GroupVersionKind()
 
-       // "Version" is not relevant for Istio types; each version has the same 
schema. So do a lookup that does not consider
-       // version. This ensures if a new version comes out and Istiod is not 
updated, we won't reject it.
        s, exists := 
wh.schemas.FindByGroupKind(collection.FromKubernetesGVK(&gvk))
        if !exists {
                log.Infof("unrecognized type %v", 
addDryRunMessageIfNeeded(obj.GroupVersionKind().String()))
diff --git a/pkg/webhooks/webhookpatch.go b/pkg/webhooks/webhookpatch.go
index 52b5f4d9..05e55f83 100644
--- a/pkg/webhooks/webhookpatch.go
+++ b/pkg/webhooks/webhookpatch.go
@@ -114,12 +114,10 @@ func (w *WebhookCertPatcher) 
patchMutatingWebhookConfig(webhookConfigName string
        if config == nil {
                return errNotFound
        }
-       // prevents a race condition between multiple istiods when the revision 
is changed or modified
        v, ok := config.Labels["dubbo.apache.org/rev"]
        if !ok {
                return nil
        }
-       log.Infof("This is webhook label: %v", v)
 
        if v != w.revision {
                return errWrongRevision
diff --git a/pkg/xds/server.go b/pkg/xds/server.go
index dfadb39b..a2671236 100644
--- a/pkg/xds/server.go
+++ b/pkg/xds/server.go
@@ -288,8 +288,8 @@ func ShouldRespond(w Watcher, id string, request 
*discovery.DiscoveryRequest) (b
 
        previousInfo := w.GetWatchedResource(request.TypeUrl)
        // This can happen in two cases:
-       // 1. When Envoy starts for the first time, it sends an initial 
Discovery request to Istiod.
-       // 2. When Envoy reconnects to a new Istiod that does not have 
information about this typeUrl
+       // 1. When an xDS client (Envoy, gRPC xDS client, etc.) starts for the 
first time, it sends an initial Discovery request.
+       // 2. When an xDS client reconnects to a new control plane that does 
not have information about this typeUrl
        // i.e. non empty response nonce.
        // We should always respond with the current resource names.
        if previousInfo == nil {
@@ -320,11 +320,11 @@ func ShouldRespond(w Watcher, id string, request 
*discovery.DiscoveryRequest) (b
        }
 
        // If there is mismatch in the nonce, that is a case of expired/stale 
nonce.
-       // A nonce becomes stale following a newer nonce being sent to Envoy.
+       // A nonce becomes stale following a newer nonce being sent to the xDS 
client.
        // previousInfo.NonceSent can be empty if we previously had 
shouldRespond=true but didn't send any resources.
        if request.ResponseNonce != previousInfo.NonceSent {
                newResources := sets.New(request.ResourceNames...)
-               // Special-case proxyless gRPC: Envoy will send a "stale" nonce 
when it changes
+               // Special-case proxyless gRPC: xDS clients may send a "stale" 
nonce when they change
                // subscriptions (e.g., after ServiceRoute introduces subset 
clusters). Treat this
                // as a resource change rather than an ACK so the new clusters 
get a response.
                previousResourcesCopy := previousInfo.ResourceNames.Copy()
@@ -378,8 +378,8 @@ func ShouldRespond(w Watcher, id string, request 
*discovery.DiscoveryRequest) (b
                return wr
        })
 
-       // Envoy can send two DiscoveryRequests with same version and nonce.
-       // when it detects a new resource. We should respond if they change.
+       // xDS clients can send two DiscoveryRequests with same version and 
nonce
+       // when they detect a new resource. We should respond if they change.
        removed := previousResources.Difference(cur)
        added := cur.Difference(previousResources)
 
@@ -408,7 +408,7 @@ func ShouldRespond(w Watcher, id string, request 
*discovery.DiscoveryRequest) (b
                return false, emptyResourceDelta
        }
 
-       // We should always respond "alwaysRespond" marked requests to let 
Envoy finish warming
+       // We should always respond "alwaysRespond" marked requests to let xDS 
clients finish warming
        // even though Nonce match and it looks like an ACK.
        if alwaysRespond {
                log.Infof("%s: FORCE RESPONSE %s for warming.", stype, id)
diff --git a/samples/grpc-app/README.md b/samples/grpc-app/README.md
index e0c8dac5..42516e91 100644
--- a/samples/grpc-app/README.md
+++ b/samples/grpc-app/README.md
@@ -5,8 +5,8 @@ This example demonstrates how to deploy gRPC applications with 
proxyless service
 ## Overview
 
 This sample includes:
-- **Producer**: A gRPC server that receives requests (port 17070) and is 
deployed with multiple versions (v1/v2) to showcase gray release scenarios.
-- **Consumer**: A gRPC client that sends requests to the producer service and 
exposes a test server (port 17171) for driving traffic via `grpcurl`.
+- **Provider**: A gRPC server that receives requests (port 17070) and is 
deployed with multiple versions (v1/v2) to showcase gray release scenarios.
+- **Consumer**: A gRPC client that sends requests to the provider service and 
exposes a test server (port 17171) for driving traffic via `grpcurl`.
 
 Both services use native gRPC xDS clients to connect to the Dubbo control 
plane through the `dubbo-proxy` sidecar, enabling service discovery, load 
balancing, and traffic management without requiring Envoy proxy for application 
traffic.
 
@@ -38,17 +38,17 @@ kubectl port-forward -n grpc-app $(kubectl get pod -l 
app=consumer -n grpc-app -
 ```
 
 ```bash
-grpcurl -plaintext -d '{"url": 
"xds:///producer.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
+grpcurl -plaintext -d '{"url": 
"xds:///provider.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
 ```
 
 ```json
 {
   "output": [
-    "[0 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-    "[1 body] Hostname=producer-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
-    "[2 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-    "[3 body] Hostname=producer-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
-    "[4 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070"
+    "[0 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+    "[1 body] Hostname=provider-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
+    "[2 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+    "[3 body] Hostname=provider-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
+    "[4 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070"
   ]
 }
 ```
@@ -64,10 +64,10 @@ cat <<EOF | kubectl apply -f -
 apiVersion: networking.dubbo.apache.org/v1
 kind: SubsetRule
 metadata:
-  name: producer-versions
+  name: provider-versions
   namespace: grpc-app
 spec:
-  host: producer.grpc-app.svc.cluster.local
+  host: provider.grpc-app.svc.cluster.local
   subsets:
   - name: v1
     labels:
@@ -87,19 +87,19 @@ cat <<EOF | kubectl apply -f -
 apiVersion: networking.dubbo.apache.org/v1
 kind: ServiceRoute
 metadata:
-  name: producer-weights
+  name: provider-weights
   namespace: grpc-app
 spec:
   hosts:
-  - producer.grpc-app.svc.cluster.local
+  - provider.grpc-app.svc.cluster.local
   http:
   - route:
     - destination:
-        host: producer.grpc-app.svc.cluster.local
+        host: provider.grpc-app.svc.cluster.local
         subset: v1
       weight: 10
     - destination:
-        host: producer.grpc-app.svc.cluster.local
+        host: provider.grpc-app.svc.cluster.local
         subset: v2
       weight: 90
 EOF
@@ -108,7 +108,7 @@ EOF
 Now, send a set of 5 requests to verify the traffic distribution:
 
 ```bash
-grpcurl -plaintext -d '{"url": 
"xds:///producer.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
+grpcurl -plaintext -d '{"url": 
"xds:///provider.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
 ```
 
 The response should contain mostly `v2` responses, demonstrating the weighted 
traffic splitting:
@@ -116,11 +116,11 @@ The response should contain mostly `v2` responses, 
demonstrating the weighted tr
 ```json
 {
   "output": [
-    "[0 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-    "[1 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-    "[2 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-    "[3 body] Hostname=producer-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
-    "[4 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070"
+    "[0 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+    "[1 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+    "[2 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+    "[3 body] Hostname=provider-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070",
+    "[4 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070"
   ]
 }
 ```
@@ -138,10 +138,10 @@ cat <<EOF | kubectl apply -f -
 apiVersion: networking.dubbo.apache.org/v1
 kind: SubsetRule
 metadata:
-  name: producer-mtls
+  name: provider-mtls
   namespace: grpc-app
 spec:
-  host: producer.grpc-app.svc.cluster.local
+  host: provider.grpc-app.svc.cluster.local
   trafficPolicy:
     tls:
       mode: ISTIO_MUTUAL
@@ -151,7 +151,7 @@ EOF
 Now an attempt to call the server that is not yet configured for mTLS will 
fail:
 
 ```bash
-grpcurl -plaintext -d '{"url": 
"xds:///producer.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
+grpcurl -plaintext -d '{"url": 
"xds:///provider.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
 ```
 
 Expected error output:
@@ -177,7 +177,7 @@ cat <<EOF | kubectl apply -f -
 apiVersion: security.dubbo.apache.org/v1
 kind: PeerAuthentication
 metadata:
-  name: producer-mtls
+  name: provider-mtls
   namespace: grpc-app
 spec:
   mtls:
@@ -188,18 +188,18 @@ EOF
 Requests will start to succeed after applying the policy:
 
 ```bash
-grpcurl -plaintext -d '{"url": 
"xds:///producer.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
+grpcurl -plaintext -d '{"url": 
"xds:///provider.grpc-app.svc.cluster.local:7070","count": 5}' localhost:17171 
echo.EchoTestService/ForwardEcho
 ```
 
 Expected successful output:
 ```json
 {
    "output": [
-      "[0 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-      "[1 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-      "[2 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-      "[3 body] Hostname=producer-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
-      "[4 body] Hostname=producer-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070"
+      "[0 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+      "[1 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+      "[2 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+      "[3 body] Hostname=provider-v2-594b6977c8-5gw2z ServiceVersion=v2 
Namespace=grpc-app IP=192.168.219.88 ServicePort=17070",
+      "[4 body] Hostname=provider-v1-fbb7b9bd9-l8frj ServiceVersion=v1 
Namespace=grpc-app IP=192.168.219.119 ServicePort=17070"
    ]
 }
 ```
diff --git a/samples/grpc-app/grpc-app.yaml b/samples/grpc-app/grpc-app.yaml
index 6877235d..34479481 100644
--- a/samples/grpc-app/grpc-app.yaml
+++ b/samples/grpc-app/grpc-app.yaml
@@ -17,12 +17,12 @@ apiVersion: v1
 kind: Service
 metadata:
   labels:
-    app: producer
-  name: producer
+    app: provider
+  name: provider
   namespace: grpc-app
 spec:
   selector:
-    app: producer
+    app: provider
   type: ClusterIP
   ports:
     - name: grpc
@@ -32,13 +32,13 @@ spec:
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: producer-v1
+  name: provider-v1
   namespace: grpc-app
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: producer
+      app: provider
       version: v1
   template:
     metadata:
@@ -47,12 +47,12 @@ spec:
         inject.dubbo.apache.org/templates: grpc-agent
         proxy.dubbo.apache.org/config: '{"holdApplicationUntilProxyStarts": 
true}'
       labels:
-        app: producer
+        app: provider
         version: v1
     spec:
       containers:
         - name: app
-          image: mfordjody/grpc-producer:dev-debug
+          image: mfordjody/grpc-provider:dev-debug
           imagePullPolicy: Always
           ports:
             - containerPort: 17070
@@ -102,13 +102,13 @@ spec:
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: producer-v2
+  name: provider-v2
   namespace: grpc-app
 spec:
   replicas: 1
   selector:
     matchLabels:
-      app: producer
+      app: provider
       version: v2
   template:
     metadata:
@@ -117,12 +117,12 @@ spec:
         inject.dubbo.apache.org/templates: grpc-agent
         proxy.dubbo.apache.org/config: '{"holdApplicationUntilProxyStarts": 
true}'
       labels:
-        app: producer
+        app: provider
         version: v2
     spec:
       containers:
         - name: app
-          image: mfordjody/grpc-producer:dev-debug
+          image: mfordjody/grpc-provider:dev-debug
           imagePullPolicy: Always
           ports:
             - containerPort: 17070

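For readers following the renamed grpc-app sample above, here is a minimal sketch of how a proxyless gRPC client dials the provider service over the `xds:///` scheme. It assumes the standard grpc-go xDS packages (`google.golang.org/grpc/xds`, `google.golang.org/grpc/credentials/xds`) and the xDS bootstrap supplied by the injected grpc-agent; only the target name comes from the sample, the rest is illustrative and not taken from this commit.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	xdscreds "google.golang.org/grpc/credentials/xds"
	_ "google.golang.org/grpc/xds" // registers the xds:/// resolver and balancers
)

func main() {
	// xDS-aware transport credentials: use mTLS when the control plane pushes
	// security config (e.g. after the PeerAuthentication step in the README),
	// otherwise fall back to plaintext.
	creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{
		FallbackCreds: insecure.NewCredentials(),
	})
	if err != nil {
		log.Fatalf("failed to create xDS credentials: %v", err)
	}

	// The xds:/// target is resolved through the bootstrap-configured control
	// plane rather than DNS; load balancing and subset routing then follow the
	// SubsetRule/ServiceRoute resources applied in the sample.
	conn, err := grpc.NewClient("xds:///provider.grpc-app.svc.cluster.local:7070",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("failed to create client connection: %v", err)
	}
	defer conn.Close()

	// Generated service stubs (e.g. the sample's echo service client) would be
	// constructed from conn here.
}
```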