This is an automated email from the ASF dual-hosted git repository.
zhongxjian pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/dubbo-kubernetes.git
The following commit(s) were added to refs/heads/master by this push:
new a8a38e1c Update dubboctl planet integrated (#829)
a8a38e1c is described below
commit a8a38e1cd44b1306a8f5ed9c3934bda4539a200d
Author: mfordjody <[email protected]>
AuthorDate: Sun Nov 30 12:32:03 2025 +0800
Update dubboctl planet integrated (#829)
---
dubbod/planet/pkg/bootstrap/server.go | 2 +-
dubbod/planet/pkg/model/endpointshards.go | 16 +++------
dubbod/planet/pkg/model/push_context.go | 2 --
dubbod/planet/pkg/model/service.go | 8 ++---
dubbod/planet/pkg/model/subsetrule.go | 2 +-
dubbod/planet/pkg/networking/grpcgen/cds.go | 20 ++++-------
dubbod/planet/pkg/networking/grpcgen/grpcgen.go | 1 -
dubbod/planet/pkg/networking/grpcgen/lds.go | 9 ++---
dubbod/planet/pkg/networking/grpcgen/rds.go | 2 +-
.../kube/controller/endpointslice.go | 25 ++++++-------
dubbod/planet/pkg/xds/ads.go | 7 ++--
dubbod/planet/pkg/xds/cds.go | 1 -
dubbod/planet/pkg/xds/discovery.go | 6 ++--
dubbod/planet/pkg/xds/eds.go | 41 +++++++---------------
.../planet/pkg/xds/endpoints/endpoint_builder.go | 16 ++-------
dubbod/planet/pkg/xds/xdsgen.go | 28 +++++++--------
manifests/profiles/default.yaml | 3 +-
operator/pkg/component/component.go | 21 ++++++++---
operator/pkg/install/installer.go | 3 ++
pkg/dubbo-agent/xds_proxy.go | 1 -
pkg/dubbo-agent/xds_proxy_delta.go | 1 -
pkg/xds/server.go | 2 +-
tests/grpc-app/README.md | 4 +--
tests/grpc-app/consumer/main.go | 26 +++++++-------
.../{dockerfile.producer => dockerfile.provider} | 10 +++---
tests/grpc-app/{producer => provider}/main.go | 0
26 files changed, 107 insertions(+), 150 deletions(-)
diff --git a/dubbod/planet/pkg/bootstrap/server.go b/dubbod/planet/pkg/bootstrap/server.go
index d567726f..1c9ff2c0 100644
--- a/dubbod/planet/pkg/bootstrap/server.go
+++ b/dubbod/planet/pkg/bootstrap/server.go
@@ -482,7 +482,7 @@ func (s *Server) initRegistryEventHandlers() {
// Log the config change
log.Infof("configHandler: %s event for %s/%s/%s", event,
configKey.Kind, configKey.Namespace, configKey.Name)
- // CRITICAL: Some configs
(SubsetRule/ServiceRoute/PeerAuthentication) require Full push to ensure
+ // Some configs (SubsetRule/ServiceRoute/PeerAuthentication)
require Full push to ensure
// PushContext is re-initialized and configuration is reloaded.
// PeerAuthentication must rebuild AuthenticationPolicies to
enable STRICT mTLS on LDS; without
// a full push the cached PushContext would continue serving
plaintext listeners.
diff --git a/dubbod/planet/pkg/model/endpointshards.go b/dubbod/planet/pkg/model/endpointshards.go
index 2d98afdc..7721329c 100644
--- a/dubbod/planet/pkg/model/endpointshards.go
+++ b/dubbod/planet/pkg/model/endpointshards.go
@@ -58,17 +58,14 @@ type EndpointIndex struct {
}
func endpointUpdateRequiresPush(oldDubboEndpoints []*DubboEndpoint, incomingEndpoints []*DubboEndpoint) ([]*DubboEndpoint, bool) {
- // CRITICAL FIX: If old endpoints are empty (nil or empty slice), and we have new endpoints, we must push
- // This ensures that when endpoints become available after being empty, clients receive the update
- // This is critical for proxyless gRPC - when endpoints become available, clients must be notified to prevent "weighted-target: no targets to pick from" errors
+ // If old endpoints are empty (nil or empty slice), and we have new endpoints, we must push
// Note: len() for nil slices is defined as zero, so we can use len() directly
oldWasEmpty := len(oldDubboEndpoints) == 0
newIsEmpty := len(incomingEndpoints) == 0
if oldWasEmpty {
// If there are no old endpoints, we should push with incoming endpoints as there is nothing to compare.
- // CRITICAL: Even if new endpoints are unhealthy, we must push to notify clients that endpoints are now available
+ // Even if new endpoints are unhealthy, we must push to notify clients that endpoints are now available
// The client will handle unhealthy endpoints appropriately (e.g., retry, circuit breaker)
if !newIsEmpty {
return incomingEndpoints, true
@@ -76,8 +73,7 @@ func endpointUpdateRequiresPush(oldDubboEndpoints []*DubboEndpoint, incomingEndp
// If both old and new are empty, no push needed
return incomingEndpoints, false
}
- // CRITICAL FIX: If old endpoints exist but new endpoints are empty, we must push
- // This ensures that when endpoints become unavailable, clients receive empty ClusterLoadAssignment
+ // If old endpoints exist but new endpoints are empty, we must push
if newIsEmpty {
return incomingEndpoints, true
}
@@ -170,7 +166,7 @@ func (e *EndpointIndex) UpdateServiceEndpoints(shard ShardKey, hostname string,
ep.Lock()
defer ep.Unlock()
oldDubboEndpoints := ep.Shards[shard]
- // CRITICAL: Check if this is a transition from empty to non-empty BEFORE calling endpointUpdateRequiresPush
+ // Check if this is a transition from empty to non-empty BEFORE calling endpointUpdateRequiresPush
// This ensures we always push when endpoints become available, even if they're unhealthy
// Note: len() for nil slices is defined as zero, so we can use len() directly
oldWasEmpty := len(oldDubboEndpoints) == 0
@@ -179,15 +175,13 @@ func (e *EndpointIndex) UpdateServiceEndpoints(shard ShardKey, hostname string,
newDubboEndpoints, needPush := endpointUpdateRequiresPush(oldDubboEndpoints, dubboEndpoints)
- // CRITICAL FIX: If endpoints transition from empty to non-empty, we MUST push
- // This is essential for proxyless gRPC - clients need to know endpoints are now available
+ // If endpoints transition from empty to non-empty, we MUST push
// even if they're initially unhealthy (client will handle retries/circuit breaking)
if transitionFromEmptyToNonEmpty {
needPush = true
log.Debugf("UpdateServiceEndpoints: service=%s, shard=%v, endpoints transitioned from empty to non-empty, forcing push", hostname, shard)
}
- // CRITICAL: Log endpoint update details for debugging
if logPushType {
oldHealthyCount := 0
oldUnhealthyCount := 0
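The push rules spelled out in the removed and added comments above boil down to a small decision table. A self-contained Go sketch (simplified, hypothetical types; the real endpointUpdateRequiresPush works on []*DubboEndpoint and also diffs the endpoints themselves):

    package main

    import "fmt"

    // needsPush mirrors the empty/non-empty transitions discussed above:
    // push when endpoints appear or disappear; skip only when both sides are empty.
    func needsPush(old, incoming []string) bool {
        oldWasEmpty := len(old) == 0 // len() of a nil slice is 0
        newIsEmpty := len(incoming) == 0
        if oldWasEmpty && newIsEmpty {
            return false // nothing existed before and nothing exists now
        }
        if oldWasEmpty || newIsEmpty {
            return true // endpoints became available, or became unavailable
        }
        return true // both non-empty: the real code diffs old vs. incoming here (health, ports, labels)
    }

    func main() {
        fmt.Println(needsPush(nil, []string{"10.0.0.1:17070"})) // true: empty -> non-empty
        fmt.Println(needsPush([]string{"10.0.0.1:17070"}, nil)) // true: non-empty -> empty
        fmt.Println(needsPush(nil, nil))                        // false: still empty
    }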
diff --git a/dubbod/planet/pkg/model/push_context.go b/dubbod/planet/pkg/model/push_context.go
index 572f035b..625b12ab 100644
--- a/dubbod/planet/pkg/model/push_context.go
+++ b/dubbod/planet/pkg/model/push_context.go
@@ -984,11 +984,9 @@ func firstDestinationRule(csr *consolidatedSubRules, hostname host.Name) *networ
}
if rules := csr.specificSubRules[hostname]; len(rules) > 0 {
log.Infof("firstDestinationRule: found %d rules for hostname %s", len(rules), hostname)
- // CRITICAL: According to Istio behavior, multiple DestinationRules should be merged into one.
// The first rule should contain the merged result if merge was successful.
// However, if merge failed (e.g., EnableEnhancedSubsetRuleMerge is disabled),
// we need to check all rules and prefer the one with TLS configuration.
- // This ensures that when multiple SubsetRules exist (e.g., one with subsets, one with TLS),
// we return the one that has TLS if available, or the first one otherwise.
var bestRule *networking.DestinationRule
var bestRuleHasTLS bool
diff --git a/dubbod/planet/pkg/model/service.go b/dubbod/planet/pkg/model/service.go
index 96725810..86b6935b 100644
--- a/dubbod/planet/pkg/model/service.go
+++ b/dubbod/planet/pkg/model/service.go
@@ -256,13 +256,13 @@ func (ep *DubboEndpoint) Equals(other *DubboEndpoint) bool {
return false
}
- // CRITICAL FIX: Compare HealthStatus to detect health status changes
+ // Compare HealthStatus to detect health status changes
// This is necessary to trigger EDS push when endpoints become healthy/unhealthy
if ep.HealthStatus != other.HealthStatus {
return false
}
- // CRITICAL FIX: Compare EndpointPort to detect port changes
+ // Compare EndpointPort to detect port changes
if ep.EndpointPort != other.EndpointPort {
return false
}
@@ -418,11 +418,11 @@ func (s *Service) Equals(other *Service) bool {
}
func (s *Service) SupportsUnhealthyEndpoints() bool {
- // CRITICAL FIX: Return PublishNotReadyAddresses to support publishing not-ready endpoints
+ // Return PublishNotReadyAddresses to support publishing not-ready endpoints
// This allows endpoints with Ready=false to be included in EDS if the service has
// publishNotReadyAddresses=true, which is useful for services that need to receive
// traffic even before they are fully ready (e.g., during startup).
- // CRITICAL FIX: Check if s is nil before accessing Attributes
+ // Check if s is nil before accessing Attributes
if s == nil {
return false
}
diff --git a/dubbod/planet/pkg/model/subsetrule.go b/dubbod/planet/pkg/model/subsetrule.go
index 1b829318..4cc70ac2 100644
--- a/dubbod/planet/pkg/model/subsetrule.go
+++ b/dubbod/planet/pkg/model/subsetrule.go
@@ -113,7 +113,7 @@ func (ps *PushContext) mergeSubsetRule(p *consolidatedSubRules, subRuleConfig co
} else {
// Merge TrafficPolicy fields, with TLS settings from the latest rule taking precedence
if rule.TrafficPolicy.Tls != nil {
- // CRITICAL: TLS settings from the latest rule always win (ISTIO_MUTUAL/DUBBO_MUTUAL)
+ // TLS settings from the latest rule always win (ISTIO_MUTUAL/DUBBO_MUTUAL)
mergedRule.TrafficPolicy.Tls = rule.TrafficPolicy.Tls
log.Infof("mergeSubsetRule: updated TLS settings in merged TrafficPolicy for host %s (mode: %v)", resolvedHost, rule.TrafficPolicy.Tls.Mode)
diff --git a/dubbod/planet/pkg/networking/grpcgen/cds.go b/dubbod/planet/pkg/networking/grpcgen/cds.go
index 835c27a5..b5b13a99 100644
--- a/dubbod/planet/pkg/networking/grpcgen/cds.go
+++ b/dubbod/planet/pkg/networking/grpcgen/cds.go
@@ -119,8 +119,7 @@ func (b *clusterBuilder) build() []*cluster.Cluster {
var defaultCluster *cluster.Cluster
defaultRequested := b.filter == nil || b.filter.Contains(b.defaultClusterName)
- // CRITICAL: Check if DestinationRule has TLS configuration before generating default cluster
- // According to Istio's proxyless gRPC implementation:
+ // Check if DestinationRule has TLS configuration before generating default cluster
// - DestinationRule with ISTIO_MUTUAL configures CLIENT-SIDE (outbound) mTLS
// - If DestinationRule specifies ISTIO_MUTUAL, we MUST generate default cluster and apply TLS
// even if it's not explicitly requested, so that clients can use mTLS when connecting
@@ -142,15 +141,12 @@ func (b *clusterBuilder) build() []*cluster.Cluster {
// Generate default cluster if requested OR if DestinationRule has ISTIO_MUTUAL TLS
if defaultRequested || hasTLSInDR {
defaultCluster = b.edsCluster(b.defaultClusterName)
- // CRITICAL: For gRPC proxyless, we need to set CommonLbConfig to handle endpoint health status
- // Following Istio's implementation, we should include UNHEALTHY and DRAINING endpoints
+ // For gRPC proxyless, we need to set CommonLbConfig to handle endpoint health status
// in OverrideHostStatus so that clients can use them when healthy endpoints are not available.
- // This prevents "weighted-target: no targets to pick from" errors when all endpoints are unhealthy.
// The client will prioritize HEALTHY endpoints but can fall back to UNHEALTHY/DRAINING if needed.
if defaultCluster.CommonLbConfig == nil {
defaultCluster.CommonLbConfig = &cluster.Cluster_CommonLbConfig{}
}
- // CRITICAL FIX: Following Istio's implementation, always include UNHEALTHY and DRAINING
// in OverrideHostStatus. This allows clients to use unhealthy endpoints when healthy ones
// are not available, preventing "weighted-target: no targets to pick from" errors.
// The client will still prioritize HEALTHY endpoints, but can fall back to others.
@@ -200,7 +196,7 @@ func (b *clusterBuilder) edsCluster(name string) *cluster.Cluster {
},
},
},
- // CRITICAL: For gRPC proxyless, we need to set LbPolicy to ROUND_ROBIN
+ // For gRPC proxyless, we need to set LbPolicy to ROUND_ROBIN
// This is the default load balancing policy for gRPC xDS clients
LbPolicy: cluster.Cluster_ROUND_ROBIN,
}
@@ -242,7 +238,7 @@ func (b *clusterBuilder) applyDestinationRule(defaultCluster *cluster.Cluster) (
log.Infof("applyDestinationRule: found DestinationRule for %s/%s with %d subsets, defaultCluster requested=%v, hasTLS=%v", b.svc.Attributes.Namespace, b.hostname, len(dr.Subsets), defaultCluster != nil, hasTLS)
- // CRITICAL: Apply TLS to default cluster if it exists and doesn't have TransportSocket yet
+ // Apply TLS to default cluster if it exists and doesn't have TransportSocket yet
// This ensures that default cluster gets TLS from the top-level TrafficPolicy in DestinationRule
// When SubsetRule sets ISTIO_MUTUAL, inbound listener enforces STRICT mTLS, so outbound must also use TLS
// NOTE: We re-check hasTLS here because firstDestinationRule might have returned a different rule
@@ -262,7 +258,7 @@ func (b *clusterBuilder) applyDestinationRule(defaultCluster *cluster.Cluster) (
log.Debugf("applyDestinationRule: skipping TLS for default cluster %s (DestinationRule has no TrafficPolicy or TLS)", b.defaultClusterName)
}
} else if defaultCluster == nil && hasTLS {
- // CRITICAL: If default cluster was not generated in build() but DestinationRule has TLS,
+ // If default cluster was not generated in build() but DestinationRule has TLS,
// we need to generate it here to ensure TLS is applied
// This can happen if build() checked the first rule (without TLS) but applyDestinationRule
// found a different rule (with TLS) via firstDestinationRule's improved logic
@@ -313,8 +309,7 @@ func (b *clusterBuilder) applyDestinationRule(defaultCluster *cluster.Cluster) (
}
clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, subset.Name, b.hostname, b.portNum)
- // CRITICAL: Always generate subset clusters if default cluster is requested
- // This is essential for RDS WeightedCluster to work correctly
+ // Always generate subset clusters if default cluster is requested
shouldGenerate := true
if b.filter != nil && !b.filter.Contains(clusterName) {
// Subset cluster not explicitly requested, but generate it if default cluster was requested
@@ -388,7 +383,6 @@ func (b *clusterBuilder) applyTLSForCluster(c *cluster.Cluster, subset *networki
return
}
- // Log SNI configuration for debugging
sni := tlsContext.Sni
if sni == "" {
log.Warnf("applyTLSForCluster: SNI is empty for cluster %s,
this may cause TLS handshake failures", c.Name)
@@ -415,7 +409,7 @@ func (b *clusterBuilder) buildUpstreamTLSContext(c
*cluster.Cluster, tlsSettings
CommonTlsContext: common,
Sni: tlsSettings.GetSni(),
}
- // CRITICAL: SNI must be the service hostname, not the cluster name
+ // SNI must be the service hostname, not the cluster name
// Cluster name format: outbound|port|subset|hostname
// We need to extract the hostname from the cluster name or use the service hostname
if tlsContext.Sni == "" {
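For reference, the CommonLbConfig/OverrideHostStatus shape described in the cds.go comments above can be written with the upstream go-control-plane v3 types roughly as follows. This is an illustrative sketch, not the repository's clusterBuilder; makeEdsCluster is a hypothetical helper name:

    package grpcgenexample

    import (
        clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
        corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    )

    // makeEdsCluster builds an EDS cluster whose OverrideHostStatus admits
    // UNHEALTHY/DRAINING hosts, so a proxyless gRPC client can fall back to them
    // instead of failing with "weighted-target: no targets to pick from".
    func makeEdsCluster(name string) *clusterv3.Cluster {
        return &clusterv3.Cluster{
            Name:                 name,
            ClusterDiscoveryType: &clusterv3.Cluster_Type{Type: clusterv3.Cluster_EDS},
            LbPolicy:             clusterv3.Cluster_ROUND_ROBIN, // default policy for gRPC xDS clients
            CommonLbConfig: &clusterv3.Cluster_CommonLbConfig{
                OverrideHostStatus: &corev3.HealthStatusSet{
                    Statuses: []corev3.HealthStatus{
                        corev3.HealthStatus_HEALTHY,
                        corev3.HealthStatus_UNHEALTHY,
                        corev3.HealthStatus_DRAINING,
                    },
                },
            },
        }
    }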
diff --git a/dubbod/planet/pkg/networking/grpcgen/grpcgen.go b/dubbod/planet/pkg/networking/grpcgen/grpcgen.go
index f09cd3d4..97ec45df 100644
--- a/dubbod/planet/pkg/networking/grpcgen/grpcgen.go
+++ b/dubbod/planet/pkg/networking/grpcgen/grpcgen.go
@@ -40,7 +40,6 @@ func (g *GrpcConfigGenerator) Generate(proxy *model.Proxy, w *model.WatchedResou
switch w.TypeUrl {
case v3.ListenerType:
// Pass requested names to BuildListeners to ensure consistent behavior
- // This is critical for gRPC proxyless clients to avoid resource count oscillation
// When requestedNames is empty (wildcard), BuildListeners generates all listeners
// When requestedNames is non-empty, BuildListeners only generates requested listeners
return g.BuildListeners(proxy, req.Push, requestedNames), model.DefaultXdsLogDetails, nil
diff --git a/dubbod/planet/pkg/networking/grpcgen/lds.go b/dubbod/planet/pkg/networking/grpcgen/lds.go
index 015681ec..44f9509a 100644
--- a/dubbod/planet/pkg/networking/grpcgen/lds.go
+++ b/dubbod/planet/pkg/networking/grpcgen/lds.go
@@ -66,7 +66,7 @@ func (g *GrpcConfigGenerator) BuildListeners(node *model.Proxy, push *model.Push
log.Debugf("BuildListeners: specific request for %s, requested listeners: %v", node.ID, names)
}
- // CRITICAL: If names is provided (non-empty), we MUST only generate those specific listeners
+ // If names is provided (non-empty), we MUST only generate those specific listeners
// This prevents the push loop where client requests 1 listener but receives 14
// The filter ensures we only generate requested listeners
filter := newListenerNameFilter(names, node)
@@ -78,7 +78,6 @@ func (g *GrpcConfigGenerator) BuildListeners(node *model.Proxy, push *model.Push
resp = append(resp, buildInboundListeners(node, push, filter.inboundNames())...)
// Final safety check: ensure we only return resources that were requested
- // This is critical for preventing push loops in proxyless gRPC
if len(names) > 0 {
requestedSet := sets.New(names...)
filtered := make(model.Resources, 0, len(resp))
@@ -199,7 +198,6 @@ func buildInboundListeners(node *model.Proxy, push *model.PushContext, names []s
continue
}
- // According to Istio's proxyless gRPC implementation:
// - DestinationRule with ISTIO_MUTUAL only configures CLIENT-SIDE (outbound) mTLS
// - PeerAuthentication with STRICT configures SERVER-SIDE (inbound) mTLS
// Both are REQUIRED for mTLS to work. Server-side mTLS should ONLY be controlled by PeerAuthentication.
@@ -473,7 +471,7 @@ func buildOutboundListeners(node *model.Proxy, push *model.PushContext, filter l
// Build route name (same format as cluster name) for RDS
routeName := clusterName
- // CRITICAL: For gRPC proxyless, outbound listeners MUST use ApiListener with RDS
+ // For gRPC proxyless, outbound listeners MUST use ApiListener with RDS
// This is the correct pattern used by Istio for gRPC xDS clients
// Using FilterChain with inline RouteConfig causes the gRPC client to remain in IDLE state
hcm := &hcmv3.HttpConnectionManager{
@@ -500,7 +498,7 @@ func buildOutboundListeners(node *model.Proxy, push *model.PushContext, filter l
}
// Build outbound listener with ApiListener (Istio pattern)
- // CRITICAL: gRPC xDS clients expect ApiListener for outbound, not FilterChain
+ // gRPC xDS clients expect ApiListener for outbound, not FilterChain
ll := &listener.Listener{
Name: fullListenerName,
Address: &core.Address{Address:
&core.Address_SocketAddress{
@@ -522,7 +520,6 @@ func buildOutboundListeners(node *model.Proxy, push
*model.PushContext, filter l
ll.AdditionalAddresses =
util.BuildAdditionalAddresses(extrAddresses, uint32(port))
}
- // CRITICAL: Log listener details for debugging gRPC
xDS client connection issues
log.Debugf("buildOutboundListeners: created ApiListener
name=%s, address=%s:%d, routeName=%s",
ll.Name, ll.Address.GetSocketAddress().Address,
ll.Address.GetSocketAddress().GetPortValue(), routeName)
diff --git a/dubbod/planet/pkg/networking/grpcgen/rds.go b/dubbod/planet/pkg/networking/grpcgen/rds.go
index 11dcf120..7d4051eb 100644
--- a/dubbod/planet/pkg/networking/grpcgen/rds.go
+++ b/dubbod/planet/pkg/networking/grpcgen/rds.go
@@ -66,7 +66,7 @@ func buildHTTPRoute(node *model.Proxy, push *model.PushContext, routeName string
}
// Build VirtualHost Domains for outbound route
- // CRITICAL: Domains must match the xDS URL hostname for gRPC xDS client
+ // Domains must match the xDS URL hostname for gRPC xDS client
hostStr := string(hostname)
domains := []string{
fmt.Sprintf("%s:%d", hostStr, parsedPort), // FQDN with
port - MOST SPECIFIC
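A rough go-control-plane sketch of the domain rule noted above (illustrative only; buildRouteSketch is a hypothetical helper, and the real buildHTTPRoute adds more domain variants and match conditions):

    package rdsexample

    import (
        "fmt"

        routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
    )

    // buildRouteSketch shows why the VirtualHost domains must cover the authority a
    // gRPC client dials (e.g. "provider.grpc-app.svc.cluster.local:17070"), most specific first.
    func buildRouteSketch(routeName, hostname string, port uint32, clusterName string) *routev3.RouteConfiguration {
        return &routev3.RouteConfiguration{
            Name: routeName,
            VirtualHosts: []*routev3.VirtualHost{{
                Name: hostname,
                Domains: []string{
                    fmt.Sprintf("%s:%d", hostname, port), // FQDN with port - most specific
                    hostname,                             // FQDN without port
                },
                Routes: []*routev3.Route{{
                    Match: &routev3.RouteMatch{PathSpecifier: &routev3.RouteMatch_Prefix{Prefix: "/"}},
                    Action: &routev3.Route_Route{
                        Route: &routev3.RouteAction{
                            ClusterSpecifier: &routev3.RouteAction_Cluster{Cluster: clusterName},
                        },
                    },
                }},
            }},
        }
    }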
diff --git a/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go b/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
index a96316ee..059800e9 100644
--- a/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
+++ b/dubbod/planet/pkg/serviceregistry/kube/controller/endpointslice.go
@@ -199,7 +199,6 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
// If we start using them for other features, this can be adjusted.
healthStatus := endpointHealthStatus(svc, e)
- // CRITICAL: Log health status for debugging (use Warningf to ensure visibility)
if len(e.Addresses) > 0 {
ready := "nil"
terminating := "nil"
@@ -222,7 +221,7 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
var overrideAddresses []string
builder := esc.c.NewEndpointBuilder(pod)
// EDS and ServiceEntry use name for service port - ADS will need to map to numbers.
- // CRITICAL FIX: Always use Service.Port.Name as source of truth for ServicePortName
+ // Always use Service.Port.Name as source of truth for ServicePortName
// - Use EndpointSlice.Port.Port (service port number) as endpointPort
// - Always resolve portName from Service by matching port number (Service is source of truth)
// - This ensures ep.ServicePortName matches svcPort.Name in BuildClusterLoadAssignment
@@ -241,7 +240,7 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
var targetPortNum int32
var portName string
- // CRITICAL FIX: EndpointSlice.Port.Port might be targetPort or service port
+ // EndpointSlice.Port.Port might be targetPort or service port
// We need to find the matching ServicePort and resolve:
// 1. servicePortNum (Service.Port) - used for matching in BuildClusterLoadAssignment
// 2. targetPortNum (Service.TargetPort) - used as EndpointPort
@@ -314,9 +313,8 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
}
if !matched {
- // CRITICAL FIX: If we can't match by Service, try to find ServicePort by port number only
+ // If we can't match by Service, try to find ServicePort by port number only
// This handles cases where EndpointSlice.Port.Name doesn't match but port number does
- // This is critical for ensuring portName matches Service.Port.Name
for _, kubePort := range kubeSvc.Spec.Ports {
if int32(kubePort.Port) == epSlicePortNum {
// Found by port number, use Service.Port.Name
@@ -352,7 +350,7 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
log.Warnf("updateEndpointCacheForSlice: failed to match EndpointSlice.Port (portNum=%d, portName='%s') with Service %s, using EndpointSlice values (WARNING: portName may not match Service.Port.Name)", epSlicePortNum, epSlicePortName, svcNamespacedName.Name)
// Fallback: use EndpointSlice values
- // CRITICAL: This should rarely happen, but if it does, portName may not match Service.Port.Name
+ // This should rarely happen, but if it does, portName may not match Service.Port.Name
// which will cause endpoints to be filtered in BuildClusterLoadAssignment
servicePortNum = epSlicePortNum
targetPortNum = epSlicePortNum
@@ -381,11 +379,9 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
svcNamespacedName.Name, epSlicePortNum, epSlicePortName)
}
- // CRITICAL: Log endpoint creation with actual values for debugging
log.Debugf("updateEndpointCacheForSlice: creating endpoint for service %s (address=%s, servicePortNum=%d, targetPortNum=%d, portName='%s', hostname=%s, kubeSvc=%v)", svcNamespacedName.Name, a, servicePortNum, targetPortNum, portName, hostName, kubeSvc != nil)
- // CRITICAL FIX: According to Istio's implementation and Kubernetes EndpointSlice spec:
// - EndpointSlice.Port.Port should be the Service Port (not targetPort)
// - But IstioEndpoint.EndpointPort should be the targetPort (container port)
// - ServicePortName should match Service.Port.Name for filtering in BuildClusterLoadAssignment
@@ -393,10 +389,10 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
// We use targetPortNum as EndpointPort because that's what the container actually listens on.
// The servicePortNum is used for matching in BuildClusterLoadAssignment via portName.
//
- // CRITICAL: svc.SupportsUnhealthyEndpoints() returns true if Service has publishNotReadyAddresses=true
+ // svc.SupportsUnhealthyEndpoints() returns true if Service has publishNotReadyAddresses=true
// This allows endpoints with Ready=false to be included in EDS, which is useful for services
// that need to receive traffic even before they are fully ready (e.g., during startup).
- // CRITICAL FIX: Check if svc is nil before calling SupportsUnhealthyEndpoints
+ // Check if svc is nil before calling SupportsUnhealthyEndpoints
var supportsUnhealthy bool
if svc != nil {
supportsUnhealthy = svc.SupportsUnhealthyEndpoints()
@@ -406,7 +402,7 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
}
dubboEndpoint := builder.buildDubboEndpoint(a, targetPortNum, portName, nil, healthStatus, supportsUnhealthy)
- // CRITICAL: Log if endpoint is unhealthy and service doesn't support it
+ // Log if endpoint is unhealthy and service doesn't support it
if healthStatus == model.UnHealthy && !supportsUnhealthy {
if svc != nil {
log.Debugf("updateEndpointCacheForSlice: endpoint %s is unhealthy (HealthStatus=%v) but service %s does not support unhealthy endpoints (PublishNotReadyAddresses=%v). Endpoint will be filtered in EDS.",
@@ -417,7 +413,7 @@ func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Na
}
}
- // CRITICAL: Verify the endpoint was created with correct ServicePortName
+ // Verify the endpoint was created with correct ServicePortName
if dubboEndpoint != nil {
log.Debugf("updateEndpointCacheForSlice: created endpoint with ServicePortName='%s', EndpointPort=%d, address=%s", dubboEndpoint.ServicePortName, dubboEndpoint.EndpointPort, dubboEndpoint.FirstAddressOrNil())
@@ -458,7 +454,7 @@ func (e *endpointSliceCache) update(hostname host.Name, slice string, endpoints
}
func endpointHealthStatus(svc *model.Service, e v1.Endpoint) model.HealthStatus {
- // CRITICAL FIX: Correct health status logic
+ // Correct health status logic
// 1. If Ready is nil or true, endpoint is healthy
// 2. If Ready is false, check if it's terminating
// 3. If terminating, mark as Terminating
@@ -470,7 +466,7 @@ func endpointHealthStatus(svc *model.Service, e v1.Endpoint) model.HealthStatus
}
// Ready is false, check if it's terminating
- // CRITICAL FIX: Terminating should be checked only if it's not nil AND true
+ // Terminating should be checked only if it's not nil AND true
// If Terminating is nil, it means the endpoint is not terminating (it's just not ready)
if e.Conditions.Terminating != nil && *e.Conditions.Terminating {
return model.Terminating
@@ -548,7 +544,6 @@ func (esc *endpointSliceController) pushEDS(hostnames []host.Name, namespace str
for _, hostname := range hostnames {
endpoints := esc.endpointCache.get(hostname)
- // CRITICAL: Log endpoint registration for debugging
log.Infof("pushEDS: registering %d endpoints for service %s in
namespace %s (shard=%v)",
len(endpoints), string(hostname), namespace, shard)
if len(endpoints) > 0 {
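The health-status rules in endpointHealthStatus above can be restated as a small standalone sketch against the Kubernetes discovery/v1 types (local status constants stand in for the repository's model.HealthStatus values):

    package healthexample

    import discoveryv1 "k8s.io/api/discovery/v1"

    type HealthStatus int

    const (
        Healthy HealthStatus = iota
        UnHealthy
        Terminating
    )

    // endpointHealthStatusSketch: Ready nil or true => Healthy;
    // Ready false and Terminating true => Terminating;
    // otherwise the endpoint is simply not ready yet => UnHealthy.
    func endpointHealthStatusSketch(e discoveryv1.Endpoint) HealthStatus {
        if e.Conditions.Ready == nil || *e.Conditions.Ready {
            return Healthy
        }
        if e.Conditions.Terminating != nil && *e.Conditions.Terminating {
            return Terminating
        }
        return UnHealthy
    }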
diff --git a/dubbod/planet/pkg/xds/ads.go b/dubbod/planet/pkg/xds/ads.go
index 8a598f72..66d3dfc3 100644
--- a/dubbod/planet/pkg/xds/ads.go
+++ b/dubbod/planet/pkg/xds/ads.go
@@ -137,7 +137,6 @@ func (s *DiscoveryServer) initConnection(node *core.Node, con *Connection, ident
}
// Trigger a ConfigUpdate to ensure ConnectedEndpoints count is updated in subsequent XDS: Pushing logs.
- // This ensures that when the next global push happens, the count reflects the newly established connection.
// The push will be debounced, so this is safe to call.
s.ConfigUpdate(&model.PushRequest{
Full: true,
@@ -317,7 +316,7 @@ func (s *DiscoveryServer) processRequest(req *discovery.DiscoveryRequest, con *C
shouldRespond, delta := xds.ShouldRespond(con.proxy, con.ID(), req)
- // CRITICAL: Log NEW requests (will respond) at INFO level so every grpcurl request is visible
+ // Log NEW requests (will respond) at INFO level so every grpcurl request is visible
// This ensures every grpcurl request triggers visible xDS logs in control plane
// Format: "LDS: REQ node-xxx resources:1 nonce:abc123 [resource1, resource2] (will respond)"
resourceNamesStr := ""
@@ -347,7 +346,7 @@ func (s *DiscoveryServer) processRequest(req *discovery.DiscoveryRequest, con *C
return nil
}
- // CRITICAL FIX: For proxyless gRPC, if client sends wildcard (empty ResourceNames) after receiving specific resources,
+ // For proxyless gRPC, if client sends wildcard (empty ResourceNames) after receiving specific resources,
// this is likely an ACK and we should NOT push all resources again
// Check if this is a wildcard request after specific resources were sent
watchedResource := con.proxy.GetWatchedResource(req.TypeUrl)
@@ -388,7 +387,7 @@ func (s *DiscoveryServer) processRequest(req *discovery.DiscoveryRequest, con *C
Forced: false, // Only recompute ServiceTargets when ConfigsUpdated indicates service changes
}
- // CRITICAL FIX: Get WatchedResource after ShouldRespond has created it
+ // Get WatchedResource after ShouldRespond has created it
// ShouldRespond may have created a new WatchedResource for first-time requests
w := con.proxy.GetWatchedResource(req.TypeUrl)
if w == nil {
diff --git a/dubbod/planet/pkg/xds/cds.go b/dubbod/planet/pkg/xds/cds.go
index 1cc35be7..6df220b2 100644
--- a/dubbod/planet/pkg/xds/cds.go
+++ b/dubbod/planet/pkg/xds/cds.go
@@ -34,7 +34,6 @@ func cdsNeedsPush(req *model.PushRequest, proxy *model.Proxy) (*model.PushReques
return req, res
}
- // CRITICAL: According to Istio proxyless gRPC behavior, when SubsetRule (DestinationRule) is created/updated
// with TLS configuration (ISTIO_MUTUAL), CDS must be pushed to update cluster TransportSocket.
// Even if req.Full is false, we need to check if SubsetRule was updated, as it affects cluster TLS config.
if req != nil && req.ConfigsUpdated != nil {
diff --git a/dubbod/planet/pkg/xds/discovery.go b/dubbod/planet/pkg/xds/discovery.go
index 56545ccd..de867d96 100644
--- a/dubbod/planet/pkg/xds/discovery.go
+++ b/dubbod/planet/pkg/xds/discovery.go
@@ -314,7 +314,6 @@ func (s *DiscoveryServer) dropCacheForRequest(req *model.PushRequest) {
log.Debugf("dropCacheForRequest: cleared all cache (Forced=true)")
} else {
// Otherwise, just clear the updated configs
- // CRITICAL: Log cache clear for debugging
if len(req.ConfigsUpdated) > 0 {
configs := make([]string, 0, len(req.ConfigsUpdated))
for ckey := range req.ConfigsUpdated {
@@ -388,12 +387,11 @@ func (s *DiscoveryServer) ProxyUpdate(clusterID cluster.ID, ip string) {
func (s *DiscoveryServer) EDSUpdate(shard model.ShardKey, serviceName string, namespace string, dubboEndpoints []*model.DubboEndpoint) {
pushType := s.Env.EndpointIndex.UpdateServiceEndpoints(shard, serviceName, namespace, dubboEndpoints, true)
- // CRITICAL FIX: Always push EDS updates when endpoints change state
+ // Always push EDS updates when endpoints change state
// This ensures clients receive updates when:
// 1. Endpoints become available (from empty to non-empty)
// 2. Endpoints become unavailable (from non-empty to empty)
// 3. Endpoint health status changes
- // This is critical for proxyless gRPC to prevent "weighted-target: no
targets to pick from" errors
if pushType == model.IncrementalPush || pushType == model.FullPush {
log.Infof("EDSUpdate: service %s/%s triggering %v push
(endpoints=%d)", namespace, serviceName, pushType, len(dubboEndpoints))
s.ConfigUpdate(&model.PushRequest{
@@ -402,7 +400,7 @@ func (s *DiscoveryServer) EDSUpdate(shard model.ShardKey,
serviceName string, na
Reason:
model.NewReasonStats(model.EndpointUpdate),
})
} else if pushType == model.NoPush {
- // CRITICAL: Even when UpdateServiceEndpoints returns NoPush, we may still need to push
+ // Even when UpdateServiceEndpoints returns NoPush, we may still need to push
// This happens when:
// 1. All old endpoints were unhealthy and new endpoints are also unhealthy (health status didn't change)
// 2. But we still need to notify clients about the current state
diff --git a/dubbod/planet/pkg/xds/eds.go b/dubbod/planet/pkg/xds/eds.go
index ee040058..659e51fc 100644
--- a/dubbod/planet/pkg/xds/eds.go
+++ b/dubbod/planet/pkg/xds/eds.go
@@ -46,9 +46,7 @@ func edsNeedsPush(req *model.PushRequest, proxy *model.Proxy) bool {
if res, ok := xdsNeedsPush(req, proxy); ok {
return res
}
- // CRITICAL FIX: For proxy requests (ProxyRequest reason), we should always push EDS
- // This ensures that when a client explicitly requests EDS resources, we generate them
- // even if there are no config updates. This is essential for proxyless gRPC clients
+ // For proxy requests (ProxyRequest reason), we should always push EDS
// that request specific EDS resources.
if req.Reason != nil && req.Reason.Has(model.ProxyRequest) {
log.Debugf("edsNeedsPush: ProxyRequest detected, pushing EDS even without config updates")
@@ -63,7 +61,6 @@ func edsNeedsPush(req *model.PushRequest, proxy *model.Proxy) bool {
}
func (eds *EdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
- // CRITICAL: Log EDS generation attempt for debugging
log.Debugf("EDS Generate: proxy=%s, watchedResources=%d, req.Full=%v, req.ConfigsUpdated=%v", proxy.ID, len(w.ResourceNames), req.Full, len(req.ConfigsUpdated))
@@ -110,16 +107,15 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
regenerated := 0
for clusterName := range w.ResourceNames {
hostname := model.ParseSubsetKeyHostname(clusterName)
- // CRITICAL FIX: Always check if this service was updated, even if edsUpdatedServices is nil
+ // Always check if this service was updated, even if edsUpdatedServices is nil
// For incremental pushes, we need to check ConfigsUpdated directly to ensure cache is cleared
serviceWasUpdated := false
if req.ConfigsUpdated != nil {
- // CRITICAL: Log all ConfigsUpdated entries for debugging hostname matching
configKeys := make([]string, 0, len(req.ConfigsUpdated))
for ckey := range req.ConfigsUpdated {
configKeys = append(configKeys, fmt.Sprintf("%s/%s/%s", ckey.Kind, ckey.Namespace, ckey.Name))
if ckey.Kind == kind.ServiceEntry {
- // CRITICAL: Match ConfigsUpdated.Name with hostname
+ // Match ConfigsUpdated.Name with hostname
// Both should be FQDN (e.g., "consumer.grpc-app.svc.cluster.local")
if ckey.Name == hostname {
serviceWasUpdated = true
@@ -130,15 +126,14 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
}
}
- // CRITICAL FIX: For proxyless gRPC, we MUST always process all watched clusters
+ // For proxyless gRPC, we MUST always process all watched clusters
// This ensures clients receive EDS updates even when endpoints become available or unavailable
// For non-proxyless (Istio behavior), we skip clusters that are not in edsUpdatedServices for incremental pushes
if edsUpdatedServices != nil {
if _, ok := edsUpdatedServices[hostname]; !ok {
// Cluster was not in edsUpdatedServices
if !serviceWasUpdated {
- // CRITICAL: For proxyless gRPC, always process all clusters to ensure EDS is up-to-date
- // This is essential because proxyless gRPC clients need to know endpoint state changes
+ // For proxyless gRPC, always process all clusters to ensure EDS is up-to-date
// even if the service wasn't explicitly updated in this push
if proxy.IsProxylessGrpc() {
log.Debugf("buildEndpoints: proxyless gRPC, processing cluster %s even though not in edsUpdatedServices (hostname=%s)", clusterName, hostname)
@@ -160,13 +155,13 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
continue
}
- // CRITICAL FIX: For proxyless gRPC, we must always regenerate EDS when serviceWasUpdated is true
+ // For proxyless gRPC, we must always regenerate EDS when serviceWasUpdated is true
// to ensure empty endpoints are sent when they become unavailable, and endpoints are sent when available.
// For incremental pushes (Full=false), we must always regenerate EDS
// if ConfigsUpdated contains this service, because endpoint health status may have changed.
// The cache may contain stale empty endpoints from when the endpoint was unhealthy.
// For full pushes, we can use cache if the service was not updated.
- // CRITICAL: If this is an incremental push and ConfigsUpdated contains any ServiceEntry,
+ // If this is an incremental push and ConfigsUpdated contains any ServiceEntry,
// we must check if it matches this hostname. If it does, force regeneration.
shouldRegenerate := serviceWasUpdated
if !shouldRegenerate && !req.Full && req.ConfigsUpdated != nil {
@@ -180,9 +175,7 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
}
}
- // CRITICAL: For proxyless gRPC, if serviceWasUpdated is true, we must regenerate
- // even if cache exists, to ensure endpoints are always up-to-date
- // This is critical for preventing "weighted-target: no targets to pick from" errors
+ // For proxyless gRPC, if serviceWasUpdated is true, we must regenerate
if shouldRegenerate && proxy.IsProxylessGrpc() {
log.Debugf("buildEndpoints: proxyless gRPC, forcing regeneration for cluster %s (hostname=%s, serviceWasUpdated=%v)", clusterName, hostname, serviceWasUpdated)
}
@@ -203,7 +196,6 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
// 1. A valid ClusterLoadAssignment with endpoints, or
// 2. An empty ClusterLoadAssignment (via buildEmptyClusterLoadAssignment)
// The empty ClusterLoadAssignment has an explicit empty Endpoints list,
- // which is critical for proxyless gRPC clients to know the endpoint state
// and prevent "weighted-target: no targets to pick from" errors.
// The nil check below is defensive programming, but should never be true in practice.
if l == nil {
@@ -217,7 +209,6 @@ func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy, req *model.PushReque
}
regenerated++
- // CRITICAL: Log EDS push details for proxyless gRPC to help diagnose "weighted-target: no targets to pick from" errors
if len(l.Endpoints) == 0 {
empty++
if proxy.IsProxylessGrpc() {
@@ -262,15 +253,14 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
for clusterName := range w.ResourceNames {
hostname := model.ParseSubsetKeyHostname(clusterName)
- // CRITICAL FIX: For proxyless gRPC, we MUST always process all watched clusters
+ // For proxyless gRPC, we MUST always process all watched clusters
// This ensures clients receive EDS updates even when endpoints become available or unavailable
// For non-proxyless (Istio behavior), we skip clusters that are not in edsUpdatedServices for incremental pushes
serviceWasUpdated := false
if len(edsUpdatedServices) > 0 {
if _, ok := edsUpdatedServices[hostname]; !ok {
// Cluster was not in edsUpdatedServices
- // CRITICAL: For proxyless gRPC, always process all clusters to ensure EDS is up-to-date
- // This is essential because proxyless gRPC clients need to know endpoint state changes
+ // For proxyless gRPC, always process all clusters to ensure EDS is up-to-date
// even if the service wasn't explicitly updated in this push
if proxy.IsProxylessGrpc() {
log.Debugf("buildDeltaEndpoints: proxyless gRPC, processing cluster %s even though not in edsUpdatedServices (hostname=%s)", clusterName, hostname)
@@ -299,7 +289,7 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
continue
}
- // CRITICAL FIX: For proxyless gRPC, we must always regenerate EDS when serviceWasUpdated is true
+ // For proxyless gRPC, we must always regenerate EDS when serviceWasUpdated is true
// to ensure empty endpoints are sent when they become unavailable, and endpoints are sent when available.
// For incremental pushes (Full=false), we must always regenerate EDS
// if ConfigsUpdated contains this service, because endpoint health status may have changed.
@@ -316,9 +306,7 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
}
}
- // CRITICAL: For proxyless gRPC, if serviceWasUpdated is true, we must regenerate
- // even if cache exists, to ensure endpoints are always up-to-date
- // This is critical for preventing "weighted-target: no targets to pick from" errors
+ // For proxyless gRPC, if serviceWasUpdated is true, we must regenerate
if shouldRegenerate && proxy.IsProxylessGrpc() {
log.Debugf("buildDeltaEndpoints: proxyless gRPC, forcing regeneration for cluster %s (hostname=%s, serviceWasUpdated=%v)", clusterName, hostname, serviceWasUpdated)
}
@@ -339,7 +327,6 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
// 1. A valid ClusterLoadAssignment with endpoints, or
// 2. An empty ClusterLoadAssignment (via buildEmptyClusterLoadAssignment)
// The empty ClusterLoadAssignment has an explicit empty Endpoints list,
- // which is critical for proxyless gRPC clients to know the endpoint state
// and prevent "weighted-target: no targets to pick from" errors.
// The nil check below is defensive programming, but should never be true in practice.
if l == nil {
@@ -353,7 +340,6 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
}
regenerated++
- // CRITICAL: Log EDS push details for proxyless gRPC to help diagnose "weighted-target: no targets to pick from" errors
if len(l.Endpoints) == 0 {
empty++
if proxy.IsProxylessGrpc() {
@@ -372,8 +358,7 @@ func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy, req *model.Push
}
}
- // CRITICAL FIX: Always send ClusterLoadAssignment, even if empty
- // This is essential for proxyless gRPC - clients need to know when endpoints are unavailable
+ // Always send ClusterLoadAssignment, even if empty
// Removing the cluster from the response causes "weighted-target: no targets to pick from" errors
// because the client never receives an update indicating endpoints are empty
resource := &discovery.Resource{
diff --git a/dubbod/planet/pkg/xds/endpoints/endpoint_builder.go b/dubbod/planet/pkg/xds/endpoints/endpoint_builder.go
index e18b9190..1ffab400 100644
--- a/dubbod/planet/pkg/xds/endpoints/endpoint_builder.go
+++ b/dubbod/planet/pkg/xds/endpoints/endpoint_builder.go
@@ -85,7 +85,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
}
// Get endpoints from the endpoint index
- // CRITICAL: Log all available services in EndpointIndex for debugging
allServices := endpointIndex.AllServices()
if len(allServices) > 0 {
log.Infof("BuildClusterLoadAssignment: EndpointIndex contains
%d services: %v", len(allServices), allServices)
@@ -95,7 +94,6 @@ func (b *EndpointBuilder)
BuildClusterLoadAssignment(endpointIndex *model.Endpoi
shards, ok := endpointIndex.ShardsForService(string(b.hostname),
b.service.Attributes.Namespace)
if !ok {
- // CRITICAL: Log at INFO level for proxyless gRPC to help
diagnose "weighted-target: no targets to pick from" errors
// Also log what services ARE available in the namespace
servicesInNamespace :=
endpointIndex.ServicesInNamespace(b.service.Attributes.Namespace)
log.Infof("BuildClusterLoadAssignment: no shards found for
service %s in namespace %s (cluster=%s, port=%d, svcPort.Name='%s',
svcPort.Port=%d). "+
@@ -105,7 +103,7 @@ func (b *EndpointBuilder)
BuildClusterLoadAssignment(endpointIndex *model.Endpoi
return buildEmptyClusterLoadAssignment(b.clusterName)
}
- // CRITICAL: Log shards info before processing
+ // Log shards info before processing
shards.RLock()
shardCount := len(shards.Shards)
totalEndpointsInShards := 0
@@ -138,7 +136,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
var unhealthyCount int
var buildFailedCount int
- // CRITICAL: Log all endpoint ServicePortNames for debugging port name matching issues
allServicePortNames := make(map[string]int)
for _, eps := range shards.Shards {
for _, ep := range eps {
@@ -158,7 +155,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
for _, ep := range eps {
totalEndpoints++
// Filter by port name
- // CRITICAL: According to Istio's implementation, we must match ServicePortName exactly
// However, if ServicePortName is empty, we should still include the endpoint if there's only one port
// This handles cases where EndpointSlice doesn't have port name but Service does
if ep.ServicePortName != svcPort.Name {
@@ -181,7 +177,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
continue
}
- // CRITICAL FIX: Following Istio's implementation, we should ALWAYS include endpoints in EDS,
// regardless of their health status. The client will decide whether to use them based on
// OverrideHostStatus in the Cluster configuration.
//
@@ -189,7 +184,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
// we should still include them in EDS but mark them as UNHEALTHY. The client's OverrideHostStatus
// will determine if they can be used.
//
- // This is critical for proxyless gRPC - even if endpoints are unhealthy, they should be
// included in EDS so the client knows they exist and can attempt to connect to them.
// The client will handle connection failures appropriately.
//
@@ -213,7 +207,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
}
if len(lbEndpoints) == 0 {
- // CRITICAL: Log at WARN level for proxyless gRPC to help diagnose "weighted-target: no targets to pick from" errors
logLevel := log.Debugf
// For proxyless gRPC, log empty endpoints at INFO level to help diagnose connection issues
// This helps identify when endpoints are not available vs when they're filtered out
@@ -228,7 +221,6 @@ func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.Endpoi
}
// Create LocalityLbEndpoints with empty locality (default)
- // CRITICAL: Log endpoint health status for debugging
healthyCount := 0
unhealthyInLbCount := 0
drainingCount := 0
@@ -324,8 +316,6 @@ func (b *EndpointBuilder) buildLbEndpoint(ep *model.DubboEndpoint) *endpoint.LbE
}
func buildEmptyClusterLoadAssignment(clusterName string) *endpoint.ClusterLoadAssignment {
- // CRITICAL FIX: Following Istio's pattern, empty ClusterLoadAssignment should have empty Endpoints list
- // This ensures gRPC proxyless clients receive the update and clear their endpoint cache,
// preventing "weighted-target: no targets to pick from" errors
return &endpoint.ClusterLoadAssignment{
ClusterName: clusterName,
@@ -340,7 +330,7 @@ func (b *EndpointBuilder) Cacheable() bool {
// Key implements model.XdsCacheEntry
func (b *EndpointBuilder) Key() any {
- // CRITICAL FIX: EDS cache expects uint64 key, not string
+ // EDS cache expects uint64 key, not string
// Hash the cluster name to uint64 to match the cache type
return xxhash.Sum64String(b.clusterName)
}
@@ -352,7 +342,7 @@ func (b *EndpointBuilder) Type() string {
// DependentConfigs implements model.XdsCacheEntry
func (b *EndpointBuilder) DependentConfigs() []model.ConfigHash {
- // CRITICAL FIX: Return ServiceEntry ConfigHash so that EDS cache can be properly cleared
+ // Return ServiceEntry ConfigHash so that EDS cache can be properly cleared
// when endpoints are updated. Without this, cache.Clear() cannot find and remove stale EDS entries.
if b.service == nil {
return nil
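A tiny sketch of the uint64 cache-key note above, assuming the same xxhash dependency (clusterKey is an illustrative stand-in for the repository's EndpointBuilder, whose Key() returns any):

    package cacheexample

    import "github.com/cespare/xxhash/v2"

    // The EDS cache is keyed by uint64, so the cluster name is hashed rather than
    // used directly as a string key.
    type clusterKey struct{ clusterName string }

    func (k clusterKey) Key() uint64 {
        return xxhash.Sum64String(k.clusterName)
    }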
diff --git a/dubbod/planet/pkg/xds/xdsgen.go b/dubbod/planet/pkg/xds/xdsgen.go
index 54e7f1aa..715f300e 100644
--- a/dubbod/planet/pkg/xds/xdsgen.go
+++ b/dubbod/planet/pkg/xds/xdsgen.go
@@ -77,7 +77,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
return nil
}
- // CRITICAL FIX: For proxyless gRPC, handle wildcard (empty ResourceNames) requests correctly
+ // For proxyless gRPC, handle wildcard (empty ResourceNames) requests correctly
// When client sends empty ResourceNames after receiving specific resources, it's likely an ACK
// We should NOT generate all resources, but instead return the last sent resources
// However, for initial wildcard requests, we need to extract resource names from parent resources
@@ -93,7 +93,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
// We'll populate this from the last sent resources after generation
log.Debugf("pushXds: proxyless gRPC wildcard request with NonceSent=%s, will use last sent resources", w.NonceSent)
} else if len(w.ResourceNames) == 0 && w.NonceSent == "" {
- // CRITICAL FIX: Initial wildcard request - need to extract resource names from parent resources
+ // Initial wildcard request - need to extract resource names from parent resources
// For CDS: extract cluster names from LDS
// For EDS: extract cluster names from CDS
if w.TypeUrl == v3.ClusterType {
@@ -155,7 +155,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
}
}
} else if w.TypeUrl == v3.RouteType {
- // CRITICAL FIX: Extract route names from LDS response for RDS wildcard requests
+ // Extract route names from LDS response for RDS wildcard requests
// RDS is not a wildcard type, so when client sends empty ResourceNames,
// we need to extract route names from LDS listeners that reference RDS
ldsWatched := con.proxy.GetWatchedResource(v3.ListenerType)
@@ -230,7 +230,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
return err
}
- // CRITICAL FIX: For proxyless gRPC wildcard requests with previous NonceSent, return last sent resources
+ // For proxyless gRPC wildcard requests with previous NonceSent, return last sent resources
if useLastSentResources && res == nil {
// This is a wildcard ACK - client is acknowledging previous push
// We should NOT push again, as the client already has the resources
@@ -243,7 +243,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
return nil
}
- // CRITICAL FIX: For proxyless gRPC, filter resources to only include requested ones
+ // For proxyless gRPC, filter resources to only include requested ones
// This prevents the push loop where client requests 1 resource but receives 13/14
var filteredRes model.Resources
if con.proxy.IsProxylessGrpc() {
@@ -261,7 +261,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
info += " filtered:" + strconv.Itoa(len(res)-len(filteredRes))
res = filteredRes
}
- // CRITICAL: If filtering resulted in 0 resources but client requested specific resources,
+ // If filtering resulted in 0 resources but client requested specific resources,
// this means the requested resources don't exist. Don't send empty response to avoid loop.
// Instead, log and return nil to prevent push.
if len(res) == 0 && len(requestedResourceNames) > 0 {
@@ -276,7 +276,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
}
}
- // CRITICAL: Never send empty response for proxyless gRPC - this causes push loops
+ // Never send empty response for proxyless gRPC - this causes push loops
// If we have no resources to send, return nil instead of sending empty response
if len(res) == 0 {
log.Debugf("pushXds: no resources to send for %s (proxy: %s), skipping push", w.TypeUrl, con.proxy.ID)
@@ -301,8 +301,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
return err
}
- // CRITICAL FIX: Update NonceSent after successfully sending the response
- // This is essential for tracking which nonce was sent and preventing push loops
+ // Update NonceSent after successfully sending the response
con.proxy.UpdateWatchedResource(w.TypeUrl, func(wr *model.WatchedResource) *model.WatchedResource {
if wr == nil {
return nil
@@ -341,7 +340,6 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
case !req.Full:
default:
// Log format matches Istio: "LDS: PUSH for node:xxx resources:1 size:342B"
- // CRITICAL: Always log resource names for better debugging, especially for proxyless gRPC
resourceNamesStr := ""
if len(res) > 0 {
if len(res) <= 10 {
@@ -366,7 +364,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
util.ByteCount(ResourceSize(res)), info, resourceNamesStr)
}
- // CRITICAL FIX: For proxyless gRPC, after pushing LDS with outbound listeners,
+ // For proxyless gRPC, after pushing LDS with outbound listeners,
// automatically trigger CDS and RDS push for the referenced clusters and routes
// ONLY if this is a direct request push (not a push from pushConnection which would cause loops)
// Only auto-push if CDS/RDS is not already being watched by the client (client will request it naturally)
@@ -405,8 +403,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
}
}
- // CRITICAL FIX: Auto-push RDS for referenced routes
- // This is essential for gRPC proxyless - client needs RDS to route traffic correctly
+ // Auto-push RDS for referenced routes
if len(routeNames) > 0 {
rdsWatched := con.proxy.GetWatchedResource(v3.RouteType)
// Only auto-push RDS if client hasn't already requested it
@@ -474,7 +471,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
}
}
- // CRITICAL FIX: For proxyless gRPC, after pushing CDS with EDS clusters,
+ // For proxyless gRPC, after pushing CDS with EDS clusters,
// we should NOT automatically push EDS before the client requests it.
// The gRPC xDS client will automatically request EDS after receiving CDS with EDS clusters.
// If we push EDS before the client requests it, the client will receive the response
@@ -490,7 +487,6 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
edsWatched := con.proxy.GetWatchedResource(v3.EndpointType)
if edsWatched == nil {
// EDS not watched yet, create watched resource with cluster names
- // This ensures that when the client requests EDS, we know which clusters to send
con.proxy.NewWatchedResource(v3.EndpointType, edsClusterNames)
log.Debugf("pushXds: CDS push completed, created EDS watched resource for clusters: %v (waiting for client request)", edsClusterNames)
} else {
@@ -527,7 +523,7 @@ func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req
log.Debugf("pushXds: CDS push completed, EDS clusters already watched: %v", edsClusterNames)
}
}
- // CRITICAL: Do NOT push EDS here - wait for the client to request it naturally
+ // Do NOT push EDS here - wait for the client to request it naturally
// The gRPC xDS client will automatically request EDS after receiving CDS with EDS clusters
// Pushing EDS before the client requests it causes "no state exists" warnings
}
diff --git a/manifests/profiles/default.yaml b/manifests/profiles/default.yaml
index 94ff4627..4c768b73 100644
--- a/manifests/profiles/default.yaml
+++ b/manifests/profiles/default.yaml
@@ -35,7 +35,8 @@ spec:
base: {}
nacos:
enabled: true
- zookeeper: {}
+ zookeeper:
+ enabled: false
diff --git a/operator/pkg/component/component.go b/operator/pkg/component/component.go
index 9405229b..3d3ed7e5 100644
--- a/operator/pkg/component/component.go
+++ b/operator/pkg/component/component.go
@@ -32,6 +32,7 @@ const (
NacosRegisterComponentName Name = "Nacos"
ZookeeperRegisterComponentName Name = "Zookeeper"
AdminComponentName Name = "Admin"
+ PlanetDiscoveryComponentName Name = "Planet"
)
type Component struct {
@@ -71,6 +72,16 @@ var AllComponents = []Component{
HelmSubDir: "admin",
HelmValuesTreeRoot: "admin",
},
+ {
+ UserFacingName: PlanetDiscoveryComponentName,
+ SpecName: "planet",
+ ResourceType: "Deployment",
+ ResourceName: "dubbod",
+ ContainerName: "dubbo-discovery",
+ Default: true,
+ HelmSubDir: "dubbo-control/dubbo-discovery",
+ HelmValuesTreeRoot: "planet",
+ },
{
UserFacingName: NacosRegisterComponentName,
SpecName: "nacos",
@@ -99,13 +110,15 @@ var (
AdminComponentName: "Admin Dashboard",
NacosRegisterComponentName: "Nacos Register Plane",
ZookeeperRegisterComponentName: "Zookeeper Register Plane",
+ PlanetDiscoveryComponentName: "Dubbo Control Plane",
}
Icons = map[Name]string{
- BaseComponentName: "",
- NacosRegisterComponentName: "",
- ZookeeperRegisterComponentName: "",
- AdminComponentName: "",
+ BaseComponentName: "🔮",
+ NacosRegisterComponentName: "🔨",
+ ZookeeperRegisterComponentName: "🔧️",
+ AdminComponentName: "🔭",
+ PlanetDiscoveryComponentName: "🪐",
}
)
diff --git a/operator/pkg/install/installer.go b/operator/pkg/install/installer.go
index 9b74c0e9..7cb360af 100644
--- a/operator/pkg/install/installer.go
+++ b/operator/pkg/install/installer.go
@@ -250,8 +250,11 @@ func (i Installer) prune(manifests []manifest.ManifestSet) error {
var componentDependencies = map[component.Name][]component.Name{
component.BaseComponentName: {
+ component.PlanetDiscoveryComponentName,
component.NacosRegisterComponentName,
component.ZookeeperRegisterComponentName,
+ },
+ component.PlanetDiscoveryComponentName: {
component.AdminComponentName,
},
component.NacosRegisterComponentName: {},
diff --git a/pkg/dubbo-agent/xds_proxy.go b/pkg/dubbo-agent/xds_proxy.go
index 9e98e613..65027aac 100644
--- a/pkg/dubbo-agent/xds_proxy.go
+++ b/pkg/dubbo-agent/xds_proxy.go
@@ -803,7 +803,6 @@ func (p *XdsProxy) establishPreemptiveConnection(ia *Agent) (<-chan struct{}, er
}()
// Send initial LDS request with bootstrap Node to initialize connection
- // This is critical for planet-discovery to recognize the connection and count it in ConnectedEndpoints
ldsReq := &discovery.DiscoveryRequest{
TypeUrl: model.ListenerType,
Node: node,
diff --git a/pkg/dubbo-agent/xds_proxy_delta.go b/pkg/dubbo-agent/xds_proxy_delta.go
index 3c974da5..e5baa5d8 100644
--- a/pkg/dubbo-agent/xds_proxy_delta.go
+++ b/pkg/dubbo-agent/xds_proxy_delta.go
@@ -312,7 +312,6 @@ func (p *XdsProxy) handleUpstreamDeltaResponse(con
*ProxyConnection) {
}
// Forward all delta responses to downstream (gRPC
client)
- // This is critical for proxyless gRPC clients to
receive XDS configuration
if err := con.downstreamDeltas.Send(resp); err != nil {
proxyLog.Errorf("delta connection #%d failed to
send response to downstream: %v", con.conID, err)
downstreamErr(con, err)
diff --git a/pkg/xds/server.go b/pkg/xds/server.go
index bf5e126b..dfadb39b 100644
--- a/pkg/xds/server.go
+++ b/pkg/xds/server.go
@@ -383,7 +383,7 @@ func ShouldRespond(w Watcher, id string, request
*discovery.DiscoveryRequest) (b
removed := previousResources.Difference(cur)
added := cur.Difference(previousResources)
- // CRITICAL FIX: For proxyless gRPC, if client sends wildcard (empty
ResourceNames) after receiving specific resources,
+ // For proxyless gRPC, if client sends wildcard (empty ResourceNames)
after receiving specific resources,
// this is likely an ACK and we should NOT push all resources again
// Check if this is a wildcard request after specific resources were
sent
if len(request.ResourceNames) == 0 && len(previousResources) > 0 &&
previousInfo.NonceSent != "" {
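
The heuristic in this hunk reduces to a small predicate: a wildcard request arriving after specific resources were already sent under an outstanding nonce is treated as an ACK and not re-pushed. A minimal sketch with simplified state (not the real Watcher/WatchedResource types):

    package main

    import "fmt"

    type watchState struct {
        resourcesSent []string // resource names from the last response
        nonceSent     string   // nonce of the last response
    }

    // shouldRespond returns false for the wildcard-after-specific case, which
    // is most likely an ACK of the previous push.
    func shouldRespond(prev watchState, requestedNames []string) bool {
        if len(requestedNames) == 0 && len(prev.resourcesSent) > 0 && prev.nonceSent != "" {
            return false
        }
        return true
    }

    func main() {
        prev := watchState{resourcesSent: []string{"provider.default.svc.cluster.local:17070"}, nonceSent: "42"}
        fmt.Println(shouldRespond(prev, nil))                         // false: wildcard ACK
        fmt.Println(shouldRespond(prev, []string{"another-cluster"})) // true: new subscription
    }
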
diff --git a/tests/grpc-app/README.md b/tests/grpc-app/README.md
index f8f9098f..a88ecf5f 100644
--- a/tests/grpc-app/README.md
+++ b/tests/grpc-app/README.md
@@ -4,8 +4,8 @@ This is a test example for gRPC proxyless service mesh based on
[Istio's blog po
## Architecture
-- **Producer**: gRPC server with xDS support (port 17070). This service is
deployed with multiple versions (v1/v2) to demonstrate gray
release/traffic-splitting scenarios and exposes gRPC reflection so `grpcurl`
can query it directly.
-- **Consumer**: gRPC client with xDS support + test server (port 17171). This
component drives load toward the producer service for automated tests.
+- **Provider**: gRPC server with xDS support (port 17070). This service is
deployed with multiple versions (v1/v2) to demonstrate gray
release/traffic-splitting scenarios and exposes gRPC reflection so `grpcurl`
can query it directly.
+- **Consumer**: gRPC client with xDS support + test server (port 17171). This
component drives load toward the provider service for automated tests.
Both services use `dubbo-proxy` sidecar as an xDS proxy to connect to the
control plane. The sidecar runs an xDS proxy server that listens on a Unix
Domain Socket (UDS) at `/etc/dubbo/proxy/XDS`. The gRPC applications connect to
this xDS proxy via the UDS socket using the `GRPC_XDS_BOOTSTRAP` environment
variable.
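
As a reference for the setup the README describes, a hedged sketch of how a proxyless client like the consumer typically dials through an xDS bootstrap: the blank xds import registers the xds:/// resolver, GRPC_XDS_BOOTSTRAP points at a bootstrap file (the path below is illustrative, only the UDS socket location comes from the README), and the target host is likewise illustrative.

    package main

    import (
        "log"
        "os"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        xdscreds "google.golang.org/grpc/credentials/xds"
        _ "google.golang.org/grpc/xds" // registers the xds:/// resolver and balancer
    )

    func main() {
        // Illustrative bootstrap path; in the pods this is written by dubbo-proxy.
        os.Setenv("GRPC_XDS_BOOTSTRAP", "/etc/dubbo/proxy/grpc-bootstrap.json")

        // xDS-provided security with a plaintext fallback, mirroring the consumer.
        creds, err := xdscreds.NewClientCredentials(xdscreds.ClientOptions{
            FallbackCreds: insecure.NewCredentials(),
        })
        if err != nil {
            log.Fatalf("xds credentials: %v", err)
        }

        conn, err := grpc.NewClient("xds:///provider.default.svc.cluster.local:17070",
            grpc.WithTransportCredentials(creds))
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }
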
diff --git a/tests/grpc-app/consumer/main.go b/tests/grpc-app/consumer/main.go
index a9916f3a..8b527d6d 100644
--- a/tests/grpc-app/consumer/main.go
+++ b/tests/grpc-app/consumer/main.go
@@ -319,7 +319,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
}
}
- // CRITICAL: Reuse connections to avoid creating new xDS connections
for each RPC call
+ // Reuse connections to avoid creating new xDS connections for each RPC
call
// This prevents the RDS request loop issue and ensures stable
connection state
s.connMutex.RLock()
cached, exists := s.connCache[req.Url]
@@ -329,11 +329,10 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
}
s.connMutex.RUnlock()
- // CRITICAL: Check if cached connection is still valid and not too old.
+ // Check if cached connection is still valid and not too old.
// When xDS config changes (e.g., TLS is added/removed), gRPC xDS
client should update connections,
// but if the connection was established before xDS config was
received, it may use old configuration.
// To ensure we use the latest xDS config, we clear connections older
than 10 seconds.
- // This ensures that when SubsetRule or PeerAuthentication is
created/updated, connections are
// rebuilt quickly to use the new configuration.
const maxConnectionAge = 10 * time.Second
if exists && conn != nil {
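
The two hunks above describe a cache-and-expire pattern: reuse connections per target URL, but drop anything older than roughly ten seconds so a fresh dial picks up the current xDS config. A minimal sketch with hypothetical names (entry, connCache) rather than the consumer's actual fields:

    package main

    import (
        "sync"
        "time"
    )

    type entry struct {
        conn      interface{} // stands in for *grpc.ClientConn
        createdAt time.Time
    }

    type connCache struct {
        mu      sync.RWMutex
        entries map[string]*entry
        maxAge  time.Duration // e.g. 10 * time.Second, so config changes are picked up quickly
    }

    // get returns a cached connection only if it is younger than maxAge; stale
    // entries are dropped so the next dial observes the current xDS config.
    func (c *connCache) get(url string) (interface{}, bool) {
        c.mu.RLock()
        e, ok := c.entries[url]
        c.mu.RUnlock()
        if !ok {
            return nil, false
        }
        if time.Since(e.createdAt) > c.maxAge {
            c.mu.Lock()
            delete(c.entries, url)
            c.mu.Unlock()
            return nil, false
        }
        return e.conn, true
    }

    func main() {
        c := &connCache{entries: map[string]*entry{}, maxAge: 10 * time.Second}
        _, _ = c.get("xds:///provider.default.svc.cluster.local:17070")
    }
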
@@ -367,7 +366,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
// Double-check after acquiring write lock
if cached, exists = s.connCache[req.Url]; !exists || cached ==
nil || cached.conn == nil {
conn = nil
- // CRITICAL: When TLS is configured (SubsetRule
ISTIO_MUTUAL), gRPC xDS client needs
+ // When TLS is configured (SubsetRule ISTIO_MUTUAL),
gRPC xDS client needs
// to fetch certificates from CertificateProvider. The
CertificateProvider uses file_watcher
// to read certificate files. If the files are not
ready or CertificateProvider is not
// initialized, certificate fetching will timeout.
@@ -393,7 +392,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
// Dial with xDS URL - use background context, not the
request context
// The request context might timeout before xDS
configuration is received
- // CRITICAL: When TLS is configured (SubsetRule
ISTIO_MUTUAL), gRPC xDS client needs
+ // When TLS is configured (SubsetRule ISTIO_MUTUAL),
gRPC xDS client needs
// to fetch certificates from CertificateProvider. This
may take time, especially on
// first connection. We use a longer timeout context to
allow certificate fetching.
log.Printf("ForwardEcho: creating new connection for
%s...", req.Url)
@@ -422,14 +421,14 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
initialState := conn.GetState()
log.Printf("ForwardEcho: initial connection state: %v", initialState)
- // CRITICAL: Even if connection is READY, we need to verify it's still
valid
+ // Even if connection is READY, we need to verify it's still valid
// because xDS configuration may have changed (e.g., from plaintext to
TLS)
// and the cached connection might be using old configuration.
// gRPC xDS client should automatically update connections, but if the
connection
// was established before xDS config was received, it might be using
FallbackCreds (plaintext).
// We'll proceed with RPC calls, but if they fail with TLS/plaintext
mismatch errors,
// we'll clear the cache and retry.
- // CRITICAL: When TLS is configured (SubsetRule ISTIO_MUTUAL), gRPC xDS
client needs
+ // When TLS is configured (SubsetRule ISTIO_MUTUAL), gRPC xDS client
needs
// to fetch certificates from CertificateProvider during TLS handshake.
The TLS handshake
// happens when the connection state transitions to READY. If
CertificateProvider is not ready,
// the TLS handshake will timeout. We need to wait for the connection
to be READY, which
@@ -440,7 +439,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
// Only wait for new connections or connections that are not
READY
// For gRPC xDS proxyless, we need to wait for the client to
receive and process LDS/CDS/EDS
// The connection state may transition: IDLE -> CONNECTING ->
READY (or TRANSIENT_FAILURE -> CONNECTING -> READY)
- // CRITICAL: When TLS is configured, the TLS handshake happens
during this state transition.
+ // When TLS is configured, the TLS handshake happens during
this state transition.
// If CertificateProvider is not ready, the TLS handshake will
timeout and connection will fail.
// We use a longer timeout (60 seconds) to allow
CertificateProvider to fetch certificates.
log.Printf("ForwardEcho: waiting for xDS configuration to be
processed and connection to be ready (60 seconds)...")
@@ -449,7 +448,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
maxWait := 60 * time.Second
// Wait for state changes, allowing multiple state transitions
- // CRITICAL: Don't exit on TRANSIENT_FAILURE - it may recover
to READY
+ // Don't exit on TRANSIENT_FAILURE - it may recover to READY
stateChanged := false
currentState := initialState
startTime := time.Now()
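
The wait loop described above can be expressed with the public grpc connectivity API. A hedged sketch: the 60-second budget mirrors the comment, TRANSIENT_FAILURE is tolerated because the connection may still recover once xDS config and certificates arrive, and everything else is simplified relative to the consumer's code.

    package main

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/connectivity"
    )

    // waitReady blocks until conn reports READY or the budget expires.
    func waitReady(conn *grpc.ClientConn, budget time.Duration) bool {
        ctx, cancel := context.WithTimeout(context.Background(), budget)
        defer cancel()

        conn.Connect() // kick an IDLE connection into CONNECTING
        state := conn.GetState()
        for state != connectivity.Ready {
            log.Printf("waiting for connection, current state: %v", state)
            if !conn.WaitForStateChange(ctx, state) {
                return false // budget exhausted before reaching READY
            }
            state = conn.GetState()
        }
        return true
    }

    func main() {
        // Usage after dialing an xds:/// target: waitReady(conn, 60*time.Second)
    }
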
@@ -569,7 +568,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
errMsg := formatGRPCError(err, i, count)
output = append(output, errMsg)
- // CRITICAL: Only clear cache if we detect specific
TLS/plaintext mismatch errors.
+ // Only clear cache if we detect specific TLS/plaintext
mismatch errors.
// TRANSIENT_FAILURE can occur for many reasons (e.g.,
xDS config updates, endpoint changes),
// so we should NOT clear cache on every
TRANSIENT_FAILURE.
// Only clear cache when we detect explicit TLS-related
errors that indicate a mismatch.
@@ -592,12 +591,11 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
log.Printf("ForwardEcho: detected TLS/plaintext
mismatch or CertificateProvider not ready error: %v", err)
}
- // CRITICAL: When TLS mismatch is detected, immediately
clear cache and force reconnection.
+ // When TLS mismatch is detected, immediately clear
cache and force reconnection.
// This ensures that:
// 1. If client config changed (plaintext -> TLS), new
connection uses TLS
// 2. If server config changed (TLS -> plaintext), new
connection uses plaintext
// 3. Connection behavior is consistent with current
xDS configuration
- // According to Istio proxyless gRPC behavior:
// - When only client TLS (SubsetRule ISTIO_MUTUAL) but
server plaintext: connection SHOULD FAIL
// - When client TLS + server mTLS (PeerAuthentication
STRICT): connection SHOULD SUCCEED
// - When both plaintext: connection SHOULD SUCCEED
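
The mismatch detection itself is just substring matching on the RPC error text. A hedged sketch; the patterns below are illustrative TLS-versus-plaintext symptoms, not necessarily the exact strings the consumer matches on.

    package main

    import (
        "fmt"
        "strings"
    )

    var tlsMismatchHints = []string{
        "first record does not look like a tls handshake", // TLS client hitting a plaintext server
        "tls: handshake failure",
        "connection closed before server preface received",
        "certificate provider",
    }

    // looksLikeTLSMismatch reports whether the error text suggests a
    // TLS/plaintext mismatch or an unready CertificateProvider.
    func looksLikeTLSMismatch(err error) bool {
        if err == nil {
            return false
        }
        msg := strings.ToLower(err.Error())
        for _, hint := range tlsMismatchHints {
            if strings.Contains(msg, hint) {
                return true
            }
        }
        return false
    }

    func main() {
        err := fmt.Errorf("rpc error: code = Unavailable desc = first record does not look like a TLS handshake")
        fmt.Println(looksLikeTLSMismatch(err)) // true: clear the cache and redial
    }
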
@@ -629,7 +627,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
conn = nil
s.connMutex.Unlock()
- // CRITICAL: Wait for xDS config to propagate
and be processed by gRPC xDS client.
+ // Wait for xDS config to propagate and be
processed by gRPC xDS client.
// When CDS/LDS config changes, it takes time
for:
// 1. Control plane to push new config to gRPC
xDS client
// 2. gRPC xDS client to process and apply new
config
@@ -647,7 +645,7 @@ func (s *testServerImpl) ForwardEcho(ctx context.Context,
req *pb.ForwardEchoReq
}
// Recreate connection - this will use current
xDS config
- // CRITICAL: When TLS is configured, gRPC xDS
client needs to fetch certificates
+ // When TLS is configured, gRPC xDS client
needs to fetch certificates
// from CertificateProvider. Use a longer
timeout to allow certificate fetching.
log.Printf("ForwardEcho: recreating connection
with current xDS config...")
creds, credErr :=
xdscreds.NewClientCredentials(xdscreds.ClientOptions{
diff --git a/tests/grpc-app/docker/dockerfile.producer
b/tests/grpc-app/docker/dockerfile.provider
similarity index 86%
rename from tests/grpc-app/docker/dockerfile.producer
rename to tests/grpc-app/docker/dockerfile.provider
index b6630631..58945080 100644
--- a/tests/grpc-app/docker/dockerfile.producer
+++ b/tests/grpc-app/docker/dockerfile.provider
@@ -32,11 +32,11 @@ RUN protoc --go_out=. --go_opt=paths=source_relative \
--go-grpc_out=. --go-grpc_opt=paths=source_relative \
proto/echo.proto
-COPY producer/ ./producer/
+COPY provider/ ./provider/
ARG GOOS=linux
ARG GOARCH=amd64
-RUN GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 go build -a -ldflags
'-extldflags "-static"' -o /build/grpc-producer ./producer/
+RUN GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 go build -a -ldflags
'-extldflags "-static"' -o /build/grpc-provider ./provider/
FROM alpine:latest
RUN apk update && \
@@ -44,7 +44,7 @@ RUN apk update && \
(sleep 2 && apk update && apk --no-cache add ca-certificates tzdata)
WORKDIR /app
-COPY --from=builder /build/grpc-producer /usr/local/bin/grpc-producer
-RUN chmod +x /usr/local/bin/grpc-producer
+COPY --from=builder /build/grpc-provider /usr/local/bin/grpc-provider
+RUN chmod +x /usr/local/bin/grpc-provider
-ENTRYPOINT ["/usr/local/bin/grpc-producer"]
+ENTRYPOINT ["/usr/local/bin/grpc-provider"]
diff --git a/tests/grpc-app/producer/main.go b/tests/grpc-app/provider/main.go
similarity index 100%
rename from tests/grpc-app/producer/main.go
rename to tests/grpc-app/provider/main.go