Copilot commented on code in PR #1007:
URL: https://github.com/apache/skywalking-banyandb/pull/1007#discussion_r2922770906


##########
pkg/test/setup/setup.go:
##########
@@ -609,13 +609,23 @@ func CMD(flags ...string) func() {
        }
 }
 
+func hasFlag(flags []string, target string) bool {
+       for _, f := range flags {
+               if f == target {
+                       return true
+               }
+       }
+       return false
+}
+
 func startDataNode(config *ClusterConfig, dataDir string, flags ...string) (string, string, func()) {
        if config == nil {
                config = defaultClusterConfig
        }
        isPropertyMode := config.SchemaRegistry.Mode == ModeProperty
+       runSchemaServer := isPropertyMode && !hasFlag(flags, "--has-schema-role=false")
        portCount := 2
-       if isPropertyMode {
+       if runSchemaServer {
                portCount = 3

Review Comment:
   `hasFlag` only matches an exact string (e.g. `--has-schema-role=false`). If 
callers pass the bool flag as separate args (`--has-schema-role`, `false`) or 
use `--has-schema-role=false` with extra whitespace, `runSchemaServer` will be 
computed incorrectly, leading to allocating the wrong number of ports and 
potentially adding a schema server address even though the node won’t start 
one. Consider parsing the flag more robustly (handle both `--k=v` and `--k v`, 
or use the flag parser) before deciding `portCount`/`runSchemaServer`.



##########
test/integration/distributed/lifecycle/property/suite_test.go:
##########
@@ -102,19 +100,34 @@ func init() {
        }
 }
 
-func diffGroups(left, right []string) []string {
-       rightSet := make(map[string]struct{}, len(right))
-       for _, groupName := range right {
-               rightSet[groupName] = struct{}{}
-       }
-       result := make([]string, 0)
-       for _, groupName := range left {
-               if _, exists := rightSet[groupName]; exists {
-                       continue
+func verifyClusterNodeRoles(liaisonAddr string) {
+       conn, connErr := grpchelper.Conn(liaisonAddr, 10*time.Second,
+               grpclib.WithTransportCredentials(insecure.NewCredentials()))
+       Expect(connErr).NotTo(HaveOccurred())
+       defer func() { _ = conn.Close() }()
+       clusterClient := databasev1.NewClusterStateServiceClient(conn)
+       state, stateErr := clusterClient.GetClusterState(
+               context.Background(), &databasev1.GetClusterStateRequest{})
+       Expect(stateErr).NotTo(HaveOccurred())
+       tire2 := state.GetRouteTables()["tire2"]
+       Expect(tire2).NotTo(BeNil(), "tire2 route table not found")
+       for _, node := range tire2.GetRegistered() {
+               labels := node.GetLabels()
+               hasMetaRole := false
+               for _, role := range node.GetRoles() {
+                       if role == databasev1.Role_ROLE_META {
+                               hasMetaRole = true
+                               break
+                       }
+               }
+               if labels["type"] == "hot" {
+                       Expect(hasMetaRole).To(BeTrue(),
+                               fmt.Sprintf("hot node %s should have ROLE_META", node.GetMetadata().GetName()))
+               } else if labels["type"] == "warm" {
+                       Expect(hasMetaRole).To(BeFalse(),
+                               fmt.Sprintf("warm node %s should NOT have ROLE_META", node.GetMetadata().GetName()))
                }

Review Comment:
   This loop only asserts role expectations *if* it encounters a node with 
label `type=hot`/`type=warm`. If the warm node never registers (or labels are 
missing), the test can still pass. Track whether at least one hot and one warm 
node were observed and `Expect` both to be true after iterating.



##########
test/integration/distributed/lifecycle/property/suite_test.go:
##########
@@ -102,19 +100,34 @@ func init() {
        }
 }
 
-func diffGroups(left, right []string) []string {
-       rightSet := make(map[string]struct{}, len(right))
-       for _, groupName := range right {
-               rightSet[groupName] = struct{}{}
-       }
-       result := make([]string, 0)
-       for _, groupName := range left {
-               if _, exists := rightSet[groupName]; exists {
-                       continue
+func verifyClusterNodeRoles(liaisonAddr string) {
+       conn, connErr := grpchelper.Conn(liaisonAddr, 10*time.Second,
+               grpclib.WithTransportCredentials(insecure.NewCredentials()))
+       Expect(connErr).NotTo(HaveOccurred())
+       defer func() { _ = conn.Close() }()
+       clusterClient := databasev1.NewClusterStateServiceClient(conn)
+       state, stateErr := clusterClient.GetClusterState(
+               context.Background(), &databasev1.GetClusterStateRequest{})
+       Expect(stateErr).NotTo(HaveOccurred())

Review Comment:
   `verifyClusterNodeRoles` makes a single `GetClusterState` call with 
`context.Background()`. Because cluster state propagation is asynchronous, this 
can be flaky (and the RPC has no deadline). Consider wrapping the check in 
`Eventually` (similar to `waitForActiveDataNodes`) and using a context with 
timeout/deadline for the RPC.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to