This is an automated email from the ASF dual-hosted git repository.
houston pushed a commit to branch branch_9x
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/branch_9x by this push:
new 04acaca3e18 SOLR-17331: More optimal placements with OrderedNodePlacementPlugin (#2515)
04acaca3e18 is described below
commit 04acaca3e186e3a1e3f260bf3c3ac8ed32b1ff28
Author: Houston Putman <[email protected]>
AuthorDate: Tue Jun 18 14:26:05 2024 -0400
SOLR-17331: More optimal placements with OrderedNodePlacementPlugin (#2515)
- Move tests, adding tests for the simple plugin
(cherry picked from commit fc0d84afaa8b49bd0515f796abd901e5150d5982)
---
solr/CHANGES.txt | 2 +
.../plugins/OrderedNodePlacementPlugin.java | 87 ++++++-----
.../org/apache/solr/cloud/MigrateReplicasTest.java | 8 +-
.../plugins/MinimizeCoresPlacementFactoryTest.java | 2 +-
...ryTest.java => SimplePlacementFactoryTest.java} | 172 ++++++++++++---------
5 files changed, 151 insertions(+), 120 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 05437a698f0..2fe4b04a7f5 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -33,6 +33,8 @@ Improvements
* SOLR-17109: Give security manager explicit read access to sharedLib (Tomás
Fernández Löbbe via Eric Pugh)
+* SOLR-17331: OrderedNodePlacementPlugin will give even more optimal
replica placements during ReplicaMigration commands (Houston Putman, Yohann
Callea)
+
Optimizations
---------------------
* SOLR-17257: Both Minimize Cores and the Affinity replica placement
strategies would over-gather
diff --git a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/OrderedNodePlacementPlugin.java b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/OrderedNodePlacementPlugin.java
index 5c8d195954e..86ca79526b3 100644
--- a/solr/core/src/java/org/apache/solr/cluster/placement/plugins/OrderedNodePlacementPlugin.java
+++ b/solr/core/src/java/org/apache/solr/cluster/placement/plugins/OrderedNodePlacementPlugin.java
@@ -69,23 +69,26 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
Set<Node> allNodes = new HashSet<>();
Set<SolrCollection> allCollections = new HashSet<>();
- Deque<PendingPlacementRequest> pendingRequests = new ArrayDeque<>(requests.size());
+ // This helps us keep track if we have made a full lap of the outstanding requests
+ // without doing a placement or not.
+ int placementCount = 0;
+ Deque<OutstandingPlacementRequest> outstandingRequests = new ArrayDeque<>(requests.size());
for (PlacementRequest request : requests) {
- PendingPlacementRequest pending = new PendingPlacementRequest(request);
- pendingRequests.add(pending);
+ OutstandingPlacementRequest outstanding = new OutstandingPlacementRequest(request);
+ outstandingRequests.add(outstanding);
placementPlans.add(
placementContext
.getPlacementPlanFactory()
- .createPlacementPlan(request, pending.getComputedPlacementSet()));
+ .createPlacementPlan(request, outstanding.getComputedPlacementSet()));
allNodes.addAll(request.getTargetNodes());
allCollections.add(request.getCollection());
}
Collection<WeightedNode> weightedNodes =
getWeightedNodes(placementContext, allNodes, allCollections, true).values();
- while (!pendingRequests.isEmpty()) {
- PendingPlacementRequest request = pendingRequests.poll();
- if (!request.isPending()) {
+ while (!outstandingRequests.isEmpty()) {
+ OutstandingPlacementRequest request = outstandingRequests.poll();
+ if (request.isComplete()) {
continue;
}
@@ -94,9 +97,9 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
SolrCollection solrCollection = request.getCollection();
// Now place all replicas of all shards on available nodes
- for (String shardName : request.getPendingShards()) {
- for (Replica.ReplicaType replicaType : request.getPendingReplicaTypes(shardName)) {
- int replicaCount = request.getPendingReplicas(shardName, replicaType);
+ for (String shardName : request.getOutstandingShards()) {
+ for (Replica.ReplicaType replicaType : request.getOutstandingReplicaTypes(shardName)) {
+ int replicaCount = request.getOutstandingReplicas(shardName, replicaType);
if (log.isDebugEnabled()) {
log.debug(
"Placing {} replicas for Collection: {}, Shard: {},
ReplicaType: {}",
@@ -131,12 +134,13 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
// options than we have replicas to place, that's ok, because the replicas will likely
// be put on all the tie options.
//
- // Only skip the request if it can be requeued, and there are other pending requests to
- // compute.
+ // Only skip the request if it can be requeued, and there are other outstanding requests
+ // to
+ // compute. If the next outstanding request cannot be requeued,
int numWeightTies = nodesForReplicaType.peekTies();
- if (!pendingRequests.isEmpty()
- && request.canBeRequeued()
- && numWeightTies > (replicaCount - replicasPlaced)) {
+ if (numWeightTies > (replicaCount - replicasPlaced)
+ && !outstandingRequests.isEmpty()
+ && request.canBeRequeued(placementCount)) {
log.debug(
"There is a tie for best weight. There are more options ({})
than replicas to place ({}), so try this placement request later: {}",
numWeightTies,
@@ -151,6 +155,7 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
node.addReplica(
createProjectedReplica(solrCollection, shardName, replicaType, node.getNode()));
replicasPlaced += 1;
+ placementCount += 1;
request.addPlacement(
placementContext
.getPlacementPlanFactory()
@@ -185,9 +190,9 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
}
}
}
- if (request.isPending()) {
- request.requeue();
- pendingRequests.add(request);
+ if (!request.isComplete()) {
+ request.requeue(placementCount);
+ outstandingRequests.add(request);
}
}
return placementPlans;
@@ -808,8 +813,8 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
}
/** Context for a placement request that still has replicas that need to be placed. */
- static class PendingPlacementRequest {
- boolean hasBeenRequeued;
+ static class OutstandingPlacementRequest {
+ int requeuedAtPlacementCount;
final SolrCollection collection;
@@ -821,8 +826,8 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
// A live view on how many replicas still need to be placed for each shard & replica type
final Map<String, Map<Replica.ReplicaType, Integer>> replicasToPlaceForShards;
- public PendingPlacementRequest(PlacementRequest request) {
- hasBeenRequeued = false;
+ public OutstandingPlacementRequest(PlacementRequest request) {
+ requeuedAtPlacementCount = -1;
collection = request.getCollection();
targetNodes = request.getTargetNodes();
Set<String> shards = request.getShardNames();
@@ -843,13 +848,13 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
}
/**
- * Determine if this request is not yet complete, and there are requested replicas that have not
+ * Determine if this request is complete, and there are no requested replicas that have not yet
* had placements computed.
*
- * @return if there are still replica placements that need to be computed
+ * @return if all replica placements have been computed
*/
- public boolean isPending() {
- return !replicasToPlaceForShards.isEmpty();
+ public boolean isComplete() {
+ return replicasToPlaceForShards.isEmpty();
}
public SolrCollection getCollection() {
@@ -864,7 +869,7 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
* The set of ReplicaPlacements computed for this request.
*
* <p>The list that is returned is the same list used internally, so it will be augmented until
- * {@link #isPending()} returns false.
+ * {@link #isComplete()} returns true.
*
* @return The live set of replicaPlacements for this request.
*/
@@ -879,7 +884,7 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
*
* @return list of unfinished shards
*/
- public Collection<String> getPendingShards() {
+ public Collection<String> getOutstandingShards() {
return new ArrayList<>(replicasToPlaceForShards.keySet());
}
@@ -890,7 +895,7 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
* @param shard name of the shard to check for uncomputed placements
* @return the set of unfinished replica types
*/
- public Collection<Replica.ReplicaType> getPendingReplicaTypes(String shard) {
+ public Collection<Replica.ReplicaType> getOutstandingReplicaTypes(String shard) {
return Optional.ofNullable(replicasToPlaceForShards.get(shard))
.map(Map::keySet)
// Use a sorted TreeSet to make sure that tests are repeatable
@@ -906,30 +911,36 @@ public abstract class OrderedNodePlacementPlugin implements PlacementPlugin {
* @param type type of replica to be placed
* @return the number of replicas that have not yet had placements computed
*/
- public int getPendingReplicas(String shard, Replica.ReplicaType type) {
+ public int getOutstandingReplicas(String shard, Replica.ReplicaType type) {
return Optional.ofNullable(replicasToPlaceForShards.get(shard))
.map(m -> m.get(type))
.orElse(0);
}
/**
- * Currently, only one requeue is allowed per pending request.
+ * The request can be requeued as long as there have been placements since the last time it was
+ * requeued. If no placements have been made since, we will not allow a requeue which will force
+ * placements to be determined. This removes the possibility of a never-ending loop.
*
* @return true if the request has not been requeued already
*/
- public boolean canBeRequeued() {
- return !hasBeenRequeued;
+ public boolean canBeRequeued(int placementCount) {
+ return requeuedAtPlacementCount < placementCount;
}
- /** Let the pending request know that it has been requeued */
- public void requeue() {
- hasBeenRequeued = true;
+ /**
+ * Let the request know that it has been requeued
+ *
+ * @param placementCount the number of placements made at the time of the requeue
+ */
+ public void requeue(int placementCount) {
+ requeuedAtPlacementCount = placementCount;
}
/**
- * Track the given replica placement for this pending request.
+ * Track the given replica placement for this request.
*
- * @param replica placement that has been made for the pending request
+ * @param replica placement that has been made for the request
*/
public void addPlacement(ReplicaPlacement replica) {
computedPlacements.add(replica);
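
To make the requeue guard above easier to follow, here is a minimal, self-contained sketch of the idea, using hypothetical stand-in types (DemoRequest, RequeueGuardDemo) rather than the actual Solr classes: each request records the global placement count at its last requeue and may only be requeued again after at least one new placement has been made, so every lap of the queue makes progress.

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.List;

    // Hypothetical stand-in for OutstandingPlacementRequest: it remembers the global
    // placement count seen at its last requeue, and only allows another requeue once
    // at least one new placement has happened since then.
    class DemoRequest {
      private int remaining;                      // replicas still to place
      private int requeuedAtPlacementCount = -1;  // placement count at last requeue

      DemoRequest(int remaining) { this.remaining = remaining; }

      boolean isComplete() { return remaining == 0; }
      boolean canBeRequeued(int placementCount) { return requeuedAtPlacementCount < placementCount; }
      void requeue(int placementCount) { requeuedAtPlacementCount = placementCount; }
      void placeOne() { remaining--; }
    }

    public class RequeueGuardDemo {
      public static void main(String[] args) {
        Deque<DemoRequest> outstanding =
            new ArrayDeque<>(List.of(new DemoRequest(2), new DemoRequest(1)));
        int placementCount = 0;

        while (!outstanding.isEmpty()) {
          DemoRequest request = outstanding.poll();
          if (request.isComplete()) {
            continue;
          }
          // Defer only when another request could run AND a placement has been made since
          // this request was last deferred; otherwise we must place now, so the loop
          // cannot spin forever without progress.
          boolean preferToDefer = true; // stands in for the "tie in node weights" check
          if (preferToDefer && !outstanding.isEmpty() && request.canBeRequeued(placementCount)) {
            request.requeue(placementCount);
            outstanding.add(request);
            continue;
          }
          request.placeOne();       // compute one placement
          placementCount += 1;
          if (!request.isComplete()) {
            request.requeue(placementCount);
            outstanding.add(request);
          }
        }
        System.out.println("All placements computed; total placements: " + placementCount);
      }
    }

This is why the int requeuedAtPlacementCount replaces the old hasBeenRequeued boolean in the patch: a request can now be deferred repeatedly, but never twice in a row without an intervening placement.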
diff --git a/solr/core/src/test/org/apache/solr/cloud/MigrateReplicasTest.java b/solr/core/src/test/org/apache/solr/cloud/MigrateReplicasTest.java
index f96b621c023..39ccf43fb53 100644
--- a/solr/core/src/test/org/apache/solr/cloud/MigrateReplicasTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/MigrateReplicasTest.java
@@ -237,7 +237,7 @@ public class MigrateReplicasTest extends SolrCloudTestCase {
}
@Test
- public void testGoodSpreadDuringAssignWithNoTarget() throws Exception {
+ public void testWithNoTarget() throws Exception {
configureCluster(5)
.addConfig(
"conf1",
TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
@@ -300,9 +300,9 @@ public class MigrateReplicasTest extends SolrCloudTestCase {
}
for (String node : eventualTargetNodes) {
- assertEquals(
- "The non-source node '" + node + "' has the wrong number of replicas
after the migration",
- 2,
+ assertNotEquals(
+ "The non-source node '" + node + "' should not receive all replicas
from the migration",
+ 4,
collection.getReplicas(node).size());
}
}
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java
index 6afd24fafd1..e4cb6e71a94 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java
@@ -38,7 +38,7 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** Unit test for {@link AffinityPlacementFactory} */
+/** Unit test for {@link MinimizeCoresPlacementFactory} */
public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryTest {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
diff --git a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/SimplePlacementFactoryTest.java
similarity index 76%
copy from solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java
copy to solr/core/src/test/org/apache/solr/cluster/placement/plugins/SimplePlacementFactoryTest.java
index 6afd24fafd1..d4b91e582c1 100644
--- a/solr/core/src/test/org/apache/solr/cluster/placement/plugins/MinimizeCoresPlacementFactoryTest.java
+++ b/solr/core/src/test/org/apache/solr/cluster/placement/plugins/SimplePlacementFactoryTest.java
@@ -18,10 +18,14 @@
package org.apache.solr.cluster.placement.plugins;
import java.lang.invoke.MethodHandles;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
+import java.util.Locale;
+import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
import org.apache.solr.cluster.Node;
import org.apache.solr.cluster.Shard;
import org.apache.solr.cluster.SolrCollection;
@@ -30,6 +34,7 @@ import org.apache.solr.cluster.placement.Builders;
import org.apache.solr.cluster.placement.PlacementContext;
import org.apache.solr.cluster.placement.PlacementPlan;
import org.apache.solr.cluster.placement.PlacementPlugin;
+import org.apache.solr.cluster.placement.PlacementRequest;
import org.apache.solr.cluster.placement.ReplicaPlacement;
import org.apache.solr.cluster.placement.impl.BalanceRequestImpl;
import org.apache.solr.cluster.placement.impl.PlacementRequestImpl;
@@ -38,8 +43,8 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/** Unit test for {@link AffinityPlacementFactory} */
-public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryTest {
+/** Unit test for {@link SimplePlacementFactory} */
+public class SimplePlacementFactoryTest extends AbstractPlacementFactoryTest {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private PlacementPlugin plugin;
@@ -50,7 +55,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
}
private void configurePlugin() {
- MinimizeCoresPlacementFactory factory = new MinimizeCoresPlacementFactory();
+ SimplePlacementFactory factory = new SimplePlacementFactory();
plugin = factory.createPluginInstance();
}
@@ -112,6 +117,85 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
assertEquals(hasExistingCollection ? liveNodes.get(1) : liveNodes.get(0), rp.getNode());
}
+ /**
+ * Tests that existing collection replicas are taken into account when preventing more than one
+ * replica per shard to be placed on any node.
+ */
+ @Test
+ public void testMultiplePlacementsWithExistingReplicas() throws Exception {
+ String collectionName = "existingCollection";
+
+ // Cluster nodes and their attributes
+ Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(3);
+
+ // The collection already exists with shards and replicas
+ Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(collectionName);
+ // Note that the collection as defined below is in a state that would NOT be returned by the
+ // placement plugin: shard 1 has two replicas on node 0. The plugin should still be able to
+ // place additional replicas as long as they don't break the rules.
+ List<List<String>> shardsReplicas =
+ List.of(
+ List.of("NRT 0"), // shard 1
+ List.of("NRT 1"), // shard 2
+ List.of("NRT 2")); // shard 3
+ collectionBuilder.customCollectionSetup(shardsReplicas, clusterBuilder.getLiveNodeBuilders());
+ clusterBuilder.addCollection(collectionBuilder);
+ SolrCollection solrCollection = collectionBuilder.build();
+
+ List<Node> liveNodes = clusterBuilder.buildLiveNodes();
+
+ // Place an additional NRT and an additional TLOG replica for each shard
+ PlacementRequestImpl placementRequest =
+ new PlacementRequestImpl(
+ solrCollection, solrCollection.getShardNames(), new HashSet<>(liveNodes), 1, 0, 0);
+
+ List<PlacementRequest> placementRequests =
+ solrCollection.getShardNames().stream()
+ .map(
+ shard ->
+ new PlacementRequestImpl(
+ solrCollection,
+ Collections.singleton(shard),
+ new HashSet<>(liveNodes),
+ 1,
+ 0,
+ 0))
+ .collect(Collectors.toList());
+ // Randomize the order of the requests
+ Collections.shuffle(placementRequests, random());
+
+ // The replicas must be placed on the most appropriate nodes, i.e. those that do not already
+ // have a replica for the shard and then on the node with the lowest number of cores. NRT are
+ // placed first and given the cluster state here the placement is deterministic (easier to test,
+ // only one good placement).
+ List<PlacementPlan> pps =
+ plugin.computePlacements(placementRequests, clusterBuilder.buildPlacementContext());
+
+ List<ReplicaPlacement> replicaPlacements =
+ pps.stream().flatMap(pp -> pp.getReplicaPlacements().stream()).collect(Collectors.toList());
+ replicaPlacements.forEach(
+ rp ->
+ assertNotEquals(
+ "Two replicas of the same shard on the same node",
+ solrCollection.getShard(rp.getShardName()).iterator().next().getNode(),
+ rp.getNode()));
+ Map<Node, List<ReplicaPlacement>> placementsByNode =
+ replicaPlacements.stream()
+ .collect(Collectors.groupingBy(ReplicaPlacement::getNode, Collectors.toList()));
+ for (Map.Entry<Node, List<ReplicaPlacement>> entry : placementsByNode.entrySet()) {
+ assertEquals(
+ String.format(
+ Locale.ROOT,
+ "Node %s has multiple replicas placed on it, when each node
should have 1 placement: [%s]",
+ entry.getKey().getName(),
+ entry.getValue().stream()
+ .map(ReplicaPlacement::getShardName)
+ .collect(Collectors.joining(", "))),
+ 1,
+ entry.getValue().size());
+ }
+ }
+
/**
* Tests that existing collection replicas are taken into account when preventing more than one
* replica per shard to be placed on any node.
@@ -122,12 +206,6 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
// Cluster nodes and their attributes
Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(5);
- List<Builders.NodeBuilder> nodeBuilders = clusterBuilder.getLiveNodeBuilders();
- int coresOnNode = 10;
- for (Builders.NodeBuilder nodeBuilder : nodeBuilders) {
- nodeBuilder.setCoreCount(coresOnNode);
- coresOnNode += 10;
- }
// The collection already exists with shards and replicas
Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(collectionName);
@@ -138,7 +216,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
List.of(
List.of("NRT 0", "TLOG 0", "NRT 3"), // shard 1
List.of("NRT 1", "NRT 3", "TLOG 2")); // shard 2
- collectionBuilder.customCollectionSetup(shardsReplicas, nodeBuilders);
+ collectionBuilder.customCollectionSetup(shardsReplicas, clusterBuilder.getLiveNodeBuilders());
clusterBuilder.addCollection(collectionBuilder);
SolrCollection solrCollection = collectionBuilder.build();
@@ -157,7 +235,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
plugin.computePlacement(placementRequest, clusterBuilder.buildPlacementContext());
// Each expected placement is represented as a string "shard replica-type node"
- Set<String> expectedPlacements = Set.of("1 NRT 1", "1 TLOG 2", "2 NRT 0", "2 TLOG 4");
+ Set<String> expectedPlacements = Set.of("1 NRT 1", "1 TLOG 2", "2 TLOG 0", "2 NRT 4");
verifyPlacements(expectedPlacements, pp, collectionBuilder.getShardBuilders(), liveNodes);
}
@@ -171,11 +249,6 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
// Cluster nodes and their attributes
Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(3);
- List<Builders.NodeBuilder> nodeBuilders = clusterBuilder.getLiveNodeBuilders();
- int coreCount = 0;
- for (Builders.NodeBuilder nodeBuilder : nodeBuilders) {
- nodeBuilder.setCoreCount(coreCount++);
- }
// The collection already exists with shards and replicas
Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder(collectionName);
@@ -186,7 +259,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
List.of(
List.of("NRT 10", "TLOG 11"), // shard 1
List.of()); // shard 2
- collectionBuilder.customCollectionSetup(shardsReplicas, nodeBuilders);
+ collectionBuilder.customCollectionSetup(shardsReplicas, clusterBuilder.getLiveNodeBuilders());
clusterBuilder.addCollection(collectionBuilder);
SolrCollection solrCollection = collectionBuilder.build();
@@ -224,59 +297,11 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
/** Tests replica balancing across all nodes in a cluster */
@Test
- public void testBalancingBareMetrics() throws Exception {
+ public void testBalancing() throws Exception {
// Cluster nodes and their attributes
Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(5);
List<Builders.NodeBuilder> nodeBuilders = clusterBuilder.getLiveNodeBuilders();
- // The collection already exists with shards and replicas
- Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder("a");
- // Note that the collection as defined below is in a state that would NOT be returned by the
- // placement plugin: shard 1 has two replicas on node 0. The plugin should still be able to
- // place additional replicas as long as they don't break the rules.
- List<List<String>> shardsReplicas =
- List.of(
- List.of("NRT 0", "TLOG 0", "NRT 2"), // shard 1
- List.of("NRT 1", "NRT 4", "TLOG 3")); // shard 2
- collectionBuilder.customCollectionSetup(shardsReplicas, nodeBuilders);
- clusterBuilder.addCollection(collectionBuilder);
-
- // Add another collection
- collectionBuilder = Builders.newCollectionBuilder("b");
- shardsReplicas =
- List.of(
- List.of("NRT 1", "TLOG 0", "NRT 3"), // shard 1
- List.of("NRT 1", "NRT 3", "TLOG 0")); // shard 2
- collectionBuilder.customCollectionSetup(shardsReplicas, nodeBuilders);
- clusterBuilder.addCollection(collectionBuilder);
-
- BalanceRequestImpl balanceRequest =
- new BalanceRequestImpl(new HashSet<>(clusterBuilder.buildLiveNodes()));
- BalancePlan balancePlan =
- plugin.computeBalancing(balanceRequest, clusterBuilder.buildPlacementContext());
-
- // Each expected placement is represented as a string "col shard replica-type fromNode ->
- // toNode"
- Set<String> expectedPlacements = Set.of("b 1 TLOG 0 -> 2", "b 1 NRT 3 ->
4");
- verifyBalancing(
- expectedPlacements,
- balancePlan,
- collectionBuilder.getShardBuilders(),
- clusterBuilder.buildLiveNodes());
- }
-
- /** Tests replica balancing across all nodes in a cluster */
- @Test
- public void testBalancingExistingMetrics() throws Exception {
- // Cluster nodes and their attributes
- Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(5);
- List<Builders.NodeBuilder> nodeBuilders = clusterBuilder.getLiveNodeBuilders();
- int coresOnNode = 10;
- for (Builders.NodeBuilder nodeBuilder : nodeBuilders) {
- nodeBuilder.setCoreCount(coresOnNode);
- coresOnNode += 10;
- }
-
// The collection already exists with shards and replicas
Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder("a");
// Note that the collection as defined below is in a state that would NOT be returned by the
@@ -296,7 +321,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
// Each expected placement is represented as a string "col shard replica-type fromNode ->
// toNode"
- Set<String> expectedPlacements = Set.of("a 1 NRT 3 -> 1", "a 2 NRT 3 -> 0");
+ Set<String> expectedPlacements = Set.of("a 1 NRT 0 -> 4");
verifyBalancing(
expectedPlacements,
balancePlan,
@@ -309,13 +334,6 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
public void testBalancingWithNodeSubset() throws Exception {
// Cluster nodes and their attributes
Builders.ClusterBuilder clusterBuilder = Builders.newClusterBuilder().initializeLiveNodes(5);
- List<Builders.NodeBuilder> nodeBuilders = clusterBuilder.getLiveNodeBuilders();
- int coresOnNode = 10;
- for (Builders.NodeBuilder nodeBuilder : nodeBuilders) {
- nodeBuilder.setCoreCount(coresOnNode);
- coresOnNode += 10;
- }
-
// The collection already exists with shards and replicas
Builders.CollectionBuilder collectionBuilder = Builders.newCollectionBuilder("a");
// Note that the collection as defined below is in a state that would NOT be returned by the
@@ -325,10 +343,10 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
List.of(
List.of("NRT 0", "TLOG 0", "NRT 3"), // shard 1
List.of("NRT 1", "NRT 3", "TLOG 2")); // shard 2
- collectionBuilder.customCollectionSetup(shardsReplicas, nodeBuilders);
+ collectionBuilder.customCollectionSetup(shardsReplicas, clusterBuilder.getLiveNodeBuilders());
clusterBuilder.addCollection(collectionBuilder);
- // Only balance over node 1 and 2
+ // Only balance over node 1-4
List<Node> overNodes = clusterBuilder.buildLiveNodes();
overNodes.remove(0);
@@ -338,7 +356,7 @@ public class MinimizeCoresPlacementFactoryTest extends AbstractPlacementFactoryT
// Each expected placement is represented as a string "col shard replica-type fromNode ->
// toNode"
- Set<String> expectedPlacements = Set.of("a 1 NRT 3 -> 1");
+ Set<String> expectedPlacements = Set.of("a 1 NRT 3 -> 4");
verifyBalancing(
expectedPlacements,
balancePlan,