This is an automated email from the ASF dual-hosted git repository.
dsmiley pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/main by this push:
new c694258eac6 SOLR-17535: Deprecate ClusterState.forEachCollection (#2854)
c694258eac6 is described below
commit c694258eac62a53000cb3d4ce9c0a7302268f97d
Author: David Smiley <[email protected]>
AuthorDate: Tue Nov 12 00:58:28 2024 -0500
SOLR-17535: Deprecate ClusterState.forEachCollection (#2854)
Use collectionStream() instead. Redirect callers. A simple refactoring.
---
solr/CHANGES.txt | 3 +-
.../impl/CollectionsRepairEventListener.java | 71 ++++++++++++----------
.../apache/solr/cloud/ReindexCollectionTest.java | 3 +-
.../solrj/impl/SolrClientNodeStateProvider.java | 27 ++++----
.../org/apache/solr/common/LazySolrCluster.java | 4 +-
.../org/apache/solr/common/cloud/ClusterState.java | 3 +
6 files changed, 62 insertions(+), 49 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 244f68171b7..b16fea65387 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -227,7 +227,8 @@ led to the suppression of exceptions. (Andrey Bozhko)
* SOLR-17534: Introduce ClusterState.getCollectionNames, a convenience method (David Smiley)
-* SOLR-17535: Introduce ClusterState.collectionStream to replace getCollectionStates and getCollectionsMap (David Smiley)
+* SOLR-17535: Introduce ClusterState.collectionStream to replace getCollectionStates, getCollectionsMap,
+  and forEachCollection, which are now deprecated. (David Smiley)
* SOLR-17545: Upgrade to Gradle 8.10 (Houston Putman)
diff --git a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
index 05a243f38ee..b2c5f0c99cd 100644
--- a/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
+++ b/solr/core/src/java/org/apache/solr/cluster/events/impl/CollectionsRepairEventListener.java
@@ -40,7 +40,6 @@ import org.apache.solr.cloud.api.collections.Assign;
import org.apache.solr.cluster.events.ClusterEvent;
import org.apache.solr.cluster.events.ClusterEventListener;
import org.apache.solr.cluster.events.NodesDownEvent;
-import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ReplicaCount;
import org.apache.solr.common.cloud.ReplicaPosition;
import org.apache.solr.common.util.SolrNamedThreadFactory;
@@ -168,38 +167,44 @@ public class CollectionsRepairEventListener
// collection / positions
Map<String, List<ReplicaPosition>> newPositions = new HashMap<>();
try {
- ClusterState clusterState = solrCloudManager.getClusterState();
- clusterState.forEachCollection(
- coll -> {
- // shard / number of replicas per type
- Map<String, ReplicaCount> lostReplicas = new HashMap<>();
- coll.forEachReplica(
- (shard, replica) -> {
- if (reallyLostNodes.contains(replica.getNodeName())) {
- lostReplicas
- .computeIfAbsent(shard, s -> ReplicaCount.empty())
- .increment(replica.type);
- }
- });
- Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(cc);
- lostReplicas.forEach(
- (shard, types) -> {
- Assign.AssignRequest assignRequest =
- new Assign.AssignRequestBuilder()
- .forCollection(coll.getName())
- .forShard(Collections.singletonList(shard))
- .assignReplicas(types)
- .build();
- try {
- List<ReplicaPosition> positions =
- assignStrategy.assign(solrCloudManager, assignRequest);
- newPositions.put(coll.getName(), positions);
- } catch (Exception e) {
- log.warn(
- "Exception computing positions for {}/{}: {}", coll.getName(), shard, e);
- }
- });
- });
+ // shard / number of replicas per type
+ solrCloudManager
+ .getClusterState()
+ .collectionStream()
+ .forEach(
+ coll -> {
+ // shard / number of replicas per type
+ Map<String, ReplicaCount> lostReplicas = new HashMap<>();
+ coll.forEachReplica(
+ (shard, replica) -> {
+ if (reallyLostNodes.contains(replica.getNodeName())) {
+ lostReplicas
+ .computeIfAbsent(shard, s -> ReplicaCount.empty())
+ .increment(replica.type);
+ }
+ });
+ Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(cc);
+ lostReplicas.forEach(
+ (shard, types) -> {
+ Assign.AssignRequest assignRequest =
+ new Assign.AssignRequestBuilder()
+ .forCollection(coll.getName())
+ .forShard(Collections.singletonList(shard))
+ .assignReplicas(types)
+ .build();
+ try {
+ List<ReplicaPosition> positions =
+ assignStrategy.assign(solrCloudManager, assignRequest);
+ newPositions.put(coll.getName(), positions);
+ } catch (Exception e) {
+ log.warn(
+ "Exception computing positions for {}/{}: {}",
+ coll.getName(),
+ shard,
+ e);
+ }
+ });
+ });
} catch (IOException e) {
log.warn("Exception getting cluster state", e);
return;
diff --git a/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java b/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
index 200e7b68974..0b79aa1c336 100644
--- a/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/ReindexCollectionTest.java
@@ -367,7 +367,8 @@ public class ReindexCollectionTest extends SolrCloudTestCase {
// verify that the target and checkpoint collections don't exist
cloudManager
.getClusterState()
- .forEachCollection(
+ .collectionStream()
+ .forEach(
coll -> {
assertFalse(
coll.getName() + " still exists",
diff --git a/solr/solrj-zookeeper/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java b/solr/solrj-zookeeper/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
index 186788c8914..eb2e377c3ad 100644
--- a/solr/solrj-zookeeper/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
+++ b/solr/solrj-zookeeper/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
@@ -78,18 +78,21 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
if (clusterState == null) { // zkStateReader still initializing
return;
}
- clusterState.forEachCollection(
- coll ->
- coll.forEachReplica(
- (shard, replica) -> {
- Map<String, Map<String, List<Replica>>> nodeData =
- nodeVsCollectionVsShardVsReplicaInfo.computeIfAbsent(
- replica.getNodeName(), k -> new HashMap<>());
- Map<String, List<Replica>> collData =
- nodeData.computeIfAbsent(coll.getName(), k -> new HashMap<>());
- List<Replica> replicas = collData.computeIfAbsent(shard, k -> new ArrayList<>());
- replicas.add((Replica) replica.clone());
- }));
+ clusterState
+ .collectionStream()
+ .forEach(
+ coll ->
+ coll.forEachReplica(
+ (shard, replica) -> {
+ Map<String, Map<String, List<Replica>>> nodeData =
+ nodeVsCollectionVsShardVsReplicaInfo.computeIfAbsent(
+ replica.getNodeName(), k -> new HashMap<>());
+ Map<String, List<Replica>> collData =
+ nodeData.computeIfAbsent(coll.getName(), k -> new HashMap<>());
+ List<Replica> replicas = collData.computeIfAbsent(shard, k -> new ArrayList<>());
+ replicas.add((Replica) replica.clone());
+ }));
}
@Override
diff --git a/solr/solrj-zookeeper/src/java/org/apache/solr/common/LazySolrCluster.java b/solr/solrj-zookeeper/src/java/org/apache/solr/common/LazySolrCluster.java
index 811a0b4e2e8..9b09d063ab2 100644
--- a/solr/solrj-zookeeper/src/java/org/apache/solr/common/LazySolrCluster.java
+++ b/solr/solrj-zookeeper/src/java/org/apache/solr/common/LazySolrCluster.java
@@ -170,8 +170,8 @@ public class LazySolrCluster implements SolrCluster {
public void forEachEntry(BiConsumer<String, ? super SolrCollection> fun) {
zkStateReader
.getClusterState()
- .forEachCollection(
- coll -> fun.accept(coll.getName(), _collection(coll.getName(), coll)));
+ .collectionStream()
+ .forEach(coll -> fun.accept(coll.getName(), _collection(coll.getName(), coll)));
}
@Override
diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
index ce607ac8637..6219625dbd5 100644
--- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
+++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java
@@ -425,7 +425,10 @@ public class ClusterState implements MapWriter {
/**
* Calls {@code consumer} with a resolved {@link DocCollection}s for all collections. Use this
* sparingly in case there are many collections.
+ *
+ * @deprecated see {@link #collectionStream()}
*/
+ @Deprecated
public void forEachCollection(Consumer<DocCollection> consumer) {
collectionStream().forEach(consumer);
}