sanpwc commented on code in PR #759:
URL: https://github.com/apache/ignite-3/pull/759#discussion_r857967867
##########
modules/table/src/main/java/org/apache/ignite/internal/table/distributed/TableManager.java:
##########
@@ -1317,152 +1379,153 @@ private RuntimeException convertThrowable(Throwable th) {
     }
 
     /**
-     * Sets the nodes as baseline for all tables created by the manager.
+     * Prepare the listener for handling configuration changes in raft group.
      *
-     * @param nodes New baseline nodes.
-     * @throws NodeStoppingException If an implementation stopped before the method was invoked.
+     * @param tblName Name of the table.
+     * @param partNum Number of partition.
+     * @param partId Partition unique id.
+     * @return prepare listener.
+     *
+     * @see RaftGroupEventsListener
      */
-    public void setBaseline(Set<String> nodes) throws NodeStoppingException {
-        if (!busyLock.enterBusy()) {
-            throw new NodeStoppingException();
-        }
-        try {
-            setBaselineInternal(nodes);
-        } finally {
-            busyLock.leaveBusy();
-        }
+    private RaftGroupEventsListener raftGroupEventsListener(String tblName, int partNum, String partId) {
+        return new RaftGroupEventsListener() {
+            @Override
+            public void onLeaderElected() {
+            }
+
+            @Override
+            public void onNewPeersConfigurationApplied(List<PeerId> peers) {
+                Map<ByteArray, Entry> keys = metaStorageMgr.getAll(
+                        Set.of(partAssignmentsPlannedKey(partId), partAssignmentsPendingKey(partId))).join();
+
+                Entry plannedEntry = keys.get(partAssignmentsPlannedKey(partId));
+                Entry pendingEntry = keys.get(partAssignmentsPendingKey(partId));
+
+                tablesCfg.tables().get(tblName).change(ch -> {
+                    List<List<ClusterNode>> assignments =
+                            (List<List<ClusterNode>>) ByteUtils.fromBytes(((ExtendedTableChange) ch).assignments());
+                    assignments.set(partNum, ((List<ClusterNode>) ByteUtils.fromBytes(pendingEntry.value())));
+                    ((ExtendedTableChange) ch).changeAssignments(ByteUtils.toBytes(assignments));
+                });
+
+                if (plannedEntry.value() != null) {
+                    if (!metaStorageMgr.invoke(If.iif(
+                            revision(partAssignmentsPlannedKey(partId)).eq(plannedEntry.revision()),
+                            ops(
+                                    put(partAssignmentsStableKey(partId), pendingEntry.value()),
+                                    put(partAssignmentsPendingKey(partId), plannedEntry.value()),
+                                    remove(partAssignmentsPlannedKey(partId)))
+                                    .yield(true),
+                            ops().yield(false))).join().getAsBoolean()) {
+                        onNewPeersConfigurationApplied(peers);
+                    }
+                } else {
+                    if (!metaStorageMgr.invoke(If.iif(
+                            notExists(partAssignmentsPlannedKey(partId)),
+                            ops(put(partAssignmentsStableKey(partId), pendingEntry.value()),
+                                    remove(partAssignmentsPendingKey(partId))).yield(true),
+                            ops().yield(false))).join().getAsBoolean()) {
+                        onNewPeersConfigurationApplied(peers);
+                    }
+                }
+            }
+
+            @Override
+            public void onReconfigurationError(Status status) {}
+        };
     }
 
     /**
-     * Internal method for setting a baseline.
-     *
-     * @param nodes Names of baseline nodes.
+     * Register the new meta storage listener for changes in pending partitions.
      */
-    private void setBaselineInternal(Set<String> nodes) {
-        if (nodes == null || nodes.isEmpty()) {
-            throw new IgniteException("New baseline can't be null or empty");
-        }
+    private void registerRebalanceListeners() {
+        metaStorageMgr.registerWatchByPrefix(ByteArray.fromString(PENDING_ASSIGNMENTS_PREFIX), new WatchListener() {
+            @Override
+            public boolean onUpdate(@NotNull WatchEvent evt) {
+                assert evt.single();
 
-        var currClusterMembers = new HashSet<>(baselineMgr.nodes());
+                if (evt.entryEvent().newEntry().value() == null) {
+                    return true;
+                }
 
-        var currClusterMemberNames =
-                currClusterMembers.stream().map(ClusterNode::name).collect(Collectors.toSet());
+                int part = extractPartitionNumber(evt.entryEvent().newEntry().key());
+                UUID tblId = extractTableId(evt.entryEvent().newEntry().key());
 
-        for (String nodeName : nodes) {
-            if (!currClusterMemberNames.contains(nodeName)) {
-                throw new IgniteException("Node '" + nodeName + "' not in current network cluster membership. "
-                        + " Adding not alive nodes is not supported yet.");
-            }
-        }
+                TableImpl tbl = tablesByIdVv.latest().get(tblId);
 
-        var newBaseline = currClusterMembers
-                .stream().filter(n -> nodes.contains(n.name())).collect(Collectors.toSet());
+                String grpId = partitionRaftGroupName(tblId, part);
 
-        updateAssignments(currClusterMembers);
+                Supplier<RaftGroupListener> raftGrpLsnrSupplier = () -> new PartitionListener(tblId,
+                        new VersionedRowStore(
+                                tablesByIdVv.latest().get(tblId).internalTable().storage().getOrCreatePartition(part),
+                                txManager));
 
-        if (!newBaseline.equals(currClusterMembers)) {
-            updateAssignments(newBaseline);
-        }
-    }
+                Supplier<RaftGroupEventsListener> raftGrpEvtsLsnrSupplier = () -> raftGroupEventsListener(
+                        tablesByIdVv.latest().get(tblId).name(),
+                        part,
+                        grpId);
 
-    /**
-     * Update assignments for all current tables according to input nodes list. These approach has known issues {@link
-     * Ignite#setBaseline(Set)}.
-     *
-     * @param clusterNodes Set of nodes for assignment.
-     */
-    private void updateAssignments(Set<ClusterNode> clusterNodes) {
-        var setBaselineFut = new CompletableFuture<>();
-        var changePeersQueue = new ArrayList<Supplier<CompletableFuture<Void>>>();
+                List<List<ClusterNode>> assignments = (List<List<ClusterNode>>) ByteUtils.fromBytes(
+                        ((ExtendedTableConfiguration) tablesCfg.tables().get(tbl.name())).assignments().value());
 
-        tablesCfg.tables()
-                .change(tbls -> {
-                    changePeersQueue.clear();
+                List<ClusterNode> newPeers = ((List<ClusterNode>) ByteUtils.fromBytes(evt.entryEvent().newEntry().value()));
 
-                    for (int i = 0; i < tbls.size(); i++) {
-                        tbls.createOrUpdate(tbls.get(i).name(), changeX -> {
-                            ExtendedTableChange change = (ExtendedTableChange) changeX;
-                            byte[] currAssignments = change.assignments();
+                RaftGroupService raftGrpSvc = null;
 
-                            List<List<ClusterNode>> recalculatedAssignments = AffinityUtils.calculateAssignments(
-                                    clusterNodes,
-                                    change.partitions(),
-                                    change.replicas());
+                try {
+                    var deltaPeers = newPeers.stream()
+                            .filter(p -> !assignments.get(part).contains(p))
+                            .collect(Collectors.toList());
 
-                            if (!recalculatedAssignments.equals(ByteUtils.fromBytes(currAssignments))) {
-                                change.changeAssignments(ByteUtils.toBytes(recalculatedAssignments));
+                    raftGrpSvc = raftMgr.updateRaftGroup(grpId, assignments.get(part), deltaPeers, raftGrpLsnrSupplier,
+                            raftGrpEvtsLsnrSupplier).join();
 
-                                changePeersQueue.add(() ->
-                                        updateRaftTopology(
-                                                (List<List<ClusterNode>>) ByteUtils.fromBytes(currAssignments),
-                                                recalculatedAssignments,
-                                                change.id()));
-                            }
-                        });
-                    }
-                })
-                .thenCompose((v) -> {
-                    CompletableFuture<?>[] changePeersFutures = new CompletableFuture<?>[changePeersQueue.size()];
+                    raftGrpSvc.refreshLeader().join();
 
-                    int i = 0;
+                    if (new Peer(raftGrpSvc.clusterService().topologyService().localMember().address()).equals(raftGrpSvc.leader())) {
Review Comment:
> How can we be sure that the leader won't change just after?

It's actually not a problem. In the worst case we will have attempts to start several rebalances to the same set of target peers, because each onLeaderElected() will also start one. @kgusakov As far as I remember, we planned to propagate the term to changePeers, so that rebalances initiated by a node that is no longer the leader can be skipped.
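
To make the term idea concrete, here is a minimal sketch of how such a guard could look. Every name in it (`RaftGroup`, `currentTerm()`, `isLocalNodeLeader()`, `changePeersAsync()`) is an assumption made for the example, not the actual Ignite 3 API:

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;

/** Illustrative sketch of term-guarded rebalance initiation; not the real API. */
public class TermGuardedRebalance {
    /** Minimal stand-in for a raft group client. */
    interface RaftGroup {
        long currentTerm();                // term observed by this node
        boolean isLocalNodeLeader();
        CompletableFuture<Void> changePeersAsync(List<String> targetPeers, long expectedTerm);
    }

    static void tryStartRebalance(RaftGroup group, List<String> targetPeers) {
        // Capture the term at the moment this node still believes it is the leader.
        long observedTerm = group.currentTerm();

        if (group.isLocalNodeLeader()) {
            // The term travels with the changePeers request; a group whose term has
            // already moved on rejects it, so a rebalance initiated by a node that
            // is no longer the leader is dropped instead of being applied.
            group.changePeersAsync(targetPeers, observedTerm)
                    .exceptionally(e -> null); // stale-term rejection is benign: the new
                                               // leader's onLeaderElected() retries anyway
        }
    }
}
```

As noted above, correctness does not depend on this guard, since duplicate rebalances target the same peer set; the term check merely trims redundant attempts.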
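
On the diff itself: the recursive onNewPeersConfigurationApplied(peers) call is effectively a retry loop around a conditional meta storage update. invoke() applies all ops atomically only if the guard (the planned key's revision, or its absence) still holds, and yields false when a concurrent writer got there first. A generic sketch of that pattern, where attempt() is a hypothetical stand-in for the getAll() + invoke(If.iif(...)) sequence in the real code:

```java
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

/** Sketch only: attempt() abstracts a revision-guarded conditional invoke(). */
public class GuardedUpdateRetry {
    static void applyWithRetry(Supplier<CompletableFuture<Boolean>> attempt) {
        // Each attempt re-reads current state and issues a conditional update;
        // a false result means the revision guard no longer held, so we retry.
        // This has the same effect as the recursive call in the listener above.
        while (!attempt.get().join()) {
            // A concurrent writer won the race; re-read and try again.
        }
    }
}
```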
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]