This is an automated email from the ASF dual-hosted git repository.
rickyma pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-uniffle.git
The following commit(s) were added to refs/heads/master by this push:
new 194481233 [#1401] feat(dashboard): Support adding shuffle servers to
the blacklist on the page (#1897)
194481233 is described below
commit 19448123351c82fdcbaf67ca38a01c673e7a5f62
Author: yl09099 <[email protected]>
AuthorDate: Thu Aug 1 11:07:01 2024 +0800
[#1401] feat(dashboard): Support adding shuffle servers to the blacklist on
the page (#1897)
### What changes were proposed in this pull request?
Add support for adding shuffle servers to the blacklist directly from the dashboard page.

### Why are the changes needed?
Fix: #1401
### Does this PR introduce _any_ user-facing change?
Yes.
### How was this patch tested?
Manually tested by clicking through the dashboard page.
---
.../apache/uniffle/coordinator/ClusterManager.java | 5 +-
.../uniffle/coordinator/SimpleClusterManager.java | 111 ++++++++++++++++-----
.../coordinator/web/resource/ServerResource.java | 44 +++++---
.../coordinator/SimpleClusterManagerTest.java | 24 ++---
dashboard/src/main/webapp/src/api/api.js | 5 +
.../src/pages/serverstatus/ExcludeNodeList.vue | 106 +++++++++++++++++---
.../webapp/src/pages/serverstatus/NodeListPage.vue | 38 +------
7 files changed, 234 insertions(+), 99 deletions(-)
diff --git
a/coordinator/src/main/java/org/apache/uniffle/coordinator/ClusterManager.java
b/coordinator/src/main/java/org/apache/uniffle/coordinator/ClusterManager.java
index 37462685f..d4a80122d 100644
---
a/coordinator/src/main/java/org/apache/uniffle/coordinator/ClusterManager.java
+++
b/coordinator/src/main/java/org/apache/uniffle/coordinator/ClusterManager.java
@@ -61,7 +61,7 @@ public interface ClusterManager extends Closeable {
*/
List<ServerNode> getUnhealthyServerList();
- Set<String> getExcludeNodes();
+ Set<String> getExcludedNodes();
/** @return number of server nodes in the cluster */
int getNodesNum();
@@ -81,4 +81,7 @@ public interface ClusterManager extends Closeable {
void decommission(String serverId);
void cancelDecommission(String serverId);
+
+ /** Add blacklist. */
+ boolean addExcludedNodes(List<String> excludedNodeIds);
}
diff --git
a/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
b/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
index b03b3110f..b0b16b424 100644
---
a/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
+++
b/coordinator/src/main/java/org/apache/uniffle/coordinator/SimpleClusterManager.java
@@ -18,10 +18,12 @@
package org.apache.uniffle.coordinator;
import java.io.BufferedReader;
+import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
@@ -60,10 +62,9 @@ import
org.apache.uniffle.coordinator.metric.CoordinatorMetrics;
public class SimpleClusterManager implements ClusterManager {
private static final Logger LOG =
LoggerFactory.getLogger(SimpleClusterManager.class);
-
private final Map<String, ServerNode> servers = JavaUtils.newConcurrentMap();
private final Cache<ServerNode, ShuffleServerInternalGrpcClient> clientCache;
- private Set<String> excludeNodes = Sets.newConcurrentHashSet();
+ private Set<String> excludedNodes = Sets.newConcurrentHashSet();
/** ServerNode whose heartbeat is lost */
Set<ServerNode> lostNodes = Sets.newHashSet();
/** Unhealthy ServerNode */
@@ -84,6 +85,7 @@ public class SimpleClusterManager implements ClusterManager {
private boolean startupSilentPeriodEnabled;
private long startupSilentPeriodDurationMs;
private boolean readyForServe = false;
+ private String excludedNodesPath;
public SimpleClusterManager(CoordinatorConf conf, Configuration hadoopConf)
throws Exception {
this.shuffleNodesMax =
@@ -104,17 +106,17 @@ public class SimpleClusterManager implements
ClusterManager {
scheduledExecutorService.scheduleAtFixedRate(
this::nodesCheck, heartbeatTimeout / 3, heartbeatTimeout / 3,
TimeUnit.MILLISECONDS);
- String excludeNodesPath =
+ this.excludedNodesPath =
conf.getString(CoordinatorConf.COORDINATOR_EXCLUDE_NODES_FILE_PATH,
"");
- if (!StringUtils.isEmpty(excludeNodesPath)) {
+ if (!StringUtils.isEmpty(excludedNodesPath)) {
this.hadoopFileSystem =
- HadoopFilesystemProvider.getFilesystem(new Path(excludeNodesPath),
hadoopConf);
+ HadoopFilesystemProvider.getFilesystem(new Path(excludedNodesPath),
hadoopConf);
long updateNodesInterval =
conf.getLong(CoordinatorConf.COORDINATOR_EXCLUDE_NODES_CHECK_INTERVAL);
checkNodesExecutorService =
-
ThreadUtils.getDaemonSingleThreadScheduledExecutor("UpdateExcludeNodes");
+
ThreadUtils.getDaemonSingleThreadScheduledExecutor("UpdateExcludedNodes");
checkNodesExecutorService.scheduleAtFixedRate(
- () -> updateExcludeNodes(excludeNodesPath),
+ () -> updateExcludedNodes(excludedNodesPath),
0,
updateNodesInterval,
TimeUnit.MILLISECONDS);
@@ -179,33 +181,37 @@ public class SimpleClusterManager implements
ClusterManager {
nodesCheck();
}
- private void updateExcludeNodes(String path) {
- int originalExcludeNodesNumber = excludeNodes.size();
+ private synchronized void updateExcludedNodes(String path) {
+ int originalExcludedNodesNumber = excludedNodes.size();
try {
Path hadoopPath = new Path(path);
FileStatus fileStatus = hadoopFileSystem.getFileStatus(hadoopPath);
if (fileStatus != null && fileStatus.isFile()) {
long latestModificationTime = fileStatus.getModificationTime();
if (excludeLastModify.get() != latestModificationTime) {
- parseExcludeNodesFile(hadoopFileSystem.open(hadoopPath));
+ excludedNodes =
parseExcludedNodesFile(hadoopFileSystem.open(hadoopPath));
+ LOG.info(
+ "Updated exclude nodes and {} nodes were marked as excluded
nodes",
+ excludedNodes.size());
+ // update exclude nodes and last modify time
excludeLastModify.set(latestModificationTime);
}
} else {
- excludeNodes = Sets.newConcurrentHashSet();
+ excludedNodes = Sets.newConcurrentHashSet();
}
} catch (FileNotFoundException fileNotFoundException) {
- excludeNodes = Sets.newConcurrentHashSet();
+ excludedNodes = Sets.newConcurrentHashSet();
} catch (Exception e) {
LOG.warn("Error when updating exclude nodes, the exclude nodes file
path: {}.", path, e);
}
- int newlyExcludeNodesNumber = excludeNodes.size();
- if (newlyExcludeNodesNumber != originalExcludeNodesNumber) {
- LOG.info("Exclude nodes number: {}, nodes list: {}",
newlyExcludeNodesNumber, excludeNodes);
+ int newlyExcludedNodesNumber = excludedNodes.size();
+ if (newlyExcludedNodesNumber != originalExcludedNodesNumber) {
+ LOG.info("Exclude nodes number: {}, nodes list: {}",
newlyExcludedNodesNumber, excludedNodes);
}
- CoordinatorMetrics.gaugeExcludeServerNum.set(excludeNodes.size());
+ CoordinatorMetrics.gaugeExcludeServerNum.set(excludedNodes.size());
}
- private void parseExcludeNodesFile(DataInputStream fsDataInputStream) throws
IOException {
+ private Set<String> parseExcludedNodesFile(DataInputStream
fsDataInputStream) throws IOException {
Set<String> nodes = Sets.newConcurrentHashSet();
try (BufferedReader br =
new BufferedReader(new InputStreamReader(fsDataInputStream,
StandardCharsets.UTF_8))) {
@@ -216,10 +222,52 @@ public class SimpleClusterManager implements
ClusterManager {
}
}
}
- // update exclude nodes and last modify time
- excludeNodes = nodes;
- LOG.info(
- "Updated exclude nodes and {} nodes were marked as exclude nodes",
excludeNodes.size());
+ return nodes;
+ }
+
+ private void writeExcludedNodes2File(List<String> excludedNodes) throws
IOException {
+ if (hadoopFileSystem == null) {
+ return;
+ }
+ Path hadoopPath = new Path(excludedNodesPath);
+ FileStatus fileStatus = hadoopFileSystem.getFileStatus(hadoopPath);
+ if (fileStatus != null && fileStatus.isFile()) {
+ String tempExcludedNodesPath = excludedNodesPath.concat("_tmp");
+ Path tmpHadoopPath = new Path(tempExcludedNodesPath);
+ if (hadoopFileSystem.exists(tmpHadoopPath)) {
+ hadoopFileSystem.delete(tmpHadoopPath);
+ }
+ try (BufferedWriter bufferedWriter =
+ new BufferedWriter(
+ new OutputStreamWriter(
+ hadoopFileSystem.create(tmpHadoopPath, true),
StandardCharsets.UTF_8))) {
+ for (String excludedNode : excludedNodes) {
+ bufferedWriter.write(excludedNode);
+ bufferedWriter.newLine();
+ }
+ }
+ hadoopFileSystem.delete(hadoopPath);
+ hadoopFileSystem.rename(tmpHadoopPath, hadoopPath);
+ }
+ }
+
+ private synchronized boolean putInExcludedNodesFile(List<String>
excludedNodes)
+ throws IOException {
+ if (hadoopFileSystem == null) {
+ return false;
+ }
+ Path hadoopPath = new Path(excludedNodesPath);
+ // Obtains the existing excluded node.
+ Set<String> alreadyExistExcludedNodes =
+ parseExcludedNodesFile(hadoopFileSystem.open(hadoopPath));
+ List<String> newAddExcludedNodes =
+ excludedNodes.stream()
+ .filter(node -> !alreadyExistExcludedNodes.contains(node))
+ .collect(Collectors.toList());
+ newAddExcludedNodes.addAll(alreadyExistExcludedNodes);
+ // Writes to the new excluded node.
+ writeExcludedNodes2File(newAddExcludedNodes);
+ return true;
}
@Override
@@ -253,7 +301,7 @@ public class SimpleClusterManager implements ClusterManager
{
if (!ServerStatus.ACTIVE.equals(node.getStatus())) {
continue;
}
- if (!excludeNodes.contains(node.getId()) &&
node.getTags().containsAll(requiredTags)) {
+ if (!excludedNodes.contains(node.getId()) &&
node.getTags().containsAll(requiredTags)) {
availableNodes.add(node);
}
}
@@ -279,7 +327,7 @@ public class SimpleClusterManager implements ClusterManager
{
if (faultyServerIds != null && faultyServerIds.contains(node.getId())) {
return false;
}
- return !excludeNodes.contains(node.getId()) &&
node.getTags().containsAll(requiredTags);
+ return !excludedNodes.contains(node.getId()) &&
node.getTags().containsAll(requiredTags);
}
@Override
@@ -293,8 +341,8 @@ public class SimpleClusterManager implements ClusterManager
{
}
@Override
- public Set<String> getExcludeNodes() {
- return excludeNodes;
+ public Set<String> getExcludedNodes() {
+ return excludedNodes;
}
public Map<String, Set<ServerNode>> getTagToNodes() {
@@ -319,6 +367,19 @@ public class SimpleClusterManager implements
ClusterManager {
return false;
}
+ /** Add blacklist. */
+ @Override
+ public boolean addExcludedNodes(List<String> excludedNodeIds) {
+ try {
+ boolean successFlag = putInExcludedNodesFile(excludedNodeIds);
+ excludedNodes.addAll(excludedNodeIds);
+ return successFlag;
+ } catch (IOException e) {
+ LOG.warn("Because {}, failed to add blacklist.", e.getMessage());
+ return false;
+ }
+ }
+
@VisibleForTesting
public void clear() {
servers.clear();
diff --git
a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/resource/ServerResource.java
b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/resource/ServerResource.java
index c176f065b..8db72a19e 100644
---
a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/resource/ServerResource.java
+++
b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/resource/ServerResource.java
@@ -28,6 +28,7 @@ import java.util.stream.Stream;
import javax.servlet.ServletContext;
import org.apache.commons.collections4.CollectionUtils;
+import org.apache.hbase.thirdparty.javax.ws.rs.Consumes;
import org.apache.hbase.thirdparty.javax.ws.rs.DELETE;
import org.apache.hbase.thirdparty.javax.ws.rs.GET;
import org.apache.hbase.thirdparty.javax.ws.rs.POST;
@@ -78,20 +79,21 @@ public class ServerResource extends BaseResource {
serverList = clusterManager.getLostServerList();
} else if (ServerStatus.EXCLUDED.name().equalsIgnoreCase(status)) {
serverList =
- clusterManager.getExcludeNodes().stream()
- .map(excludeNodeStr -> new ServerNode(excludeNodeStr))
+ clusterManager.getExcludedNodes().stream()
+ .map(ServerNode::new)
.collect(Collectors.toList());
} else {
- serverList = clusterManager.list();
+ List<ServerNode> serverAllList = clusterManager.list();
+ serverList =
+ serverAllList.stream()
+ .filter(node ->
!clusterManager.getExcludedNodes().contains(node.getId()))
+ .collect(Collectors.toList());
}
serverList =
serverList.stream()
.filter(
server -> {
- if (status != null &&
!server.getStatus().name().equalsIgnoreCase(status)) {
- return false;
- }
- return true;
+ return status == null ||
server.getStatus().name().equalsIgnoreCase(status);
})
.collect(Collectors.toList());
serverList.sort(Comparator.comparing(ServerNode::getId));
@@ -186,13 +188,18 @@ public class ServerResource extends BaseResource {
return execute(
() -> {
ClusterManager clusterManager = getClusterManager();
+ List<ServerNode> serverAllList = clusterManager.list();
List<ServerNode> excludeNodes =
- clusterManager.getExcludeNodes().stream()
- .map(exclude -> new ServerNode(exclude))
+ clusterManager.getExcludedNodes().stream()
+ .map(ServerNode::new)
+ .collect(Collectors.toList());
+ List<ServerNode> activeServerList =
+ serverAllList.stream()
+ .filter(node ->
!clusterManager.getExcludedNodes().contains(node.getId()))
.collect(Collectors.toList());
- Map<String, Integer> stringIntegerHash =
+ Map<String, Integer> serverStatusNum =
Stream.of(
- clusterManager.list(),
+ activeServerList,
clusterManager.getLostServerList(),
excludeNodes,
clusterManager.getUnhealthyServerList())
@@ -201,13 +208,13 @@ public class ServerResource extends BaseResource {
.collect(
Collectors.groupingBy(
n -> n.getStatus().name(), Collectors.reducing(0, n
-> 1, Integer::sum)));
- return stringIntegerHash;
+ return serverStatusNum;
});
}
@DELETE
@Path("/deleteServer")
- public Response<String> deleteLostedServer(@QueryParam("serverId") String
serverId) {
+ public Response<String>
handleDeleteLostServerRequest(@QueryParam("serverId") String serverId) {
ClusterManager clusterManager = getClusterManager();
if (clusterManager.deleteLostServerById(serverId)) {
return Response.success("success");
@@ -215,6 +222,17 @@ public class ServerResource extends BaseResource {
return Response.fail("fail");
}
+ @POST
+ @Path("/addExcludeNodes")
+ @Consumes(MediaType.APPLICATION_JSON)
+ public Response<String> handleAddExcludedNodesRequest(Map<String,
List<String>> excludeNodes) {
+ ClusterManager clusterManager = getClusterManager();
+ if (clusterManager.addExcludedNodes(excludeNodes.get("excludeNodes"))) {
+ return Response.success("success");
+ }
+ return Response.fail("fail");
+ }
+
private ClusterManager getClusterManager() {
return (ClusterManager)
servletContext.getAttribute(ClusterManager.class.getCanonicalName());
}
diff --git
a/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
b/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
index 0f2411b88..db72f633e 100644
---
a/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
+++
b/coordinator/src/test/java/org/apache/uniffle/coordinator/SimpleClusterManagerTest.java
@@ -439,11 +439,11 @@ public class SimpleClusterManagerTest {
scm.add(new ServerNode("node2-1999", "ip", 0, 100L, 50L, 20, 10,
testTags));
scm.add(new ServerNode("node3-1999", "ip", 0, 100L, 50L, 20, 10,
testTags));
scm.add(new ServerNode("node4-1999", "ip", 0, 100L, 50L, 20, 10,
testTags));
- assertTrue(scm.getExcludeNodes().isEmpty());
+ assertTrue(scm.getExcludedNodes().isEmpty());
final Set<String> nodes = Sets.newHashSet("node1-1999", "node2-1999");
writeExcludeHosts(excludeNodesPath, nodes);
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().equals(nodes));
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().equals(nodes));
List<ServerNode> availableNodes = scm.getServerList(testTags);
assertEquals(2, availableNodes.size());
Set<String> remainNodes = Sets.newHashSet("node3-1999", "node4-1999");
@@ -452,8 +452,8 @@ public class SimpleClusterManagerTest {
final Set<String> nodes2 = Sets.newHashSet("node3-1999", "node4-1999");
writeExcludeHosts(excludeNodesPath, nodes2);
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().equals(nodes2));
- assertEquals(nodes2, scm.getExcludeNodes());
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().equals(nodes2));
+ assertEquals(nodes2, scm.getExcludedNodes());
final Set<String> comments =
Sets.newHashSet(
@@ -464,26 +464,26 @@ public class SimpleClusterManagerTest {
"# The content of the third comment");
final Set<String> noComments = Sets.newHashSet("node3-1999",
"node4-1999");
writeExcludeHosts(excludeNodesPath, comments);
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().equals(noComments));
- assertEquals(noComments, scm.getExcludeNodes());
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().equals(noComments));
+ assertEquals(noComments, scm.getExcludedNodes());
- Set<String> excludeNodes = scm.getExcludeNodes();
+ Set<String> excludeNodes = scm.getExcludedNodes();
Thread.sleep(3000);
// excludeNodes shouldn't be updated if file has no change
- assertEquals(excludeNodes, scm.getExcludeNodes());
+ assertEquals(excludeNodes, scm.getExcludedNodes());
writeExcludeHosts(excludeNodesPath, Sets.newHashSet());
// excludeNodes is an empty file, set should be empty
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().isEmpty());
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().isEmpty());
final Set<String> nodes3 = Sets.newHashSet("node1-1999");
writeExcludeHosts(excludeNodesPath, nodes3);
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().equals(nodes3));
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().equals(nodes3));
File blacklistFile = new File(excludeNodesPath);
assertTrue(blacklistFile.delete());
// excludeNodes is deleted, set should be empty
- await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludeNodes().isEmpty());
+ await().atMost(3, TimeUnit.SECONDS).until(() ->
scm.getExcludedNodes().isEmpty());
remainNodes = Sets.newHashSet("node1-1999", "node2-1999", "node3-1999",
"node4-1999");
availableNodes = scm.getServerList(testTags);
@@ -515,7 +515,7 @@ public class SimpleClusterManagerTest {
scm.add(new ServerNode("node3-1999", "ip", 0, 100L, 50L, 20, 10,
testTags));
scm.add(new ServerNode("node4-1999", "ip", 0, 100L, 50L, 20, 10,
testTags));
assertEquals(4, scm.getNodesNum());
- assertEquals(2, scm.getExcludeNodes().size());
+ assertEquals(2, scm.getExcludedNodes().size());
}
File blacklistFile = new File(excludeNodesPath);
assertTrue(blacklistFile.delete());
diff --git a/dashboard/src/main/webapp/src/api/api.js
b/dashboard/src/main/webapp/src/api/api.js
index 928be9afb..eb4755183 100644
--- a/dashboard/src/main/webapp/src/api/api.js
+++ b/dashboard/src/main/webapp/src/api/api.js
@@ -89,6 +89,11 @@ export function getShuffleExcludeNodes(params, headers) {
return http.get('/server/nodes?status=excluded', params, headers, 0)
}
+// Create an interface for add blacklist
+export function addShuffleExcludeNodes(params, headers) {
+ return http.post('/server/addExcludeNodes', params, headers, 0)
+}
+
// Total number of interfaces for new App
export function getAppTotal(params, headers) {
return http.get('/app/total', params, headers, 0)
diff --git
a/dashboard/src/main/webapp/src/pages/serverstatus/ExcludeNodeList.vue
b/dashboard/src/main/webapp/src/pages/serverstatus/ExcludeNodeList.vue
index 2b3a5c5ad..c1b5fa0af 100644
--- a/dashboard/src/main/webapp/src/pages/serverstatus/ExcludeNodeList.vue
+++ b/dashboard/src/main/webapp/src/pages/serverstatus/ExcludeNodeList.vue
@@ -17,21 +17,50 @@
<template>
<div>
- <el-table
- :data="pageData.tableData"
- height="550"
- style="width: 100%"
- :default-sort="sortColumn"
- @sort-change="sortChangeEvent"
- >
- <el-table-column prop="id" label="ExcludeNodeId" min-width="180"
:sortable="true" />
- </el-table>
+ <div style="text-align: right">
+ <el-button type="primary" @click="dialogFormVisible = true"> Add Node
</el-button>
+ </div>
+ <div>
+ <el-table
+ :data="pageData.tableData"
+ height="550"
+ style="width: 100%"
+ :default-sort="sortColumn"
+ @sort-change="sortChangeEvent"
+ >
+ <el-table-column prop="id" label="ExcludeNodeId" min-width="180"
:sortable="true" />
+ </el-table>
+ <el-dialog
+ v-model="dialogFormVisible"
+ title="Please enter the server id list to be excluded:"
+ class="dialog-wrapper"
+ >
+ <el-form>
+ <el-form-item :label-width="formLabelWidth">
+ <el-input
+ v-model="textarea"
+ class="textarea-wrapper"
+ :rows="10"
+ type="textarea"
+ placeholder="Please input"
+ />
+ </el-form-item>
+ </el-form>
+ <template #footer>
+ <div class="dialog-footer">
+ <el-button @click="dialogFormVisible = false">Cancel</el-button>
+ <el-button type="primary" @click="confirmAddHandler"> Confirm
</el-button>
+ </div>
+ </template>
+ </el-dialog>
+ </div>
</div>
</template>
<script>
-import { onMounted, reactive } from 'vue'
-import { getShuffleExcludeNodes } from '@/api/api'
+import { onMounted, reactive, ref, inject } from 'vue'
+import { addShuffleExcludeNodes, getShuffleExcludeNodes } from '@/api/api'
import { useCurrentServerStore } from '@/store/useCurrentServerStore'
+import { ElMessage } from 'element-plus'
export default {
setup() {
@@ -44,11 +73,39 @@ export default {
})
const currentServerStore = useCurrentServerStore()
+ const dialogFormVisible = ref(false)
+ const formLabelWidth = '10%'
+ const textarea = ref('')
+
+ /**
+ * Get the callback method of the parent page and update the number of
servers on the page.
+ */
+ const updateTotalPage = inject('updateTotalPage')
+
async function getShuffleExcludeNodesPage() {
const res = await getShuffleExcludeNodes()
pageData.tableData = res.data.data
}
+ async function addShuffleExcludeNodesPage() {
+ try {
+ const excludeNodes = textarea.value.split('\n').map((item) =>
item.trim())
+ const excludeNodesObj = { excludeNodes }
+ const res = await addShuffleExcludeNodes(excludeNodesObj)
+ if (res.status >= 200 && res.status < 300) {
+ if (res.data.data === 'success') {
+ ElMessage.success('Add successfully.')
+ } else {
+ ElMessage.error('Add failed.')
+ }
+ } else {
+ ElMessage.error('Failed to add due to server bad.')
+ }
+ } catch (err) {
+ ElMessage.error('Failed to add due to network exception.')
+ }
+ }
+
// The system obtains data from global variables and requests the
interface to obtain new data after data changes.
currentServerStore.$subscribe((mutable, state) => {
if (state.currentServer) {
@@ -70,8 +127,33 @@ export default {
}
sortColumn[sortInfo.prop] = sortInfo.order
}
+ const confirmAddHandler = () => {
+ dialogFormVisible.value = false
+ addShuffleExcludeNodesPage()
+ // Refreshing the number of blacklists.
+ updateTotalPage()
+ // Refreshing the Blacklist list.
+ getShuffleExcludeNodesPage()
+ }
- return { pageData, sortColumn, sortChangeEvent }
+ return {
+ pageData,
+ sortColumn,
+ sortChangeEvent,
+ confirmAddHandler,
+ dialogFormVisible,
+ formLabelWidth,
+ textarea
+ }
}
}
</script>
+
+<style>
+.textarea-wrapper {
+ width: 90%;
+}
+.dialog-wrapper {
+ width: 50%;
+}
+</style>
diff --git a/dashboard/src/main/webapp/src/pages/serverstatus/NodeListPage.vue
b/dashboard/src/main/webapp/src/pages/serverstatus/NodeListPage.vue
index 7c1ff41a8..db323ab98 100644
--- a/dashboard/src/main/webapp/src/pages/serverstatus/NodeListPage.vue
+++ b/dashboard/src/main/webapp/src/pages/serverstatus/NodeListPage.vue
@@ -131,25 +131,7 @@ export default {
const router = useRouter()
const currentServerStore = useCurrentServerStore()
const sortColumn = reactive({})
- const listPageData = reactive({
- tableData: [
- {
- id: '',
- ip: '',
- grpcPort: 0,
- nettyPort: 0,
- usedMemory: 0,
- preAllocatedMemory: 0,
- availableMemory: 0,
- eventNumInFlush: 0,
- tags: '',
- status: '',
- registrationTime: '',
- timestamp: '',
- jettyPort: 0
- }
- ]
- })
+ const listPageData = reactive({ tableData: [] })
const isShowRemove = ref(false)
async function deleteLostServer(row) {
try {
@@ -281,23 +263,7 @@ export default {
const loadPageData = () => {
isShowRemove.value = false
- listPageData.tableData = [
- {
- id: '',
- ip: '',
- grpcPort: 0,
- nettyPort: 0,
- usedMemory: 0,
- preAllocatedMemory: 0,
- availableMemory: 0,
- eventNumInFlush: 0,
- tags: '',
- status: '',
- registrationTime: '',
- timestamp: '',
- jettyPort: 0
- }
- ]
+ listPageData.tableData = []
if (router.currentRoute.value.name === 'activeNodeList') {
getShuffleActiveNodesPage()
} else if (router.currentRoute.value.name === 'decommissioningNodeList')
{