pivotal-jbarrett commented on a change in pull request #6359:
URL: https://github.com/apache/geode/pull/6359#discussion_r622422397



##########
File path: geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
##########
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.cluster;
+
+import static org.apache.geode.redis.internal.RedisConstants.ERROR_UNKNOWN_CLUSTER_SUBCOMMAND;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_REGION_BUCKETS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS_PER_BUCKET;
+import static org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction.MemberBuckets;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.partition.PartitionMemberInfo;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.partition.PartitionRegionInfo;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction;
+import org.apache.geode.redis.internal.data.RedisData;
+import org.apache.geode.redis.internal.data.RedisKey;
+import org.apache.geode.redis.internal.executor.AbstractExecutor;
+import org.apache.geode.redis.internal.executor.RedisResponse;
+import org.apache.geode.redis.internal.netty.Command;
+import org.apache.geode.redis.internal.netty.ExecutionHandlerContext;
+
+public class ClusterExecutor extends AbstractExecutor {
+
+  @Override
+  public RedisResponse executeCommand(Command command, ExecutionHandlerContext context) {
+
+    List<byte[]> args = command.getProcessedCommand();
+    String subCommand = new String(args.get(1));
+
+    switch (subCommand.toLowerCase()) {
+      case "info":
+        return getInfo(context);
+      case "nodes":
+        return getNodes(context);
+      case "slots":
+        return getSlots(context);
+      default: {
+        return RedisResponse.error(
+            String.format(ERROR_UNKNOWN_CLUSTER_SUBCOMMAND, subCommand));
+      }
+    }
+  }
+
+  private RedisResponse getSlots(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+
+    Map<Integer, String> primaryBucketToMemberMap = new HashMap<>();
+    Map<String, Pair<String, Integer>> memberToHostPortMap = new TreeMap<>();
+    int retrievedBucketCount = 0;
+
+    for (MemberBuckets m : memberBuckets) {
+      memberToHostPortMap.put(m.getMemberId(), Pair.of(m.getHostAddress(), m.getPort()));
+      for (Integer id : m.getPrimaryBucketIds()) {
+        primaryBucketToMemberMap.put(id, m.getMemberId());
+        retrievedBucketCount++;
+      }
+    }
+
+    if (retrievedBucketCount != REDIS_REGION_BUCKETS) {
+      return RedisResponse.error("Internal error: bucket count mismatch " + retrievedBucketCount
+          + " != " + REDIS_REGION_BUCKETS);
+    }
+
+    int index = 0;
+    List<Object> slots = new ArrayList<>();
+
+    for (int i = 0; i < REDIS_REGION_BUCKETS; i++) {
+      Pair<String, Integer> primaryHostAndPort =
+          memberToHostPortMap.get(primaryBucketToMemberMap.get(i));
+
+      List<Object> entry = new ArrayList<>();
+      entry.add(index * REDIS_SLOTS_PER_BUCKET);
+      entry.add(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      entry.add(Arrays.asList(primaryHostAndPort.getLeft(), primaryHostAndPort.getRight()));
+
+      slots.add(entry);
+      index++;
+    }
+
+    return RedisResponse.array(slots);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MemberBuckets> getMemberBuckets(
+      ExecutionHandlerContext ctx) {
+    Region<RedisKey, RedisData> dataRegion = ctx.getRegionProvider().getDataRegion();
+
+    // Really only need this in situations where the cluster is empty and no data has been
+    // added yet.
+    PartitionRegionHelper.assignBucketsToPartitions(dataRegion);
+
+    Set<DistributedMember> membersWithDataRegion = new HashSet<>();
+    for (PartitionMemberInfo memberInfo : getRegionMembers(ctx)) {
+      membersWithDataRegion.add(memberInfo.getDistributedMember());
+    }
+
+    ResultCollector<MemberBuckets, List<MemberBuckets>> resultCollector =
+        FunctionService.onMembers(membersWithDataRegion).execute(BucketInfoRetrievalFunction.ID);
+
+    return resultCollector.getResult();
+  }
+
+  /**
+   * The format being produced is something like this:
+   *
+   * <pre>
+   * 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002 master - 0 1426238316232 2 connected 5461-10922
+   * 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003 master - 0 1426238318243 3 connected 10923-16383
+   * e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001 myself,master - 0 0 1 connected 0-5460
+   * </pre>
+   *
+   * Note that there are no 'slave' entries since Geode, unlike Redis, does not host all secondary
+   * data on members separate from the primaries. The cluster port is provided only for consistency
+   * with the format of the output.
+   */
+  private RedisResponse getNodes(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+    String memberId = ctx.getMemberName();
+
+    StringBuilder response = new StringBuilder();
+    for (MemberBuckets m : memberBuckets) {
+      response.append(String.format("%s %s:%d@%d master",
+          m.getMemberId(), m.getHostAddress(), m.getPort(), m.getPort()));
+
+      if (m.getMemberId().equals(memberId)) {
+        response.append(",myself");
+      }
+      response.append(" - 0 0 1 connected");
+
+      for (Integer index : m.getPrimaryBucketIds()) {
+        response.append(" ");
+        response.append(index * REDIS_SLOTS_PER_BUCKET);
+        response.append("-");
+        response.append(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      }
+
+      response.append("\n");
+    }
+
+    return RedisResponse.bulkString(response.toString());
+  }
+
+
+  private RedisResponse getInfo(ExecutionHandlerContext ctx) {
+    int memberCount = getRegionMembers(ctx).size();
+
+    return RedisResponse.bulkString(
+        "cluster_state:ok\r\n"
+            + "cluster_slots_assigned:" + REDIS_SLOTS + "\r\n"
+            + "cluster_slots_ok:" + REDIS_SLOTS + "\r\n"

Review comment:
       This optimization should include some way of sharing hostname and port information for all the Redis endpoints. Perhaps it makes sense for this to go into a replicated Redis metadata region.
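
For illustration only, here is a minimal sketch of what publishing each member's endpoint into such a replicated region could look like. The region name "RedisMemberInfo", the "HostPort" value type, and the "publishEndpoint" helper are all hypothetical and not part of this PR:

```java
import java.io.Serializable;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class RedisMemberInfoSketch {

  // Hypothetical serializable value holding one member's Redis endpoint.
  public static class HostPort implements Serializable {
    final String host;
    final int port;

    HostPort(String host, int port) {
      this.host = host;
      this.port = port;
    }
  }

  // Hypothetical helper, called once per server after the Redis port is bound.
  public static Region<String, HostPort> publishEndpoint(Cache cache,
      String memberId, String host, int redisPort) {
    // A REPLICATE region keeps a full copy of the metadata on every member,
    // so CLUSTER SLOTS/NODES could read the host:port pairs locally instead
    // of fanning out a function call on each request.
    Region<String, HostPort> metadata = cache
        .<String, HostPort>createRegionFactory(RegionShortcut.REPLICATE)
        .create("RedisMemberInfo");

    metadata.put(memberId, new HostPort(host, redisPort));
    return metadata;
  }
}
```

Whether the entries are published eagerly at startup or lazily on the first CLUSTER command is an open question; the sketch assumes eager publication.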



