pivotal-jbarrett commented on a change in pull request #6359:
URL: https://github.com/apache/geode/pull/6359#discussion_r622421091



##########
File path: 
geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
##########
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.cluster;
+
+import static 
org.apache.geode.redis.internal.RedisConstants.ERROR_UNKNOWN_CLUSTER_SUBCOMMAND;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_REGION_BUCKETS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS_PER_BUCKET;
+import static 
org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction.MemberBuckets;
+
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.partition.PartitionMemberInfo;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.partition.PartitionRegionInfo;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction;
+import org.apache.geode.redis.internal.data.RedisData;
+import org.apache.geode.redis.internal.data.RedisKey;
+import org.apache.geode.redis.internal.executor.AbstractExecutor;
+import org.apache.geode.redis.internal.executor.RedisResponse;
+import org.apache.geode.redis.internal.netty.Command;
+import org.apache.geode.redis.internal.netty.ExecutionHandlerContext;
+
+public class ClusterExecutor extends AbstractExecutor {
+
+  @Override
+  public RedisResponse executeCommand(Command command, ExecutionHandlerContext 
context) {
+
+    List<byte[]> args = command.getProcessedCommand();
+    String subCommand = new String(args.get(1));
+
+    switch (subCommand.toLowerCase()) {
+      case "info":
+        return getInfo(context);
+      case "nodes":
+        return getNodes(context);
+      case "slots":
+        return getSlots(context);
+      default: {
+        return RedisResponse.error(
+            String.format(ERROR_UNKNOWN_CLUSTER_SUBCOMMAND, subCommand));
+      }
+    }
+  }
+
+  private RedisResponse getSlots(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+
+    Map<Integer, String> primaryBucketToMemberMap = new HashMap<>();
+    Map<String, Pair<String, Integer>> memberToHostPortMap = new TreeMap<>();
+    int retrievedBucketCount = 0;
+
+    for (MemberBuckets m : memberBuckets) {
+      memberToHostPortMap.put(m.getMemberId(), Pair.of(m.getHostAddress(), 
m.getPort()));
+      for (Integer id : m.getPrimaryBucketIds()) {
+        primaryBucketToMemberMap.put(id, m.getMemberId());
+        retrievedBucketCount++;
+      }
+    }
+
+    if (retrievedBucketCount != REDIS_REGION_BUCKETS) {
+      return RedisResponse.error("Internal error: bucket count mismatch " + 
retrievedBucketCount
+          + " != " + REDIS_REGION_BUCKETS);
+    }
+
+    int index = 0;
+    List<Object> slots = new ArrayList<>();
+
+    for (int i = 0; i < REDIS_REGION_BUCKETS; i++) {
+      Pair<String, Integer> primaryHostAndPort =
+          memberToHostPortMap.get(primaryBucketToMemberMap.get(i));
+
+      List<Object> entry = new ArrayList<>();
+      entry.add(index * REDIS_SLOTS_PER_BUCKET);
+      entry.add(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      entry.add(Arrays.asList(primaryHostAndPort.getLeft(), 
primaryHostAndPort.getRight()));
+
+      slots.add(entry);
+      index++;
+    }
+
+    return RedisResponse.array(slots);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MemberBuckets> getMemberBuckets(
+      ExecutionHandlerContext ctx) {
+    Region<RedisKey, RedisData> dataRegion = 
ctx.getRegionProvider().getDataRegion();
+
+    // Really only need this in situations where the cluster is empty and no 
data has been
+    // added yet.
+    PartitionRegionHelper.assignBucketsToPartitions(dataRegion);
+
+    Set<DistributedMember> membersWithDataRegion = new HashSet<>();
+    for (PartitionMemberInfo memberInfo : getRegionMembers(ctx)) {
+      membersWithDataRegion.add(memberInfo.getDistributedMember());
+    }
+
+    ResultCollector<MemberBuckets, List<MemberBuckets>> resultCollector =
+        
FunctionService.onMembers(membersWithDataRegion).execute(BucketInfoRetrievalFunction.ID);

Review comment:
       It isn't really periodic though. If a bucket move occurs, or a client 
otherwise has incorrect slot assignments, we need to send MOVED, which will need 
to calculate the current bucket placement. @jdeppe-pivotal said there is an 
optimization coming to address this, though.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to