upthewaterspout commented on a change in pull request #6359:
URL: https://github.com/apache/geode/pull/6359#discussion_r620455722



##########
File path: 
geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/cluster/BucketInfoRetrievalFunction.java
##########
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.cluster;
+
+import java.io.Serializable;
+import java.net.InetAddress;
+import java.util.Set;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionContext;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.internal.cache.LocalDataSet;
+import org.apache.geode.internal.cache.execute.InternalFunction;
+import org.apache.geode.internal.inet.LocalHostUtil;
+import org.apache.geode.redis.internal.RegionProvider;
+import org.apache.geode.redis.internal.data.ByteArrayWrapper;
+import org.apache.geode.redis.internal.data.RedisKey;
+
+public class BucketInfoRetrievalFunction implements InternalFunction<Void> {
+
+  public static final String ID = BucketInfoRetrievalFunction.class.getName();
+  private final String hostAddress;
+  private final int redisPort;
+
+  private BucketInfoRetrievalFunction(String address, int redisPort) {
+    if (address == null || address.isEmpty() || address.equals("0.0.0.0")) {
+      InetAddress localhost = null;
+      try {
+        localhost = LocalHostUtil.getLocalHost();
+      } catch (Exception ignored) {
+      }
+      hostAddress = localhost == null ? "127.0.0.1" : 
localhost.getHostAddress();
+    } else {
+      hostAddress = address;
+    }
+
+    this.redisPort = redisPort;
+  }
+
+  public static void register(String address, int redisPort) {
+    FunctionService.registerFunction(new BucketInfoRetrievalFunction(address, 
redisPort));
+  }
+
+  @Override
+  public void execute(FunctionContext<Void> context) {
+    Region<RedisKey, ByteArrayWrapper> region =
+        context.getCache().getRegion(RegionProvider.REDIS_DATA_REGION);
+
+    String memberId =
+        
context.getCache().getDistributedSystem().getDistributedMember().getUniqueId();
+    LocalDataSet localPrimary = (LocalDataSet) 
PartitionRegionHelper.getLocalPrimaryData(region);

Review comment:
       Since you are already using an internal API, you could probably just call 
`getAllLocalPrimaryBucketIds` directly (see the code in 
`PartitionRegionHelper.getLocalPrimaryData`). Not a big deal though.

##########
File path: 
geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
##########
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.cluster;
+
+import static 
org.apache.geode.redis.internal.RedisConstants.ERROR_UNKNOWN_CLUSTER_SUBCOMMAND;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_REGION_BUCKETS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS_PER_BUCKET;
+import static 
org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction.MemberBuckets;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.partition.PartitionMemberInfo;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.partition.PartitionRegionInfo;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction;
+import org.apache.geode.redis.internal.data.RedisData;
+import org.apache.geode.redis.internal.data.RedisKey;
+import org.apache.geode.redis.internal.executor.AbstractExecutor;
+import org.apache.geode.redis.internal.executor.RedisResponse;
+import org.apache.geode.redis.internal.netty.Command;
+import org.apache.geode.redis.internal.netty.ExecutionHandlerContext;
+
+public class ClusterExecutor extends AbstractExecutor {
+
+  @Override
+  public RedisResponse executeCommand(Command command, ExecutionHandlerContext 
context) {
+
+    List<byte[]> args = command.getProcessedCommand();
+    String subCommand = new String(args.get(1));
+
+    switch (subCommand.toLowerCase()) {
+      case "info":
+        return getInfo(context);
+      case "nodes":
+        return getNodes(context);
+      case "slots":
+        return getSlots(context);
+      default: {
+        return RedisResponse.error(
+            String.format(ERROR_UNKNOWN_CLUSTER_SUBCOMMAND, subCommand));
+      }
+    }
+  }
+
+  private RedisResponse getSlots(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+
+    Map<Integer, String> primaryBucketToMemberMap = new HashMap<>();
+    Map<String, Pair<String, Integer>> memberToHostPortMap = new TreeMap<>();
+    int retrievedBucketCount = 0;
+
+    for (MemberBuckets m : memberBuckets) {
+      memberToHostPortMap.put(m.getMemberId(), Pair.of(m.getHostAddress(), 
m.getPort()));
+      for (Integer id : m.getPrimaryBucketIds()) {
+        primaryBucketToMemberMap.put(id, m.getMemberId());
+        retrievedBucketCount++;
+      }
+    }
+
+    if (retrievedBucketCount != REDIS_REGION_BUCKETS) {
+      return RedisResponse.error("Internal error: bucket count mismatch " + 
retrievedBucketCount
+          + " != " + REDIS_REGION_BUCKETS);
+    }
+
+    int index = 0;
+    List<Object> slots = new ArrayList<>();
+
+    for (int i = 0; i < REDIS_REGION_BUCKETS; i++) {
+      Pair<String, Integer> primaryHostAndPort =
+          memberToHostPortMap.get(primaryBucketToMemberMap.get(i));
+
+      List<Object> entry = new ArrayList<>();
+      entry.add(index * REDIS_SLOTS_PER_BUCKET);
+      entry.add(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      entry.add(Arrays.asList(primaryHostAndPort.getLeft(), 
primaryHostAndPort.getRight()));
+
+      slots.add(entry);
+      index++;
+    }
+
+    return RedisResponse.array(slots);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MemberBuckets> getMemberBuckets(
+      ExecutionHandlerContext ctx) {
+    Region<RedisKey, RedisData> dataRegion = 
ctx.getRegionProvider().getDataRegion();
+
+    // Really only need this in situations where the cluster is empty and no 
data has been
+    // added yet.
+    PartitionRegionHelper.assignBucketsToPartitions(dataRegion);
+
+    Set<DistributedMember> membersWithDataRegion = new HashSet<>();
+    for (PartitionMemberInfo memberInfo : getRegionMembers(ctx)) {
+      membersWithDataRegion.add(memberInfo.getDistributedMember());
+    }
+
+    ResultCollector<MemberBuckets, List<MemberBuckets>> resultCollector =
+        
FunctionService.onMembers(membersWithDataRegion).execute(BucketInfoRetrievalFunction.ID);

Review comment:
       It's somewhat unfortunate that we need to use a function at all to get this 
information. The locations of the *buckets* are actually known to all members of 
the PR, so we can find out where all the primaries are without a function. I 
think the part we don't know is the redis bind address on the other members.
   
   Is it worth improving the performance of these commands, or are they 
infrequent enough that we don't care? If it is worth it, we can add another story 
to figure out how to distribute the redis bind addresses (or perhaps just cache 
them?).

##########
File path: 
geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
##########
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.cluster;
+
+import static 
org.apache.geode.redis.internal.RedisConstants.ERROR_UNKNOWN_CLUSTER_SUBCOMMAND;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_REGION_BUCKETS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS_PER_BUCKET;
+import static 
org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction.MemberBuckets;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.partition.PartitionMemberInfo;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.partition.PartitionRegionInfo;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction;
+import org.apache.geode.redis.internal.data.RedisData;
+import org.apache.geode.redis.internal.data.RedisKey;
+import org.apache.geode.redis.internal.executor.AbstractExecutor;
+import org.apache.geode.redis.internal.executor.RedisResponse;
+import org.apache.geode.redis.internal.netty.Command;
+import org.apache.geode.redis.internal.netty.ExecutionHandlerContext;
+
+public class ClusterExecutor extends AbstractExecutor {
+
+  @Override
+  public RedisResponse executeCommand(Command command, ExecutionHandlerContext 
context) {
+
+    List<byte[]> args = command.getProcessedCommand();
+    String subCommand = new String(args.get(1));
+
+    switch (subCommand.toLowerCase()) {
+      case "info":
+        return getInfo(context);
+      case "nodes":
+        return getNodes(context);
+      case "slots":
+        return getSlots(context);
+      default: {
+        return RedisResponse.error(
+            String.format(ERROR_UNKNOWN_CLUSTER_SUBCOMMAND, subCommand));
+      }
+    }
+  }
+
+  private RedisResponse getSlots(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+
+    Map<Integer, String> primaryBucketToMemberMap = new HashMap<>();
+    Map<String, Pair<String, Integer>> memberToHostPortMap = new TreeMap<>();
+    int retrievedBucketCount = 0;
+
+    for (MemberBuckets m : memberBuckets) {
+      memberToHostPortMap.put(m.getMemberId(), Pair.of(m.getHostAddress(), 
m.getPort()));
+      for (Integer id : m.getPrimaryBucketIds()) {
+        primaryBucketToMemberMap.put(id, m.getMemberId());
+        retrievedBucketCount++;
+      }
+    }
+
+    if (retrievedBucketCount != REDIS_REGION_BUCKETS) {
+      return RedisResponse.error("Internal error: bucket count mismatch " + 
retrievedBucketCount
+          + " != " + REDIS_REGION_BUCKETS);
+    }
+
+    int index = 0;
+    List<Object> slots = new ArrayList<>();
+
+    for (int i = 0; i < REDIS_REGION_BUCKETS; i++) {
+      Pair<String, Integer> primaryHostAndPort =
+          memberToHostPortMap.get(primaryBucketToMemberMap.get(i));
+
+      List<Object> entry = new ArrayList<>();
+      entry.add(index * REDIS_SLOTS_PER_BUCKET);
+      entry.add(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      entry.add(Arrays.asList(primaryHostAndPort.getLeft(), 
primaryHostAndPort.getRight()));
+
+      slots.add(entry);
+      index++;
+    }
+
+    return RedisResponse.array(slots);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MemberBuckets> getMemberBuckets(
+      ExecutionHandlerContext ctx) {
+    Region<RedisKey, RedisData> dataRegion = 
ctx.getRegionProvider().getDataRegion();
+
+    // Really only need this in situations where the cluster is empty and no 
data has been
+    // added yet.
+    PartitionRegionHelper.assignBucketsToPartitions(dataRegion);

Review comment:
       You might want to double-check whether the cluster is empty before 
calling `assignBucketsToPartitions`. It looks like this function may acquire a 
dlock before performing that check. I'm not sure why `assignBucketsToPartitions` 
isn't already doing the check itself.

##########
File path: 
geode-apis-compatible-with-redis/src/main/java/org/apache/geode/redis/internal/executor/cluster/ClusterExecutor.java
##########
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more 
contributor license
+ * agreements. See the NOTICE file distributed with this work for additional 
information regarding
+ * copyright ownership. The ASF licenses this file to You under the Apache 
License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the 
License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software 
distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express
+ * or implied. See the License for the specific language governing permissions 
and limitations under
+ * the License.
+ */
+
+package org.apache.geode.redis.internal.executor.cluster;
+
+import static 
org.apache.geode.redis.internal.RedisConstants.ERROR_UNKNOWN_CLUSTER_SUBCOMMAND;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_REGION_BUCKETS;
+import static org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS;
+import static 
org.apache.geode.redis.internal.RegionProvider.REDIS_SLOTS_PER_BUCKET;
+import static 
org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction.MemberBuckets;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import org.apache.commons.lang3.tuple.Pair;
+
+import org.apache.geode.cache.Region;
+import org.apache.geode.cache.execute.FunctionService;
+import org.apache.geode.cache.execute.ResultCollector;
+import org.apache.geode.cache.partition.PartitionMemberInfo;
+import org.apache.geode.cache.partition.PartitionRegionHelper;
+import org.apache.geode.cache.partition.PartitionRegionInfo;
+import org.apache.geode.distributed.DistributedMember;
+import org.apache.geode.redis.internal.cluster.BucketInfoRetrievalFunction;
+import org.apache.geode.redis.internal.data.RedisData;
+import org.apache.geode.redis.internal.data.RedisKey;
+import org.apache.geode.redis.internal.executor.AbstractExecutor;
+import org.apache.geode.redis.internal.executor.RedisResponse;
+import org.apache.geode.redis.internal.netty.Command;
+import org.apache.geode.redis.internal.netty.ExecutionHandlerContext;
+
+public class ClusterExecutor extends AbstractExecutor {
+
+  @Override
+  public RedisResponse executeCommand(Command command, ExecutionHandlerContext 
context) {
+
+    List<byte[]> args = command.getProcessedCommand();
+    String subCommand = new String(args.get(1));
+
+    switch (subCommand.toLowerCase()) {
+      case "info":
+        return getInfo(context);
+      case "nodes":
+        return getNodes(context);
+      case "slots":
+        return getSlots(context);
+      default: {
+        return RedisResponse.error(
+            String.format(ERROR_UNKNOWN_CLUSTER_SUBCOMMAND, subCommand));
+      }
+    }
+  }
+
+  private RedisResponse getSlots(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+
+    Map<Integer, String> primaryBucketToMemberMap = new HashMap<>();
+    Map<String, Pair<String, Integer>> memberToHostPortMap = new TreeMap<>();
+    int retrievedBucketCount = 0;
+
+    for (MemberBuckets m : memberBuckets) {
+      memberToHostPortMap.put(m.getMemberId(), Pair.of(m.getHostAddress(), 
m.getPort()));
+      for (Integer id : m.getPrimaryBucketIds()) {
+        primaryBucketToMemberMap.put(id, m.getMemberId());
+        retrievedBucketCount++;
+      }
+    }
+
+    if (retrievedBucketCount != REDIS_REGION_BUCKETS) {
+      return RedisResponse.error("Internal error: bucket count mismatch " + 
retrievedBucketCount
+          + " != " + REDIS_REGION_BUCKETS);
+    }
+
+    int index = 0;
+    List<Object> slots = new ArrayList<>();
+
+    for (int i = 0; i < REDIS_REGION_BUCKETS; i++) {
+      Pair<String, Integer> primaryHostAndPort =
+          memberToHostPortMap.get(primaryBucketToMemberMap.get(i));
+
+      List<Object> entry = new ArrayList<>();
+      entry.add(index * REDIS_SLOTS_PER_BUCKET);
+      entry.add(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      entry.add(Arrays.asList(primaryHostAndPort.getLeft(), 
primaryHostAndPort.getRight()));
+
+      slots.add(entry);
+      index++;
+    }
+
+    return RedisResponse.array(slots);
+  }
+
+  @SuppressWarnings("unchecked")
+  private List<MemberBuckets> getMemberBuckets(
+      ExecutionHandlerContext ctx) {
+    Region<RedisKey, RedisData> dataRegion = 
ctx.getRegionProvider().getDataRegion();
+
+    // Really only need this in situations where the cluster is empty and no 
data has been
+    // added yet.
+    PartitionRegionHelper.assignBucketsToPartitions(dataRegion);
+
+    Set<DistributedMember> membersWithDataRegion = new HashSet<>();
+    for (PartitionMemberInfo memberInfo : getRegionMembers(ctx)) {
+      membersWithDataRegion.add(memberInfo.getDistributedMember());
+    }
+
+    ResultCollector<MemberBuckets, List<MemberBuckets>> resultCollector =
+        
FunctionService.onMembers(membersWithDataRegion).execute(BucketInfoRetrievalFunction.ID);
+
+    return resultCollector.getResult();
+  }
+
+  /**
+   * The format being produced is something like this:
+   *
+   * <pre>
+   * 67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1 127.0.0.1:30002@31002 master - 0 
1426238316232 2 connected 5461-10922
+   * 292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f 127.0.0.1:30003@31003 master - 0 
1426238318243 3 connected 10923-16383
+   * e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:30001@31001 
myself,master - 0 0 1 connected 0-5460
+   * </pre>
+   *
+   * Note that there are no 'slave' entries since Geode does not host all 
secondary data apart from
+   * primary as redis does. The cluster port is provided only for consistency 
with the format
+   * of the output.
+   */
+  private RedisResponse getNodes(ExecutionHandlerContext ctx) {
+    List<MemberBuckets> memberBuckets = getMemberBuckets(ctx);
+    String memberId = ctx.getMemberName();
+
+    StringBuilder response = new StringBuilder();
+    for (MemberBuckets m : memberBuckets) {
+      response.append(String.format("%s %s:%d@%d master",
+          m.getMemberId(), m.getHostAddress(), m.getPort(), m.getPort()));
+
+      if (m.getMemberId().equals(memberId)) {
+        response.append(",myself");
+      }
+      response.append(" - 0 0 1 connected");
+
+      for (Integer index : m.getPrimaryBucketIds()) {
+        response.append(" ");
+        response.append(index * REDIS_SLOTS_PER_BUCKET);
+        response.append("-");
+        response.append(((index + 1) * REDIS_SLOTS_PER_BUCKET) - 1);
+      }
+
+      response.append("\n");
+    }
+
+    return RedisResponse.bulkString(response.toString());
+  }
+
+
+  private RedisResponse getInfo(ExecutionHandlerContext ctx) {
+    int memberCount = getRegionMembers(ctx).size();
+
+    return RedisResponse.bulkString(
+        "cluster_state:ok\r\n"
+            + "cluster_slots_assigned:" + REDIS_SLOTS + "\r\n"
+            + "cluster_slots_ok:" + REDIS_SLOTS + "\r\n"

Review comment:
       At some point we may actually have buckets that are offline or missing. 
I don't know if there is any value in computing these numbers instead of 
hardcoding them. Maybe not right now.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to