fapifta commented on a change in pull request #2987:
URL: https://github.com/apache/ozone/pull/2987#discussion_r790667350



##########
File path: hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
##########
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import com.google.gson.*;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.common.OzoneChecksumException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.shell.OzoneAddress;
+import org.apache.hadoop.ozone.shell.keys.KeyHandler;
+import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException;
+import org.jetbrains.annotations.NotNull;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Map;
+
+/**
+ * Class that downloads every replica for all the blocks associated with a
+ * given key. It also generates a manifest file with information about the
+ * downloaded replicas.
+ */
+@CommandLine.Command(name = "read-replicas",
+    description = "Reads every replica for all the blocks associated with a " +
+        "given key.")
+@MetaInfServices(SubcommandWithParent.class)
+public class ReadReplicas extends KeyHandler implements SubcommandWithParent {
+
+  private ClientProtocol clientProtocol;
+  private ClientProtocol clientProtocolWithoutChecksum;
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  protected void execute(OzoneClient client, OzoneAddress address)
+      throws IOException, OzoneClientException {
+
+    clientProtocol = client.getObjectStore().getClientProxy();
+
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneClientConfig clientConfig = configuration
+        .getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumVerify(false);
+    configuration.setFromObject(clientConfig);
+    clientProtocolWithoutChecksum = new RpcClient(configuration, null);
+
+    address.ensureKeyAddress();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
+
+    OzoneKeyDetails keyInfoDetails
+        = clientProtocol.getKeyDetails(volumeName, bucketName, keyName);
+
+    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas
+        = clientProtocol.getKeysEveryReplicas(volumeName, bucketName, keyName);
+
+    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        replicasWithoutChecksum = clientProtocolWithoutChecksum
+        .getKeysEveryReplicas(volumeName, bucketName, keyName);
+
+    String directoryName = createDirectory(volumeName, bucketName, keyName);
+
+    JsonObject result = new JsonObject();
+    result.addProperty("filename",
+        volumeName + "/" + bucketName + "/" + keyName);
+    result.addProperty("datasize", keyInfoDetails.getDataSize());
+
+    JsonArray blocks = new JsonArray();
+    downloadReplicasAndCreateManifest(keyName, replicas,
+        replicasWithoutChecksum, directoryName, blocks);
+    result.add("blocks", blocks);
+
+    Gson gson = new GsonBuilder().setPrettyPrinting().create();
+    String prettyJson = gson.toJson(result);
+
+    String manifestFileName = keyName + "_manifest";
+    System.out.println("Writing manifest file : " + manifestFileName);
+    File manifestFile
+        = new File("/opt/hadoop/" + directoryName + "/" + manifestFileName);
+    Files.write(manifestFile.toPath(),
+        prettyJson.getBytes(StandardCharsets.UTF_8));
+  }
+
+  private void downloadReplicasAndCreateManifest(
+      String keyName,
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+          replicasWithoutChecksum,
+      String directoryName, JsonArray blocks) throws IOException {
+    int blockIndex = 0;
+
+    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        block : replicas.entrySet()) {
+      JsonObject blockJson = new JsonObject();
+      JsonArray replicasJson = new JsonArray();
+
+      blockIndex += 1;
+      blockJson.addProperty("blockIndex", blockIndex);
+      blockJson.addProperty("containerId", block.getKey().getContainerID());
+      blockJson.addProperty("localId", block.getKey().getLocalID());
+      blockJson.addProperty("length", block.getKey().getLength());
+      blockJson.addProperty("offset", block.getKey().getOffset());
+
+      for (Map.Entry<DatanodeDetails, OzoneInputStream>
+          replica : block.getValue().entrySet()) {
+        JsonObject replicaJson = new JsonObject();
+
+        replicaJson.addProperty("hostname",
+            replica.getKey().getHostName());
+        replicaJson.addProperty("uuid",
+            replica.getKey().getUuidString());
+
+        OzoneInputStream is = replica.getValue();
+        String fileName = keyName + "_block" + blockIndex + "_" +
+            replica.getKey().getHostName();
+        System.out.println("Writing : " + fileName);
+        File replicaFile
+            = new File("/opt/hadoop/" + directoryName + "/" + fileName);
+
+        try {
+          Files.copy(is, replicaFile.toPath(),
+              StandardCopyOption.REPLACE_EXISTING);
+        } catch (IOException e) {
+          Throwable cause = e.getCause();
+          replicaJson.addProperty("exception", e.getMessage());
+          if(cause instanceof OzoneChecksumException) {
+            BlockID blockID = block.getKey().getBlockID();
+            String datanodeUUID = replica.getKey().getUuidString();
+            is = getInputStreamWithoutChecksum(replicasWithoutChecksum,
+                datanodeUUID, blockID);
+            Files.copy(is, replicaFile.toPath(),
+                StandardCopyOption.REPLACE_EXISTING);
+          } else if(cause instanceof StatusRuntimeException) {
+            break;
+          }
+        } finally {
+          is.close();
+        }
+        replicasJson.add(replicaJson);
+      }
+      blockJson.add("replicas", replicasJson);
+      blocks.add(blockJson);
+    }
+  }
+
+  private OzoneInputStream getInputStreamWithoutChecksum(
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+          replicasWithoutChecksum, String datanodeUUID, BlockID blockID) {
+    OzoneInputStream is = new OzoneInputStream();
+    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        block : replicasWithoutChecksum.entrySet()) {
+      if(block.getKey().getBlockID().equals(blockID)) {
+        for (Map.Entry<DatanodeDetails, OzoneInputStream>
+            replica : block.getValue().entrySet()) {
+          if(replica.getKey().getUuidString().equals(datanodeUUID)) {
+            is = replica.getValue();
+          }
+        }
+      }
+    }
+    return is;
+  }
+
+  @NotNull
+  private String createDirectory(String volumeName, String bucketName,
+                                 String keyName) {
+    String fileSuffix
+        = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
+    String directoryName = volumeName + "_" + bucketName + "_" + keyName +
+        "_" + fileSuffix;
+    System.out.println("Creating directory : " + directoryName);
+    File dir = new File("/opt/hadoop/" + directoryName);
+    if (!dir.exists()){
+      if(dir.mkdir()) {
+        System.out.println("Successfully created!");
+      } else {
+        System.out.println("Something went wrong.");

Review comment:
       We might call this method at the very beginning of execution, and throw an exception here if the output directory cannot be created, so that we fail early and do not start gathering data that we cannot save.
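
   A rough sketch of the fail-fast variant (keeping the current method shape; the exception message is illustrative):

   ```java
   @NotNull
   private String createDirectory(String volumeName, String bucketName,
                                  String keyName) throws IOException {
     String fileSuffix
         = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
     String directoryName = volumeName + "_" + bucketName + "_" + keyName +
         "_" + fileSuffix;
     File dir = new File("/opt/hadoop/" + directoryName);
     // Fail early: abort before downloading anything we could not save.
     if (!dir.exists() && !dir.mkdir()) {
       throw new IOException(
           "Cannot create directory: " + dir.getAbsolutePath());
     }
     return directoryName;
   }
   ```

   With this, execute() would invoke createDirectory() before fetching the key details and the replica streams.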

##########
File path: hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
##########
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import com.google.gson.*;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.common.OzoneChecksumException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.shell.OzoneAddress;
+import org.apache.hadoop.ozone.shell.keys.KeyHandler;
+import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException;
+import org.jetbrains.annotations.NotNull;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Map;
+
+/**
+ * Class that downloads every replica for all the blocks associated with a
+ * given key. It also generates a manifest file with information about the
+ * downloaded replicas.
+ */
+@CommandLine.Command(name = "read-replicas",
+    description = "Reads every replica for all the blocks associated with a " +
+        "given key.")
+@MetaInfServices(SubcommandWithParent.class)
+public class ReadReplicas extends KeyHandler implements SubcommandWithParent {
+
+  private ClientProtocol clientProtocol;
+  private ClientProtocol clientProtocolWithoutChecksum;
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  protected void execute(OzoneClient client, OzoneAddress address)
+      throws IOException, OzoneClientException {
+
+    clientProtocol = client.getObjectStore().getClientProxy();
+
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneClientConfig clientConfig = configuration
+        .getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumVerify(false);
+    configuration.setFromObject(clientConfig);
+    clientProtocolWithoutChecksum = new RpcClient(configuration, null);
+
+    address.ensureKeyAddress();
+    String volumeName = address.getVolumeName();
+    String bucketName = address.getBucketName();
+    String keyName = address.getKeyName();
+
+    OzoneKeyDetails keyInfoDetails
+        = clientProtocol.getKeyDetails(volumeName, bucketName, keyName);
+
+    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas
+        = clientProtocol.getKeysEveryReplicas(volumeName, bucketName, keyName);
+
+    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        replicasWithoutChecksum = clientProtocolWithoutChecksum
+        .getKeysEveryReplicas(volumeName, bucketName, keyName);
+
+    String directoryName = createDirectory(volumeName, bucketName, keyName);
+
+    JsonObject result = new JsonObject();
+    result.addProperty("filename",
+        volumeName + "/" + bucketName + "/" + keyName);
+    result.addProperty("datasize", keyInfoDetails.getDataSize());
+
+    JsonArray blocks = new JsonArray();
+    downloadReplicasAndCreateManifest(keyName, replicas,
+        replicasWithoutChecksum, directoryName, blocks);
+    result.add("blocks", blocks);
+
+    Gson gson = new GsonBuilder().setPrettyPrinting().create();
+    String prettyJson = gson.toJson(result);
+
+    String manifestFileName = keyName + "_manifest";
+    System.out.println("Writing manifest file : " + manifestFileName);
+    File manifestFile
+        = new File("/opt/hadoop/" + directoryName + "/" + manifestFileName);
+    Files.write(manifestFile.toPath(),
+        prettyJson.getBytes(StandardCharsets.UTF_8));
+  }
+
+  private void downloadReplicasAndCreateManifest(
+      String keyName,
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+          replicasWithoutChecksum,
+      String directoryName, JsonArray blocks) throws IOException {
+    int blockIndex = 0;
+
+    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        block : replicas.entrySet()) {
+      JsonObject blockJson = new JsonObject();
+      JsonArray replicasJson = new JsonArray();
+
+      blockIndex += 1;
+      blockJson.addProperty("blockIndex", blockIndex);
+      blockJson.addProperty("containerId", block.getKey().getContainerID());
+      blockJson.addProperty("localId", block.getKey().getLocalID());
+      blockJson.addProperty("length", block.getKey().getLength());
+      blockJson.addProperty("offset", block.getKey().getOffset());
+
+      for (Map.Entry<DatanodeDetails, OzoneInputStream>
+          replica : block.getValue().entrySet()) {
+        JsonObject replicaJson = new JsonObject();
+
+        replicaJson.addProperty("hostname",
+            replica.getKey().getHostName());
+        replicaJson.addProperty("uuid",
+            replica.getKey().getUuidString());
+
+        OzoneInputStream is = replica.getValue();
+        String fileName = keyName + "_block" + blockIndex + "_" +
+            replica.getKey().getHostName();
+        System.out.println("Writing : " + fileName);
+        File replicaFile
+            = new File("/opt/hadoop/" + directoryName + "/" + fileName);
+
+        try {
+          Files.copy(is, replicaFile.toPath(),
+              StandardCopyOption.REPLACE_EXISTING);
+        } catch (IOException e) {
+          Throwable cause = e.getCause();
+          replicaJson.addProperty("exception", e.getMessage());
+          if(cause instanceof OzoneChecksumException) {
+            BlockID blockID = block.getKey().getBlockID();
+            String datanodeUUID = replica.getKey().getUuidString();
+            is = getInputStreamWithoutChecksum(replicasWithoutChecksum,
+                datanodeUUID, blockID);
+            Files.copy(is, replicaFile.toPath(),
+                StandardCopyOption.REPLACE_EXISTING);
+          } else if(cause instanceof StatusRuntimeException) {
+            break;
+          }
+        } finally {
+          is.close();
+        }
+        replicasJson.add(replicaJson);
+      }
+      blockJson.add("replicas", replicasJson);
+      blocks.add(blockJson);
+    }
+  }
+
+  private OzoneInputStream getInputStreamWithoutChecksum(
+      Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+          replicasWithoutChecksum, String datanodeUUID, BlockID blockID) {
+    OzoneInputStream is = new OzoneInputStream();
+    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
+        block : replicasWithoutChecksum.entrySet()) {
+      if(block.getKey().getBlockID().equals(blockID)) {
+        for (Map.Entry<DatanodeDetails, OzoneInputStream>
+            replica : block.getValue().entrySet()) {
+          if(replica.getKey().getUuidString().equals(datanodeUUID)) {
+            is = replica.getValue();
+          }
+        }
+      }
+    }
+    return is;
+  }
+
+  @NotNull
+  private String createDirectory(String volumeName, String bucketName,
+                                 String keyName) {
+    String fileSuffix
+        = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
+    String directoryName = volumeName + "_" + bucketName + "_" + keyName +
+        "_" + fileSuffix;
+    System.out.println("Creating directory : " + directoryName);
+    File dir = new File("/opt/hadoop/" + directoryName);

Review comment:
       `/opt/hadoop/` as a prefix for the directory name should not be hardcoded here; we should take that as a parameter defined for the command, for example named `-o`, or `--outputDir`, or something similar.
   
   This also goes for the path used for block files in line 154. A sketch of such an option follows below.
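
   A minimal sketch of the picocli option (the option names and default value are only suggestions):

   ```java
   @CommandLine.Option(names = {"-o", "--outputDir"},
       description = "Destination directory to write the replicas and " +
           "the manifest file to.",
       defaultValue = "/opt/hadoop")
   private String outputDir;
   ```

   Both createDirectory() and the replica file writes would then build their paths from outputDir instead of the hardcoded prefix.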

##########
File path: hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java
##########
@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.debug;
+
+import com.google.gson.*;
+import org.apache.hadoop.hdds.cli.SubcommandWithParent;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
+import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.io.OzoneInputStream;
+import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.client.rpc.RpcClient;
+import org.apache.hadoop.ozone.common.OzoneChecksumException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.shell.OzoneAddress;
+import org.apache.hadoop.ozone.shell.keys.KeyHandler;
+import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException;
+import org.jetbrains.annotations.NotNull;
+import org.kohsuke.MetaInfServices;
+import picocli.CommandLine;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Map;
+
+/**
+ * Class that downloads every replica for all the blocks associated with a
+ * given key. It also generates a manifest file with information about the
+ * downloaded replicas.
+ */
+@CommandLine.Command(name = "read-replicas",
+    description = "Reads every replica for all the blocks associated with a " +
+        "given key.")
+@MetaInfServices(SubcommandWithParent.class)
+public class ReadReplicas extends KeyHandler implements SubcommandWithParent {
+
+  private ClientProtocol clientProtocol;
+  private ClientProtocol clientProtocolWithoutChecksum;
+
+  @Override
+  public Class<?> getParentType() {
+    return OzoneDebug.class;
+  }
+
+  @Override
+  protected void execute(OzoneClient client, OzoneAddress address)
+      throws IOException, OzoneClientException {
+
+    clientProtocol = client.getObjectStore().getClientProxy();
+
+    OzoneConfiguration configuration = new OzoneConfiguration();
+    OzoneClientConfig clientConfig = configuration
+        .getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumVerify(false);
+    configuration.setFromObject(clientConfig);
+    clientProtocolWithoutChecksum = new RpcClient(configuration, null);

Review comment:
       The configuration object here can be created a bit more easily. The ultimate goal would be to have an identical configuration for the two clients, one with checksum verification turned on, the other with it turned off.
   
   You may use the copy constructor of OzoneConfiguration, with the original config obtained via getConf() as the base config to copy, while to set the property you can use the setBoolean(String, boolean) method, without converting OzoneConfiguration to OzoneClientConfig and then setting it back.
   
   Another case I just realized while writing this up is when checksum verification in the client's base config is already turned off, as in that case both clients will have it turned off, and neither of them will surface checksum problems.
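
   A sketch of the suggested construction (assuming getConf() is available from the handler, and that ozone.client.verify.checksum is the key backing OzoneClientConfig#setChecksumVerify):

   ```java
   // Copy the base config so the two clients differ only in checksum
   // verification.
   OzoneConfiguration configuration = new OzoneConfiguration(getConf());
   configuration.setBoolean("ozone.client.verify.checksum", false);
   clientProtocolWithoutChecksum = new RpcClient(configuration, null);
   ```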

##########
File path: hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
##########
@@ -920,6 +926,64 @@ public OzoneInputStream getKey(
     return getInputStreamWithRetryFunction(keyInfo);
   }
 
+  @Override
+  public Map< OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream> >
+      getKeysEveryReplicas(String volumeName,
+                         String bucketName,
+                         String keyName) throws IOException {
+
+    Map< OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream> > result
+        = new LinkedHashMap<>();
+
+    verifyVolumeName(volumeName);
+    verifyBucketName(bucketName);
+    Preconditions.checkNotNull(keyName);
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setRefreshPipeline(true)
+        .setSortDatanodesInPipeline(topologyAwareReadEnabled)

Review comment:
       As we download all the data, we do not need to sort the DataNodes for this request even in topology-aware environments, and skipping the sort spares some cycles on the OM side. I would like to ask you to remove this call, as the property defaults to false.
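
   The builder would then look roughly like this (sketch only; the trailing build() call is assumed from the surrounding code, which the diff cuts off):

   ```java
   OmKeyArgs keyArgs = new OmKeyArgs.Builder()
       .setVolumeName(volumeName)
       .setBucketName(bucketName)
       .setKeyName(keyName)
       .setRefreshPipeline(true)
       // setSortDatanodesInPipeline() omitted: it defaults to false, and
       // sorting is pointless when every replica is read anyway.
       .build();
   ```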




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


