bharatviswa504 commented on a change in pull request #1607: URL: https://github.com/apache/ozone/pull/1607#discussion_r529972330
########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java ########## @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles DeleteKey request layout version V1. 
+ */ +public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyDeleteRequestV1.class); + + public OMKeyDeleteRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + deleteKeyRequest.getKeyArgs(); + Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyDeletes(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + Result result = null; + OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = + OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName, + bucketName, keyName, 0); + + if (keyStatus == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); Review comment: Minor: Can we add KeyName also as part of the exception message. ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java ########## @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles DeleteKey request layout version V1. 
+ */ +public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyDeleteRequestV1.class); + + public OMKeyDeleteRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + deleteKeyRequest.getKeyArgs(); + Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyDeletes(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + Result result = null; + OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = + OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName, + bucketName, keyName, 0); + + if (keyStatus == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + + // Set the UpdateID to current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + String ozonePathKey = omMetadataManager.getOzonePathKey( + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + if (keyStatus.isDirectory()) { + // Update dir cache. + omMetadataManager.getDirectoryTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } else { + // Update table cache. + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } + + omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); + + long quotaReleased = sumBlockLengths(omKeyInfo); Review comment: In case of directory OmKeyInfo will not have any blocks, We need to get the bytesUsed from all the keys in the directory, but if we do that it will be an expensive operation. Just some thought: Might be also update byteUsed at directory level also, so we can sum up all byteUsed at directory level. Need to think more here. 
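To make the thought above concrete, here is a minimal, purely hypothetical sketch (plain Java, not Ozone code; every name in it, such as DirectoryUsageTracker and onKeyCommit, is invented for illustration): if each directory carried a bytesUsed counter that is adjusted whenever a key under it is committed or deleted, the quota released by a directory delete could be read directly instead of summing the block lengths of every key underneath it. The trade-off is extra bookkeeping on every key write along the directory's ancestor chain, which is why this needs more thought.

```
import java.util.HashMap;
import java.util.Map;

/**
 * Hypothetical illustration only: per-directory usage bookkeeping so that a
 * directory delete can release quota without walking all keys below it.
 */
public class DirectoryUsageTracker {

  // Maps a directory's objectID to the total bytes used by keys under it.
  private final Map<Long, Long> dirBytesUsed = new HashMap<>();

  /** Called when a key is committed; ancestors are all parent directory IDs. */
  public void onKeyCommit(long[] ancestorDirIds, long keyDataSize) {
    for (long dirId : ancestorDirIds) {
      dirBytesUsed.merge(dirId, keyDataSize, Long::sum);
    }
  }

  /** Called when a key is deleted; undoes the bookkeeping done at commit. */
  public void onKeyDelete(long[] ancestorDirIds, long keyDataSize) {
    for (long dirId : ancestorDirIds) {
      dirBytesUsed.merge(dirId, -keyDataSize, Long::sum);
    }
  }

  /** Quota to release when the directory (and everything below it) is deleted. */
  public long quotaReleasedForDirectory(long dirId) {
    return dirBytesUsed.getOrDefault(dirId, 0L);
  }
}
```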
########## File path: hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java ########## @@ -498,6 +498,14 @@ public boolean delete(Path f, boolean recursive) throws IOException { incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); + + String layOutVersion = adapter.getBucketLayoutVersion(); + if (layOutVersion != null && + OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) { + String key = pathToKey(f); + return adapter.deleteObject(key); Review comment: I think here, if recursive == false and the directory is not empty, we should throw OMException("Directory is not empty"); if recursive == true, we should delete the directory whether or not it is empty. Current code in BasicOzoneFileSystem.java: ``` if (getStatus().isDirectory() && !this.recursive && listStatus(f).length != 0) { throw new PathIsNotEmptyDirectoryException(f.toString()); } ``` Javadoc: ``` * @param recursive if path is a directory and set to * true, the directory is deleted else throws an exception. In * case of a file the recursive can be set to either true or false. ``` ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponseV1.java ########## @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + +/** + * Response for DeleteKey request. 
+ */ +@CleanupTableInfo(cleanupTables = {KEY_TABLE, DELETED_TABLE}) Review comment: KEY_TABLE -> FILE_TABLE ########## File path: hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java ########## @@ -498,6 +498,14 @@ public boolean delete(Path f, boolean recursive) throws IOException { incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); + + String layOutVersion = adapter.getBucketLayoutVersion(); + if (layOutVersion != null && + OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1.equals(layOutVersion)) { + String key = pathToKey(f); + return adapter.deleteObject(key); + } + Review comment: For V1, we missed the root delete check at the client end. For old buckets, this check is there: ``` if (f.isRoot()) { LOG.warn("Cannot delete root directory."); return false; } ``` ########## File path: hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java ########## @@ -475,6 +475,15 @@ public void testFileSystem() throws Exception { testSeekOnFileLength(); tableCleanup(); + + testFileDelete(); + tableCleanup(); + + testDeleteRoot(); + tableCleanup(); + + testRecursiveDelete(); + tableCleanup(); } /** Review comment: Can we rename tableCleanup to deleteRootDir or something more meaningful? Also, please update the Javadoc now that delete is supported for V1 with this patch. ########## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestV1.java ########## @@ -0,0 +1,195 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyDeleteResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles DeleteKey request layout version V1. 
+ */ +public class OMKeyDeleteRequestV1 extends OMKeyDeleteRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyDeleteRequestV1.class); + + public OMKeyDeleteRequestV1(OMRequest omRequest) { + super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); + + OzoneManagerProtocolProtos.KeyArgs keyArgs = + deleteKeyRequest.getKeyArgs(); + Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs); + + String volumeName = keyArgs.getVolumeName(); + String bucketName = keyArgs.getBucketName(); + String keyName = keyArgs.getKeyName(); + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumKeyDeletes(); + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + IOException exception = null; + boolean acquiredLock = false; + OMClientResponse omClientResponse = null; + Result result = null; + OmVolumeArgs omVolumeArgs = null; + OmBucketInfo omBucketInfo = null; + try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + // Validate bucket and volume exists or not. + validateBucketAndVolume(omMetadataManager, volumeName, bucketName); + + OzoneFileStatus keyStatus = + OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName, + bucketName, keyName, 0); + + if (keyStatus == null) { + throw new OMException("Key not found", KEY_NOT_FOUND); + } + + OmKeyInfo omKeyInfo = keyStatus.getKeyInfo(); + + // Set the UpdateID to current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + + String ozonePathKey = omMetadataManager.getOzonePathKey( + omKeyInfo.getParentObjectID(), omKeyInfo.getFileName()); + + if (keyStatus.isDirectory()) { + // Update dir cache. + omMetadataManager.getDirectoryTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } else { + // Update table cache. + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(ozonePathKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + } + + omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); + + long quotaReleased = sumBlockLengths(omKeyInfo); Review comment: There is a discussion going on to remove bytesUsed from VolumeArgs once HDDS-4308, we might need to revisit once after that went in. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
