This is an automated email from the ASF dual-hosted git repository.
aengineer pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 014e17a  HDDS-1166. Fix checkstyle line length issues. Contributed by Nandakumar.
014e17a is described below
commit 014e17af783891699350b44b436071b1635868a8
Author: Anu Engineer <[email protected]>
AuthorDate: Sat Feb 23 20:31:39 2019 -0800
HDDS-1166. Fix checkstyle line length issues.
Contributed by Nandakumar.
---
.../CloseContainerCommandHandler.java | 3 ++-
.../TestCloseContainerCommandHandler.java | 21 +++++++++++-------
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 19 ++++++++++------
.../hadoop/hdds/scm/block/DeletedBlockLogImpl.java | 25 +++++++++++++++-------
.../metadata/DeletedBlocksTransactionCodec.java | 3 ++-
.../hdds/scm/metadata/SCMMetadataStoreRDBImpl.java | 13 ++++++-----
.../hadoop/hdds/scm/server/SCMConfigurator.java | 3 ++-
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 15 ++++++++-----
8 files changed, 66 insertions(+), 36 deletions(-)
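
Every hunk below applies the same mechanical fix: a statement longer than the 80-column limit enforced by Hadoop's checkstyle configuration is split across lines, most often by breaking a long qualified name just before a dot. Java permits this because whitespace may surround the separators of a dotted name, so the split statement is token-for-token identical to the original. A minimal compilable sketch of the pattern (package and class names are invented for this illustration):

    package org.example;

    // The static import below is the one-line statement
    // "import static java.nio.charset.StandardCharsets.UTF_8;"
    // split before the final dot, exactly as the hunks below split
    // over-long imports.
    import static java.nio.charset.StandardCharsets
        .UTF_8;

    public final class LineWrapDemo {
      public static void main(String[] args) {
        // Resolves exactly as the unsplit import would.
        System.out.println(UTF_8.name()); // prints UTF-8
      }
    }
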
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index 7ca892d..65cbde0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -86,7 +86,8 @@ public class CloseContainerCommandHandler implements CommandHandler {
return;
}
- if (container.getContainerState() == ContainerProtos.ContainerDataProto.State.CLOSED) {
+ if (container.getContainerState() ==
+ ContainerProtos.ContainerDataProto.State.CLOSED) {
// Closing a container is an idempotent operation.
return;
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
index 7079770..7d8b3d6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java
@@ -220,17 +220,20 @@ public class TestCloseContainerCommandHandler {
throws Exception {
final OzoneConfiguration conf = new OzoneConfiguration();
final DatanodeDetails datanodeDetails = randomDatanodeDetails();
- final OzoneContainer ozoneContainer = getOzoneContainer(conf, datanodeDetails);
+ final OzoneContainer ozoneContainer = getOzoneContainer(
+ conf, datanodeDetails);
ozoneContainer.start();
try {
- final Container container = createContainer(conf, datanodeDetails, ozoneContainer);
+ final Container container = createContainer(
+ conf, datanodeDetails, ozoneContainer);
Mockito.verify(context.getParent(),
Mockito.times(1)).triggerHeartbeat();
final long containerId = container.getContainerData().getContainerID();
final PipelineID pipelineId = PipelineID.valueOf(UUID.fromString(
container.getContainerData().getOriginPipelineId()));
- final CloseContainerCommandHandler closeHandler = new CloseContainerCommandHandler();
+ final CloseContainerCommandHandler closeHandler =
+ new CloseContainerCommandHandler();
final CloseContainerCommand closeCommand = new CloseContainerCommand(
containerId, pipelineId);
@@ -240,12 +243,14 @@ public class TestCloseContainerCommandHandler {
ozoneContainer.getContainerSet().getContainer(containerId)
.getContainerState());
- // The container is closed, now we send close command with pipeline id which doesn't exist.
- // This should cause the datanode to trigger quasi close, since the container is already
- // closed, this should do nothing. The command should not fail either.
+ // The container is closed, now we send close command with
+ // pipeline id which doesn't exist.
+ // This should cause the datanode to trigger quasi close, since the
+ // container is already closed, this should do nothing.
+ // The command should not fail either.
final PipelineID randomPipeline = PipelineID.randomId();
- final CloseContainerCommand quasiCloseCommand = new CloseContainerCommand(
- containerId, randomPipeline);
+ final CloseContainerCommand quasiCloseCommand =
+ new CloseContainerCommand(containerId, randomPipeline);
closeHandler.handle(quasiCloseCommand, ozoneContainer, context, null);
Assert.assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED,
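
The comment rewritten in this hunk states the contract the test then asserts: closing a container is idempotent, so a close command naming a pipeline that no longer exists may at most trigger quasi close, and on an already-closed container must do nothing and must not fail. A small self-contained sketch of that decision logic; every type and name here is invented for illustration and is not the actual CloseContainerCommandHandler API:

    // Hypothetical, simplified model of the close-command behavior the
    // test above asserts; names are illustrative only.
    public final class CloseDecisionDemo {
      enum State { OPEN, QUASI_CLOSED, CLOSED }

      /** State a container ends up in after a close command arrives. */
      static State onCloseCommand(State current, boolean pipelineExists) {
        if (current == State.CLOSED) {
          // Close is idempotent: a repeated close command, even one
          // carrying an unknown pipeline id, is a no-op and must not fail.
          return State.CLOSED;
        }
        // With no live pipeline there is no quorum to close through, so
        // the datanode falls back to quasi close; otherwise close normally.
        return pipelineExists ? State.CLOSED : State.QUASI_CLOSED;
      }

      public static void main(String[] args) {
        // Mirrors the test: an already-CLOSED container stays CLOSED when
        // the command carries a random pipeline id.
        System.out.println(onCloseCommand(State.CLOSED, false)); // CLOSED
      }
    }
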
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index a4757ee..dea7c02 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -50,11 +50,16 @@ import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
+ .INVALID_BLOCK_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys
+ .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
/** Block Manager manages the block access for SCM. */
public class BlockManagerImpl implements EventHandler<Boolean>,
@@ -83,8 +88,8 @@ public class BlockManagerImpl implements EventHandler<Boolean>,
* @param scm
* @throws IOException
*/
- public BlockManagerImpl(final Configuration conf, StorageContainerManager scm)
- throws IOException {
+ public BlockManagerImpl(final Configuration conf,
+ final StorageContainerManager scm) {
Objects.requireNonNull(scm, "SCM cannot be null");
this.pipelineManager = scm.getPipelineManager();
this.containerManager = scm.getContainerManager();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 5ff34f5..570216f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -31,10 +31,15 @@ import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto
+ .DeleteBlockTransactionResult;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.command
+ .CommandStatusReportHandler.DeleteBlockStatus;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -50,8 +55,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.lang.Math.min;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys
+ .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
/**
* A implement class of {@link DeletedBlockLog}, and it uses
@@ -328,12 +335,14 @@ public class DeletedBlockLogImpl
? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
scmMetadataStore.getDeletedBlocksTXTable().iterator()) {
while (iter.hasNext()) {
- Table.KeyValue<Long, DeletedBlocksTransaction> keyValue = iter.next();
+ Table.KeyValue<Long, DeletedBlocksTransaction> keyValue =
+ iter.next();
DeletedBlocksTransaction block = keyValue.getValue();
if (block.getCount() > -1 && block.getCount() <= maxRetry) {
if (transactions.addTransaction(block,
transactionToDNsCommitMap.get(block.getTxID()))) {
- deleteTransactionMap.put(block.getContainerID(), block.getTxID());
+ deleteTransactionMap.put(block.getContainerID(),
+ block.getTxID());
transactionToDNsCommitMap
.putIfAbsent(block.getTxID(), new ConcurrentHashSet<>());
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
index ec186b8..8e30a0d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/DeletedBlocksTransactionCodec.java
@@ -22,7 +22,8 @@ package org.apache.hadoop.hdds.scm.metadata;
import com.google.protobuf.InvalidProtocolBufferException;
import java.io.IOException;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.utils.db.Codec;
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
index e2983be..fde754c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreRDBImpl.java
@@ -24,12 +24,14 @@ import java.security.cert.X509Certificate;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import java.io.IOException;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+ .CertificateStore;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.DBStoreBuilder;
import org.apache.hadoop.utils.db.Table;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.utils.db.TableIterator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -75,7 +77,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
* @param config - Ozone Configuration.
* @throws IOException - on Failure.
*/
- public SCMMetadataStoreRDBImpl(OzoneConfiguration config) throws IOException {
+ public SCMMetadataStoreRDBImpl(OzoneConfiguration config)
+ throws IOException {
this.configuration = config;
start(this.configuration);
this.txID = new AtomicLong(this.getLargestRecordedTXID());
@@ -187,8 +190,8 @@ public class SCMMetadataStoreRDBImpl implements SCMMetadataStore {
private void checkTableStatus(Table table, String name) throws IOException {
String logMessage = "Unable to get a reference to %s table. Cannot " +
"continue.";
- String errMsg = "Inconsistent DB state, Table - %s. Please check the logs" +
- "for more info.";
+ String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
+ " logs for more info.";
if (table == null) {
LOG.error(String.format(logMessage, name));
throw new IOException(String.format(errMsg, name));
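
Beyond rewrapping, the hunk above also fixes a latent message bug: the old concatenation joined "logs" and "for" with no space at the seam, so the formatted error read "check the logsfor more info." A standalone demonstration of the pitfall:

    // Demonstrates the adjacent-literal pitfall fixed above: splitting a
    // message across concatenated string literals without a space.
    public final class ConcatDemo {
      public static void main(String[] args) {
        String before = "Please check the logs" + "for more info.";  // old
        String after = "Please check the" + " logs for more info.";  // new
        System.out.println(before); // Please check the logsfor more info.
        System.out.println(after);  // Please check the logs for more info.
      }
    }
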
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
index bca9d57..9c955033 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java
@@ -27,7 +27,8 @@ import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
+import org.apache.hadoop.hdds.security.x509.certificate.authority
+ .CertificateServer;
/**
* This class acts as an SCM builder Class. This class is important for us
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index b5d9e4b..a0ffea2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -18,9 +18,12 @@ package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.PipelineActionsProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -28,7 +31,8 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
+import org.apache.hadoop.hdds.scm.server
+ .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
import org.apache.hadoop.hdds.scm.server
.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -54,7 +58,8 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.common.Storage;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client
+ .AuthenticationException;
import java.io.File;
import java.io.IOException;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]