This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch ozone-0.6.0
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 82657a7ea1c66c39d9a7bf409be2ea96728e0d03
Author: bshashikant <[email protected]>
AuthorDate: Thu Jul 16 16:08:56 2020 +0530

    HDDS-3807. Propagate raft log disks info to SCM from datanode. (#1107)

    (cherry picked from commit de027855798bf3b891b8d3c00dc8e59531f98781)
---
 .../common/impl/StorageLocationReport.java         | 11 ++--
 .../common/transport/server/XceiverServerSpi.java  | 12 ++++-
 .../transport/server/ratis/XceiverServerRatis.java | 58 ++++++++++++++++++++--
 .../container/common/volume/MutableVolumeSet.java  | 10 +---
 .../ozone/container/ozoneimpl/OzoneContainer.java  | 18 ++++++-
 .../container/ozoneimpl/TestOzoneContainer.java    | 51 +++++++++++++++++--
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   | 16 +++---
 .../proto/ScmServerDatanodeHeartbeatProtocol.proto |  6 +++
 8 files changed, 152 insertions(+), 30 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index 061d09b..2ad7f0d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -88,11 +88,14 @@ public final class StorageLocationReport implements
     return storageType;
   }
 
+  private StorageTypeProto getStorageTypeProto() throws IOException {
+    return getStorageTypeProto(getStorageType());
+  }
 
-  private StorageTypeProto getStorageTypeProto() throws
-      IOException {
+  public static StorageTypeProto getStorageTypeProto(StorageType type)
+      throws IOException {
     StorageTypeProto storageTypeProto;
-    switch (getStorageType()) {
+    switch (type) {
     case SSD:
       storageTypeProto = StorageTypeProto.SSD;
       break;
@@ -145,7 +148,7 @@ public final class StorageLocationReport implements
    * @return SCMStorageReport
    * @throws IOException In case, the storage type specified is invalid.
    */
-  public StorageReportProto getProtoBufMessage() throws IOException{
+  public StorageReportProto getProtoBufMessage() throws IOException {
     StorageReportProto.Builder srb = StorageReportProto.newBuilder();
     return srb.setStorageUuid(getId())
         .setCapacity(getCapacity())
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
index 01f463c..d8dfefd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java
@@ -24,7 +24,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReport;
-
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
@@ -83,4 +84,13 @@ public interface XceiverServerSpi {
    * @return list of report for each pipeline.
    */
   List<PipelineReport> getPipelineReport();
+
+  /**
+   * Get storage report for the XceiverServer instance.
+   * @return list of report for each storage location.
+   */
+  default List<MetadataStorageReportProto> getStorageReport() throws
+      IOException {
+    return null;
+  }
 }
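The new getStorageReport() is a default method so that existing XceiverServerSpi
implementations (such as the standalone gRPC server) keep compiling unchanged;
only the Ratis server overrides it, and callers must treat null as "no metadata
volumes". A minimal sketch of that pattern, using hypothetical names rather than
the real Ozone classes:

    import java.io.IOException;
    import java.util.List;

    // Illustrative sketch of the default-method pattern above; these are
    // hypothetical names, not the actual Ozone classes.
    interface ServerSpi {
      // Servers that manage no Ratis metadata volumes inherit this default.
      default List<String> getStorageReport() throws IOException {
        return null;
      }
    }

    class GrpcLikeServer implements ServerSpi {
      // No override needed; the default keeps this implementation compiling.
    }

    class RatisLikeServer implements ServerSpi {
      @Override
      public List<String> getStorageReport() {
        // Hypothetical volume paths, one entry per configured raft log disk.
        return List.of("/data/ratis1", "/mnt/ssd0/ratis");
      }
    }

Returning null rather than an empty list is what forces the null guard seen
later in OzoneContainer.getNodeReport().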
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 3e6ac10..c751c5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -22,7 +22,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -30,6 +29,7 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.UUID;
+import java.util.EnumMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
@@ -59,11 +60,14 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
@@ -136,6 +140,11 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   // Timeout used while calling submitRequest directly.
   private long requestTimeout;
 
+  /**
+   * Maintains a list of active volumes per StorageType.
+   */
+  private EnumMap<StorageType, List<String>> ratisVolumeMap;
+
   private XceiverServerRatis(DatanodeDetails dd, int port,
       ContainerDispatcher dispatcher, ContainerController containerController,
       StateContext context, GrpcTlsConfig tlsConfig, ConfigurationSource conf)
@@ -163,6 +172,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT,
         HddsConfigKeys.HDDS_DATANODE_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
+    initializeRatisVolumeMap();
   }
 
   private ContainerStateMachine getStateMachine(RaftGroupId gid) {
@@ -213,9 +223,12 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     setNodeFailureTimeout(properties);
 
     // Set the ratis storage directory
-    String storageDir = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
-    RaftServerConfigKeys.setStorageDir(properties,
-        Collections.singletonList(new File(storageDir)));
+    Collection<String> storageDirPaths =
+        HddsServerUtil.getOzoneDatanodeRatisDirectory(conf);
+    List<File> storageDirs= new ArrayList<>(storageDirPaths.size());
+    storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d)));
+
+    RaftServerConfigKeys.setStorageDir(properties, storageDirs);
 
     // For grpc set the maximum message size
     GrpcConfigKeys.setMessageSizeMax(properties,
@@ -526,6 +539,43 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     }
   }
 
+  private void initializeRatisVolumeMap() throws IOException {
+    ratisVolumeMap = new EnumMap<>(StorageType.class);
+    Collection<String> rawLocations = HddsServerUtil.
+        getOzoneDatanodeRatisDirectory(conf);
+
+    for (String locationString : rawLocations) {
+      try {
+        StorageLocation location = StorageLocation.parse(locationString);
+        StorageType type = location.getStorageType();
+        ratisVolumeMap.computeIfAbsent(type, k -> new ArrayList<String>(1));
+        ratisVolumeMap.get(location.getStorageType()).
+            add(location.getUri().getPath());
+
+      } catch (IOException e) {
+        LOG.error("Failed to parse the storage location: " +
+            locationString, e);
+      }
+    }
+  }
+
+  @Override
+  public List<MetadataStorageReportProto> getStorageReport()
+      throws IOException {
+    List<MetadataStorageReportProto> reportProto = new ArrayList<>();
+    for (StorageType storageType : ratisVolumeMap.keySet()) {
+      for (String path : ratisVolumeMap.get(storageType)) {
+        MetadataStorageReportProto.Builder builder = MetadataStorageReportProto.
+            newBuilder();
+        builder.setStorageLocation(path);
+        builder.setStorageType(StorageLocationReport.
+            getStorageTypeProto(storageType));
+        reportProto.add(builder.build());
+      }
+    }
+    return reportProto;
+  }
+
   private RaftClientRequest createRaftClientRequest(
       ContainerCommandRequestProto request, HddsProtos.PipelineID pipelineID,
       RaftClientRequest.Type type) {
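initializeRatisVolumeMap() groups every configured Ratis log directory under
its storage type, creating each bucket on first use via computeIfAbsent. The
same grouping idea in a self-contained sketch; the "[TYPE]" prefix parsing here
is a simplification of what StorageLocation.parse() accepts, and the paths are
made up:

    import java.util.ArrayList;
    import java.util.EnumMap;
    import java.util.List;

    public class VolumeGroupingSketch {
      enum StorageType { DISK, SSD, ARCHIVE, RAM_DISK }

      public static void main(String[] args) {
        // Hypothetical config values; HDFS's StorageLocation.parse() accepts
        // an optional "[TYPE]" prefix like these.
        List<String> rawLocations = List.of(
            "[SSD]/mnt/ssd0/ratis", "/data/ratis1", "[SSD]/mnt/ssd1/ratis");

        EnumMap<StorageType, List<String>> volumeMap =
            new EnumMap<>(StorageType.class);
        for (String raw : rawLocations) {
          StorageType type = StorageType.DISK; // default when no prefix given
          String path = raw;
          if (raw.startsWith("[")) {
            int end = raw.indexOf(']');
            type = StorageType.valueOf(raw.substring(1, end));
            path = raw.substring(end + 1);
          }
          // One bucket per storage type, created lazily as in the commit.
          volumeMap.computeIfAbsent(type, k -> new ArrayList<>()).add(path);
        }
        System.out.println(volumeMap);
        // -> {DISK=[/data/ratis1], SSD=[/mnt/ssd0/ratis, /mnt/ssd1/ratis]}
      }
    }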
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index bc61811..b8c6067 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -36,8 +36,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
@@ -487,7 +485,7 @@ public class MutableVolumeSet implements VolumeSet {
     return ImmutableMap.copyOf(volumeStateMap);
   }
 
-  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
+  public StorageLocationReport[] getStorageReport()
       throws IOException {
     boolean failed;
     this.readLock();
@@ -540,11 +538,7 @@ public class MutableVolumeSet implements VolumeSet {
         StorageLocationReport r = builder.build();
         reports[counter++] = r;
       }
-      NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
-      for (int i = 0; i < reports.length; i++) {
-        nrb.addStorageReport(reports[i].getProtoBufMessage());
-      }
-      return nrb.build();
+      return reports;
     } finally {
       this.readUnlock();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index bbbec25..62fd5a4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -294,8 +295,21 @@ public class OzoneContainer {
    * Returns node report of container storage usage.
    */
   public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
-      throws IOException {
-    return volumeSet.getNodeReport();
+      throws IOException {
+    StorageLocationReport[] reports = volumeSet.getStorageReport();
+    StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb
+        = StorageContainerDatanodeProtocolProtos.
+        NodeReportProto.newBuilder();
+    for (int i = 0; i < reports.length; i++) {
+      nrb.addStorageReport(reports[i].getProtoBufMessage());
+    }
+    List<StorageContainerDatanodeProtocolProtos.
+        MetadataStorageReportProto> metadataReport =
+        writeChannel.getStorageReport();
+    if (metadataReport != null) {
+      nrb.addAllMetadataStorageReport(metadataReport);
+    }
+    return nrb.build();
   }
 
   @VisibleForTesting
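With this change MutableVolumeSet hands back raw StorageLocationReport objects,
and the proto assembly moves up into OzoneContainer, which is what lets one
NodeReportProto carry both the data-volume reports and the new Ratis
metadata-volume reports. A self-contained stand-in for that merge, using plain
lists instead of the generated protobuf builders:

    import java.util.ArrayList;
    import java.util.List;

    // Stand-in for the merge in getNodeReport() above; plain lists, not the
    // generated protobuf classes.
    public class NodeReportMergeSketch {

      static List<String> buildNodeReport(List<String> dataVolumeReports,
          List<String> metadataVolumeReports) {
        List<String> combined = new ArrayList<>(dataVolumeReports);
        // Mirrors the null guard: a write channel without a Ratis server
        // contributes no metadata volume reports.
        if (metadataVolumeReports != null) {
          combined.addAll(metadataVolumeReports);
        }
        return combined;
      }

      public static void main(String[] args) {
        System.out.println(buildNodeReport(List.of("/data1", "/data2"),
            List.of("/ratis1")));                 // [/data1, /data2, /ratis1]
        System.out.println(buildNodeReport(List.of("/data1"), null));
        // [/data1]
      }
    }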
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 1056a0d..2bb52f6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
@@ -45,10 +46,7 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.*;
 import org.junit.rules.TemporaryFolder;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -170,6 +168,51 @@ public class TestOzoneContainer {
   }
 
   @Test
+  public void testBuildNodeReport() throws Exception {
+    String path = folder.getRoot()
+        .getAbsolutePath();
+    conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR,
+        String.join(",",
+            path + "/ratis1", path + "/ratis2", path + "ratis3"));
+    DatanodeStateMachine stateMachine = Mockito.mock(
+        DatanodeStateMachine.class);
+    StateContext context = Mockito.mock(StateContext.class);
+    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
+    Mockito.when(context.getParent()).thenReturn(stateMachine);
+    // When OzoneContainer is started, the containers from disk should be
+    // loaded into the containerSet.
+    // Also expected to initialize committed space for each volume.
+    OzoneContainer ozoneContainer = new
+        OzoneContainer(datanodeDetails, conf, context, null);
+    Assert.assertEquals(volumeSet.getVolumesList().size(),
+        ozoneContainer.getNodeReport().getStorageReportList().size());
+    Assert.assertEquals(3,
+        ozoneContainer.getNodeReport().getMetadataStorageReportList()
+            .size());
+
+  }
+
+  @Test
+  public void testBuildNodeReportWithDefaultRatisLogDir() throws Exception {
+    DatanodeStateMachine stateMachine = Mockito.mock(
+        DatanodeStateMachine.class);
+    StateContext context = Mockito.mock(StateContext.class);
+    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails);
+    Mockito.when(context.getParent()).thenReturn(stateMachine);
+    // When OzoneContainer is started, the containers from disk should be
+    // loaded into the containerSet.
+    // Also expected to initialize committed space for each volume.
+    OzoneContainer ozoneContainer = new
+        OzoneContainer(datanodeDetails, conf, context, null);
+    Assert.assertEquals(volumeSet.getVolumesList().size(),
+        ozoneContainer.getNodeReport().getStorageReportList().size());
+    Assert.assertEquals(1,
+        ozoneContainer.getNodeReport().getMetadataStorageReportList()
+            .size());
+  }
+
+
+  @Test
   public void testContainerCreateDiskFull() throws Exception {
     long containerSize = (long) StorageUnit.MB.toBytes(100);
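Both tests stub DatanodeStateMachine and StateContext so an OzoneContainer can
be constructed without a running datanode: three comma-separated directories
are expected to yield three metadata storage reports, and an unset key yields
one report for the single default directory. The Mockito wiring in miniature,
against hypothetical interfaces rather than the real Ozone classes:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    // Hypothetical interfaces mirroring the StateContext /
    // DatanodeStateMachine relationship used in the tests above.
    interface StateMachine { String datanodeId(); }
    interface Context { StateMachine getParent(); }

    public class MockWiringSketch {
      public static void main(String[] args) {
        StateMachine stateMachine = mock(StateMachine.class);
        Context context = mock(Context.class);
        when(stateMachine.datanodeId()).thenReturn("dn-1");
        // The code under test only reaches the state machine via its parent.
        when(context.getParent()).thenReturn(stateMachine);

        System.out.println(context.getParent().datanodeId()); // dn-1
      }
    }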
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index ee51142..8e7f326 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdds.utils;
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
@@ -51,7 +53,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.security.UserGroupInformation;
 
-import com.google.common.base.Strings;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
@@ -344,15 +345,16 @@ public final class HddsServerUtil {
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
   }
 
-  public static String getOzoneDatanodeRatisDirectory(
+  public static Collection<String> getOzoneDatanodeRatisDirectory(
       ConfigurationSource conf) {
-    String storageDir = conf.get(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+    Collection<String> rawLocations = conf.getTrimmedStringCollection(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
 
-    if (Strings.isNullOrEmpty(storageDir)) {
-      storageDir = ServerUtils.getDefaultRatisDirectory(conf);
+    if (rawLocations.isEmpty()) {
+      rawLocations = new ArrayList<>(1);
+      rawLocations.add(ServerUtils.getDefaultRatisDirectory(conf));
     }
-    return storageDir;
+    return rawLocations;
   }
 
   /**
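Reading the key with getTrimmedStringCollection() means a comma-separated value
produces one entry per directory, with surrounding whitespace trimmed, while an
unset or blank key falls back to a single default directory. A stand-alone
sketch of that fallback behavior, using plain string splitting in place of the
Hadoop configuration API (defaultDir plays the role of
ServerUtils.getDefaultRatisDirectory(conf)):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.stream.Collectors;

    public class RatisDirConfigSketch {

      static Collection<String> getRatisDirectories(String configured,
          String defaultDir) {
        Collection<String> dirs = configured == null
            ? new ArrayList<>()
            : Arrays.stream(configured.split(","))
                .map(String::trim)
                .filter(s -> !s.isEmpty())
                .collect(Collectors.toCollection(ArrayList::new));
        if (dirs.isEmpty()) {
          dirs.add(defaultDir); // unset or blank key: single default directory
        }
        return dirs;
      }

      public static void main(String[] args) {
        System.out.println(getRatisDirectories(" /r1, /r2 ,/r3", "/default"));
        // -> [/r1, /r2, /r3]
        System.out.println(getRatisDirectories(null, "/default"));
        // -> [/default]
      }
    }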
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 191a81a..00c8fdb 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -146,6 +146,7 @@ message SCMNodeAddressList {
  */
 message NodeReportProto {
   repeated StorageReportProto storageReport = 1;
+  repeated MetadataStorageReportProto metadataStorageReport = 2;
 }
 
 message StorageReportProto {
@@ -158,6 +159,11 @@ message StorageReportProto {
   optional bool failed = 7 [default = false];
 }
 
+message MetadataStorageReportProto {
+  required string storageLocation = 1;
+  optional StorageTypeProto storageType = 2 [default = DISK];
+}
+
 /**
  * Types of recognized storage media.
  */
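Since metadataStorageReport is a new repeated field with its own tag, an SCM
built before this change should simply ignore it on the wire. A hedged sketch
of populating the new message through the generated builders (class names
follow the imports used elsewhere in this commit; treat the paths as made up):

    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;

    // Sketch only: one metadata storage report inside a node report.
    public class MetadataReportSketch {
      public static void main(String[] args) {
        MetadataStorageReportProto metadata =
            MetadataStorageReportProto.newBuilder()
                .setStorageLocation("/data/ratis1")   // hypothetical log dir
                .setStorageType(StorageTypeProto.SSD) // DISK if unset
                .build();
        NodeReportProto nodeReport = NodeReportProto.newBuilder()
            .addMetadataStorageReport(metadata)
            .build();
        System.out.println(nodeReport);
      }
    }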
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]