This is an automated email from the ASF dual-hosted git repository.

yqlin pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push: new fb851c9 HDDS-1232. Recon Container DB service definition. Contributed by Aravindan Vijayan. fb851c9 is described below commit fb851c94817e69ffa75d2b87f496c658c273b73b Author: Yiqun Lin <yq...@apache.org> AuthorDate: Fri Mar 8 16:59:41 2019 +0800 HDDS-1232. Recon Container DB service definition. Contributed by Aravindan Vijayan. --- .../apache/hadoop/utils/LevelDBStoreIterator.java | 5 + .../org/apache/hadoop/utils/MetaStoreIterator.java | 5 + .../apache/hadoop/utils/RocksDBStoreIterator.java | 5 + .../common/src/main/resources/ozone-default.xml | 94 +++++++++++++ .../org/apache/hadoop/utils/TestMetadataStore.java | 52 ++++++++ .../org/apache/hadoop/hdds/server/ServerUtils.java | 30 +++-- hadoop-ozone/dist/pom.xml | 7 + hadoop-ozone/integration-test/pom.xml | 4 + .../hadoop/ozone/TestOzoneConfigurationFields.java | 2 + hadoop-ozone/ozone-recon/pom.xml | 13 +- ...onControllerModule.java => ReconConstants.java} | 20 ++- .../hadoop/ozone/recon/ReconControllerModule.java | 7 + .../apache/hadoop/ozone/recon/ReconHttpServer.java | 20 +-- ...nfiguration.java => ReconServerConfigKeys.java} | 12 +- .../types/ContainerKeyPrefix.java} | 36 +++-- .../recon/spi/ContainerDBServiceProvider.java | 58 ++++++++ .../ozone/recon/spi/ReconContainerDBProvider.java | 77 +++++++++++ .../spi/impl/ContainerDBServiceProviderImpl.java | 138 +++++++++++++++++++ .../impl/package-info.java} | 20 +-- .../impl/TestContainerDBServiceProviderImpl.java | 148 +++++++++++++++++++++ .../hadoop/ozone/recon/spi/impl/package-info.java} | 19 +-- hadoop-ozone/pom.xml | 5 + 22 files changed, 697 insertions(+), 80 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java index 7b62f7a..92051dd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStoreIterator.java @@ -61,4 +61,9 @@ public class LevelDBStoreIterator implements MetaStoreIterator<KeyValue> { public void seekToLast() { levelDBIterator.seekToLast(); } + + @Override + public void prefixSeek(byte[] prefix) { + levelDBIterator.seek(prefix); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java index 52d0a3e..15ded0d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java @@ -36,4 +36,9 @@ public interface MetaStoreIterator<T> extends Iterator<T> { */ void seekToLast(); + /** + * seek with prefix. 
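+ * Positions the iterator at the first entry whose key is at or after the given prefix; the underlying store only guarantees this starting position, so callers must still check that each returned key actually begins with the prefix.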
+ */ + void prefixSeek(byte[] prefix); + } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java index 6e9b695..161d5de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreIterator.java @@ -63,4 +63,9 @@ public class RocksDBStoreIterator implements MetaStoreIterator<KeyValue> { rocksDBIterator.seekToLast(); } + @Override + public void prefixSeek(byte[] prefix) { + rocksDBIterator.seek(prefix); + } + } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a95d9d1..a0b4c52 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2144,4 +2144,98 @@ milliseconds. </description> </property> + <property> + <name>ozone.recon.http.enabled</name> + <value>true</value> + <tag>RECON, MANAGEMENT</tag> + <description> + Property to enable or disable Recon web user interface. + </description> + </property> + <property> + <name>ozone.recon.http-address</name> + <value>0.0.0.0:9888</value> + <tag>RECON, MANAGEMENT</tag> + <description> + The address and the base port where the Recon web UI will listen on. + + If the port is 0, then the server will start on a free port. However, it + is best to specify a well-known port, so it is easy to connect and see + the Recon management UI. + </description> + </property> + <property> + <name>ozone.recon.http-bind-host</name> + <value>0.0.0.0</value> + <tag>RECON, MANAGEMENT</tag> + <description> + The actual address the Recon server will bind to. If this optional + the address is set, it overrides only the hostname portion of + ozone.recon.http-address. + </description> + </property> + <property> + <name>ozone.recon.https-bind-host</name> + <value>0.0.0.0</value> + <tag>RECON, MANAGEMENT, SECURITY</tag> + <description> + The actual address the Recon web server will bind to using HTTPS. + If this optional address is set, it overrides only the hostname portion of + ozone.recon.https-address. + </description> + </property> + <property> + <name>ozone.recon.https-address</name> + <value>0.0.0.0:9889</value> + <tag>RECON, MANAGEMENT, SECURITY</tag> + <description> + The address and the base port where the Recon web UI will listen + on using HTTPS. If the port is 0 then the server will start on a free + port. + </description> + </property> + <property> + <name>ozone.recon.keytab.file</name> + <value/> + <tag>RECON, SECURITY</tag> + <description> + The keytab file for Kerberos authentication in Recon. + </description> + </property> + <property> + <name>ozone.recon.authentication.kerberos.principal</name> + <value/> + <tag>RECON</tag> + <description>The server principal used by Ozone Recon server. This is + typically set to HTTP/_h...@realm.tld The SPNEGO server principal + begins with the prefix HTTP/ by convention. + </description> + </property> + <property> + <name>ozone.recon.container.db.cache.size.mb</name> + <value>128</value> + <tag>RECON, PERFORMANCE</tag> + <description> + The size of Recon DB cache in MB that used for caching files. + This value is set to an abnormally low value in the default configuration. + That is to make unit testing easy. Generally, this value should be set to + something like 16GB or more, if you intend to use Recon at scale. 
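+ The value is specified in megabytes and is converted to bytes when the Recon container DB store is created.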
+ + A large value for this key allows a proportionally larger amount of Recon + container DB to be cached in memory. This makes Recon Container-Key + operations faster. + </description> + </property> + <property> + <name>ozone.recon.db.dirs</name> + <value/> + <tag>OZONE, RECON, STORAGE, PERFORMANCE</tag> + <description> + Directory where the Recon Server stores its metadata. This should + be specified as a single directory. If the directory does not + exist then the Recon will attempt to create it. + If undefined, then the Recon will log a warning and fallback to + ozone.metadata.dirs. + </description> + </property> </configuration> diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java index 5da8fbc..96d818b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestMetadataStore.java @@ -163,6 +163,58 @@ public class TestMetadataStore { } + + @Test + public void testIteratorPrefixSeek() throws Exception { + Configuration conf = new OzoneConfiguration(); + conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); + File dbDir = GenericTestUtils.getRandomizedTestDir(); + MetadataStore dbStore = MetadataStoreBuilder.newBuilder() + .setConf(conf) + .setCreateIfMissing(true) + .setDbFile(dbDir) + .build(); + + for (int i = 0; i < 5; i++) { + dbStore.put(getBytes("a" + i), getBytes("a-value" + i)); + } + + for (int i = 0; i < 5; i++) { + dbStore.put(getBytes("b" + i), getBytes("b-value" + i)); + } + + for (int i = 0; i < 5; i++) { + dbStore.put(getBytes("c" + i), getBytes("c-value" + i)); + } + + for (int i = 5; i < 10; i++) { + dbStore.put(getBytes("b" + i), getBytes("b-value" + i)); + } + + for (int i = 5; i < 10; i++) { + dbStore.put(getBytes("a" + i), getBytes("a-value" + i)); + } + + + MetaStoreIterator<KeyValue> metaStoreIterator = dbStore.iterator(); + metaStoreIterator.prefixSeek(getBytes("b")); + int i = 0; + while (metaStoreIterator.hasNext()) { + KeyValue val = metaStoreIterator.next(); + String key = getString(val.getKey()); + if (key.startsWith("b")) { + assertEquals("b-value" + i, getString(val.getValue())); + } else { + break; + } + i++; + } + assertTrue(i == 10); + dbStore.close(); + dbStore.destroy(); + FileUtils.deleteDirectory(dbDir); + } + @Test public void testMetaStoreConfigDifferentFromType() throws IOException { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index fae6839..ab5d2ec 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -125,13 +125,29 @@ public final class ServerUtils { * @return */ public static File getScmDbDir(Configuration conf) { - final Collection<String> metadirs = conf.getTrimmedStringCollection( - ScmConfigKeys.OZONE_SCM_DB_DIRS); + + File metadataDir = getDirWithFallBackToOzoneMetadata(conf, ScmConfigKeys + .OZONE_SCM_DB_DIRS, "SCM"); + if (metadataDir != null) { + return metadataDir; + } + + LOG.warn("{} is not configured. We recommend adding this setting. 
" + + "Falling back to {} instead.", + ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS); + return getOzoneMetaDirPath(conf); + } + + public static File getDirWithFallBackToOzoneMetadata(Configuration conf, + String key, + String componentName) { + final Collection<String> metadirs = conf.getTrimmedStringCollection(key); if (metadirs.size() > 1) { throw new IllegalArgumentException( - "Bad config setting " + ScmConfigKeys.OZONE_SCM_DB_DIRS + - ". SCM does not support multiple metadata dirs currently"); + "Bad config setting " + key + + ". " + componentName + + " does not support multiple metadata dirs currently"); } if (metadirs.size() == 1) { @@ -143,11 +159,7 @@ public final class ServerUtils { } return dbDirPath; } - - LOG.warn("{} is not configured. We recommend adding this setting. " + - "Falling back to {} instead.", - ScmConfigKeys.OZONE_SCM_DB_DIRS, HddsConfigKeys.OZONE_METADATA_DIRS); - return getOzoneMetaDirPath(conf); + return null; } /** diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 5e9cbf5..d9fc915 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -103,6 +103,13 @@ <classifier>classpath</classifier> <destFileName>hadoop-ozone-datanode.classpath</destFileName> </artifactItem> + <artifactItem> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-ozone-recon</artifactId> + <version>${ozone.version}</version> + <classifier>classpath</classifier> + <destFileName>hadoop-ozone-recon.classpath</destFileName> + </artifactItem> </artifactItems> </configuration> </execution> diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 349d69f..5cc6c9e 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -54,6 +54,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-ozone-recon</artifactId> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-ozone-client</artifactId> </dependency> <dependency> diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 30f0749..fa14f47 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.TestConfigurationFieldsBase; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; import org.apache.hadoop.ozone.s3.S3GatewayConfigKeys; /** @@ -34,6 +35,7 @@ public class TestOzoneConfigurationFields extends TestConfigurationFieldsBase { configurationClasses = new Class[] {OzoneConfigKeys.class, ScmConfigKeys.class, OMConfigKeys.class, HddsConfigKeys.class, + ReconServerConfigKeys.class, S3GatewayConfigKeys.class}; errorIfMissingConfigProps = true; errorIfMissingXmlProps = true; diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml index b8e4187..c8bff4c 100644 --- a/hadoop-ozone/ozone-recon/pom.xml +++ b/hadoop-ozone/ozone-recon/pom.xml @@ -22,7 +22,7 @@ </parent> <name>Apache Hadoop Ozone Recon</name> <modelVersion>4.0.0</modelVersion> - 
<artifactId>ozone-recon</artifactId> + <artifactId>hadoop-ozone-recon</artifactId> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> @@ -50,5 +50,16 @@ <artifactId>guice-assistedinject</artifactId> <version>4.1.0</version> </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-all</artifactId> + <version>1.10.19</version> + <scope>test</scope> + </dependency> </dependencies> </project> \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java similarity index 67% copy from hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java copy to hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 1a90e70..1ea132a 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -15,22 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.recon; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +package org.apache.hadoop.ozone.recon; -import com.google.inject.AbstractModule; -import com.google.inject.Singleton; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB_SUFFIX; /** - * Guice controller that defines concrete bindings. + * Recon Server constants file. */ -public class ReconControllerModule extends AbstractModule { - @Override - protected void configure() { - bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class); - bind(ReconHttpServer.class).in(Singleton.class); - } +public final class ReconConstants { + private ReconConstants() { + // Never Constructed + } + public static final String RECON_CONTAINER_DB = "recon-" + + CONTAINER_DB_SUFFIX; } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index 1a90e70..f282b9a 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -18,6 +18,10 @@ package org.apache.hadoop.ozone.recon; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.recon.spi.ReconContainerDBProvider; +import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.ContainerDBServiceProviderImpl; +import org.apache.hadoop.utils.MetadataStore; import com.google.inject.AbstractModule; import com.google.inject.Singleton; @@ -30,6 +34,9 @@ public class ReconControllerModule extends AbstractModule { protected void configure() { bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class); bind(ReconHttpServer.class).in(Singleton.class); + bind(MetadataStore.class).toProvider(ReconContainerDBProvider.class); + bind(ContainerDBServiceProvider.class) + .to(ContainerDBServiceProviderImpl.class); } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java index 72818c5..e7dcb0c 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconHttpServer.java @@ -37,52 +37,52 @@ public class ReconHttpServer extends BaseHttpServer { @Override protected String getHttpAddressKey() { - return ReconServerConfiguration.OZONE_RECON_HTTP_ADDRESS_KEY; + return ReconServerConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; } @Override protected String getHttpsAddressKey() { - return ReconServerConfiguration.OZONE_RECON_HTTPS_ADDRESS_KEY; + return ReconServerConfigKeys.OZONE_RECON_HTTPS_ADDRESS_KEY; } @Override protected String getHttpBindHostKey() { - return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_HOST_KEY; + return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_KEY; } @Override protected String getHttpsBindHostKey() { - return ReconServerConfiguration.OZONE_RECON_HTTPS_BIND_HOST_KEY; + return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_HOST_KEY; } @Override protected String getBindHostDefault() { - return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_HOST_DEFAULT; + return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_HOST_DEFAULT; } @Override protected int getHttpBindPortDefault() { - return ReconServerConfiguration.OZONE_RECON_HTTP_BIND_PORT_DEFAULT; + return ReconServerConfigKeys.OZONE_RECON_HTTP_BIND_PORT_DEFAULT; } @Override protected int getHttpsBindPortDefault() { - return ReconServerConfiguration.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT; + return ReconServerConfigKeys.OZONE_RECON_HTTPS_BIND_PORT_DEFAULT; } @Override protected String getKeytabFile() { - return ReconServerConfiguration.OZONE_RECON_KEYTAB_FILE; + return ReconServerConfigKeys.OZONE_RECON_KEYTAB_FILE; } @Override protected String getSpnegoPrincipal() { - return ReconServerConfiguration + return ReconServerConfigKeys .OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; } @Override protected String getEnabledKey() { - return ReconServerConfiguration.OZONE_RECON_HTTP_ENABLED_KEY; + return ReconServerConfigKeys.OZONE_RECON_HTTP_ENABLED_KEY; } } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfiguration.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java similarity index 85% rename from hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfiguration.java rename to hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 78281bc..5e4b732 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfiguration.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; */ @InterfaceAudience.Public @InterfaceStability.Unstable -public final class ReconServerConfiguration { +public final class ReconServerConfigKeys { public static final String OZONE_RECON_HTTP_ENABLED_KEY = "ozone.recon.http.enabled"; @@ -45,12 +45,16 @@ public final class ReconServerConfiguration { public static final int OZONE_RECON_HTTPS_BIND_PORT_DEFAULT = 9889; public static final String OZONE_RECON_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL = "ozone.recon.authentication.kerberos.principal"; - public static final String OZONE_RECON_DOMAIN_NAME = - "ozone.recon.domain.name"; + + public static final 
String OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB = + "ozone.recon.container.db.cache.size.mb"; + public static final int OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT = 128; + + public static final String OZONE_RECON_DB_DIRS = "ozone.recon.db.dirs"; /** * Private constructor for utility class. */ - private ReconServerConfiguration() { + private ReconServerConfigKeys() { } } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java similarity index 54% copy from hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java copy to hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java index 1a90e70..064dc5c 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerKeyPrefix.java @@ -15,22 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.recon; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import com.google.inject.AbstractModule; -import com.google.inject.Singleton; +package org.apache.hadoop.ozone.recon.api.types; /** - * Guice controller that defines concrete bindings. + * Class to encapsulate the Key information needed for the Recon container DB. + * Currently, it is containerId and key prefix. */ -public class ReconControllerModule extends AbstractModule { - @Override - protected void configure() { - bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class); - bind(ReconHttpServer.class).in(Singleton.class); +public class ContainerKeyPrefix { + + private long containerId; + private String keyPrefix; + + public ContainerKeyPrefix(long containerId, String keyPrefix) { + this.containerId = containerId; + this.keyPrefix = keyPrefix; } + public long getContainerId() { + return containerId; + } + public void setContainerId(long containerId) { + this.containerId = containerId; + } + + public String getKeyPrefix() { + return keyPrefix; + } + + public void setKeyPrefix(String keyPrefix) { + this.keyPrefix = keyPrefix; + } } diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java new file mode 100644 index 0000000..b2acc1d --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ContainerDBServiceProvider.java @@ -0,0 +1,58 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.spi; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; + +/** + * The Recon Container DB Service interface. + */ +@InterfaceStability.Unstable +public interface ContainerDBServiceProvider { + + /** + * Store the container to Key prefix mapping into the Recon Container DB. + * + * @param containerKeyPrefix the containerId, key-prefix tuple. + * @param count Count of Keys with that prefix. + */ + void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, + Integer count) throws IOException; + + /** + * Get the stored key prefix count for the given containerId, key prefix. + * + * @param containerKeyPrefix the containerId, key-prefix tuple. + * @return count of keys with that prefix. + */ + Integer getCountForForContainerKeyPrefix( + ContainerKeyPrefix containerKeyPrefix) throws IOException; + + /** + * Get the stored key prefixes for the given containerId. + * + * @param containerId the given containerId. + * @return Map of Key prefix -> count. + */ + Map<String, Integer> getKeyPrefixesForContainer(long containerId); +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerDBProvider.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerDBProvider.java new file mode 100644 index 0000000..2227d49 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/ReconContainerDBProvider.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.spi; + +import static org.apache.hadoop.ozone.recon.ReconConstants. + RECON_CONTAINER_DB; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys. + OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys. + OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys. 
+ OZONE_RECON_DB_DIRS; + +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.utils.MetadataStoreBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.inject.Inject; +import com.google.inject.Provider; + +/** + * Provider for the Recon container DB (Metadata store). + */ +public class ReconContainerDBProvider implements + Provider<MetadataStore> { + + @VisibleForTesting + private static final Logger LOG = + LoggerFactory.getLogger(ReconContainerDBProvider.class); + + @Inject + private OzoneConfiguration configuration; + + @Override + public MetadataStore get() { + File metaDir = ServerUtils.getDirWithFallBackToOzoneMetadata(configuration, + OZONE_RECON_DB_DIRS, "Recon"); + File containerDBPath = new File(metaDir, RECON_CONTAINER_DB); + int cacheSize = configuration.getInt(OZONE_RECON_CONTAINER_DB_CACHE_SIZE_MB, + OZONE_RECON_CONTAINER_DB_CACHE_SIZE_DEFAULT); + + try { + return MetadataStoreBuilder.newBuilder() + .setConf(configuration) + .setDbFile(containerDBPath) + .setCacheSize(cacheSize * OzoneConsts.MB) + .build(); + } catch (IOException ioEx) { + LOG.error("Unable to initialize Recon container metadata store.", ioEx); + } + return null; + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java new file mode 100644 index 0000000..8706f8d --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerDBServiceProviderImpl.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.spi.impl; + +import static org.apache.commons.compress.utils.CharsetNames.UTF_8; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +import javax.inject.Inject; +import javax.inject.Singleton; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; +import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; +import org.apache.hadoop.utils.MetaStoreIterator; +import org.apache.hadoop.utils.MetadataStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.primitives.Longs; + +/** + * Implementation of the Recon Container DB Service. 
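+ * Each mapping is stored with a key made of the 8-byte big-endian containerId followed by the '_' delimiter and the key prefix, and a value holding the 4-byte integer count.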
+ */ +@Singleton +public class ContainerDBServiceProviderImpl + implements ContainerDBServiceProvider { + + private static final Logger LOG = + LoggerFactory.getLogger(ContainerDBServiceProviderImpl.class); + private final static String KEY_DELIMITER = "_"; + + @Inject + private MetadataStore containerDBStore; + + /** + * Concatenate the containerId and Key Prefix using a delimiter and store the + * count into the container DB store. + * + * @param containerKeyPrefix the containerId, key-prefix tuple. + * @param count Count of the keys matching that prefix. + * @throws IOException + */ + @Override + public void storeContainerKeyMapping(ContainerKeyPrefix containerKeyPrefix, + Integer count) + throws IOException { + byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix + .getContainerId()); + byte[] keyPrefixBytes = (KEY_DELIMITER + containerKeyPrefix.getKeyPrefix()) + .getBytes(UTF_8); + byte[] dbKey = ArrayUtils.addAll(containerIdBytes, keyPrefixBytes); + byte[] dbValue = ByteBuffer.allocate(Integer.BYTES).putInt(count).array(); + containerDBStore.put(dbKey, dbValue); + } + + /** + * Put together the key from the passed in object and get the count from + * the container DB store. + * + * @param containerKeyPrefix the containerId, key-prefix tuple. + * @return count of keys matching the containerId, key-prefix. + * @throws IOException + */ + @Override + public Integer getCountForForContainerKeyPrefix( + ContainerKeyPrefix containerKeyPrefix) throws IOException { + byte[] containerIdBytes = Longs.toByteArray(containerKeyPrefix + .getContainerId()); + byte[] keyPrefixBytes = (KEY_DELIMITER + containerKeyPrefix + .getKeyPrefix()).getBytes(UTF_8); + byte[] dbKey = ArrayUtils.addAll(containerIdBytes, keyPrefixBytes); + byte[] dbValue = containerDBStore.get(dbKey); + return ByteBuffer.wrap(dbValue).getInt(); + } + + /** + * Use the DB's prefix seek iterator to start the scan from the given + * container ID prefix. + * + * @param containerId the given containerId. + * @return Map of (Key-Prefix,Count of Keys). + */ + @Override + public Map<String, Integer> getKeyPrefixesForContainer(long containerId) { + + Map<String, Integer> prefixes = new HashMap<>(); + MetaStoreIterator<MetadataStore.KeyValue> containerIterator = + containerDBStore.iterator(); + byte[] containerIdPrefixBytes = Longs.toByteArray(containerId); + containerIterator.prefixSeek(containerIdPrefixBytes); + while (containerIterator.hasNext()) { + MetadataStore.KeyValue keyValue = containerIterator.next(); + byte[] containerKey = keyValue.getKey(); + long containerIdFromDB = ByteBuffer.wrap(ArrayUtils.subarray( + containerKey, 0, Long.BYTES)).getLong(); + + //The prefix seek only guarantees that the iterator's head will be + // positioned at the first prefix match. We still have to check the key + // prefix. + if (containerIdFromDB == containerId) { + byte[] keyPrefix = ArrayUtils.subarray(containerKey, + containerIdPrefixBytes.length + 1, + containerKey.length); + try { + prefixes.put(new String(keyPrefix, UTF_8), + ByteBuffer.wrap(keyValue.getValue()).getInt()); + } catch (UnsupportedEncodingException e) { + LOG.warn("Unable to read key prefix from container DB.", e); + } + } else { + break; //Break when the first mismatch occurs. 
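+ // The store iterates in sorted key order and the fixed-length containerId bytes lead every key, so entries for a container are contiguous and the scan can stop at the first non-matching containerId.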
+ } + } + return prefixes; + } + +} \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java similarity index 63% copy from hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java copy to hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java index 1a90e70..1ed4429 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java @@ -15,22 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import com.google.inject.AbstractModule; -import com.google.inject.Singleton; - /** - * Guice controller that defines concrete bindings. + * The classes in this package define the Service Provider implementations for + * Recon. This provides connectivity to underlying Ozone subsystems. */ -public class ReconControllerModule extends AbstractModule { - @Override - protected void configure() { - bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class); - bind(ReconHttpServer.class).in(Singleton.class); - } - - -} +package org.apache.hadoop.ozone.recon.spi.impl; \ No newline at end of file diff --git a/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java new file mode 100644 index 0000000..2fc0642 --- /dev/null +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.spi.impl; + +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; +import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider; +import org.apache.hadoop.utils.MetaStoreIterator; +import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.utils.MetadataStoreBuilder; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; + +import com.google.inject.AbstractModule; +import com.google.inject.Guice; +import com.google.inject.Injector; + +/** + * Unit Tests for ContainerDBServiceProviderImpl. + */ +@RunWith(MockitoJUnitRunner.class) +public class TestContainerDBServiceProviderImpl { + + @Rule + public TemporaryFolder tempFolder = new TemporaryFolder(); + + private MetadataStore containerDBStore; + private ContainerDBServiceProvider containerDbServiceProvider + = new ContainerDBServiceProviderImpl(); + private Injector injector; + + @Before + public void setUp() throws IOException { + tempFolder.create(); + File dbDir = tempFolder.getRoot(); + containerDBStore = MetadataStoreBuilder.newBuilder() + .setConf(new OzoneConfiguration()) + .setCreateIfMissing(true) + .setDbFile(dbDir) + .build(); + injector = Guice.createInjector(new AbstractModule() { + @Override + protected void configure() { + bind(MetadataStore.class).toInstance(containerDBStore); + bind(ContainerDBServiceProvider.class) + .toInstance(containerDbServiceProvider); + } + }); + } + + @After + public void tearDown() throws Exception { + tempFolder.delete(); + } + + @Test + public void testStoreContainerKeyMapping() throws Exception { + + long containerId = System.currentTimeMillis(); + Map<String, Integer> prefixCounts = new HashMap<>(); + prefixCounts.put("V1/B1/K1", 1); + prefixCounts.put("V1/B1/K2", 2); + prefixCounts.put("V1/B2/K3", 3); + + for (String prefix : prefixCounts.keySet()) { + ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( + containerId, prefix); + containerDbServiceProvider.storeContainerKeyMapping( + containerKeyPrefix, prefixCounts.get(prefix)); + } + + int count = 0; + MetaStoreIterator<MetadataStore.KeyValue> iterator = + containerDBStore.iterator(); + while (iterator.hasNext()) { + iterator.next(); + count++; + } + assertTrue(count == 3); + } + + @Test + public void testGetCountForForContainerKeyPrefix() throws Exception { + long containerId = System.currentTimeMillis(); + + containerDbServiceProvider.storeContainerKeyMapping(new + ContainerKeyPrefix(containerId, "V1/B1/K1"), 2); + + Integer count = containerDbServiceProvider. 
+ getCountForForContainerKeyPrefix(new ContainerKeyPrefix(containerId, + "V1/B1/K1")); + assertTrue(count == 2); + } + + @Test + public void testGetKeyPrefixesForContainer() throws Exception { + long containerId = System.currentTimeMillis(); + + containerDbServiceProvider.storeContainerKeyMapping(new + ContainerKeyPrefix(containerId, "V1/B1/K1"), 1); + + containerDbServiceProvider.storeContainerKeyMapping(new + ContainerKeyPrefix(containerId, "V1/B1/K2"), 2); + + long nextContainerId = System.currentTimeMillis(); + containerDbServiceProvider.storeContainerKeyMapping(new + ContainerKeyPrefix(nextContainerId, "V1/B2/K1"), 3); + + Map<String, Integer> keyPrefixMap = containerDbServiceProvider + .getKeyPrefixesForContainer(containerId); + assertTrue(keyPrefixMap.size() == 2); + assertTrue(keyPrefixMap.get("V1/B1/K1") == 1); + assertTrue(keyPrefixMap.get("V1/B1/K2") == 2); + + keyPrefixMap = containerDbServiceProvider + .getKeyPrefixesForContainer(nextContainerId); + assertTrue(keyPrefixMap.size() == 1); + assertTrue(keyPrefixMap.get("V1/B2/K1") == 3); + } +} diff --git a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java similarity index 63% copy from hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java copy to hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java index 1a90e70..932c437 100644 --- a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/ozone-recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/package-info.java @@ -15,22 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; - -import com.google.inject.AbstractModule; -import com.google.inject.Singleton; - /** - * Guice controller that defines concrete bindings. + * Package for recon server impl tests. */ -public class ReconControllerModule extends AbstractModule { - @Override - protected void configure() { - bind(OzoneConfiguration.class).toProvider(OzoneConfigurationProvider.class); - bind(ReconHttpServer.class).in(Singleton.class); - } - - -} +package org.apache.hadoop.ozone.recon.spi.impl; \ No newline at end of file diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 91988ed..9884215 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -161,6 +161,11 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-ozone-recon</artifactId> + <version>${ozone.version}</version> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdds-container-service</artifactId> <version>${hdds.version}</version> <type>test-jar</type> --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org