This is an automated email from the ASF dual-hosted git repository.
ngangam pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 3ab174d  HIVE-22995: Add support for location for managed tables on database (Naveen Gangam, reviewed by Thejas Nair, Sam An)
3ab174d is described below
commit 3ab174d82ffc2bd27432c0b04433be3bd7db5c6a
Author: Naveen Gangam <[email protected]>
AuthorDate: Fri Apr 3 02:48:07 2020 -0400
HIVE-22995: Add support for location for managed tables on database (Naveen Gangam, reviewed by Thejas Nair, Sam An)
---
.../hive/hcatalog/mapreduce/HCatMapReduceTest.java | 6 +-
.../metastore/TestHiveMetastoreTransformer.java | 4 +-
.../metastore/TestTenantBasedStorageHierarchy.java | 598 +++++++++++++++++++++
.../hadoop/hive/ql/TestWarehouseExternalDir.java | 2 +-
.../parse/BaseReplicationScenariosAcidTables.java | 1 +
.../parse/TestReplicationScenariosAcidTables.java | 2 +
.../org/apache/hadoop/hive/ql/parse/HiveLexer.g | 1 +
.../org/apache/hadoop/hive/ql/parse/HiveParser.g | 22 +-
.../hadoop/hive/ql/parse/IdentifiersParser.g | 2 +-
.../location/AlterDatabaseSetLocationDesc.java | 20 +-
.../AlterDatabaseSetLocationOperation.java | 31 +-
.../AlterDatabaseSetManagedLocationAnalyzer.java | 47 ++
.../database/create/CreateDatabaseAnalyzer.java | 10 +-
.../ql/ddl/database/create/CreateDatabaseDesc.java | 12 +
.../database/create/CreateDatabaseOperation.java | 21 +-
.../ql/ddl/database/desc/DescDatabaseDesc.java | 2 +-
.../ddl/database/desc/DescDatabaseOperation.java | 2 +-
.../org/apache/hadoop/hive/ql/exec/Utilities.java | 5 +
.../metadata/formatting/JsonMetaDataFormatter.java | 5 +-
.../ql/metadata/formatting/MetaDataFormatter.java | 2 +-
.../metadata/formatting/TextMetaDataFormatter.java | 6 +-
.../apache/hadoop/hive/ql/plan/HiveOperation.java | 3 +-
.../clientpositive/alter_change_db_location.q.out | 2 +-
.../results/clientpositive/alter_db_owner.q.out | 6 +-
.../authorization_owner_actions_db.q.out | 2 +-
.../clientpositive/beeline/escape_comments.q.out | 4 +-
.../results/clientpositive/database_location.q.out | 4 +-
.../clientpositive/database_properties.q.out | 6 +-
.../results/clientpositive/db_ddl_explain.q.out | 2 +-
.../results/clientpositive/describe_database.q.out | 4 +-
.../results/clientpositive/escape_comments.q.out | 4 +-
.../clientpositive/tez/explainanalyze_3.q.out | 2 +-
.../results/clientpositive/tez/explainuser_3.q.out | 2 +-
.../results/clientpositive/unicode_comments.q.out | 2 +-
.../apache/hadoop/hive/metastore/api/Database.java | 114 +++-
.../src/gen/thrift/gen-php/metastore/Types.php | 23 +
.../src/gen/thrift/gen-py/hive_metastore/ttypes.py | 15 +-
.../src/gen/thrift/gen-rb/hive_metastore_types.rb | 4 +-
.../apache/hadoop/hive/metastore/Warehouse.java | 77 ++-
.../hadoop/hive/metastore/conf/MetastoreConf.java | 5 +
.../src/main/thrift/hive_metastore.thrift | 1 +
.../hadoop/hive/metastore/HiveAlterHandler.java | 4 +-
.../hadoop/hive/metastore/MetaStoreDirectSql.java | 3 +-
.../metastore/MetastoreDefaultTransformer.java | 148 +++--
.../apache/hadoop/hive/metastore/ObjectStore.java | 5 +
.../metastore/client/builder/DatabaseBuilder.java | 11 +-
.../hadoop/hive/metastore/model/MDatabase.java | 32 ++
.../src/main/resources/package.jdo | 4 +
.../src/main/sql/derby/hive-schema-4.0.0.derby.sql | 3 +-
.../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql | 5 +-
.../src/main/sql/mssql/hive-schema-4.0.0.mssql.sql | 3 +-
.../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql | 2 +
.../src/main/sql/mysql/hive-schema-4.0.0.mysql.sql | 1 +
.../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql | 3 +
.../main/sql/oracle/hive-schema-4.0.0.oracle.sql | 3 +-
.../sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql | 3 +
.../sql/postgres/hive-schema-4.0.0.postgres.sql | 3 +-
.../postgres/upgrade-3.2.0-to-4.0.0.postgres.sql | 3 +
.../hive/metastore/TestCatalogOldClient.java | 2 +-
.../hadoop/hive/metastore/TestHiveMetaStore.java | 14 +-
.../hive/metastore/client/TestDatabases.java | 2 +-
.../client/TestTablesCreateDropAlterTruncate.java | 4 +-
.../minihms/AbstractMetaStoreService.java | 1 +
.../schematool/TestSchemaToolForMetastore.java | 18 +-
64 files changed, 1223 insertions(+), 137 deletions(-)
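What this adds, in user-visible DDL (a sketch inferred from the grammar and DDL changes below; the database name and paths are illustrative, not from the patch):

    CREATE DATABASE tenant_db
      LOCATION '/warehouse/tenant1/external/tenant_db.db'
      MANAGEDLOCATION '/warehouse/tenant1/managed/tenant_db.db';

    ALTER DATABASE tenant_db SET MANAGEDLOCATION '/warehouse/tenant1/managed/tenant_db.db';

    DESCRIBE DATABASE EXTENDED tenant_db;  -- output now includes a managedLocation column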
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
index 983a66a..a3bc2d5 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatMapReduceTest.java
@@ -160,8 +160,10 @@ public abstract class HCatMapReduceTest extends HCatBaseTest {
client.dropTable(databaseName, tableName);
// in case of external table, drop the table contents as well
if (isTableExternal() && (externalTableLocation != null)) {
- if (fs.exists(new Path(externalTableLocation))) {
- fs.delete(new Path(externalTableLocation), true);
+ Path extPath = new Path(externalTableLocation);
+ FileSystem fileSystem = extPath.getFileSystem(new HiveConf());
+ if (fileSystem.exists(extPath)) {
+ fileSystem.delete(extPath, true);
}
}
} catch (Exception e) {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
index ad7736d..eba6610 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
@@ -1380,7 +1380,7 @@ public class TestHiveMetastoreTransformer {
setHMSClient("TestGetDatabaseACIDWRITE",
(String[])(capabilities.toArray(new String[0])));
db = client.getDatabase(dbName);
- assertFalse("Database location not expected to be external
warehouse:actual=" + db.getLocationUri(),
+ assertTrue("Database location expected to be external warehouse:actual="
+ db.getLocationUri(),
db.getLocationUri().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
resetHMSClient();
@@ -1389,7 +1389,7 @@ public class TestHiveMetastoreTransformer {
setHMSClient("TestGetDatabaseINSERTWRITE",
(String[])(capabilities.toArray(new String[0])));
db = client.getDatabase(dbName);
- assertFalse("Database location not expected to be external
warehouse:actual=" + db.getLocationUri(),
+ assertTrue("Database location expected to be external warehouse:actual="
+ db.getLocationUri(),
db.getLocationUri().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
resetHMSClient();
} catch (Exception e) {
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTenantBasedStorageHierarchy.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTenantBasedStorageHierarchy.java
new file mode 100644
index 0000000..774403d
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestTenantBasedStorageHierarchy.java
@@ -0,0 +1,598 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.File;
+import java.io.IOException;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient.GetTablesRequestBuilder;
+import org.apache.hadoop.hive.metastore.api.Catalog;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.ExtendedTableInfo;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.GetTablesExtRequestFields;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_NONE;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_READONLY;
+import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.ACCESSTYPE_READWRITE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.parse.WarehouseInstance;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.collect.Lists;
+
+import org.apache.thrift.TException;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TestTenantBasedStorageHierarchy {
+  private static final Logger LOG = LoggerFactory.getLogger(TestTenantBasedStorageHierarchy.class);
+ protected static HiveMetaStoreClient client;
+ protected static Configuration conf;
+ File ext_wh = null;
+ File wh = null;
+
+ protected static boolean isThriftClient = true;
+ private static final String CAPABILITIES_KEY = "OBJCAPABILITIES";
+ private static final String DATABASE_WAREHOUSE_SUFFIX = ".db";
+
+ @Before
+ public void setUp() throws Exception {
+ conf = MetastoreConf.newMetastoreConf();
+    wh = new File(System.getProperty("java.io.tmpdir") + File.separator +
+        "hive" + File.separator + "warehouse" + File.separator + "hive" + File.separator);
+ wh.mkdirs();
+
+    ext_wh = new File(System.getProperty("java.io.tmpdir") + File.separator +
+        "hive" + File.separator + "warehouse" + File.separator + "hive-external" + File.separator);
+ ext_wh.mkdirs();
+
+    MetastoreConf.setVar(conf, ConfVars.METASTORE_METADATA_TRANSFORMER_CLASS,
+        "org.apache.hadoop.hive.metastore.MetastoreDefaultTransformer");
+ MetastoreConf.setBoolVar(conf, ConfVars.ALLOW_TENANT_BASED_STORAGE, true);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_IN_TEST, false);
+ MetastoreConf.setVar(conf, ConfVars.WAREHOUSE, wh.getCanonicalPath());
+    MetastoreConf.setVar(conf, ConfVars.WAREHOUSE_EXTERNAL, ext_wh.getCanonicalPath());
+ client = new HiveMetaStoreClient(conf);
+ }
+
+ private static void silentDropDatabase(String dbName) throws TException {
+ try {
+ for (String tableName : client.getTables(dbName, "*")) {
+ client.dropTable(dbName, tableName);
+ }
+ client.dropDatabase(dbName);
+ } catch (NoSuchObjectException|InvalidOperationException e) {
+ // NOP
+ }
+ }
+
+ private void resetHMSClient() {
+ client.setProcessorIdentifier(null);
+ client.setProcessorCapabilities(null);
+ }
+
+ private void setHMSClient(String id, String[] caps) {
+ client.setProcessorIdentifier(id);
+ client.setProcessorCapabilities(caps);
+ }
+
+ private File getManagedRootForTenant(String tenant) {
+ return new File(System.getProperty("java.io.tmpdir") + File.separator +
+ tenant + File.separator +
+ "hive" + File.separator + "warehouse" + File.separator + "managed" +
File.separator);
+ }
+
+ private File getExternalRootForTenant(String tenant) {
+ return new File(System.getProperty("java.io.tmpdir") + File.separator +
+ tenant + File.separator +
+ "hive" + File.separator + "warehouse" + File.separator + "external" +
File.separator);
+ }
+
+ @Test
+ public void testCreateDatabaseOldSyntax() throws Exception {
+ try {
+ resetHMSClient();
+ final String dbName = "db1";
+ String basetblName = "oldstyletable";
+ Map<String, Object> tProps = new HashMap<>();
+
+ Database db = createDatabase("hive", dbName, null, null);
+
+ Database db2 = client.getDatabase("hive", dbName);
+ assertNull(db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not
null:actual=" + db2.getLocationUri());
+
+ String tblName = "ext_" + basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE);
+ StringBuilder properties = new StringBuilder();
+ properties.append("EXTERNAL").append("=").append("TRUE");
+ properties.append(";");
+ tProps.put("PROPERTIES", properties.toString());
+ Table tbl = createTableWithCapabilities(tProps);
+
+ setHMSClient("testCreateDatabaseOldSyntax", (new String[] {
"HIVEBUCKET2", "EXTREAD", "EXTWRITE"}));
+ Table tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("Database location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
+ resetHMSClient();
+
+ tblName = "mgd_" + basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.MANAGED_TABLE);
+ properties = new StringBuilder();
+ properties.append("transactional=true");
+ properties.append(";");
+ properties.append("transactional_properties=insert_only");
+ tProps.put("PROPERTIES", properties.toString());
+
+ setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE",
"HIVEFULLACIDWRITE"});
+ tbl = createTableWithCapabilities(tProps);
+
+ tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("Database location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname())));
+ } catch (Exception e) {
+ fail("testCreateDatabaseOldSyntax failed with " + e.getMessage());
+ } finally {
+ silentDropDatabase("db1");
+ resetHMSClient();
+ }
+ }
+
+ @Test
+ public void testCreateDatabaseWithOldLocation() throws Exception {
+ try {
+ resetHMSClient();
+ final String dbName = "dbx";
+ String basetblName = "oldstyletable";
+ Map<String, Object> tProps = new HashMap<>();
+
+      String location = ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX);
+ Database db = createDatabase("hive", dbName, location, null);
+
+ Database db2 = client.getDatabase("hive", dbName);
+ assertNull(db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not
null:actual=" + db2.getLocationUri());
+
+ String tblName = basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE);
+ StringBuilder properties = new StringBuilder();
+ properties.append("EXTERNAL").append("=").append("TRUE");
+ properties.append(";");
+ tProps.put("PROPERTIES", properties.toString());
+ Table tbl = createTableWithCapabilities(tProps);
+
+ setHMSClient("testCreateDatabaseWithOldLocation", (new String[] {
"HIVEBUCKET2", "EXTREAD", "EXTWRITE"}));
+ Table tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("External table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
+
+ tblName = "mgd_" + basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.MANAGED_TABLE);
+ properties = new StringBuilder();
+ properties.append("transactional=true");
+ properties.append(";");
+ properties.append("transactional_properties=insert_only");
+ tProps.put("PROPERTIES", properties.toString());
+
+ setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE",
"HIVEFULLACIDWRITE"});
+ tbl = createTableWithCapabilities(tProps);
+
+ tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertEquals("Database's locationUri is expected to be equal to set
value",
+ Path.getPathWithoutSchemeAndAuthority(new Path(location)),
+ Path.getPathWithoutSchemeAndAuthority(new
Path(db2.getLocationUri())));
+ assertTrue("Managed table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname())));
+ } catch (Exception e) {
+ fail("testCreateDatabaseWithOldLocation failed with " + e.getMessage());
+ } finally {
+ silentDropDatabase("dbx");
+ resetHMSClient();
+ }
+ }
+
+ @Test
+ public void testCreateDatabaseWithNewLocation() throws Exception {
+ try {
+ resetHMSClient();
+ String dbName = "dbx";
+ String basetblName = "newstyletable";
+ Map<String, Object> tProps = new HashMap<>();
+ String tenant1 = "tenant1";
+ String tenant2 = "tenant2";
+
+ String location = getExternalRootForTenant(tenant1).getAbsolutePath();
+ Database db = createDatabase("hive", dbName, location, null);
+
+ Database db2 = client.getDatabase("hive", dbName);
+ assertNull(db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not
null:actual=" + db2.getLocationUri());
+ assertEquals("Expected location is different from actual location",
+ Path.getPathWithoutSchemeAndAuthority(new
Path(db2.getLocationUri())),
+ Path.getPathWithoutSchemeAndAuthority(new Path(location)));
+
+ String tblName = basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE);
+ StringBuilder properties = new StringBuilder();
+ properties.append("EXTERNAL").append("=").append("TRUE");
+ properties.append(";");
+ tProps.put("PROPERTIES", properties.toString());
+ Table tbl = createTableWithCapabilities(tProps);
+
+ setHMSClient("testCreateDatabaseWithNewLocation", (new String[] {
"HIVEBUCKET2", "EXTREAD", "EXTWRITE"}));
+ Table tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("External table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
+
+ dbName = "dbm";
+ String mgdLocation = getManagedRootForTenant(tenant2).getAbsolutePath();
+ location = getExternalRootForTenant(tenant2).getAbsolutePath();
+ db = createDatabase("hive", dbName, location, mgdLocation);
+
+ db2 = client.getDatabase("hive", dbName);
+ assertNotNull("Database's managedLocationUri is expected to be not null"
+ db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not null" +
db2.getLocationUri());
+ assertEquals("Expected location is different from actual location",
+ Path.getPathWithoutSchemeAndAuthority(new Path(location)),
+ Path.getPathWithoutSchemeAndAuthority(new
Path(db2.getLocationUri())));
+
+ tblName = "mgd_" + basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.MANAGED_TABLE);
+ properties = new StringBuilder();
+ properties.append("transactional=true");
+ properties.append(";");
+ properties.append("transactional_properties=insert_only");
+ tProps.put("PROPERTIES", properties.toString());
+
+ setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE",
"HIVEFULLACIDWRITE"});
+ tbl = createTableWithCapabilities(tProps);
+
+ tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("Managed table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname())));
+ } catch (Exception e) {
+ fail("testCreateDatabaseWithNewLocation failed with " + e.getMessage());
+ } finally {
+ silentDropDatabase("dbx");
+ resetHMSClient();
+ }
+ }
+
+ @Test
+ public void testCreateDatabaseWithExtAndManagedLocations() throws Exception {
+ try {
+ resetHMSClient();
+ final String dbName = "dbxm";
+ String basetblName = "newstyletable";
+ Map<String, Object> tProps = new HashMap<>();
+
+      String location = ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX);
+      String mgdLocation = wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX);
+ Database db = createDatabase("hive", dbName, location, mgdLocation);
+
+ Database db2 = client.getDatabase("hive", dbName);
+ assertNotNull("Database's managedLocationUri is expected to be not null"
+ db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not null" +
db2.getLocationUri());
+
+ String tblName = basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.EXTERNAL_TABLE);
+ StringBuilder properties = new StringBuilder();
+ properties.append("EXTERNAL").append("=").append("TRUE");
+ properties.append(";");
+ tProps.put("PROPERTIES", properties.toString());
+ Table tbl = createTableWithCapabilities(tProps);
+
+ setHMSClient("testCreateDatabaseWithLocation", (new String[] {
"HIVEBUCKET2", "EXTREAD", "EXTWRITE"}));
+ Table tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertTrue("External table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname())));
+
+ tblName = "mgd_" + basetblName;
+ tProps.put("DBNAME", dbName);
+ tProps.put("TBLNAME", tblName);
+ tProps.put("TBLTYPE", TableType.MANAGED_TABLE);
+ properties = new StringBuilder();
+ properties.append("transactional=true");
+ properties.append(";");
+ properties.append("transactional_properties=insert_only");
+ tProps.put("PROPERTIES", properties.toString());
+
+ setHMSClient("createTable", new String[] {"HIVEMANAGEDINSERTWRITE",
"HIVEFULLACIDWRITE"});
+ tbl = createTableWithCapabilities(tProps);
+
+ tbl2 = client.getTable(dbName, tblName);
+ assertEquals("Created and retrieved tables do not match:" +
tbl2.getTableName() + ":" + tblName,
+ tbl2.getTableName(), tblName);
+ assertEquals("Database's locationUri is expected to be equal to set
value",
+ Path.getPathWithoutSchemeAndAuthority(new Path(location)),
+ Path.getPathWithoutSchemeAndAuthority(new
Path(db2.getLocationUri())));
+ assertTrue("Managed table location not as expected:actual=" +
db2.getLocationUri(),
+
tbl2.getSd().getLocation().contains(conf.get(MetastoreConf.ConfVars.WAREHOUSE.getVarname())));
+ } catch (Exception e) {
+ fail("testCreateDatabaseWithLocation failed with " + e.getMessage());
+ } finally {
+ silentDropDatabase("dbxm");
+ resetHMSClient();
+ }
+ }
+
+ @Test
+ public void testAlterDatabase() throws Exception {
+ try {
+ resetHMSClient();
+ final String dbName = "dbalter";
+
+ Database db = createDatabase("hive", dbName, null, null);
+ Database db2 = client.getDatabase("hive", dbName);
+ assertNull(db2.getManagedLocationUri());
+ assertNotNull("Database's locationUri is expected to be not
null:actual=" + db2.getLocationUri());
+
+      String mgdLocation = wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX);
+ db.setManagedLocationUri(mgdLocation);
+ client.alterDatabase(dbName, db);
+ db2 = client.getDatabase("hive", dbName);
+ assertNotNull("Database's managedLocationUri is expected to be not null"
+ db2.getManagedLocationUri());
+ assertEquals("Database's managed location is expected to be equal",
db2.getManagedLocationUri(), mgdLocation);
+
+      String location = ext_wh.getAbsolutePath().concat(File.separator).concat(dbName).concat(DATABASE_WAREHOUSE_SUFFIX);
+      db.setLocationUri(location);
+      client.alterDatabase(dbName, db);
+      db2 = client.getDatabase("hive", dbName);
+      assertEquals("Database's managed location is expected to be equal",
+          Path.getPathWithoutSchemeAndAuthority(new Path(db2.getManagedLocationUri())),
+          Path.getPathWithoutSchemeAndAuthority(new Path(mgdLocation)));
+      assertEquals("Database's location is expected to be equal",
+          Path.getPathWithoutSchemeAndAuthority(new Path(db2.getLocationUri())),
+          Path.getPathWithoutSchemeAndAuthority(new Path(location)));
+ } catch (Exception e) {
+      System.err.println(org.apache.hadoop.util.StringUtils.stringifyException(e));
+ System.err.println("testAlterDatabase() failed.");
+ fail("testAlterDatabase failed:" + e.getMessage());
+ } finally {
+ silentDropDatabase("dbalter");
+ resetHMSClient();
+ }
+ }
+
+  private Database createDatabase(String catName, String dbName, String location, String managedLocation) throws Exception {
+ if (catName == null)
+ catName = "hive";
+
+ DatabaseBuilder builder = new DatabaseBuilder()
+ .setName(dbName)
+ .setCatalogName(catName);
+
+ if (location != null)
+ builder.setLocation(location);
+ if (managedLocation != null)
+ builder.setManagedLocation(managedLocation);
+ return builder.create(client, conf);
+ }
+
+  private Table createTableWithCapabilities(Map<String, Object> props) throws Exception {
+ String catalog = (String)props.getOrDefault("CATALOG",
MetaStoreUtils.getDefaultCatalog(conf));
+ String dbName = (String)props.getOrDefault("DBNAME", "simpdb");
+ String tblName = (String)props.getOrDefault("TBLNAME", "test_table");
+ TableType type = (TableType)props.getOrDefault("TBLTYPE",
TableType.MANAGED_TABLE);
+ int buckets = ((Integer)props.getOrDefault("BUCKETS", -1)).intValue();
+ String properties = (String)props.getOrDefault("PROPERTIES", "");
+ String location = (String)(props.get("LOCATION"));
+ boolean dropDb = ((Boolean)props.getOrDefault("DROPDB",
Boolean.TRUE)).booleanValue();
+ int partitionCount = ((Integer)props.getOrDefault("PARTITIONS",
0)).intValue();
+
+ final String typeName = "Person";
+
+ if (type == TableType.EXTERNAL_TABLE) {
+ if (!properties.contains("EXTERNAL=TRUE")) {
+        properties = properties.concat(";EXTERNAL=TRUE;");
+ }
+ }
+
+    Map<String, String> table_params = new HashMap<>();
+ if (properties.length() > 0) {
+ String[] propArray = properties.split(";");
+ for (String prop : propArray) {
+ String[] keyValue = prop.split("=");
+ table_params.put(keyValue[0], keyValue[1]);
+ }
+ }
+
+ Catalog cat = null;
+ try {
+ cat = client.getCatalog(catalog);
+ } catch (NoSuchObjectException e) {
+ LOG.debug("Catalog does not exist, creating a new one");
+ try {
+ if (cat == null) {
+ cat = new Catalog();
+ cat.setName(catalog.toLowerCase());
+ Warehouse wh = new Warehouse(conf);
+          cat.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog);
+ cat.setDescription("Non-hive catalog");
+ client.createCatalog(cat);
+ LOG.debug("Catalog " + catalog + " created");
+ }
+ } catch (Exception ce) {
+ LOG.warn("Catalog " + catalog + " could not be created");
+ }
+ } catch (Exception e) {
+ LOG.error("Creation of a new catalog failed, aborting test");
+ throw e;
+ }
+
+ try {
+ client.dropTable(dbName, tblName);
+ } catch (Exception e) {
+ LOG.info("Drop table failed for " + dbName + "." + tblName);
+ }
+
+ try {
+ if (dropDb)
+ silentDropDatabase(dbName);
+ } catch (Exception e) {
+ LOG.info("Drop database failed for " + dbName);
+ }
+
+ if (dropDb)
+ new DatabaseBuilder()
+ .setName(dbName)
+ .setCatalogName(catalog)
+ .create(client, conf);
+
+ try {
+ client.dropType(typeName);
+ } catch (Exception e) {
+ LOG.info("Drop type failed for " + typeName);
+ }
+
+ Type typ1 = new Type();
+ typ1.setName(typeName);
+ typ1.setFields(new ArrayList<>(2));
+ typ1.getFields().add(
+ new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ typ1.getFields().add(
+ new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ client.createType(typ1);
+
+ TableBuilder builder = new TableBuilder()
+ .setCatName(catalog)
+ .setDbName(dbName)
+ .setTableName(tblName)
+ .setCols(typ1.getFields())
+ .setType(type.name())
+ .setLocation(location)
+ .setNumBuckets(buckets)
+ .setTableParams(table_params)
+ .addBucketCol("name")
+ .addStorageDescriptorParam("test_param_1", "Use this for comments
etc");
+
+ if (location != null)
+ builder.setLocation(location);
+
+ if (buckets > 0)
+ builder.setNumBuckets(buckets).addBucketCol("name");
+
+ if (partitionCount > 0) {
+ builder.addPartCol("partcol", "string");
+ }
+
+ if (type == TableType.MANAGED_TABLE) {
+ if (properties.contains("transactional=true") &&
!properties.contains("transactional_properties=insert_only")) {
+
builder.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
+
builder.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
+ builder.setSerdeLib("org.apache.hadoop.hive.ql.io.orc.OrcSerde");
+ builder.addStorageDescriptorParam("inputFormat",
"org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
+ builder.addStorageDescriptorParam("outputFormat",
"org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
+ }
+ }
+
+ Table tbl = builder.create(client, conf);
+ LOG.info("Table " + tbl.getTableName() + " created:type=" +
tbl.getTableType());
+
+ if (partitionCount > 0) {
+ List<Partition> partitions = new ArrayList<>();
+
+ List<List<String>> partValues = new ArrayList<>();
+ for (int i = 1; i <= partitionCount; i++) {
+ partValues.add(Lists.newArrayList("" + i));
+ }
+
+ for(List<String> vals : partValues){
+ addPartition(client, tbl, vals);
+ }
+ }
+
+ if (isThriftClient) {
+ // the createTable() above does not update the location in the 'tbl'
+ // object when the client is a thrift client and the code below relies
+ // on the location being present in the 'tbl' object - so get the table
+ // from the metastore
+ tbl = client.getTable(catalog, dbName, tblName);
+ LOG.info("Fetched Table " + tbl.getTableName() + " created:type=" +
tbl.getTableType());
+ }
+ return tbl;
+ }
+
+  private void addPartition(IMetaStoreClient client, Table table, List<String> values)
+      throws TException {
+ PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
+ values.forEach(val -> partitionBuilder.addValue(val));
+ Partition p = partitionBuilder.build(conf);
+    p.getSd().setNumBuckets(-1); // PartitionBuilder uses 0 as default whereas we use -1 for Tables.
+ client.add_partition(p);
+ }
+}
+
+
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestWarehouseExternalDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestWarehouseExternalDir.java
index e87daf8..120ab53 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestWarehouseExternalDir.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestWarehouseExternalDir.java
@@ -166,7 +166,7 @@ public class TestWarehouseExternalDir {
stmt.execute("create table twed_db1.tab1(c1 string, c2 string)");
tab = db.getTable("twed_db1", "tab1");
-    checkTableLocation(tab, new Path(new Path(whRootManagedPath, "twed_db1.db"), "tab1"));
+    checkTableLocation(tab, new Path(new Path(whRootExternalPath, "twed_db1.db"), "tab1"));
}
}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
index 38580c1..2284fca 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/BaseReplicationScenariosAcidTables.java
@@ -83,6 +83,7 @@ public class BaseReplicationScenariosAcidTables {
put("mapred.input.dir.recursive", "true");
put("hive.metastore.disallow.incompatible.col.type.changes", "false");
put("hive.in.repl.test", "true");
+ put("metastore.warehouse.tenant.colocation", "true");
}};
acidEnableConf.putAll(overrides);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index 2854045..1e25598 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -72,6 +72,7 @@ public class TestReplicationScenariosAcidTables extends BaseReplicationScenarios
conf = new HiveConf(clazz);
conf.set("dfs.client.use.datanode.hostname", "true");
+ conf.set("metastore.warehouse.tenant.colocation", "true");
conf.set("hadoop.proxyuser." + Utils.getUGI().getShortUserName() +
".hosts", "*");
MiniDFSCluster miniDFSCluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
@@ -85,6 +86,7 @@ public class TestReplicationScenariosAcidTables extends BaseReplicationScenarios
put("hive.mapred.mode", "nonstrict");
put("mapred.input.dir.recursive", "true");
put("hive.metastore.disallow.incompatible.col.type.changes", "false");
+ put("metastore.warehouse.tenant.colocation", "true");
put("hive.in.repl.test", "true");
}};
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index daf9edf..23f74ba 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -177,6 +177,7 @@ KW_DISABLE: 'DISABLE' | 'DISABLED';
KW_EXECUTED: 'EXECUTED';
KW_EXECUTE: 'EXECUTE';
KW_LOCATION: 'LOCATION';
+KW_MANAGEDLOCATION: 'MANAGEDLOCATION';
KW_TABLESAMPLE: 'TABLESAMPLE';
KW_BUCKET: 'BUCKET';
KW_OUT: 'OUT';
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 3dcd60e..2b865f3 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -341,10 +341,12 @@ TOK_SHOWDBLOCKS;
TOK_DESCDATABASE;
TOK_DATABASEPROPERTIES;
TOK_DATABASELOCATION;
+TOK_DATABASE_MANAGEDLOCATION;
TOK_DBPROPLIST;
TOK_ALTERDATABASE_PROPERTIES;
TOK_ALTERDATABASE_OWNER;
TOK_ALTERDATABASE_LOCATION;
+TOK_ALTERDATABASE_MANAGEDLOCATION;
TOK_DBNAME;
TOK_TABNAME;
TOK_TABSRC;
@@ -585,6 +587,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
xlateMap.put("KW_INPUTFORMAT", "INPUTFORMAT");
xlateMap.put("KW_OUTPUTFORMAT", "OUTPUTFORMAT");
xlateMap.put("KW_LOCATION", "LOCATION");
+ xlateMap.put("KW_MANAGEDLOCATION", "MANAGEDLOCATION");
xlateMap.put("KW_TABLESAMPLE", "TABLESAMPLE");
xlateMap.put("KW_BUCKET", "BUCKET");
xlateMap.put("KW_OUT", "OUT");
@@ -1089,8 +1092,9 @@ createDatabaseStatement
name=identifier
databaseComment?
dbLocation?
+ dbManagedLocation?
(KW_WITH KW_DBPROPERTIES dbprops=dbProperties)?
-    -> ^(TOK_CREATEDATABASE $name ifNotExists? dbLocation? databaseComment? $dbprops?)
+    -> ^(TOK_CREATEDATABASE $name ifNotExists? dbLocation? dbManagedLocation? databaseComment? $dbprops?)
;
dbLocation
@@ -1100,6 +1104,13 @@ dbLocation
KW_LOCATION locn=StringLiteral -> ^(TOK_DATABASELOCATION $locn)
;
+dbManagedLocation
+@init { pushMsg("database managed location specification", state); }
+@after { popMsg(state); }
+ :
+    KW_MANAGEDLOCATION locn=StringLiteral -> ^(TOK_DATABASE_MANAGEDLOCATION $locn)
+ ;
+
dbProperties
@init { pushMsg("dbproperties", state); }
@after { popMsg(state); }
@@ -1296,6 +1307,15 @@ alterDatabaseSuffixSetLocation
@after { popMsg(state); }
: dbName=identifier KW_SET KW_LOCATION newLocation=StringLiteral
-> ^(TOK_ALTERDATABASE_LOCATION $dbName $newLocation)
+ | dbName=identifier KW_SET KW_MANAGEDLOCATION newLocation=StringLiteral
+ -> ^(TOK_ALTERDATABASE_MANAGEDLOCATION $dbName $newLocation)
+ ;
+
+alterDatabaseSuffixSetManagedLocation
+@init { pushMsg("alter database set managed location", state); }
+@after { popMsg(state); }
+ : dbName=identifier KW_SET KW_MANAGEDLOCATION newLocation=StringLiteral
+ -> ^(TOK_ALTERDATABASE_MANAGEDLOCATION $dbName $newLocation)
;
alterStatementSuffixRename[boolean table]
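For reference, both ALTER forms accepted by the rules above (database name and paths are illustrative):

    ALTER DATABASE db1 SET LOCATION '/ext/new/path';             -- rewrites to TOK_ALTERDATABASE_LOCATION
    ALTER DATABASE db1 SET MANAGEDLOCATION '/managed/new/path';  -- rewrites to TOK_ALTERDATABASE_MANAGEDLOCATION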
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index 262afaa..7c8baa5 100644
index 262afaa..7c8baa5 100644
--- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -830,7 +830,7 @@ nonReserved
    | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_HOUR | KW_IDXPROPERTIES | KW_IGNORE
    | KW_INDEX | KW_INDEXES | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR | KW_JOINCOST | KW_KILL
    | KW_KEYS | KW_KEY_TYPE | KW_LAST | KW_LIMIT | KW_OFFSET | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
-    | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_NULLS | KW_OFFLINE
+    | KW_MANAGEDLOCATION | KW_MAPJOIN | KW_MATERIALIZED | KW_METADATA | KW_MINUTE | KW_MONTH | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_NULLS | KW_OFFLINE
    | KW_OPTION | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS
    | KW_PRINCIPALS | KW_PROTECTION | KW_PURGE | KW_QUERY | KW_QUARTER | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
    | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_REPLICATION | KW_RESTRICT | KW_REWRITE
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationDesc.java
index ddb3206..16d28f2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationDesc.java
@@ -29,15 +29,31 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
public class AlterDatabaseSetLocationDesc extends AbstractAlterDatabaseDesc {
private static final long serialVersionUID = 1L;
- private final String location;
+ private String location = null;
+ private String managedLocation = null;
public AlterDatabaseSetLocationDesc(String databaseName, String location) {
+    this(databaseName, location, null);
+  }
+
+  public AlterDatabaseSetLocationDesc(String databaseName, String location, String managedLocation) {
super(databaseName, null);
- this.location = location;
+ if (location != null) {
+ this.location = location;
+ }
+
+ if (managedLocation != null) {
+ this.managedLocation = managedLocation;
+ }
}
@Explain(displayName="location")
public String getLocation() {
return location;
}
+
+ @Explain(displayName="managedLocation")
+ public String getManagedLocation() {
+ return managedLocation;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
index 748236c..0c4ade3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetLocationOperation.java
@@ -41,15 +41,32 @@ public class AlterDatabaseSetLocationOperation extends AbstractAlterDatabaseOper
   protected void doAlteration(Database database, Map<String, String> params) throws HiveException {
try {
String newLocation = desc.getLocation();
- URI locationURI = new URI(newLocation);
-      if (!locationURI.isAbsolute() || StringUtils.isBlank(locationURI.getScheme())) {
- throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
+ if (newLocation != null) {
+ URI locationURI = new URI(newLocation);
+        if (!locationURI.isAbsolute() || StringUtils.isBlank(locationURI.getScheme())) {
+ throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
+ }
+
+ if (newLocation.equals(database.getLocationUri())) {
+ LOG.info("AlterDatabase skipped. No change in location.");
+ } else {
+ database.setLocationUri(newLocation);
+ }
+ return;
}
- if (newLocation.equals(database.getLocationUri())) {
- LOG.info("AlterDatabase skipped. No change in location.");
- } else {
- database.setLocationUri(newLocation);
+ newLocation = desc.getManagedLocation();
+ if (newLocation != null) {
+ URI locationURI = new URI(newLocation);
+        if (!locationURI.isAbsolute() || StringUtils.isBlank(locationURI.getScheme())) {
+ throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation);
+ }
+
+ if (newLocation.equals(database.getManagedLocationUri())) {
+ LOG.info("AlterDatabase skipped. No change in location.");
+ } else {
+ database.setManagedLocationUri(newLocation);
+ }
}
} catch (URISyntaxException e) {
throw new HiveException(e);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetManagedLocationAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetManagedLocationAnalyzer.java
new file mode 100644
index 0000000..a0e92eb
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/alter/location/AlterDatabaseSetManagedLocationAnalyzer.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.ddl.database.alter.location;
+
+import org.apache.hadoop.hive.ql.QueryState;
+import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
+import org.apache.hadoop.hive.ql.ddl.database.alter.AbstractAlterDatabaseAnalyzer;
+import org.apache.hadoop.hive.ql.parse.ASTNode;
+import org.apache.hadoop.hive.ql.parse.HiveParser;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+
+/**
+ * Analyzer for ALTER DATABASE ... SET MANAGEDLOCATION commands.
+ */
+@DDLType(types = HiveParser.TOK_ALTERDATABASE_MANAGEDLOCATION)
+public class AlterDatabaseSetManagedLocationAnalyzer extends AbstractAlterDatabaseAnalyzer {
+  public AlterDatabaseSetManagedLocationAnalyzer(QueryState queryState) throws SemanticException {
+ super(queryState);
+ }
+
+ @Override
+ public void analyzeInternal(ASTNode root) throws SemanticException {
+ String databaseName = getUnescapedName((ASTNode) root.getChild(0));
+ String newLocation = unescapeSQLString(root.getChild(1).getText());
+
+ outputs.add(toWriteEntity(newLocation));
+
+    AlterDatabaseSetLocationDesc desc = new AlterDatabaseSetLocationDesc(databaseName, null, newLocation);
+ addAlterDatabaseDesc(desc);
+ }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseAnalyzer.java
index 431689e..f87dd2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseAnalyzer.java
@@ -46,7 +46,7 @@ public class CreateDatabaseAnalyzer extends BaseSemanticAnalyzer {
boolean ifNotExists = false;
String comment = null;
- String locationUri = null;
+ String locationUri = null, managedLocationUri = null;
Map<String, String> props = null;
for (int i = 1; i < root.getChildCount(); i++) {
@@ -65,15 +65,21 @@ public class CreateDatabaseAnalyzer extends BaseSemanticAnalyzer {
locationUri = unescapeSQLString(childNode.getChild(0).getText());
outputs.add(toWriteEntity(locationUri));
break;
+ case HiveParser.TOK_DATABASE_MANAGEDLOCATION:
+        managedLocationUri = unescapeSQLString(childNode.getChild(0).getText());
+ outputs.add(toWriteEntity(managedLocationUri));
+ break;
default:
throw new SemanticException("Unrecognized token in CREATE DATABASE
statement");
}
}
-    CreateDatabaseDesc desc = new CreateDatabaseDesc(databaseName, comment, locationUri, ifNotExists, props);
+    CreateDatabaseDesc desc = new CreateDatabaseDesc(databaseName, comment, locationUri, ifNotExists, props, managedLocationUri);
    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
    Database database = new Database(databaseName, comment, locationUri, props);
+ if (managedLocationUri != null)
+ database.setManagedLocationUri(managedLocationUri);
outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseDesc.java
index f0d283f..f3959f0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseDesc.java
@@ -35,16 +35,23 @@ public class CreateDatabaseDesc implements DDLDesc, Serializable {
private final String databaseName;
private final String comment;
private final String locationUri;
+ private final String managedLocationUri;
private final boolean ifNotExists;
private final Map<String, String> dbProperties;
public CreateDatabaseDesc(String databaseName, String comment, String locationUri, boolean ifNotExists,
      Map<String, String> dbProperties) {
+    this(databaseName, comment, locationUri, ifNotExists, dbProperties, null);
+  }
+
+  public CreateDatabaseDesc(String databaseName, String comment, String locationUri, boolean ifNotExists,
+      Map<String, String> dbProperties, String managedLocationUri) {
this.databaseName = databaseName;
this.comment = comment;
this.locationUri = locationUri;
this.ifNotExists = ifNotExists;
this.dbProperties = dbProperties;
+ this.managedLocationUri = managedLocationUri;
}
@Explain(displayName="if not exists", displayOnlyOnTrue = true)
@@ -70,4 +77,9 @@ public class CreateDatabaseDesc implements DDLDesc, Serializable {
public String getLocationUri() {
return locationUri;
}
+
+ @Explain(displayName="managedLocationUri")
+ public String getManagedLocationUri() {
+ return managedLocationUri;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java
index fc7efe3..444db0a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/create/CreateDatabaseOperation.java
@@ -49,9 +49,13 @@ public class CreateDatabaseOperation extends DDLOperation<CreateDatabaseDesc> {
database.setParameters(desc.getDatabaseProperties());
database.setOwnerName(SessionState.getUserFromAuthenticator());
database.setOwnerType(PrincipalType.USER);
+ if (desc.getManagedLocationUri() != null)
+ database.setManagedLocationUri(desc.getManagedLocationUri());
try {
makeLocationQualified(database);
+      if (database.getLocationUri().equalsIgnoreCase(database.getManagedLocationUri()))
+        throw new HiveException("Managed and external locations for database cannot be the same");
context.getDb().createDatabase(database, desc.getIfNotExists());
} catch (AlreadyExistsException ex) {
//it would be better if AlreadyExistsException had an errorCode field....
@@ -65,11 +69,22 @@ public class CreateDatabaseOperation extends DDLOperation<CreateDatabaseDesc> {
if (database.isSetLocationUri()) {
      database.setLocationUri(Utilities.getQualifiedPath(context.getConf(), new Path(database.getLocationUri())));
} else {
-      // Location is not set; we utilize METASTOREWAREHOUSE together with the database name
-      Path path = new Path(MetastoreConf.getVar(context.getConf(), MetastoreConf.ConfVars.WAREHOUSE),
-          database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);
+      // Location is not set; we utilize WAREHOUSE_EXTERNAL together with the database name
+      String rootDir = MetastoreConf.getVar(context.getConf(), MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL);
+      if (rootDir == null || rootDir.trim().isEmpty()) {
+        // Fallback plan
+        LOG.warn(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getVarname() + " is not set, falling back to " +
+            MetastoreConf.ConfVars.WAREHOUSE.getVarname() + ". This could cause external tables to use the managed tablespace.");
+        rootDir = MetastoreConf.getVar(context.getConf(), MetastoreConf.ConfVars.WAREHOUSE);
+      }
+      Path path = new Path(rootDir, database.getName().toLowerCase() + DATABASE_PATH_SUFFIX);
      String qualifiedPath = Utilities.getQualifiedPath(context.getConf(), path);
database.setLocationUri(qualifiedPath);
}
+
+ if (database.isSetManagedLocationUri()) {
+ // TODO should we enforce a location check here?
+      database.setManagedLocationUri(Utilities.getQualifiedPath(context.getConf(), new Path(database.getManagedLocationUri())));
+ }
}
}
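A note on the default-location change in the hunk above (a sketch; the config values are illustrative): with metastore.warehouse.external.dir=/ext and metastore.warehouse.dir=/managed, a database created without an explicit LOCATION now resolves under the external warehouse root:

    CREATE DATABASE sales;          -- locationUri defaults to /ext/sales.db
    CREATE TABLE sales.t (id INT);  -- managed (transactional) table data still resolves under the managed warehouse

If metastore.warehouse.external.dir is unset, the code falls back to metastore.warehouse.dir and logs the warning shown above.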
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseDesc.java
index 36db036..be0e5a9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseDesc.java
@@ -33,7 +33,7 @@ public class DescDatabaseDesc implements DDLDesc, Serializable {
private static final long serialVersionUID = 1L;
public static final String DESC_DATABASE_SCHEMA =
-      "db_name,comment,location,owner_name,owner_type,parameters#string:string:string:string:string:string";
+      "db_name,comment,location,managedLocation,owner_name,owner_type,parameters#string:string:string:string:string:string:string";
private final String resFile;
private final String dbName;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseOperation.java
index 9b68756..406397d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/desc/DescDatabaseOperation.java
@@ -58,7 +58,7 @@ public class DescDatabaseOperation extends DDLOperation<DescDatabaseDesc> {
}
      context.getFormatter().showDatabaseDescription(outStream, database.getName(), database.getDescription(),
-          location, database.getOwnerName(), database.getOwnerType(), params);
+          location, database.getManagedLocationUri(), database.getOwnerName(), database.getOwnerType(), params);
} catch (Exception e) {
throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index f51c0fc..900642e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -4661,4 +4661,9 @@ public final class Utilities {
throw new SemanticException(e);
}
}
+
+  public static boolean arePathsEqualOrWithin(Path p1, Path p2) {
+    String s1 = p1.toString().toLowerCase();
+    String s2 = p2.toString().toLowerCase();
+    return s1.contains(s2) || s2.contains(s1);
+  }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
index 8d57604..45c69e7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java
@@ -616,10 +616,13 @@ public class JsonMetaDataFormatter implements MetaDataFormatter {
*/
@Override
  public void showDatabaseDescription(DataOutputStream out, String database, String comment,
-      String location, String ownerName, PrincipalType ownerType, Map<String, String> params)
+      String location, String managedLocation, String ownerName, PrincipalType ownerType, Map<String, String> params)
      throws HiveException {
    MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment)
        .put("location", location);
+ if (null != managedLocation) {
+ builder.put("managedLocation", managedLocation);
+ }
if (null != ownerName) {
builder.put("owner", ownerName);
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
index fcce9ec..7b83a55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java
@@ -104,7 +104,7 @@ public interface MetaDataFormatter {
* Describe a database.
*/
  void showDatabaseDescription(DataOutputStream out, String database, String comment, String location,
-      String ownerName, PrincipalType ownerType, Map<String, String> params)
+      String managedLocation, String ownerName, PrincipalType ownerType, Map<String, String> params)
      throws HiveException;
  void showResourcePlans(DataOutputStream out, List<WMResourcePlan> resourcePlans)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
index d0d40af..4700573 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java
@@ -622,7 +622,7 @@ class TextMetaDataFormatter implements MetaDataFormatter {
*/
@Override
  public void showDatabaseDescription(DataOutputStream outStream, String database, String comment,
-      String location, String ownerName, PrincipalType ownerType, Map<String, String> params)
+      String location, String managedLocation, String ownerName, PrincipalType ownerType, Map<String, String> params)
throws HiveException {
try {
outStream.write(database.getBytes("UTF-8"));
@@ -635,6 +635,10 @@ class TextMetaDataFormatter implements MetaDataFormatter {
outStream.write(location.getBytes("UTF-8"));
}
outStream.write(separator);
+ if (managedLocation != null) {
+ outStream.write(managedLocation.getBytes("UTF-8"));
+ }
+ outStream.write(separator);
if (ownerName != null) {
outStream.write(ownerName.getBytes("UTF-8"));
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index 1cb78fb..c75829c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -150,7 +150,8 @@ public enum HiveOperation {
new Privilege[]{Privilege.ALTER_DATA, Privilege.CREATE}, true, false),
ALTERDATABASE("ALTERDATABASE", HiveParser.TOK_ALTERDATABASE_PROPERTIES,
null, null),
ALTERDATABASE_OWNER("ALTERDATABASE_OWNER",
HiveParser.TOK_ALTERDATABASE_OWNER, null, null),
- ALTERDATABASE_LOCATION("ALTERDATABASE_LOCATION",
HiveParser.TOK_ALTERDATABASE_LOCATION,
+ ALTERDATABASE_LOCATION("ALTERDATABASE_LOCATION",
+ new int[] {HiveParser.TOK_ALTERDATABASE_LOCATION,
HiveParser.TOK_ALTERDATABASE_MANAGEDLOCATION},
new Privilege[]{Privilege.ALTER_DATA}, null),
DESCDATABASE("DESCDATABASE", HiveParser.TOK_DESCDATABASE, null, null),
ALTERTABLE_MERGEFILES("ALTER_TABLE_MERGE",
HiveParser.TOK_ALTERTABLE_MERGEFILES, new Privilege[] {Privilege.SELECT},
diff --git a/ql/src/test/results/clientpositive/alter_change_db_location.q.out
b/ql/src/test/results/clientpositive/alter_change_db_location.q.out
index 93e4678..4c21153 100644
--- a/ql/src/test/results/clientpositive/alter_change_db_location.q.out
+++ b/ql/src/test/results/clientpositive/alter_change_db_location.q.out
@@ -11,7 +11,7 @@ PREHOOK: Input: database:newdb
POSTHOOK: query: describe database extended newDB
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:newdb
-newdb location/in/test hive_test_user USER
+newdb location/in/test hive_test_user USER
PREHOOK: query: use newDB
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:newdb
diff --git a/ql/src/test/results/clientpositive/alter_db_owner.q.out
b/ql/src/test/results/clientpositive/alter_db_owner.q.out
index de4ba64..a6118f2 100644
--- a/ql/src/test/results/clientpositive/alter_db_owner.q.out
+++ b/ql/src/test/results/clientpositive/alter_db_owner.q.out
@@ -10,7 +10,7 @@ PREHOOK: Input: database:db_alter_onr
POSTHOOK: query: describe database db_alter_onr
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db_alter_onr
-db_alter_onr location/in/test hive_test_user USER
+db_alter_onr location/in/test hive_test_user USER
#### A masked pattern was here ####
PREHOOK: type: ALTERDATABASE_OWNER
PREHOOK: Output: database:db_alter_onr
@@ -40,7 +40,7 @@ PREHOOK: Input: database:db_alter_onr
POSTHOOK: query: describe database db_alter_onr
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db_alter_onr
-db_alter_onr location/in/test user1 USER
+db_alter_onr location/in/test user1 USER
#### A masked pattern was here ####
PREHOOK: type: ALTERDATABASE_OWNER
PREHOOK: Output: database:db_alter_onr
@@ -53,4 +53,4 @@ PREHOOK: Input: database:db_alter_onr
POSTHOOK: query: describe database db_alter_onr
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db_alter_onr
-db_alter_onr location/in/test role1 ROLE
+db_alter_onr location/in/test role1 ROLE
diff --git
a/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out
b/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out
index 455ec42..bc48051 100644
--- a/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out
+++ b/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out
@@ -28,7 +28,7 @@ PREHOOK: Input: database:testdb
POSTHOOK: query: desc database testdb
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:testdb
-testdb location/in/test testrole ROLE
+testdb location/in/test testrole ROLE
PREHOOK: query: use testdb
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:testdb
diff --git a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
index 768ca7a..bc0c88c 100644
--- a/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/beeline/escape_comments.q.out
@@ -42,14 +42,14 @@ PREHOOK: Input: database:escape_comments_db
POSTHOOK: query: describe database extended escape_comments_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:escape_comments_db
-escape_comments_db a\nb location/in/test user USER
+escape_comments_db a\nb location/in/test user USER
PREHOOK: query: describe database escape_comments_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:escape_comments_db
POSTHOOK: query: describe database escape_comments_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:escape_comments_db
-escape_comments_db a\nb location/in/test user USER
+escape_comments_db a\nb location/in/test user USER
PREHOOK: query: show create table escape_comments_tbl1
PREHOOK: type: SHOW_CREATETABLE
PREHOOK: Input: escape_comments_db@escape_comments_tbl1
diff --git a/ql/src/test/results/clientpositive/database_location.q.out
b/ql/src/test/results/clientpositive/database_location.q.out
index 3d00e09..f056707 100644
--- a/ql/src/test/results/clientpositive/database_location.q.out
+++ b/ql/src/test/results/clientpositive/database_location.q.out
@@ -10,7 +10,7 @@ PREHOOK: Input: database:db1
POSTHOOK: query: DESCRIBE DATABASE EXTENDED db1
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db1
-db1 location/in/test hive_test_user USER
+db1 location/in/test hive_test_user USER
PREHOOK: query: USE db1
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:db1
@@ -85,7 +85,7 @@ PREHOOK: Input: database:db2
POSTHOOK: query: DESCRIBE DATABASE EXTENDED db2
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db2
-db2 database 2 location/in/test hive_test_user USER
+db2 database 2 location/in/test hive_test_user USER
PREHOOK: query: USE db2
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:db2
diff --git a/ql/src/test/results/clientpositive/database_properties.q.out
b/ql/src/test/results/clientpositive/database_properties.q.out
index c401a6e..5b0c857 100644
--- a/ql/src/test/results/clientpositive/database_properties.q.out
+++ b/ql/src/test/results/clientpositive/database_properties.q.out
@@ -28,14 +28,14 @@ PREHOOK: Input: database:db2
POSTHOOK: query: describe database db2
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db2
-db2 location/in/test hive_test_user USER
+db2 location/in/test hive_test_user USER
PREHOOK: query: describe database extended db2
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:db2
POSTHOOK: query: describe database extended db2
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db2
-db2 location/in/test hive_test_user USER
{hive.warehouse.dir=/user/hive/warehouse,
mapred.jobtracker.url=http://my.jobtracker.com:53000,
mapred.scratch.dir=hdfs://### HDFS PATH ###}
+db2 location/in/test hive_test_user USER
{hive.warehouse.dir=/user/hive/warehouse,
mapred.jobtracker.url=http://my.jobtracker.com:53000,
mapred.scratch.dir=hdfs://### HDFS PATH ###}
PREHOOK: query: alter database db2 set dbproperties (
'new.property' = 'some new props',
'hive.warehouse.dir' = 'new/warehouse/dir')
@@ -52,4 +52,4 @@ PREHOOK: Input: database:db2
POSTHOOK: query: describe database extended db2
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:db2
-db2 location/in/test hive_test_user USER
{hive.warehouse.dir=new/warehouse/dir,
mapred.jobtracker.url=http://my.jobtracker.com:53000,
mapred.scratch.dir=hdfs://### HDFS PATH ###, new.property=some new props}
+db2 location/in/test hive_test_user USER
{hive.warehouse.dir=new/warehouse/dir,
mapred.jobtracker.url=http://my.jobtracker.com:53000,
mapred.scratch.dir=hdfs://### HDFS PATH ###, new.property=some new props}
diff --git a/ql/src/test/results/clientpositive/db_ddl_explain.q.out
b/ql/src/test/results/clientpositive/db_ddl_explain.q.out
index 8cf5f9c..ae8982c 100644
--- a/ql/src/test/results/clientpositive/db_ddl_explain.q.out
+++ b/ql/src/test/results/clientpositive/db_ddl_explain.q.out
@@ -69,7 +69,7 @@ PREHOOK: Input: database:d
POSTHOOK: query: DESCRIBE DATABASE d
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:d
-d location/in/test hive_test_user USER
+d location/in/test hive_test_user USER
PREHOOK: query: EXPLAIN ALTER DATABASE d SET dbproperties('test'='yesthisis')
PREHOOK: type: ALTERDATABASE
PREHOOK: Output: database:d
diff --git a/ql/src/test/results/clientpositive/describe_database.q.out
b/ql/src/test/results/clientpositive/describe_database.q.out
index 3394d5b..8d1e44a 100644
--- a/ql/src/test/results/clientpositive/describe_database.q.out
+++ b/ql/src/test/results/clientpositive/describe_database.q.out
@@ -10,14 +10,14 @@ PREHOOK: Input: database:test_db
POSTHOOK: query: desc database extended test_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:test_db
-test_db location/in/test hive_test_user USER
{key1=value1, key2=value2}
+test_db location/in/test hive_test_user USER
{key1=value1, key2=value2}
PREHOOK: query: desc schema extended test_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:test_db
POSTHOOK: query: desc schema extended test_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:test_db
-test_db location/in/test hive_test_user USER
{key1=value1, key2=value2}
+test_db location/in/test hive_test_user USER
{key1=value1, key2=value2}
PREHOOK: query: drop database test_db
PREHOOK: type: DROPDATABASE
PREHOOK: Input: database:test_db
diff --git a/ql/src/test/results/clientpositive/escape_comments.q.out
b/ql/src/test/results/clientpositive/escape_comments.q.out
index f8b0713..a48ba80 100644
--- a/ql/src/test/results/clientpositive/escape_comments.q.out
+++ b/ql/src/test/results/clientpositive/escape_comments.q.out
@@ -42,14 +42,14 @@ PREHOOK: Input: database:escape_comments_db
POSTHOOK: query: describe database extended escape_comments_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:escape_comments_db
-escape_comments_db a\nb location/in/test hive_test_user USER
+escape_comments_db a\nb location/in/test hive_test_user USER
PREHOOK: query: describe database escape_comments_db
PREHOOK: type: DESCDATABASE
PREHOOK: Input: database:escape_comments_db
POSTHOOK: query: describe database escape_comments_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:escape_comments_db
-escape_comments_db a\nb location/in/test hive_test_user USER
+escape_comments_db a\nb location/in/test hive_test_user USER
PREHOOK: query: show create table escape_comments_tbl1
PREHOOK: type: SHOW_CREATETABLE
PREHOOK: Input: escape_comments_db@escape_comments_tbl1
diff --git a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
index 981fa86..dad999a 100644
--- a/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainanalyze_3.q.out
@@ -127,7 +127,7 @@ PREHOOK: Input: database:newdb
POSTHOOK: query: describe database extended newDB
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:newdb
-newdb location/in/test hive_test_user USER
+newdb location/in/test hive_test_user USER
PREHOOK: query: use newDB
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:newdb
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index d4374f0..5218c90 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -145,7 +145,7 @@ PREHOOK: Input: database:newdb
POSTHOOK: query: describe database extended newDB
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:newdb
-newdb location/in/test hive_test_user USER
+newdb location/in/test hive_test_user USER
PREHOOK: query: explain use newDB
PREHOOK: type: SWITCHDATABASE
PREHOOK: Input: database:newdb
diff --git a/ql/src/test/results/clientpositive/unicode_comments.q.out
b/ql/src/test/results/clientpositive/unicode_comments.q.out
index dbbc100..94b452f 100644
--- a/ql/src/test/results/clientpositive/unicode_comments.q.out
+++ b/ql/src/test/results/clientpositive/unicode_comments.q.out
@@ -41,7 +41,7 @@ PREHOOK: Input: database:unicode_comments_db
POSTHOOK: query: describe database extended unicode_comments_db
POSTHOOK: type: DESCDATABASE
POSTHOOK: Input: database:unicode_comments_db
-unicode_comments_db 数据库 location/in/test hive_test_user USER
+unicode_comments_db 数据库 location/in/test hive_test_user USER
PREHOOK: query: show create table unicode_comments_tbl1
PREHOOK: type: SHOW_CREATETABLE
PREHOOK: Input: unicode_comments_db@unicode_comments_tbl1
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
index f68fda9..5981252 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
@@ -47,6 +47,7 @@ import org.slf4j.LoggerFactory;
private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC
= new org.apache.thrift.protocol.TField("ownerType",
org.apache.thrift.protocol.TType.I32, (short)7);
private static final org.apache.thrift.protocol.TField
CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName",
org.apache.thrift.protocol.TType.STRING, (short)8);
private static final org.apache.thrift.protocol.TField
CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime",
org.apache.thrift.protocol.TType.I32, (short)9);
+ private static final org.apache.thrift.protocol.TField
MANAGED_LOCATION_URI_FIELD_DESC = new
org.apache.thrift.protocol.TField("managedLocationUri",
org.apache.thrift.protocol.TType.STRING, (short)10);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes =
new HashMap<Class<? extends IScheme>, SchemeFactory>();
static {
@@ -63,6 +64,7 @@ import org.slf4j.LoggerFactory;
private PrincipalType ownerType; // optional
private String catalogName; // optional
private int createTime; // optional
+ private String managedLocationUri; // optional
/** The set of fields this struct contains, along with convenience methods
for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -78,7 +80,8 @@ import org.slf4j.LoggerFactory;
*/
OWNER_TYPE((short)7, "ownerType"),
CATALOG_NAME((short)8, "catalogName"),
- CREATE_TIME((short)9, "createTime");
+ CREATE_TIME((short)9, "createTime"),
+ MANAGED_LOCATION_URI((short)10, "managedLocationUri");
private static final Map<String, _Fields> byName = new HashMap<String,
_Fields>();
@@ -111,6 +114,8 @@ import org.slf4j.LoggerFactory;
return CATALOG_NAME;
case 9: // CREATE_TIME
return CREATE_TIME;
+ case 10: // MANAGED_LOCATION_URI
+ return MANAGED_LOCATION_URI;
default:
return null;
}
@@ -153,7 +158,7 @@ import org.slf4j.LoggerFactory;
// isset id assignments
private static final int __CREATETIME_ISSET_ID = 0;
private byte __isset_bitfield = 0;
- private static final _Fields optionals[] =
{_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE,_Fields.CATALOG_NAME,_Fields.CREATE_TIME};
+ private static final _Fields optionals[] =
{_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE,_Fields.CATALOG_NAME,_Fields.CREATE_TIME,_Fields.MANAGED_LOCATION_URI};
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData>
metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new
EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -177,6 +182,8 @@ import org.slf4j.LoggerFactory;
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
tmpMap.put(_Fields.CREATE_TIME, new
org.apache.thrift.meta_data.FieldMetaData("createTime",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+ tmpMap.put(_Fields.MANAGED_LOCATION_URI, new
org.apache.thrift.meta_data.FieldMetaData("managedLocationUri",
org.apache.thrift.TFieldRequirementType.OPTIONAL,
+ new
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Database.class,
metaDataMap);
}
@@ -228,6 +235,9 @@ import org.slf4j.LoggerFactory;
this.catalogName = other.catalogName;
}
this.createTime = other.createTime;
+ if (other.isSetManagedLocationUri()) {
+ this.managedLocationUri = other.managedLocationUri;
+ }
}
public Database deepCopy() {
@@ -246,6 +256,7 @@ import org.slf4j.LoggerFactory;
this.catalogName = null;
setCreateTimeIsSet(false);
this.createTime = 0;
+ this.managedLocationUri = null;
}
public String getName() {
@@ -473,6 +484,29 @@ import org.slf4j.LoggerFactory;
__isset_bitfield = EncodingUtils.setBit(__isset_bitfield,
__CREATETIME_ISSET_ID, value);
}
+ public String getManagedLocationUri() {
+ return this.managedLocationUri;
+ }
+
+ public void setManagedLocationUri(String managedLocationUri) {
+ this.managedLocationUri = managedLocationUri;
+ }
+
+ public void unsetManagedLocationUri() {
+ this.managedLocationUri = null;
+ }
+
+ /** Returns true if field managedLocationUri is set (has been assigned a
value) and false otherwise */
+ public boolean isSetManagedLocationUri() {
+ return this.managedLocationUri != null;
+ }
+
+ public void setManagedLocationUriIsSet(boolean value) {
+ if (!value) {
+ this.managedLocationUri = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case NAME:
@@ -547,6 +581,14 @@ import org.slf4j.LoggerFactory;
}
break;
+ case MANAGED_LOCATION_URI:
+ if (value == null) {
+ unsetManagedLocationUri();
+ } else {
+ setManagedLocationUri((String)value);
+ }
+ break;
+
}
}
@@ -579,6 +621,9 @@ import org.slf4j.LoggerFactory;
case CREATE_TIME:
return getCreateTime();
+ case MANAGED_LOCATION_URI:
+ return getManagedLocationUri();
+
}
throw new IllegalStateException();
}
@@ -608,6 +653,8 @@ import org.slf4j.LoggerFactory;
return isSetCatalogName();
case CREATE_TIME:
return isSetCreateTime();
+ case MANAGED_LOCATION_URI:
+ return isSetManagedLocationUri();
}
throw new IllegalStateException();
}
@@ -706,6 +753,15 @@ import org.slf4j.LoggerFactory;
return false;
}
+ boolean this_present_managedLocationUri = true &&
this.isSetManagedLocationUri();
+ boolean that_present_managedLocationUri = true &&
that.isSetManagedLocationUri();
+ if (this_present_managedLocationUri || that_present_managedLocationUri) {
+ if (!(this_present_managedLocationUri &&
that_present_managedLocationUri))
+ return false;
+ if (!this.managedLocationUri.equals(that.managedLocationUri))
+ return false;
+ }
+
return true;
}
@@ -758,6 +814,11 @@ import org.slf4j.LoggerFactory;
if (present_createTime)
list.add(createTime);
+ boolean present_managedLocationUri = true && (isSetManagedLocationUri());
+ list.add(present_managedLocationUri);
+ if (present_managedLocationUri)
+ list.add(managedLocationUri);
+
return list.hashCode();
}
@@ -859,6 +920,16 @@ import org.slf4j.LoggerFactory;
return lastComparison;
}
}
+ lastComparison =
Boolean.valueOf(isSetManagedLocationUri()).compareTo(other.isSetManagedLocationUri());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetManagedLocationUri()) {
+ lastComparison =
org.apache.thrift.TBaseHelper.compareTo(this.managedLocationUri,
other.managedLocationUri);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -956,6 +1027,16 @@ import org.slf4j.LoggerFactory;
sb.append(this.createTime);
first = false;
}
+ if (isSetManagedLocationUri()) {
+ if (!first) sb.append(", ");
+ sb.append("managedLocationUri:");
+ if (this.managedLocationUri == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.managedLocationUri);
+ }
+ first = false;
+ }
sb.append(")");
return sb.toString();
}
@@ -1089,6 +1170,14 @@ import org.slf4j.LoggerFactory;
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
break;
+ case 10: // MANAGED_LOCATION_URI
+ if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+ struct.managedLocationUri = iprot.readString();
+ struct.setManagedLocationUriIsSet(true);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot,
schemeField.type);
}
@@ -1163,6 +1252,13 @@ import org.slf4j.LoggerFactory;
oprot.writeI32(struct.createTime);
oprot.writeFieldEnd();
}
+ if (struct.managedLocationUri != null) {
+ if (struct.isSetManagedLocationUri()) {
+ oprot.writeFieldBegin(MANAGED_LOCATION_URI_FIELD_DESC);
+ oprot.writeString(struct.managedLocationUri);
+ oprot.writeFieldEnd();
+ }
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@@ -1208,7 +1304,10 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCreateTime()) {
optionals.set(8);
}
- oprot.writeBitSet(optionals, 9);
+ if (struct.isSetManagedLocationUri()) {
+ optionals.set(9);
+ }
+ oprot.writeBitSet(optionals, 10);
if (struct.isSetName()) {
oprot.writeString(struct.name);
}
@@ -1243,12 +1342,15 @@ import org.slf4j.LoggerFactory;
if (struct.isSetCreateTime()) {
oprot.writeI32(struct.createTime);
}
+ if (struct.isSetManagedLocationUri()) {
+ oprot.writeString(struct.managedLocationUri);
+ }
}
@Override
public void read(org.apache.thrift.protocol.TProtocol prot, Database
struct) throws org.apache.thrift.TException {
TTupleProtocol iprot = (TTupleProtocol) prot;
- BitSet incoming = iprot.readBitSet(9);
+ BitSet incoming = iprot.readBitSet(10);
if (incoming.get(0)) {
struct.name = iprot.readString();
struct.setNameIsSet(true);
@@ -1297,6 +1399,10 @@ import org.slf4j.LoggerFactory;
struct.createTime = iprot.readI32();
struct.setCreateTimeIsSet(true);
}
+ if (incoming.get(9)) {
+ struct.managedLocationUri = iprot.readString();
+ struct.setManagedLocationUriIsSet(true);
+ }
}
}
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 225e11f..db4cfb9 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -5481,6 +5481,10 @@ class Database {
* @var int
*/
public $createTime = null;
+ /**
+ * @var string
+ */
+ public $managedLocationUri = null;
public function __construct($vals=null) {
if (!isset(self::$_TSPEC)) {
@@ -5530,6 +5534,10 @@ class Database {
'var' => 'createTime',
'type' => TType::I32,
),
+ 10 => array(
+ 'var' => 'managedLocationUri',
+ 'type' => TType::STRING,
+ ),
);
}
if (is_array($vals)) {
@@ -5560,6 +5568,9 @@ class Database {
if (isset($vals['createTime'])) {
$this->createTime = $vals['createTime'];
}
+ if (isset($vals['managedLocationUri'])) {
+ $this->managedLocationUri = $vals['managedLocationUri'];
+ }
}
}
@@ -5659,6 +5670,13 @@ class Database {
$xfer += $input->skip($ftype);
}
break;
+ case 10:
+ if ($ftype == TType::STRING) {
+ $xfer += $input->readString($this->managedLocationUri);
+ } else {
+ $xfer += $input->skip($ftype);
+ }
+ break;
default:
$xfer += $input->skip($ftype);
break;
@@ -5733,6 +5751,11 @@ class Database {
$xfer += $output->writeI32($this->createTime);
$xfer += $output->writeFieldEnd();
}
+ if ($this->managedLocationUri !== null) {
+ $xfer += $output->writeFieldBegin('managedLocationUri', TType::STRING,
10);
+ $xfer += $output->writeString($this->managedLocationUri);
+ $xfer += $output->writeFieldEnd();
+ }
$xfer += $output->writeFieldStop();
$xfer += $output->writeStructEnd();
return $xfer;
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 2db435a..cf31379 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -4046,6 +4046,7 @@ class Database:
- ownerType
- catalogName
- createTime
+ - managedLocationUri
"""
thrift_spec = (
@@ -4059,9 +4060,10 @@ class Database:
(7, TType.I32, 'ownerType', None, None, ), # 7
(8, TType.STRING, 'catalogName', None, None, ), # 8
(9, TType.I32, 'createTime', None, None, ), # 9
+ (10, TType.STRING, 'managedLocationUri', None, None, ), # 10
)
- def __init__(self, name=None, description=None, locationUri=None,
parameters=None, privileges=None, ownerName=None, ownerType=None,
catalogName=None, createTime=None,):
+ def __init__(self, name=None, description=None, locationUri=None,
parameters=None, privileges=None, ownerName=None, ownerType=None,
catalogName=None, createTime=None, managedLocationUri=None,):
self.name = name
self.description = description
self.locationUri = locationUri
@@ -4071,6 +4073,7 @@ class Database:
self.ownerType = ownerType
self.catalogName = catalogName
self.createTime = createTime
+ self.managedLocationUri = managedLocationUri
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is
not None and fastbinary is not None:
@@ -4133,6 +4136,11 @@ class Database:
self.createTime = iprot.readI32()
else:
iprot.skip(ftype)
+ elif fid == 10:
+ if ftype == TType.STRING:
+ self.managedLocationUri = iprot.readString()
+ else:
+ iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
@@ -4183,6 +4191,10 @@ class Database:
oprot.writeFieldBegin('createTime', TType.I32, 9)
oprot.writeI32(self.createTime)
oprot.writeFieldEnd()
+ if self.managedLocationUri is not None:
+ oprot.writeFieldBegin('managedLocationUri', TType.STRING, 10)
+ oprot.writeString(self.managedLocationUri)
+ oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
@@ -4201,6 +4213,7 @@ class Database:
value = (value * 31) ^ hash(self.ownerType)
value = (value * 31) ^ hash(self.catalogName)
value = (value * 31) ^ hash(self.createTime)
+ value = (value * 31) ^ hash(self.managedLocationUri)
return value
def __repr__(self):
diff --git
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index bd43407..849970e 100644
---
a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++
b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -999,6 +999,7 @@ class Database
OWNERTYPE = 7
CATALOGNAME = 8
CREATETIME = 9
+ MANAGEDLOCATIONURI = 10
FIELDS = {
NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
@@ -1009,7 +1010,8 @@ class Database
OWNERNAME => {:type => ::Thrift::Types::STRING, :name => 'ownerName',
:optional => true},
OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType',
:optional => true, :enum_class => ::PrincipalType},
CATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'catalogName',
:optional => true},
- CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime',
:optional => true}
+ CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime',
:optional => true},
+ MANAGEDLOCATIONURI => {:type => ::Thrift::Types::STRING, :name =>
'managedLocationUri', :optional => true}
}
def struct_fields; FIELDS; end
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 75a9368..7092ee8 100755
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -72,6 +72,7 @@ public class Warehouse {
private final Configuration conf;
private final String whRootString;
private final String whRootExternalString;
+ private final boolean isTenantBasedStorage;
public static final Logger LOG =
LoggerFactory.getLogger("hive.metastore.warehouse");
@@ -90,6 +91,7 @@ public class Warehouse {
fsHandler = getMetaStoreFsHandler(conf);
cm = ReplChangeManager.getInstance(conf);
storageAuthCheck = MetastoreConf.getBoolVar(conf,
ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS);
+ isTenantBasedStorage = MetastoreConf.getBoolVar(conf,
ConfVars.ALLOW_TENANT_BASED_STORAGE);
}
private MetaStoreFS getMetaStoreFsHandler(Configuration conf)
@@ -194,9 +196,9 @@ public class Warehouse {
}
if (cat == null || cat.getName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) {
if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
- return getWhRoot();
+ return getWhRootExternal();
} else {
- return new Path(getWhRoot(), dbDirFromDbName(db));
+ return new Path(getWhRootExternal(), dbDirFromDbName(db));
}
} else {
return new Path(getDnsPath(new Path(cat.getLocationUri())),
dbDirFromDbName(db));
@@ -208,7 +210,7 @@ public class Warehouse {
}
/**
- * Get the path specified by the database. In the case of the default
database the root of the
+ * Get the managed tables path specified by the database. In the case of
the default database the root of the
* warehouse is returned.
* @param db database to get the path of
* @return path to the database directory
@@ -216,30 +218,65 @@ public class Warehouse {
* file system.
*/
public Path getDatabasePath(Database db) throws MetaException {
+ if (db.getManagedLocationUri() != null) {
+ return getDnsPath(new Path(db.getManagedLocationUri()));
+ }
if (db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME) &&
db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
return getWhRoot();
}
+ // this is for backward-compatibility where certain DBs do not have
managedLocationUri set
return new Path(db.getLocationUri());
}
+ /**
+ * Get the managed tables path specified by the database. In the case of
the default database the root of the
+ * warehouse is returned.
+ * @param db database to get the path of
+ * @return path to the database directory
+ * @throws MetaException when the file path cannot be properly determined
from the configured
+ * file system.
+ */
+ public Path getDatabaseManagedPath(Database db) throws MetaException {
+ if (db.getManagedLocationUri() != null) {
+ return getDnsPath(new Path(db.getManagedLocationUri()));
+ }
+ if (!db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) {
+ return new Path(db.getLocationUri());
+ }
+ if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+ return getWhRoot();
+ }
+
+ return new Path(getWhRoot(), db.getName().toLowerCase() +
DATABASE_WAREHOUSE_SUFFIX);
+ }
+
public Path getDefaultDatabasePath(String dbName) throws MetaException {
// TODO CAT - I am fairly certain that most calls to this are in error.
This should only be
// used when the database location is unset, which should never happen
except when a
// new database is being created. Once I have confirmation of this, change
calls of this to
// getDatabasePath(), since it does the right thing. Also, merge this with
// determineDatabasePath() as it duplicates much of the logic.
- if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
- return getWhRoot();
- }
- return new Path(getWhRoot(), dbName.toLowerCase() +
DATABASE_WAREHOUSE_SUFFIX);
+ return getDefaultDatabasePath(dbName, false);
}
public Path getDefaultExternalDatabasePath(String dbName) throws
MetaException {
- if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
- return getWhRootExternal();
+ return getDefaultDatabasePath(dbName, true);
+ }
+
+ // should only be used to determine paths before the creation of databases
+ public Path getDefaultDatabasePath(String dbName, boolean inExternalWH)
throws MetaException {
+ if (inExternalWH) {
+ if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+ return getWhRootExternal();
+ }
+ return new Path(getWhRootExternal(), dbName.toLowerCase() +
DATABASE_WAREHOUSE_SUFFIX);
+ } else {
+ if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) {
+ return getWhRoot();
+ }
+ return new Path(getWhRoot(), dbName.toLowerCase() +
DATABASE_WAREHOUSE_SUFFIX);
}
- return new Path(getWhRootExternal(), dbName.toLowerCase() +
DATABASE_WAREHOUSE_SUFFIX);
}
private boolean hasExternalWarehouseRoot() {
@@ -261,15 +298,29 @@ public class Warehouse {
public Path getDefaultTablePath(Database db, String tableName, boolean
isExternal) throws MetaException {
Path dbPath = null;
- if (isExternal && hasExternalWarehouseRoot()) {
- dbPath = getDefaultExternalDatabasePath(db.getName());
+ if (isExternal) {
+ dbPath = new Path(db.getLocationUri());
+ if (FileUtils.isSubdirectory(getWhRoot().toString(), dbPath.toString() +
Path.SEPARATOR)) {
+ // db metadata incorrect, find new location based on external
warehouse root
+ dbPath = getDefaultExternalDatabasePath(db.getName());
+ }
} else {
- dbPath = getDatabasePath(db);
+ if (isTenantBasedStorage) {
+ dbPath = getDatabaseManagedPath(db);
+ } else {
+ dbPath = getDatabasePath(db);
+ }
}
return getDnsPath(
new Path(dbPath,
MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
}
+ public Path getDefaultManagedTablePath(Database db, String tableName) throws
MetaException {
+ Path dbPath = getDatabaseManagedPath(db);
+ return getDnsPath(
+ new Path(dbPath,
MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
+ }
+
// A few situations where we need the default table path, without a DB object
public Path getDefaultTablePath(String dbName, String tableName, boolean
isExternal) throws MetaException {
Path dbPath = null;
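Taken together, the Warehouse changes split path resolution between the two warehouses: getDatabasePath() and getDefaultDatabasePath() now resolve against the external warehouse root, while getDatabaseManagedPath() prefers the database's managedLocationUri and falls back to the managed warehouse root. A minimal usage sketch, assuming wh is an initialized Warehouse and db a Database fetched from the metastore ("orders" and all paths are illustrative):

    Path managedDb   = wh.getDatabaseManagedPath(db);            // managedLocationUri or <whRoot>/<db>.db
    Path managedTbl  = wh.getDefaultManagedTablePath(db, "orders");
    Path externalDb  = wh.getDefaultExternalDatabasePath(db.getName());
    Path externalTbl = wh.getDefaultTablePath(db, "orders", true /* isExternal */);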
diff --git
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 411f999..3bfb0e6 100644
---
a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -303,6 +303,11 @@ public class MetastoreConf {
AGGREGATE_STATS_CACHE_TTL("metastore.aggregate.stats.cache.ttl",
"hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS,
"Number of seconds for a cached node to be active in the cache before
they become stale."),
+ ALLOW_TENANT_BASED_STORAGE("metastore.warehouse.tenant.colocation",
"hive.metastore.warehouse.tenant.colocation", false,
+ "Allows managed and external tables for a tenant to share a common
parent directory.\n" +
+ "For example: /user/warehouse/user1/managed and
/user/warehouse/user1/external\n" +
+ "This lets users set quotas on the user1 directory. These locations
have to be defined explicitly on the\n" +
+ "database object when creating the DB or via ALTER DATABASE."),
ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl",
HIVE_ALTER_HANDLE_CLASS,
"Alter handler. For now defaults to the Hive one. Really need a
better default option"),
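A minimal sketch of flipping the new flag programmatically (test-style), assuming the standard MetastoreConf helpers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setBoolVar(conf,
        MetastoreConf.ConfVars.ALLOW_TENANT_BASED_STORAGE, true);
    Warehouse wh = new Warehouse(conf); // reads the flag in its constructor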
diff --git
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index e2f541c..098ddec 100644
---
a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++
b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -386,6 +386,7 @@ struct Database {
7: optional PrincipalType ownerType,
8: optional string catalogName,
9: optional i32 createTime // creation time of database in
seconds since epoch
+ 10: optional string managedLocationUri // directory for managed tables
}
// This object holds the information needed by SerDes
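Because field 10 is optional, existing clients are unaffected; a newer client can populate it directly on the generated struct. A sketch with made-up URIs:

    // org.apache.hadoop.hive.metastore.api.Database; java.util.HashMap
    Database db = new Database("sales", "sales data",
        "hdfs://nn:8020/warehouse/tenant1/external/sales.db", new HashMap<>());
    db.setManagedLocationUri("hdfs://nn:8020/warehouse/tenant1/managed/sales.db");
    assert db.isSetManagedLocationUri();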
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 8d77ffe..078733d 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -239,8 +239,8 @@ public class HiveAlterHandler implements AlterHandler {
// in the table rename, its data location should not be changed. We
can check
// if the table directory was created directly under its database
directory to tell
// if it is such a table
- String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri())
- .relativize(srcPath.toUri()).toString();
+ String oldtRelativePath = wh.getDatabasePath(olddb).toUri()
+ .relativize(srcPath.toUri()).toString();
boolean tableInSpecifiedLoc =
!oldtRelativePath.equalsIgnoreCase(name)
&& !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
if (!tableInSpecifiedLoc) {
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 23fcc13..d155887 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -374,7 +374,7 @@ class MetaStoreDirectSql {
String queryTextDbSelector= "select "
+ "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", "
- + "\"OWNER_NAME\", \"OWNER_TYPE\", \"CTLG_NAME\" , \"CREATE_TIME\""
+ + "\"OWNER_NAME\", \"OWNER_TYPE\", \"CTLG_NAME\" , \"CREATE_TIME\",
\"DB_MANAGED_LOCATION_URI\""
+ "FROM "+ DBS
+ " where \"NAME\" = ? and \"CTLG_NAME\" = ? ";
Object[] params = new Object[] { dbName, catName };
@@ -429,6 +429,7 @@ class MetaStoreDirectSql {
(null == type || type.trim().isEmpty()) ? null :
PrincipalType.valueOf(type));
db.setCatalogName(MetastoreDirectSqlUtils.extractSqlString(dbline[6]));
db.setCreateTime(MetastoreDirectSqlUtils.extractSqlInt(dbline[7]));
+
db.setManagedLocationUri(MetastoreDirectSqlUtils.extractSqlString(dbline[8]));
db.setParameters(MetaStoreServerUtils.trimMapNulls(dbParams,convertMapNullsToEmptyStrings));
if (LOG.isDebugEnabled()){
LOG.debug("getDatabase: directsql returning db " + db.getName()
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
index a8de6ba..fa16192 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
@@ -37,6 +37,7 @@ import
org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.slf4j.Logger;
@@ -46,6 +47,7 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
public static final Logger LOG =
LoggerFactory.getLogger(MetastoreDefaultTransformer.class);
private IHMSHandler hmsHandler = null;
private String defaultCatalog = null;
+ private boolean isTenantBasedStorage = false;
private static final String CONNECTORREAD = "CONNECTORREAD".intern();
private static final String CONNECTORWRITE = "CONNECTORWRITE".intern();
@@ -79,6 +81,7 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
public MetastoreDefaultTransformer(IHMSHandler handler) throws
HiveMetaException {
this.hmsHandler = handler;
this.defaultCatalog = MetaStoreUtils.getDefaultCatalog(handler.getConf());
+ this.isTenantBasedStorage =
hmsHandler.getConf().getBoolean(MetastoreConf.ConfVars.ALLOW_TENANT_BASED_STORAGE.getVarname(),
false);
acidWriteList.addAll(ACIDCOMMONWRITELIST);
acidList.addAll(acidWriteList);
@@ -565,7 +568,15 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
String txn_properties = null;
boolean isInsertAcid = false;
- if (TableType.MANAGED_TABLE.name().equals(tableType)) {
+ String dbName = table.getDbName();
+ Database db = null;
+ try {
+ db = hmsHandler.get_database_core(table.getCatName(), table.getDbName());
+ } catch (NoSuchObjectException e) {
+ throw new MetaException("Database " + dbName + " for table " +
table.getTableName() + " could not be found");
+ }
+
+ if (TableType.MANAGED_TABLE.name().equals(tableType)) {
LOG.debug("Table is a MANAGED_TABLE");
txnal = params.get(TABLE_IS_TRANSACTIONAL);
txn_properties = params.get(TABLE_TRANSACTIONAL_PROPERTIES);
@@ -580,9 +591,10 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
params.put("TRANSLATED_TO_EXTERNAL", "TRUE");
newTable.setParameters(params);
LOG.info("Modified table params are:" + params.toString());
+
if (!table.isSetSd() || table.getSd().getLocation() == null) {
try {
- Path newPath =
hmsHandler.getWh().getDefaultTablePath(table.getDbName(), table.getTableName(),
true);
+ Path newPath = hmsHandler.getWh().getDefaultTablePath(db,
table.getTableName(), true);
newTable.getSd().setLocation(newPath.toString());
LOG.info("Modified location from null to " + newPath);
} catch (Exception e) {
@@ -593,9 +605,11 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
if (processorCapabilities == null || processorCapabilities.isEmpty()) {
throw new MetaException("Processor has no capabilities, cannot
create an ACID table.");
}
+
+ newTable = validateTablePaths(table);
if (isInsertAcid) { // MICRO_MANAGED Tables
if (processorCapabilities.contains(HIVEMANAGEDINSERTWRITE)) {
- LOG.info("Processor has required capabilities to be able to create
INSERT-only tables");
+ LOG.debug("Processor has required capabilities to be able to
create INSERT-only tables");
return newTable;
} else {
throw new MetaException("Processor does not have capabilities to
create a INSERT ACID table:" +
@@ -603,7 +617,7 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
}
} else { // FULL-ACID table
if (processorCapabilities.contains(HIVEFULLACIDWRITE)) {
- LOG.info("Processor has required capabilities to be able to create
FULLACID tables.");
+ LOG.debug("Processor has required capabilities to be able to
create FULLACID tables.");
return newTable;
} else {
throw new MetaException("Processor does not have capabilities to
create a FULL ACID table:" +
@@ -612,18 +626,8 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
}
}
} else if (TableType.EXTERNAL_TABLE.name().equals(tableType)) {
- LOG.info("Table to be created is of type " + tableType + " but not " +
TableType.MANAGED_TABLE.toString());
- String tableLocation = table.isSetSd()? table.getSd().getLocation() :
null;
- Path whRootPath =
Path.getPathWithoutSchemeAndAuthority(hmsHandler.getWh().getWhRoot());
-
- if (tableLocation != null) {
- Path tablePath = Path.getPathWithoutSchemeAndAuthority(new
Path(tableLocation));
- if (FileUtils.isSubdirectory(whRootPath.toString(),
tablePath.toString())) {
- throw new MetaException(
- "An external table's location should not be located within managed
warehouse root directory," + "table:"
- + table.getTableName() + ",location:" + tablePath + ",Hive
managed warehouse:" + whRootPath);
- }
- }
+ LOG.debug("Table to be created is of type " + tableType);
+ newTable = validateTablePaths(table);
}
LOG.info("Transformer returning table:" + newTable.toString());
return newTable;
@@ -638,37 +642,10 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
LOG.info("Starting translation for Alter table for processor " +
processorId + " with " + processorCapabilities
+ " on table " + table.getTableName());
- String tableType = table.getTableType();
- Path tableLocation = null;
- Path whRootPath = null;
- if (TableType.MANAGED_TABLE.name().equals(tableType)) {
- LOG.debug("Table is a MANAGED_TABLE");
- if (table.isSetSd()) {
- tableLocation = Path.getPathWithoutSchemeAndAuthority(new
Path(table.getSd().getLocation()));
- }
- whRootPath =
Path.getPathWithoutSchemeAndAuthority(hmsHandler.getWh().getWhRoot());
- if (tableLocation != null && tableLocationChanged(table) &&
!FileUtils.isSubdirectory(whRootPath.toString(), tableLocation.toString())) {
- throw new MetaException(
- "A managed table's location needs to be under the hive warehouse
root directory," + "table:"
- + table.getTableName() + ",location:" + tableLocation + ",Hive
warehouse:" + whRootPath);
- }
- } else if (TableType.EXTERNAL_TABLE.name().equals(tableType)) {
- if (table.isSetSd()) {
- tableLocation = Path.getPathWithoutSchemeAndAuthority(new
Path(table.getSd().getLocation()));
- }
- whRootPath =
Path.getPathWithoutSchemeAndAuthority(hmsHandler.getWh().getWhRoot());
- if (tableLocation != null) {
- LOG.debug("Table is an EXTERNAL TABLE:tableLocation={}, whroot={}",
tableLocation, whRootPath);
- if (tableLocationChanged(table) &&
FileUtils.isSubdirectory(whRootPath.toString(), tableLocation.toString())) {
- throw new MetaException(
- "An external table's location should not be located within
managed warehouse root directory," + "table:"
- + table.getTableName() + ",location:" + tableLocation + ",Hive
managed warehouse:" + whRootPath);
- }
- } else {
- LOG.debug("Table is an EXTERNAL TABLE:tableLocation=null");
- }
- }
+ if (tableLocationChanged(table))
+ validateTablePaths(table);
+
LOG.debug("Transformer returning table:" + table.toString());
return table;
}
@@ -702,12 +679,13 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
LOG.info("Starting translation for transformDatabase for processor " +
processorId + " with " + processorCapabilities
+ " on database " + db.getName());
- if (processorCapabilities == null ||
(!processorCapabilities.contains(HIVEMANAGEDINSERTWRITE) &&
- !processorCapabilities.contains(HIVEFULLACIDWRITE))) {
+ if (!isTenantBasedStorage && (processorCapabilities == null ||
(!processorCapabilities.contains(HIVEMANAGEDINSERTWRITE) &&
+ !processorCapabilities.contains(HIVEFULLACIDWRITE)))) {
LOG.info("Processor does not have any of ACID write capabilities,
changing current location from " +
db.getLocationUri() + " to external warehouse location");
Path extWhLocation =
hmsHandler.getWh().getDefaultExternalDatabasePath(db.getName());
LOG.debug("Setting DBLocation to " + extWhLocation.toString());
+ // TODO should not alter database now.
db.setLocationUri(extWhLocation.toString());
}
LOG.info("Transformer returning database:" + db.toString());
@@ -767,4 +745,78 @@ public class MetastoreDefaultTransformer implements
IMetaStoreMetadataTransforme
}
return reads;
}
+
+ private Table validateTablePaths(Table table) throws MetaException {
+ Database db = null;
+ String tableLocation = table.isSetSd()? table.getSd().getLocation() : null;
+ try {
+ db = hmsHandler.get_database_core(table.getCatName(), table.getDbName());
+ } catch (NoSuchObjectException e) {
+ throw new MetaException("Database " + table.getTableName() + " for table
" + table.getTableName() + " could not be found");
+ }
+
+ if (TableType.MANAGED_TABLE.name().equals(table.getTableType())) {
+ if (db.getManagedLocationUri() != null) {
+ if (tableLocation != null) {
+ throw new MetaException("Location for managed table is derived from
the database's managedLocationUri, "
+ + "it cannot be specified by the user");
+ } else {
+ Path path = hmsHandler.getWh().getDefaultTablePath(db,
table.getTableName(), false);
+ table.getSd().setLocation(path.toString());
+ return table;
+ }
+ } else {
+ if (tableLocation != null) {
+ Path tablePath = Path.getPathWithoutSchemeAndAuthority(new
Path(tableLocation));
+ if
(!FileUtils.isSubdirectory(hmsHandler.getWh().getWhRoot().toString(),
tableLocation)) {
+ throw new MetaException(
+ "A managed table's location should be located within managed
warehouse root directory or within its database's "
+ + "managedLocationUri. Table " + table.getTableName() +
"'s location is not valid:" + tableLocation
+ + ", managed warehouse:" + hmsHandler.getWh().getWhRoot());
+ }
+ } else {
+ Path path = hmsHandler.getWh().getDefaultManagedTablePath(db,
table.getTableName());
+ table.getSd().setLocation(path.toString());
+ }
+ }
+ } else { // EXTERNAL TABLE
+ Path whRootPath =
Path.getPathWithoutSchemeAndAuthority(hmsHandler.getWh().getWhRoot());
+ Path dbLocation = Path.getPathWithoutSchemeAndAuthority(new
Path(db.getLocationUri()));
+ LOG.debug("ValidateTablePaths: whRoot={} dbLocation={} tableLocation={}
", whRootPath.toString(), dbLocation.toString(), tableLocation);
+ if (tableLocation != null) {
+ Path tablePath = Path.getPathWithoutSchemeAndAuthority(new
Path(tableLocation));
+ if (isTenantBasedStorage) {
+ if (!FileUtils.isSubdirectory(dbLocation.toString(),
tablePath.toString())) { // location outside dblocation
+ throw new MetaException(
+ "An external table's location should not be located outside
the location specified on its database, table:"
+ + table.getTableName() + ",location:" + tablePath +
",Database location for external tables:" + dbLocation);
+ }
+
+ dbLocation = (db.getManagedLocationUri() == null) ? null :
Path.getPathWithoutSchemeAndAuthority(new Path(db.getManagedLocationUri()));
+ if (dbLocation != null &&
FileUtils.isSubdirectory(dbLocation.toString(), tablePath.toString())) {
+ throw new MetaException(
+ "An external table's location should not be located within
managed warehouse root directory of its database, table:"
+ + table.getTableName() + ",location:" + tablePath +
",Database's managed warehouse:" + dbLocation);
+ }
+ } else {
+ if (FileUtils.isSubdirectory(whRootPath.toString(),
tablePath.toString())) {
+ throw new MetaException(
+ "An external table's location should not be located within
managed warehouse root directory, table:"
+ + table.getTableName() + ",location:" + tablePath +
",managed warehouse:" + whRootPath);
+ }
+ return table;
+ }
+ } else {
+ dbLocation = Path.getPathWithoutSchemeAndAuthority(new
Path(db.getLocationUri()));
+ Path tablePath = null;
+ if (!FileUtils.isSubdirectory(whRootPath.toString(),
dbLocation.toString())) {
+ tablePath = new Path(db.getLocationUri(), table.getTableName());
+ } else {
+ tablePath = hmsHandler.getWh().getDefaultTablePath(db,
table.getTableName(), true);
+ }
+ table.getSd().setLocation(tablePath.toString());
+ }
+ }
+ return table;
+ }
}
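A condensed restatement of the managed-table rules that validateTablePaths enforces above, as a standalone sketch (illustrative names, not the actual metastore code; startsWith stands in for FileUtils.isSubdirectory):

    static void checkManagedTableLocation(String dbManagedLocationUri,
        String requestedLocation, String managedWhRoot) throws MetaException {
      if (dbManagedLocationUri != null) {
        if (requestedLocation != null) {
          // the path derives from the database; user-supplied LOCATION is rejected
          throw new MetaException("Location for managed table is derived from"
              + " the database's managedLocationUri");
        }
        return; // default: <managedLocationUri>/<table name>
      }
      if (requestedLocation != null && !requestedLocation.startsWith(managedWhRoot)) {
        throw new MetaException("A managed table's location should be within"
            + " the managed warehouse root directory");
      }
      // otherwise default: getDefaultManagedTablePath(db, tableName)
    }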
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 289a3ee..53b7a67 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -657,6 +657,7 @@ public class ObjectStore implements RawStore, Configurable {
assert mdb.getCatalogName() != null;
mdb.setName(db.getName().toLowerCase());
mdb.setLocationUri(db.getLocationUri());
+ mdb.setManagedLocationUri(db.getManagedLocationUri());
mdb.setDescription(db.getDescription());
mdb.setParameters(db.getParameters());
mdb.setOwnerName(db.getOwnerName());
@@ -751,6 +752,7 @@ public class ObjectStore implements RawStore, Configurable {
db.setName(mdb.getName());
db.setDescription(mdb.getDescription());
db.setLocationUri(mdb.getLocationUri());
+
db.setManagedLocationUri(org.apache.commons.lang3.StringUtils.defaultIfBlank(mdb.getManagedLocationUri(),
null));
db.setParameters(convertMap(mdb.getParameters()));
db.setOwnerName(mdb.getOwnerName());
String type =
org.apache.commons.lang3.StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
@@ -786,6 +788,9 @@ public class ObjectStore implements RawStore, Configurable {
if
(org.apache.commons.lang3.StringUtils.isNotBlank(db.getLocationUri())) {
mdb.setLocationUri(db.getLocationUri());
}
+ if
(org.apache.commons.lang3.StringUtils.isNotBlank(db.getManagedLocationUri())) {
+ mdb.setManagedLocationUri(db.getManagedLocationUri());
+ }
openTransaction();
pm.makePersistent(mdb);
committed = commitTransaction();
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
index 58d406d..21e3a9f 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java
@@ -37,7 +37,7 @@ import java.util.Map;
* selects reasonable defaults.
*/
public class DatabaseBuilder {
- private String name, description, location, catalogName;
+ private String name, description, location, managedLocation, catalogName;
private Map<String, String> params = new HashMap<>();
private String ownerName;
private PrincipalType ownerType;
@@ -71,6 +71,11 @@ public class DatabaseBuilder {
return this;
}
+ public DatabaseBuilder setManagedLocation(String location) {
+ this.managedLocation = location;
+ return this;
+ }
+
public DatabaseBuilder setParams(Map<String, String> params) {
this.params = params;
return this;
@@ -102,6 +107,8 @@ public class DatabaseBuilder {
Database db = new Database(name, description, location, params);
db.setCatalogName(catalogName);
db.setCreateTime(createTime);
+ if (managedLocation != null)
+ db.setManagedLocationUri(managedLocation);
try {
if (ownerName == null) ownerName = SecurityUtils.getUser();
db.setOwnerName(ownerName);
@@ -123,6 +130,8 @@ public class DatabaseBuilder {
Database db = new Database(name, description, location, params);
db.setCatalogName(catalogName);
db.setCreateTime(createTime);
+ if (managedLocation != null)
+ db.setManagedLocationUri(managedLocation);
return db;
}
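With the builder addition, both locations can be set fluently when creating a database; a sketch assuming an IMetaStoreClient handle and made-up paths:

    Database db = new DatabaseBuilder()
        .setName("tenant1_db")
        .setLocation("/warehouse/tenant1/external/tenant1_db.db")
        .setManagedLocation("/warehouse/tenant1/managed/tenant1_db.db")
        .create(client, conf); // client: IMetaStoreClient, conf: Configuration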
diff --git
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
index 8901eed..3fc0b39 100644
---
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
+++
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java
@@ -35,6 +35,7 @@ import java.util.Set;
public class MDatabase {
private String name;
private String locationUri;
+ private String managedLocationUri;
private String description;
private Map<String, String> parameters;
private String ownerName;
@@ -49,14 +50,31 @@ public class MDatabase {
/**
* To create a database object
+ * @param catalogName Name of the catalog, the database belongs to.
* @param name of the database
* @param locationUri Location of the database in the warehouse
* @param description Comment describing the database
+ * @param parameters Parameters for the database
*/
public MDatabase(String catalogName, String name, String locationUri, String
description,
Map<String, String> parameters) {
+ this(catalogName, name, locationUri, description, parameters, null);
+ }
+
+ /**
+ * To create a database object
+ * @param catalogName Name of the catalog, the database belongs to.
+ * @param name of the database
+ * @param locationUri Default external Location of the database
+ * @param description Comment describing the database
+ * @param parameters Parameters for the database
+ * @param managedLocationUri Default location for managed tables in database
in the warehouse
+ */
+ public MDatabase(String catalogName, String name, String locationUri, String
description,
+ Map<String, String> parameters, String managedLocationUri) {
this.name = name;
this.locationUri = locationUri;
+ this.managedLocationUri = managedLocationUri;
this.description = description;
this.parameters = parameters;
this.catalogName = catalogName;
@@ -91,6 +109,20 @@ public class MDatabase {
}
/**
+ * @return the managedLocationUri
+ */
+ public String getManagedLocationUri() {
+ return managedLocationUri;
+ }
+
+ /**
+ * @param managedLocationUri the locationUri to set for managed tables.
+ */
+ public void setManagedLocationUri(String managedLocationUri) {
+ this.managedLocationUri = managedLocationUri;
+ }
+
+ /**
* @return the description
*/
public String getDescription() {
diff --git
a/standalone-metastore/metastore-server/src/main/resources/package.jdo
b/standalone-metastore/metastore-server/src/main/resources/package.jdo
index 88eabfa..b856316 100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@ -69,6 +69,10 @@
<!-- allows null is true to keep backwards compatibility with old
releases -->
<column name="CREATE_TIME" jdbc-type="integer" allows-null="true"/>
</field>
+ <field name="managedLocationUri">
+ <column name="DB_MANAGED_LOCATION_URI" length="4000"
jdbc-type="VARCHAR" allows-null="true"/>
+ </field>
+
</class>
<class name="MCatalog" identity-type="datastore" table="CTLGS"
detachable="true">
diff --git
a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 48ad676..05adbe9 100644
---
a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++
b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -23,7 +23,8 @@ CREATE TABLE "APP"."DBS" (
"OWNER_NAME" VARCHAR(128),
"OWNER_TYPE" VARCHAR(10),
"CTLG_NAME" VARCHAR(256) NOT NULL DEFAULT 'hive',
- "CREATE_TIME" INTEGER
+ "CREATE_TIME" INTEGER,
+ "DB_MANAGED_LOCATION_URI" VARCHAR(4000)
);
CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME"
INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128),
"GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE"
VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER"
VARCHAR(128));
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index 7a230bd..35a2e64 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@ -61,5 +61,8 @@ CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_
-- HIVE-22872
ALTER TABLE "SCHEDULED_QUERIES" ADD "ACTIVE_EXECUTION_ID" bigint;
+-- HIVE-22995
+ALTER TABLE "APP"."DBS" ADD COLUMN "DB_MANAGED_LOCATION_URI" VARCHAR(4000);
+
-- This needs to be the last thing done. Insert any changes above this line.
-UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release
version 4.0.0' where VER_ID=1;
\ No newline at end of file
+UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release
version 4.0.0' where VER_ID=1;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 9ed7f4f..f3c74bf 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -288,7 +288,8 @@ CREATE TABLE DBS
OWNER_NAME nvarchar(128) NULL,
OWNER_TYPE nvarchar(10) NULL,
CTLG_NAME nvarchar(256) DEFAULT 'hive',
- CREATE_TIME INT
+ CREATE_TIME INT,
+ DB_MANAGED_LOCATION_URI nvarchar(4000) NULL
);
ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index 12d24e9..228bb7c 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@ -64,6 +64,8 @@ CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_
-- HIVE-23033
INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+-- HIVE-22995
+ALTER TABLE DBS ADD DB_MANAGED_LOCATION_URI nvarchar(4000);
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index bc34b51..626d888 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -104,6 +104,7 @@ CREATE TABLE IF NOT EXISTS `DBS` (
`OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
`CTLG_NAME` varchar(256) NOT NULL DEFAULT 'hive',
`CREATE_TIME` INT(11),
+ `DB_MANAGED_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
PRIMARY KEY (`DB_ID`),
UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 13f03bc..35da7b5 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@ -65,6 +65,9 @@ CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_
-- HIVE-22872
ALTER TABLE SCHEDULED_QUERIES ADD COLUMN ACTIVE_EXECUTION_ID INTEGER ;
+-- HIVE-22995
+ALTER TABLE DBS ADD COLUMN DB_MANAGED_LOCATION_URI VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 8482b59..a25f4e4 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -101,7 +101,8 @@ CREATE TABLE DBS
OWNER_NAME VARCHAR2(128) NULL,
OWNER_TYPE VARCHAR2(10) NULL,
CTLG_NAME VARCHAR2(256) DEFAULT 'hive',
- CREATE_TIME NUMBER (10)
+ CREATE_TIME NUMBER (10),
+ DB_MANAGED_LOCATION_URI VARCHAR2(4000) NULL
);
ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index cbfdd86..d462b4a 100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@ -65,6 +65,9 @@ CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_
-- HIVE-22872
ALTER TABLE SCHEDULED_QUERIES ADD ACTIVE_EXECUTION_ID number(19);
+-- HIVE-22995
+ALTER TABLE DBS ADD DB_MANAGED_LOCATION_URI VARCHAR2(4000) NULL;
+
-- These lines need to be last. Insert any changes above.
UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index aa35a7a..2066340 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -82,7 +82,8 @@ CREATE TABLE "DBS" (
"OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
"OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
"CTLG_NAME" varchar(256) DEFAULT 'hive',
- "CREATE_TIME" bigint
+ "CREATE_TIME" bigint,
+ "DB_MANAGED_LOCATION_URI" character varying(4000)
);
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index 9462328..a50a071 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@ -196,6 +196,9 @@ CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_DATABAS
-- HIVE-22872
ALTER TABLE "SCHEDULED_QUERIES" ADD "ACTIVE_EXECUTION_ID" bigint;
+-- HIVE-22995
+ALTER TABLE "DBS" ADD "DB_MANAGED_LOCATION_URI" character varying(4000);
+
-- These lines need to be last. Insert any changes above.
UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release
version 4.0.0' where "VER_ID"=1;
SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';
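All five backends add the same nullable column to DBS, so a post-upgrade sanity check is backend-neutral. A minimal JDBC sketch (the connection URL and credentials are placeholders; identifier quoting follows the Postgres schema above, drop the double quotes on MySQL, Oracle, or MSSQL):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class CheckManagedLocation {
      public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at the metastore backing database.
        try (Connection conn = DriverManager.getConnection(
                 "jdbc:postgresql://localhost/metastore", "hive", "hive");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT \"NAME\", \"DB_MANAGED_LOCATION_URI\" FROM \"DBS\"")) {
          while (rs.next()) {
            // DB_MANAGED_LOCATION_URI stays NULL until a managed location is set.
            System.out.println(rs.getString(1) + " -> " + rs.getString(2));
          }
        }
      }
    }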
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java
index b3690ec..642805e 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java
@@ -39,6 +39,6 @@ public class TestCatalogOldClient extends NonCatCallsWithCatalog {
@Override
protected String expectedBaseDir() throws MetaException {
- return new Warehouse(conf).getWhRoot().toUri().getPath();
+ return new Warehouse(conf).getWhRootExternal().toUri().getPath();
}
}
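The test above switches its expected base directory from getWhRoot() to getWhRootExternal(), reflecting that database directories now default under the external root. A minimal sketch of the distinction the Warehouse API draws (configuration setup is elided and hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    static void printRoots(Configuration conf) throws MetaException {
      Warehouse wh = new Warehouse(conf);
      Path managedRoot = wh.getWhRoot();           // default root for managed tables
      Path externalRoot = wh.getWhRootExternal();  // default root for external tables
      System.out.println(managedRoot + " vs " + externalRoot);
    }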
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 5f85165..3f04abe 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -1863,8 +1863,14 @@ public abstract class TestHiveMetaStore {
client.dropTable(dbName, tblName);
silentDropDatabase(dbName);
+ String dbLocation =
+ "/tmp/warehouse/_testDB_table_create_";
+ String mgdLocation =
+ MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
new DatabaseBuilder()
.setName(dbName)
+ .setLocation(dbLocation)
+ .setManagedLocation(mgdLocation)
.create(client, conf);
ArrayList<FieldSchema> invCols = new ArrayList<>(2);
@@ -2099,26 +2105,32 @@ public abstract class TestHiveMetaStore {
silentDropDatabase(dbName);
String dbLocation =
+ "/tmp/warehouse/_testDB_table_create_";
+ String mgdLocation =
MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
new DatabaseBuilder()
.setName(dbName)
.setLocation(dbLocation)
+ .setManagedLocation(mgdLocation)
.create(client, conf);
Database db = client.getDatabase(dbName);
Table tbl = new TableBuilder()
.setDbName(dbName)
.setTableName(tblName_1)
+ .setType(TableType.EXTERNAL_TABLE.name())
.addCol("name", ColumnType.STRING_TYPE_NAME)
.addCol("income", ColumnType.INT_TYPE_NAME)
+ .addTableParam("EXTERNAL", "TRUE")
.create(client, conf);
tbl = client.getTable(dbName, tblName_1);
Path path = new Path(tbl.getSd().getLocation());
System.err.println("Table's location " + path + ", Database's location "
+ db.getLocationUri());
+ assertEquals("Table type is expected to be EXTERNAL",
TableType.EXTERNAL_TABLE.name(), tbl.getTableType());
assertEquals("Table location is not a subset of the database location",
- path.getParent().toString(), db.getLocationUri());
+ db.getLocationUri(), path.getParent().toString());
} catch (Exception e) {
System.err.println(StringUtils.stringifyException(e));
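Condensing the updated test flow above, a sketch of creating a database with split default locations and an external table inside it (names and paths are hypothetical; client and conf are the test's objects; builder calls are exactly those used in the hunk):

    import org.apache.hadoop.hive.metastore.ColumnType;
    import org.apache.hadoop.hive.metastore.TableType;
    import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
    import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;

    new DatabaseBuilder()
        .setName("split_db")                                   // hypothetical
        .setLocation("/tmp/warehouse/ext/split_db.db")         // default for external tables
        .setManagedLocation("/tmp/warehouse/mgd/split_db.db")  // default for managed tables
        .create(client, conf);

    new TableBuilder()
        .setDbName("split_db")
        .setTableName("t1")
        .setType(TableType.EXTERNAL_TABLE.name())
        .addCol("name", ColumnType.STRING_TYPE_NAME)
        .addTableParam("EXTERNAL", "TRUE")
        .create(client, conf);
    // t1's storage descriptor location resolves under the database's
    // (external) locationUri, as the assertion above verifies.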
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
index ed67d03..9d7cfd2 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java
@@ -150,7 +150,7 @@ public class TestDatabases extends MetaStoreClientTest {
Database createdDatabase = client.getDatabase(database.getName());
Assert.assertNull("Comparing description",
createdDatabase.getDescription());
- Assert.assertEquals("Comparing location", metaStore.getWarehouseRoot() +
"/" +
+ Assert.assertEquals("Comparing location",
metaStore.getExternalWarehouseRoot() + "/" +
createdDatabase.getName() +
".db", createdDatabase.getLocationUri());
Assert.assertEquals("Comparing parameters", new HashMap<String, String>(),
createdDatabase.getParameters());
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 08ec6c4..6d82d79 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -348,7 +348,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
client.createTable(table);
Table createdTable = client.getTable(table.getDbName(), table.getTableName());
- Assert.assertEquals("Storage descriptor location",
metaStore.getWarehouseRoot()
+ Assert.assertEquals("Storage descriptor location",
metaStore.getExternalWarehouseRoot()
+ "/" + table.getDbName() + ".db/" + table.getTableName(),
createdTable.getSd().getLocation());
}
@@ -766,7 +766,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
Assert.assertTrue("New table directory should exist",
metaStore.isPathExists(new Path(alteredTable.getSd().getLocation())));
- Assert.assertEquals("New directory should be set", new
Path(metaStore.getWarehouseRoot()
+ Assert.assertEquals("New directory should be set", new
Path(metaStore.getExternalWarehouseRoot()
+ "/" + alteredTable.getDbName() + ".db/" +
alteredTable.getTableName()),
new Path(alteredTable.getSd().getLocation()));
Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
index 5e9fdca..dc6203f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java
@@ -167,6 +167,7 @@ public abstract class AbstractMetaStoreService {
*/
public void cleanWarehouseDirs() throws MetaException {
warehouse.deleteDir(getWarehouseRoot(), true, true, false);
+ warehouse.deleteDir(getExternalWarehouseRoot(), true, true, false);
warehouse.deleteDir(trashDir, true, true, false);
}
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java
index 0d3446c..11099a6 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java
@@ -111,7 +111,7 @@ public class TestSchemaToolForMetastore {
String[] scripts = new String[] {
"insert into CTLGS values(99, 'test_cat_1', 'description',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', " + time + ");",
"insert into SEQUENCE_TABLE
values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
- "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test',
'test', 'test_cat_1', " + time + ");"
+ "insert into DBS values(99, 'test db1', 'hdfs:///tmp/ext', 'db1',
'test', 'test', 'test_cat_1', " + time + ", 'hdfs:///tmp/mgd');"
};
File scriptFile = generateTestScript(scripts);
schemaTool.execSql(scriptFile.getPath());
@@ -123,7 +123,7 @@ public class TestSchemaToolForMetastore {
"delete from SEQUENCE_TABLE;",
"delete from DBS;",
"insert into SEQUENCE_TABLE
values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);",
- "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test',
'test', 'test_cat_1', " + time + ");"
+ "insert into DBS values(102, 'test db1', 'hdfs:///tmp/ext', 'db1',
'test', 'test', 'test_cat_1', " + time + ", 'hdfs:///tmp/mgd');"
};
scriptFile = generateTestScript(scripts);
schemaTool.execSql(scriptFile.getPath());
@@ -388,8 +388,8 @@ public class TestSchemaToolForMetastore {
// Test valid case
String[] scripts = new String[] {
"insert into CTLGS values(3, 'test_cat_2', 'description',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', " + time + ");",
- "insert into DBS values(2, 'my db',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role',
'test_cat_2', " + time + ");",
- "insert into DBS values(7, 'db with bad port',
'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'test_cat_2', " + time +
");",
+ "insert into DBS values(2, 'my db',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role',
'test_cat_2', " + time + ", '');",
+ "insert into DBS values(7, 'db with bad port',
'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'test_cat_2', " + time +
", '');",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
@@ -416,10 +416,10 @@ public class TestSchemaToolForMetastore {
"delete from TBLS;",
"delete from SDS;",
"delete from DBS;",
- "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb',
'mydb', 'public', 'role', 'test_cat_2', " + time + ");",
- "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '',
'public', 'role', 'test_cat_2', " + time + ");",
- "insert into DBS values(6, 'db with bad port',
'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'test_cat_2', " + time +
");",
- "insert into DBS values(7, 'db with bad port',
'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'test_cat_2', " + time +
");",
+ "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb',
'mydb', 'public', 'role', 'test_cat_2', " + time + ", '');",
+ "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '',
'public', 'role', 'test_cat_2', " + time + ", '');",
+ "insert into DBS values(6, 'db with bad port',
'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'test_cat_2', " + time + ",
'');",
+ "insert into DBS values(7, 'db with bad port',
'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'test_cat_2', " + time +
", '');",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED)
values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",
@@ -519,7 +519,7 @@ public class TestSchemaToolForMetastore {
String time = String.valueOf(System.currentTimeMillis()/1000);
String[] scripts = new String[] {
"insert into CTLGS values (2, 'mycat', 'my description',
'hdfs://myhost.com:8020/user/hive/warehouse', " + time + ");",
- "insert into DBS values(2, 'my db',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role',
'mycat', " + time + ");",
+ "insert into DBS values(2, 'my db',
'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role',
'mycat', " + time + ", '');",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID)
values
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);",
"insert into
TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED)
values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');",