Repository: hive Updated Branches: refs/heads/master 510960268 -> ba8a99e11
http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 61ac483..fe2d758 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -20,9 +20,15 @@ package org.apache.hadoop.hive.metastore.client; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -34,11 +40,15 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import 
org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -50,10 +60,20 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. 
Testing the Table related functions for metadata @@ -78,7 +98,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { public static void startMetaStores() { Map<MetastoreConf.ConfVars, String> msConf = new HashMap<MetastoreConf.ConfVars, String>(); // Enable trash, so it can be tested - Map<String, String> extraConf = new HashMap<String, String>(); + Map<String, String> extraConf = new HashMap<>(); extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) startMetaStores(msConf, extraConf); @@ -101,74 +121,62 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { testTables[0] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_view") .addCol("test_col", "int") .setType("VIRTUAL_VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_partitioned_table") .addCol("test_col1", "int") .addCol("test_col2", "int") .addPartCol("test_part_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("external_table_for_test") .addCol("test_col", "int") .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir") .addTableParam("EXTERNAL", "TRUE") .setType("EXTERNAL_TABLE") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, 
metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Create partitions for the partitioned table for(int i=0; i < 3; i++) { - Partition partition = - new PartitionBuilder() - .fromTable(testTables[3]) + new PartitionBuilder() + .inTable(testTables[3]) .addValue("a" + i) - .build(); - client.add_partition(partition); + .addToTable(client, metaStore.getConf()); } // Add data files to the partitioned table List<Partition> partitions = client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } @@ -177,7 +185,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); if (testTables[i].getPartitionKeys().isEmpty()) { if (testTables[i].getSd().getLocation() != null) { - Path dataFile = new Path(testTables[i].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } } @@ -199,7 +207,6 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { /** * This test creates and queries a table and then drops it. 
Good for testing the happy path - * @throws Exception */ @Test public void testCreateGetDeleteTable() throws Exception { @@ -237,7 +244,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { public void testCreateTableDefaultValues() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List<FieldSchema> cols = new ArrayList<FieldSchema>(); + List<FieldSchema> cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -309,7 +316,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List<FieldSchema> cols = new ArrayList<FieldSchema>(); + List<FieldSchema> cols = new ArrayList<>(); table.setDbName(OTHER_DATABASE); table.setTableName("test_table_2"); @@ -329,7 +336,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { public void testCreateTableDefaultValuesView() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List<FieldSchema> cols = new ArrayList<FieldSchema>(); + List<FieldSchema> cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -343,7 +350,6 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views - StorageDescriptor createdSd = createdTable.getSd(); Assert.assertNull("Storage descriptor location should be null", createdTable.getSd().getLocation()); } @@ -390,10 +396,9 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { private Table getNewTable() throws MetaException { return new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_with_invalid_sd") 
.addCol("test_col", "int") - .build(); + .build(metaStore.getConf()); } @Test(expected = MetaException.class) @@ -604,7 +609,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { @Test public void testTruncateTableUnpartitioned() throws Exception { // Unpartitioned table - Path dataFile = new Path(testTables[0].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile"); client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null); Assert.assertTrue("Location should exist", metaStore.isPathExists(new Path(testTables[0].getSd().getLocation()))); @@ -615,7 +620,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { @Test public void testTruncateTablePartitioned() throws Exception { // Partitioned table - delete specific partitions a0, a2 - List<String> partitionsToDelete = new ArrayList<String>(); + List<String> partitionsToDelete = new ArrayList<>(); partitionsToDelete.add("test_part_col=a0"); partitionsToDelete.add("test_part_col=a2"); client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(), @@ -626,7 +631,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); if (partition.getValues().contains("a0") || partition.getValues().contains("a2")) { // a0, a2 should be empty Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile)); @@ -648,7 +653,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) 
{ - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); Assert.assertFalse("Every dataFile should be removed", metaStore.isPathExists(dataFile)); } } @@ -704,7 +709,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed @@ -731,7 +736,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getDbName() + ".db/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed, other data should be the same @@ -755,7 +760,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("The location should contain data", 
metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -782,7 +787,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -833,6 +838,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { Assert.assertEquals("The table data should be the same", newTable, alteredTable); } + @SuppressWarnings("deprecation") @Test public void testAlterTableCascade() throws Exception { Table originalTable = partitionedTable; @@ -1069,6 +1075,255 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { } } + @Test + public void tablesInOtherCatalogs() throws TException, URISyntaxException { + String catName = "create_etc_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + 
.addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + // Make one have a non-standard location + if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])); + // Make one partitioned + if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME); + // Make one a materialized view + if (i == 3) { + builder.setType(TableType.MATERIALIZED_VIEW.name()) + .setRewriteEnabled(true) + .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); + } + client.createTable(builder.build(metaStore.getConf())); + } + + // Add partitions for the partitioned table + String[] partVals = new String[3]; + Table partitionedTable = client.getTable(catName, dbName, tableNames[2]); + for (int i = 0; i < partVals.length; i++) { + partVals[i] = "part" + i; + new PartitionBuilder() + .inTable(partitionedTable) + .addValue(partVals[i]) + .addToTable(client, metaStore.getConf()); + } + + // Get tables, make sure the locations are correct + for (int i = 0; i < tableNames.length; i++) { + Table t = client.getTable(catName, dbName, tableNames[i]); + Assert.assertEquals(catName, t.getCatName()); + String expectedLocation = (i < 1) ? 
+ new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() + : + new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", + tableNames[i]).toURI().toString(); + + Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/"); + File dir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + } + + // Make sure getting table in the wrong catalog does not work + try { + Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // test getAllTables + Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName)); + Assert.assertEquals(tableNames.length, fetchedNames.size()); + for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName)); + + fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME)); + for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName)); + + // test getMaterializedViewsForRewriting + List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName); + Assert.assertEquals(1, materializedViews.size()); + Assert.assertEquals(tableNames[3], materializedViews.get(0)); + + fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME)); + Assert.assertFalse(fetchedNames.contains(tableNames[3])); + + // test getTableObjectsByName + List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(2, fetchedTables.size()); + Collections.sort(fetchedTables); + Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName()); + Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName()); + + fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(0, 
fetchedTables.size()); + + // Test altering the table + Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + t.getParameters().put("test", "test"); + client.alter_table(catName, dbName, tableNames[0], t); + t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + Assert.assertEquals("test", t.getParameters().get("test")); + + // Alter a table in the wrong catalog + try { + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + Assert.fail(); + } catch (InvalidOperationException e) { + // NOP + } + + // Update the metadata for the materialized view + CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata(); + cm.addToTablesUsed(dbName + "." + tableNames[1]); + client.updateCreationMetadata(catName, dbName, tableNames[3], cm); + + List<String> partNames = new ArrayList<>(); + for (String partVal : partVals) partNames.add("pcol1=" + partVal); + // Truncate a table + client.truncateTable(catName, dbName, tableNames[0], partNames); + + // Truncate a table in the wrong catalog + try { + client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Drop a table from the wrong catalog + try { + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Should ignore the failure + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true); + + // Have to do this in reverse order so that we drop the materialized view first. 
+ for (int i = tableNames.length - 1; i >= 0; i--) { + t = client.getTable(catName, dbName, tableNames[i]); + File tableDir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + + if (tableNames[i].equalsIgnoreCase(tableNames[0])) { + client.dropTable(catName, dbName, tableNames[i], false, false); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + } else { + client.dropTable(catName, dbName, tableNames[i]); + Assert.assertFalse(tableDir.exists()); + } + } + Assert.assertEquals(0, client.getAllTables(catName, dbName).size()); + } + + @Test(expected = InvalidObjectException.class) + public void createTableInBogusCatalog() throws TException { + new TableBuilder() + .setCatName("nosuch") + .setTableName("doomed") + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void getTableInBogusCatalog() throws TException { + client.getTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName()); + } + + @Test + public void getAllTablesInBogusCatalog() throws TException { + List<String> names = client.getAllTables("nosuch", testTables[0].getDbName()); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = UnknownDBException.class) + public void getTableObjectsByNameBogusCatalog() throws TException { + client.getTableObjectsByName("nosuch", testTables[0].getDbName(), + Arrays.asList(testTables[0].getTableName(), testTables[1].getTableName())); + } + + @Test + public void getMaterializedViewsInBogusCatalog() throws TException { + List<String> names = client.getMaterializedViewsForRewriting("nosuch", DEFAULT_DATABASE_NAME); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = InvalidOperationException.class) + public void alterTableBogusCatalog() throws TException { + Table t = testTables[0].deepCopy(); + 
t.getParameters().put("a", "b"); + client.alter_table("nosuch", t.getDbName(), t.getTableName(), t); + } + + @Test(expected = InvalidOperationException.class) + public void moveTablesBetweenCatalogsOnAlter() throws TException { + String catName = "move_table_between_catalogs_on_alter"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "a_db"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "non_movable_table"; + Table before = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + Table after = before.deepCopy(); + after.setCatName(DEFAULT_CATALOG_NAME); + client.alter_table(catName, dbName, tableName, after); + + } + + @Test + public void truncateTableBogusCatalog() throws TException { + try { + List<String> partNames = client.listPartitionNames(partitionedTable.getDbName(), + partitionedTable.getTableName(), (short) -1); + client.truncateTable("nosuch", partitionedTable.getDbName(), partitionedTable.getTableName(), + partNames); + Assert.fail(); // For reasons I don't understand and am too lazy to debug at the moment the + // NoSuchObjectException gets swallowed by a TApplicationException in remote mode. + } catch (TApplicationException|NoSuchObjectException e) { + //NOP + } + } + + @Test(expected = NoSuchObjectException.class) + public void dropTableBogusCatalog() throws TException { + client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false); + } + /** * Creates a Table with all of the parameters set. 
The temporary table is available only on HS2 * server, so do not use it. @@ -1105,6 +1360,6 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest { .addSerdeParam("serdeParam", "serdeParamValue") .addTableParam("tableParam", "tableParamValue") .addStorageDescriptorParam("sdParam", "sdParamValue") - .build(); + .build(metaStore.getConf()); } } http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java index a1716ce..0de7f87 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import 
org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -39,7 +45,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. Testing the Table related functions for metadata @@ -78,7 +88,7 @@ public class TestTablesGetExists extends MetaStoreClientTest { .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -86,14 +96,14 @@ public class TestTablesGetExists extends MetaStoreClientTest { .setTableName("test_view") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -101,39 +111,35 @@ public class TestTablesGetExists extends MetaStoreClientTest { .setTableName("test_table_to_find_2") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_hidden_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) 
.setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[6] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table_to_find_3") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -153,12 +159,12 @@ public class TestTablesGetExists extends MetaStoreClientTest { Table table = testTables[0]; // Test in upper case - Table resultUpper = client.getTable(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase()); + Table resultUpper = client.getTable(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase()); Assert.assertEquals("Comparing tables", table, resultUpper); // Test in mixed case - Table resultMix = client.getTable("DeFaUlt", "tEsT_TabLE"); + Table resultMix = client.getTable("hIvE", "DeFaUlt", "tEsT_TabLE"); Assert.assertEquals("Comparing tables", table, resultMix); } @@ -222,7 +228,7 @@ public class TestTablesGetExists extends MetaStoreClientTest { } // Drop one table, see what remains - client.dropTable(testTables[1].getDbName(), testTables[1].getTableName()); + client.dropTable(testTables[1].getCatName(), testTables[1].getDbName(), testTables[1] .getTableName()); tables = client.getAllTables(DEFAULT_DATABASE); Assert.assertEquals("All tables size", 4, tables.size()); for(Table table : testTables) { @@ -274,7 +280,7 @@ public class TestTablesGetExists extends MetaStoreClientTest { Assert.assertEquals("No such table size", 0, tables.size()); // Look for tables without pattern - 
tables = client.getTables(DEFAULT_DATABASE, null); + tables = client.getTables(DEFAULT_DATABASE, (String)null); Assert.assertEquals("No such functions size", 5, tables.size()); // Look for tables with empty pattern @@ -305,8 +311,9 @@ public class TestTablesGetExists extends MetaStoreClientTest { // Using the second table, since a table called "test_table" exists in both databases Table table = testTables[1]; - Assert.assertTrue("Table exists", client.tableExists(table.getDbName(), table.getTableName())); - Assert.assertFalse("Table not exists", client.tableExists(table.getDbName(), + Assert.assertTrue("Table exists", client.tableExists(table.getCatName(), table.getDbName(), + table.getTableName())); + Assert.assertFalse("Table not exists", client.tableExists(table.getCatName(), table.getDbName(), "non_existing_table")); // No such database @@ -323,11 +330,11 @@ public class TestTablesGetExists extends MetaStoreClientTest { Table table = testTables[0]; // Test in upper case - Assert.assertTrue("Table exists", client.tableExists(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase())); + Assert.assertTrue("Table exists", client.tableExists(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase())); // Test in mixed case - Assert.assertTrue("Table exists", client.tableExists("DeFaUlt", "tEsT_TabLE")); + Assert.assertTrue("Table exists", client.tableExists("hIVe", "DeFaUlt", "tEsT_TabLE")); } @Test @@ -360,7 +367,7 @@ public class TestTablesGetExists extends MetaStoreClientTest { @Test public void testGetTableObjectsByName() throws Exception { - List<String> tableNames = new ArrayList<String>(); + List<String> tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); tableNames.add(testTables[1].getTableName()); List<Table> tables = client.getTableObjectsByName(DEFAULT_DATABASE, tableNames); @@ -374,17 +381,17 @@ public class TestTablesGetExists extends MetaStoreClientTest { } // Test 
with empty array - tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList<String>()); + tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList<>()); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists - tableNames = new ArrayList<String>(); + tableNames = new ArrayList<>(); tableNames.add("no_such_table"); - client.getTableObjectsByName(testTables[0].getDbName(), tableNames); + client.getTableObjectsByName(testTables[0].getCatName(), testTables[0].getDbName(), tableNames); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists in the given database - tableNames = new ArrayList<String>(); + tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName(OTHER_DATABASE, tableNames); Assert.assertEquals("Found tables", 0, tables.size()); @@ -396,23 +403,24 @@ public class TestTablesGetExists extends MetaStoreClientTest { Table table = testTables[0]; // Test in upper case - List<String> tableNames = new ArrayList<String>(); + List<String> tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName().toUpperCase()); - List<Table> tables = client.getTableObjectsByName(table.getDbName().toUpperCase(), tableNames); + List<Table> tables = client.getTableObjectsByName(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); // Test in mixed case - tableNames = new ArrayList<String>(); + tableNames = new ArrayList<>(); tableNames.add("tEsT_TabLE"); - tables = client.getTableObjectsByName("DeFaUlt", tableNames); + tables = client.getTableObjectsByName("HiVe", "DeFaUlt", tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); } @Test(expected = UnknownDBException.class) public void 
testGetTableObjectsByNameNoSuchDatabase() throws Exception { - List<String> tableNames = new ArrayList<String>(); + List<String> tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName("no_such_database", tableNames); @@ -421,7 +429,7 @@ public class TestTablesGetExists extends MetaStoreClientTest { @Test public void testGetTableObjectsByNameNullDatabase() throws Exception { try { - List<String> tableNames = new ArrayList<String>(); + List<String> tableNames = new ArrayList<>(); tableNames.add(OTHER_DATABASE); client.getTableObjectsByName(null, tableNames); @@ -448,4 +456,55 @@ public class TestTablesGetExists extends MetaStoreClientTest { // Expected exception - Remote MetaStore } } + + // Tests for getTable in other catalogs are covered in TestTablesCreateDropAlterTruncate. + @Test + public void otherCatalog() throws TException { + String catName = "get_exists_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + Set<String> tables = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*")); + Assert.assertEquals(4, tables.size()); + for (String tableName : tableNames) Assert.assertTrue(tables.contains(tableName)); + + List<String> fetchedNames = client.getTables(catName, 
dbName, "*_3"); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[3], fetchedNames.get(0)); + + Assert.assertTrue("Table exists", client.tableExists(catName, dbName, tableNames[0])); + Assert.assertFalse("Table not exists", client.tableExists(catName, dbName, "non_existing_table")); + } + + @Test + public void getTablesBogusCatalog() throws TException { + Assert.assertEquals(0, client.getTables("nosuch", DEFAULT_DATABASE_NAME, "*_to_find_*").size()); + } + + @Test + public void tableExistsBogusCatalog() throws TException { + Assert.assertFalse(client.tableExists("nosuch", testTables[0].getDbName(), + testTables[0].getTableName())); + } } http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java index 7e4a59f..00e9104 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import 
org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -38,6 +44,8 @@ import org.junit.runners.Parameterized; import java.util.List; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + /** * Test class for IMetaStoreClient API. Testing the Table related functions for metadata * querying like getting one, or multiple tables, and table name lists. @@ -78,7 +86,7 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -88,7 +96,7 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Owner1") .setLastAccessTime(2000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() @@ -98,7 +106,7 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Owner2") .setLastAccessTime(1000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -108,7 +116,7 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Owner3") .setLastAccessTime(3000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() @@ -118,16 +126,16 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Tester") .setLastAccessTime(2500) .addTableParam("param1", "value4") - .build(); 
+ .create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("filter_test_table_5") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[6] = new TableBuilder() @@ -137,16 +145,12 @@ public class TestTablesList extends MetaStoreClientTest { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -268,4 +272,45 @@ public class TestTablesList extends MetaStoreClientTest { public void testListTableNamesByFilterInvalidFilter() throws Exception { client.listTableNamesByFilter(DEFAULT_DATABASE, "invalid filter", (short)-1); } + + @Test + public void otherCatalogs() throws TException { + String catName = "list_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new 
TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + if (i == 0) builder.addTableParam("the_key", "the_value"); + builder.create(client, metaStore.getConf()); + } + + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List<String> fetchedNames = client.listTableNamesByFilter(catName, dbName, filter, (short)-1); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[0], fetchedNames.get(0)); + } + + @Test(expected = UnknownDBException.class) + public void listTablesBogusCatalog() throws TException { + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List<String> fetchedNames = client.listTableNamesByFilter("", DEFAULT_DATABASE_NAME, + filter, (short)-1); + } } http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java new file mode 100644 index 0000000..8eb18ec --- /dev/null +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLUniqueConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + 
+@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestUniqueConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestUniqueConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, 
metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed unique constraint in default catalog and database + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", fetched.get(0).getUk_name()); + String table0PkName =
fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop the unique constraint + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addUniqueConstraint(uc); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new
UniqueConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcuc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + 
.setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", fetched.get(0).getUk_name()); + String tablePkName = fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column 
unnamed unique constraint in default catalog and database + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + try { + uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List<SQLUniqueConstraint> uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List<SQLUniqueConstraint> uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List<SQLUniqueConstraint> uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java ---------------------------------------------------------------------- diff --git
a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java index f2c8fe4..709085d 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java @@ -166,4 +166,8 @@ public abstract class AbstractMetaStoreService { */ public void stop() { } + + public Configuration getConf() { + return configuration; + } } http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java index 409ddc5..fa7057f 100644 --- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java +++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java @@ -99,7 +99,8 @@ public class TestSchemaToolForMetastore { // Test valid case String[] scripts = new String[] { "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into CTLGS values(37, 'mycat', 'my description', 'hdfs://tmp');", + "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; File scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -111,7 +112,7 @@ public class TestSchemaToolForMetastore { "delete from SEQUENCE_TABLE;", 
"delete from DBS;", "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -217,6 +218,7 @@ public class TestSchemaToolForMetastore { public void testSchemaInit() throws Exception { IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); + LOG.info("Starting testSchemaInit"); schemaTool.doInit(metastoreSchemaInfo.getHiveSchemaVersion()); schemaTool.verifySchemaVersion(); } @@ -296,11 +298,18 @@ public class TestSchemaToolForMetastore { System.setOut(outPrintStream); // Upgrade schema from 0.7.0 to latest - schemaTool.doUpgrade("1.2.0"); + Exception caught = null; + try { + schemaTool.doUpgrade("1.2.0"); + } catch (Exception e) { + caught = e; + } LOG.info("stdout is " + stdout.toString()); LOG.info("stderr is " + stderr.toString()); + if (caught != null) Assert.fail(caught.getMessage()); + // Verify that the schemaTool ran pre-upgrade scripts and ignored errors Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript)); Assert.assertTrue(stderr.toString().contains("foo")); @@ -329,8 +338,9 @@ public class TestSchemaToolForMetastore { // Test valid case String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'mydescription', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db 
with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", @@ -357,10 +367,10 @@ public class TestSchemaToolForMetastore { "delete from TBLS;", "delete from SDS;", "delete from DBS;", - "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role');", - "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role');", + "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role', 'mycat');", + "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'mycat');", "insert into 
SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", @@ -457,7 +467,8 @@ public class TestSchemaToolForMetastore { // Insert the records in DB to simulate a hive table private void createTestHiveTableSchemas() throws IOException { String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'my description', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values 
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", http://git-wip-us.apache.org/repos/asf/hive/blob/ba8a99e1/standalone-metastore/src/test/resources/log4j2.properties ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/test/resources/log4j2.properties b/standalone-metastore/src/test/resources/log4j2.properties index db8a550..365687e 100644 --- a/standalone-metastore/src/test/resources/log4j2.properties +++ b/standalone-metastore/src/test/resources/log4j2.properties @@ -8,64 +8,28 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
-status = INFO -name = MetastoreLog4j2 -packages = org.apache.hadoop.hive.metastore +name=PropertiesConfig +property.filename = logs +appenders = console -# list of properties -property.metastore.log.level = INFO -property.metastore.root.logger = DRFA -property.metastore.log.dir = ${sys:java.io.tmpdir}/${sys:user.name} -property.metastore.log.file = metastore.log -property.hive.perflogger.log.level = INFO - -# list of all appenders -appenders = console, DRFA - -# console appender appender.console.type = Console -appender.console.name = console -appender.console.target = SYSTEM_ERR +appender.console.name = STDOUT appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n - -# daily rolling file appender -appender.DRFA.type = RollingRandomAccessFile -appender.DRFA.name = DRFA -appender.DRFA.fileName = ${sys:metastore.log.dir}/${sys:metastore.log.file} -# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session -appender.DRFA.filePattern = ${sys:metastore.log.dir}/${sys:metastore.log.file}.%d{yyyy-MM-dd} -appender.DRFA.layout.type = PatternLayout -appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n -appender.DRFA.policies.type = Policies -appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy -appender.DRFA.policies.time.interval = 1 -appender.DRFA.policies.time.modulate = true -appender.DRFA.strategy.type = DefaultRolloverStrategy -appender.DRFA.strategy.max = 30 - -# list of all loggers -loggers = DataNucleus, Datastore, JPOX, PerfLogger - -logger.DataNucleus.name = DataNucleus -logger.DataNucleus.level = INFO - -logger.Datastore.name = Datastore -logger.Datastore.level = INFO - -logger.JPOX.name = JPOX -logger.JPOX.level = INFO +appender.console.layout.pattern = [%-5level] %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %c{1} - %msg%n -logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger -logger.PerfLogger.level = 
${sys:hive.perflogger.log.level} +loggers=file +logger.file.name=guru.springframework.blog.log4j2properties +logger.file.level = debug +logger.file.appenderRefs = file +logger.file.appenderRef.file.ref = LOGFILE -# root logger -rootLogger.level = ${sys:metastore.log.level} -rootLogger.appenderRefs = root -rootLogger.appenderRef.root.ref = ${sys:metastore.root.logger} +rootLogger.level = debug +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT
