hive git commit: HIVE-20700 : Add config to disable rollup for druid tables (Nishant Bangarwa via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master c29038af9 -> 2ff9c5229


HIVE-20700 : Add config to disable rollup for druid tables (Nishant Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ff9c522
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ff9c522
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ff9c522

Branch: refs/heads/master
Commit: 2ff9c5229d1470afab12258862f801cf14dc2a2d
Parents: c29038a
Author: Nishant
Authored: Sat Oct 6 02:46:25 2018 +0530
Committer: Ashutosh Chauhan
Committed: Tue Oct 9 19:56:06 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/Constants.java |  1 +
 .../org/apache/hadoop/hive/conf/HiveConf.java  |  1 +
 .../hive/druid/DruidStorageHandlerUtils.java   |  4
 .../clientpositive/druidmini_test_insert.q     |  9 +++
 .../druid/druidmini_test_insert.q.out          | 25
 5 files changed, 40 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2ff9c522/common/src/java/org/apache/hadoop/hive/conf/Constants.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/Constants.java b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
index 4badfa3..1190679 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/Constants.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/Constants.java
@@ -32,6 +32,7 @@ public class Constants {
       "org.apache.hadoop.hive.druid.io.DruidOutputFormat";
   public static final String DRUID_DATA_SOURCE = "druid.datasource";
   public static final String DRUID_SEGMENT_GRANULARITY = "druid.segment.granularity";
+  public static final String DRUID_ROLLUP = "druid.rollup";
   public static final String DRUID_QUERY_GRANULARITY = "druid.query.granularity";
   public static final String DRUID_TARGET_SHARDS_PER_GRANULARITY =
       "druid.segment.targetShardsPerGranularity";

http://git-wip-us.apache.org/repos/asf/hive/blob/2ff9c522/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index d0adc35..cc6239c 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2755,6 +2755,7 @@ public class HiveConf extends Configuration {
     HIVE_DRUID_BASE_PERSIST_DIRECTORY("hive.druid.basePersistDirectory", "",
         "Local temporary directory used to persist intermediate indexing state, will default to JVM system property java.io.tmpdir."
     ),
+    HIVE_DRUID_ROLLUP("hive.druid.rollup", true, "Whether to rollup druid rows or not."),
     DRUID_SEGMENT_DIRECTORY("hive.druid.storage.storageDirectory", "/druid/segments"
         , "druid deep storage location."),
     DRUID_METADATA_BASE("hive.druid.metadata.base", "druid", "Default prefix for metadata tables"),

http://git-wip-us.apache.org/repos/asf/hive/blob/2ff9c522/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index e67de89..c3e7e5d 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -827,12 +827,16 @@ public final class DruidStorageHandlerUtils {
         tableProperties.getProperty(Constants.DRUID_SEGMENT_GRANULARITY) != null ?
             tableProperties.getProperty(Constants.DRUID_SEGMENT_GRANULARITY) :
             HiveConf.getVar(configuration, HiveConf.ConfVars.HIVE_DRUID_INDEXING_GRANULARITY);
+    final boolean rollup = tableProperties.getProperty(Constants.DRUID_ROLLUP) != null ?
+        Boolean.parseBoolean(tableProperties.getProperty(Constants.DRUID_ROLLUP)) :
+        HiveConf.getBoolVar(configuration, HiveConf.ConfVars.HIVE_DRUID_ROLLUP);
     return new UniformGranularitySpec(
         Granularity.fromString(segmentGranularity),
         Granularity.fromString(
             tableProperties.getProperty(Constants.DRUID_QUERY_GRANULARITY) == null ?
                 "NONE" : tableProperties.getProperty(Constants.DRUID_QUERY_GRANULARITY)),
+        rollup,
        null
     );
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/2ff9c522/ql/src/test/queries/clientpositive/druidmini_test_insert.q
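The patch resolves the rollup flag with table-property-over-global-config precedence: the per-table "druid.rollup" property wins, otherwise the new "hive.druid.rollup" setting (default true) applies. A minimal standalone sketch of that precedence rule, using plain java.util.Properties and a boolean default in place of Hive's HiveConf plumbing (the resolveRollup helper is illustrative, not part of the patch):

import java.util.Properties;

public class RollupResolution {
  static final String DRUID_ROLLUP = "druid.rollup";

  // Hypothetical helper mirroring the patch's precedence: a per-table
  // property wins; otherwise fall back to the global default (the patch
  // reads it from HiveConf.ConfVars.HIVE_DRUID_ROLLUP, default true).
  static boolean resolveRollup(Properties tableProperties, boolean globalDefault) {
    String tableValue = tableProperties.getProperty(DRUID_ROLLUP);
    return tableValue != null ? Boolean.parseBoolean(tableValue) : globalDefault;
  }

  public static void main(String[] args) {
    Properties tbl = new Properties();
    System.out.println(resolveRollup(tbl, true));  // true: falls back to the global default
    tbl.setProperty(DRUID_ROLLUP, "false");
    System.out.println(resolveRollup(tbl, true));  // false: the table property overrides
  }
}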
hive git commit: HIVE-20698 : Add better message for NPE when inserting rows with null timestamp to druid (Nishant Bangarwa via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master 9d5222169 -> c29038af9


HIVE-20698 : Add better message for NPE when inserting rows with null timestamp to druid (Nishant Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c29038af
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c29038af
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c29038af

Branch: refs/heads/master
Commit: c29038af9bc237bc82b83abb4f1370017a8cd379
Parents: 9d52221
Author: Nishant
Authored: Sat Oct 6 02:01:00 2018 +0530
Committer: Ashutosh Chauhan
Committed: Tue Oct 9 19:37:36 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/druid/serde/DruidSerDe.java     |  8 +++--
 .../hadoop/hive/druid/serde/TestDruidSerDe.java | 34
 2 files changed, 40 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c29038af/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
index 946a075..cf37e37 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/serde/DruidSerDe.java
@@ -357,9 +357,13 @@ import static org.joda.time.format.ISODateTimeFormat.dateOptionalTimeParser;
     assert values.size() > granularityFieldIndex;
     Preconditions.checkArgument(
         fields.get(granularityFieldIndex).getFieldName().equals(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME));
-    value.put(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME,
+
+    Timestamp timestamp =
         ((TimestampObjectInspector) fields.get(granularityFieldIndex).getFieldObjectInspector())
-            .getPrimitiveJavaObject(values.get(granularityFieldIndex)).toEpochMilli());
+            .getPrimitiveJavaObject(values.get(granularityFieldIndex));
+    Preconditions.checkNotNull(timestamp, "Timestamp column cannot have null value");
+    value.put(Constants.DRUID_TIMESTAMP_GRANULARITY_COL_NAME, timestamp.toEpochMilli());
+
     if (values.size() == columns.length + 2) {
       // Then partition number if any.
       final int partitionNumPos = granularityFieldIndex + 1;

http://git-wip-us.apache.org/repos/asf/hive/blob/c29038af/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
index 8b6c890..acde239 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java
@@ -74,6 +74,7 @@ import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
 
 import com.fasterxml.jackson.core.JsonParseException;
@@ -90,6 +91,7 @@ import io.druid.query.Result;
 import io.druid.query.select.SelectResultValue;
 import io.druid.query.timeseries.TimeseriesResultValue;
 import io.druid.query.topn.TopNResultValue;
+import org.junit.rules.ExpectedException;
 
 /**
  * Basic tests for Druid SerDe. The examples are taken from Druid 0.9.1.1
@@ -860,6 +862,38 @@ public class TestDruidSerDe {
     serializeObject(tbl, serDe, ROW_OBJECT, DRUID_WRITABLE);
   }
 
+  @Rule
+  public ExpectedException expectedEx = ExpectedException.none();
+
+  @Test
+  public void testDruidObjectSerializerwithNullTimestamp()
+      throws Exception {
+    // Create, initialize, and test the SerDe
+    DruidSerDe serDe = new DruidSerDe();
+    Configuration conf = new Configuration();
+    Properties tbl;
+    // Mixed source (all types)
+    tbl = createPropertiesSource(COLUMN_NAMES, COLUMN_TYPES);
+    SerDeUtils.initializeSerDe(serDe, conf, tbl, null);
+    Object[] row = new Object[] {
+        null,
+        new Text("dim1_val"),
+        new HiveCharWritable(new HiveChar("dim2_v", 6)),
+        new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
+        new DoubleWritable(10669.3D),
+        new FloatWritable(10669.45F),
+        new LongWritable(1113939),
+        new IntWritable(1112123),
+        new ShortWritable((short) 12),
+        new ByteWritable((byte) 0),
+        null // granularity
+    };
+    expectedEx.expect(NullPointerException.class);
+    expectedEx.expectMessage("Timestamp column cannot have null value");
+    // should fail as timestamp
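The fix itself boils down to checking the nullable timestamp before dereferencing it, so the NPE carries an actionable message instead of surfacing bare from the toEpochMilli() call. A self-contained sketch of that pattern using Guava's Preconditions, as the patch does; note the patch works with Hive's own Timestamp type and toEpochMilli(), while this sketch substitutes java.sql.Timestamp and getTime(), and the toEpochMillis helper name is illustrative only:

import com.google.common.base.Preconditions;
import java.sql.Timestamp;

public class NullTimestampCheck {
  // Illustrative stand-in for the SerDe's extracted timestamp value.
  static long toEpochMillis(Timestamp ts) {
    // Without this check, a null ts would throw a bare NullPointerException
    // from ts.getTime() with no hint about which column was at fault.
    Preconditions.checkNotNull(ts, "Timestamp column cannot have null value");
    return ts.getTime();
  }

  public static void main(String[] args) {
    System.out.println(toEpochMillis(Timestamp.valueOf("2018-10-06 02:01:00")));
    try {
      toEpochMillis(null);
    } catch (NullPointerException e) {
      System.out.println(e.getMessage()); // "Timestamp column cannot have null value"
    }
  }
}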
hive git commit: HIVE-20686 : Sync query IDs between druid and Hive (Nishant Bangarwa via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master ddf765ecb -> 9d5222169


HIVE-20686 : Sync query IDs between druid and Hive (Nishant Bangarwa via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9d522216
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9d522216
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9d522216

Branch: refs/heads/master
Commit: 9d522216972598a38b6750eb9b5d4af1b79cd6ba
Parents: ddf765e
Author: Nishant
Authored: Thu Oct 4 00:44:39 2018 +0530
Committer: Ashutosh Chauhan
Committed: Tue Oct 9 19:35:26 2018 -0700

----------------------------------------------------------------------
 .../druid/io/DruidQueryBasedInputFormat.java    | 21 +++-
 .../TestHiveDruidQueryBasedInputFormat.java     |  8
 2 files changed, 16 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9d522216/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index 9266fae..1c989c1 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hive.druid.io;
 
-import com.fasterxml.jackson.core.JsonParseException;
 import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.JsonMappingException;
 import com.google.common.collect.Lists;
 import io.druid.java.util.http.client.Request;
 import io.druid.query.BaseQuery;
@@ -111,6 +109,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidWritable>
-        deserializedQuery = DruidStorageHandlerUtils.JSON_MAPPER.readValue(
-        druidQuery, BaseQuery.class);
-    return DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(deserializedQuery);
+  private static String withQueryId(String druidQuery, String queryId) throws IOException {
+    Query<?> queryWithId =
+        DruidStorageHandlerUtils.JSON_MAPPER.readValue(druidQuery, BaseQuery.class).withId(queryId);
+    return DruidStorageHandlerUtils.JSON_MAPPER.writeValueAsString(queryWithId);
+  }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/9d522216/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
index bb43d51..7da3a30 100644
--- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
+++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestHiveDruidQueryBasedInputFormat.java
@@ -49,7 +49,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
       + "\"granularity\":\"DAY\","
       + "\"aggregations\":[],"
       + "\"postAggregations\":[],"
-      + "\"context\":null}, [localhost:8082]}]";
+      + "\"context\":{\"queryId\":\"\"}}, [localhost:8082]}]";
 
   private static final String TOPN_QUERY =
       "{  \"queryType\": \"topN\", "
@@ -86,7 +86,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
       + "\"aggregations\":[{\"type\":\"longSum\",\"name\":\"count\",\"fieldName\":\"count\",\"expression\":null},"
       + "{\"type\":\"doubleSum\",\"name\":\"some_metric\",\"fieldName\":\"some_metric\",\"expression\":null}],"
       + "\"postAggregations\":[],"
-      + "\"context\":null,"
+      + "\"context\":{\"queryId\":\"\"},"
       + "\"descending\":false}, [localhost:8082]}]";
 
   private static final String GROUP_BY_QUERY =
@@ -119,7 +119,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
       + "\"having\":null,"
       + "\"limitSpec\":{\"type\":\"default\",\"columns\":[{\"dimension\":\"country\",\"direction\":\"ascending\",\"dimensionOrder\":{\"type\":\"lexicographic\"}},"
       + "{\"dimension\":\"data_transfer\",\"direction\":\"ascending\",\"dimensionOrder\":{\"type\":\"lexicographic\"}}],\"limit\":5000},"
-      + "\"context\":null,"
+      + "\"context\":{\"queryId\":\"\"},"
       + "\"descending\":false}, [localhost:8082]}]";
 
   private static final String SELECT_QUERY =
@@ -149,7 +149,7 @@ public class TestHiveDruidQueryBasedInputFormat extends TestCase {
       +
hive git commit: HIVE-20684 : Make compute stats work for Druid tables (Nishant Bangarwa via Ashutosh Chauhan)
Repository: hive
Updated Branches:
  refs/heads/master b054174bb -> ddf765ecb


HIVE-20684 : Make compute stats work for Druid tables (Nishant Bangarwa via Ashutosh Chauhan)

test plan changes due to removal of virtual columns

Signed-off-by: Ashutosh Chauhan


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ddf765ec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ddf765ec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ddf765ec

Branch: refs/heads/master
Commit: ddf765ecbf44888dee2ed95dc8474b4cca84fcd5
Parents: b054174
Author: Nishant
Authored: Thu Oct 4 00:37:53 2018 +0530
Committer: Ashutosh Chauhan
Committed: Tue Oct 9 19:32:02 2018 -0700

----------------------------------------------------------------------
 .../hive/druid/DruidStorageHandlerUtils.java    |   3 +-
 .../druid/io/DruidQueryBasedInputFormat.java    |  10 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |  14 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  14 +-
 .../ql/udf/generic/GenericUDAFComputeStats.java |   1 +
 .../clientpositive/druidmini_test_insert.q      |   2 +
 .../druid/druidmini_expressions.q.out           | 156 ---
 .../clientpositive/druid/druidmini_mv.q.out     |   8 +-
 .../druid/druidmini_test_insert.q.out           |  10 ++
 .../llap/external_jdbc_table2.q.out             |  18 +--
 .../results/clientpositive/llap/sysdb.q.out     |   2 +-
 .../PrimitiveObjectInspectorUtils.java          |   4 +
 12 files changed, 122 insertions(+), 120 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ddf765ec/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
index b9eb367..e67de89 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandlerUtils.java
@@ -672,13 +672,14 @@ public final class DruidStorageHandlerUtils {
     );
   }
 
-  public static String createScanAllQuery(String dataSourceName) throws JsonProcessingException {
+  public static String createScanAllQuery(String dataSourceName, List<String> columns) throws JsonProcessingException {
     final ScanQuery.ScanQueryBuilder scanQueryBuilder = ScanQuery.newScanQueryBuilder();
     final List<Interval> intervals = Arrays.asList(DEFAULT_INTERVAL);
     ScanQuery scanQuery = scanQueryBuilder
         .dataSource(dataSourceName)
         .resultFormat(ScanQuery.RESULT_FORMAT_COMPACTED_LIST)
         .intervals(new MultipleIntervalSegmentSpec(intervals))
+        .columns(columns)
         .build();
     return JSON_MAPPER.writeValueAsString(scanQuery);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/ddf765ec/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
----------------------------------------------------------------------
diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
index f5009a2..9266fae 100644
--- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
+++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hive.druid.serde.DruidSelectQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidTimeseriesQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidTopNQueryRecordReader;
 import org.apache.hadoop.hive.druid.serde.DruidWritable;
+import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.JobConf;
@@ -114,6 +115,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidWritable>
         reader;
     final String druidQueryType = job.get(Constants.DRUID_QUERY_TYPE);
     if (druidQueryType == null) {
-      reader = new DruidSelectQueryRecordReader(); // By default
+      reader = new DruidScanQueryRecordReader(); // By default we use scan query as fallback.
       reader.initialize((HiveDruidSplit) split, job);
       return reader;
     }
@@ -307,7 +309,7 @@ public class DruidQueryBasedInputFormat extends InputFormat<NullWritable, DruidWritable>
     reader = getDruidQueryReader(druidQueryType);

http://git-wip-us.apache.org/repos/asf/hive/blob/ddf765ec/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
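The reworked createScanAllQuery now projects an explicit column list instead of letting the scan return every column of the datasource, which is what lets compute stats read only the columns it actually aggregates. A hedged usage sketch, assuming the druid-handler classes are on the classpath; the method signature comes from the diff above, while the datasource and column names are made up for illustration:

import org.apache.hadoop.hive.druid.DruidStorageHandlerUtils;
import java.util.Arrays;
import java.util.List;

public class ScanAllQueryUsage {
  public static void main(String[] args) throws Exception {
    // Project only the columns the stats computation needs, rather than
    // scanning every column of the Druid datasource.
    List<String> columns = Arrays.asList("page", "added");
    String scanQueryJson = DruidStorageHandlerUtils.createScanAllQuery("wikipedia", columns);
    System.out.println(scanQueryJson);
  }
}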
hive git commit: HIVE-20694: Additional unit tests for VectorizedOrcAcidRowBatchReader min max key evaluation (Saurabh Seth via Eugene Koifman)
Repository: hive
Updated Branches:
  refs/heads/master 8b7043626 -> b054174bb


HIVE-20694: Additional unit tests for VectorizedOrcAcidRowBatchReader min max key evaluation (Saurabh Seth via Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b054174b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b054174b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b054174b

Branch: refs/heads/master
Commit: b054174bb0eb8b692cafbb30194236fc75486e60
Parents: 8b70436
Author: Saurabh Seth
Authored: Tue Oct 9 16:02:25 2018 -0700
Committer: Eugene Koifman
Committed: Tue Oct 9 16:02:25 2018 -0700

----------------------------------------------------------------------
 .../TestVectorizedOrcAcidRowBatchReader.java    | 380 +++
 1 file changed, 380 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b054174b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 0a499b1..0b26879 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -18,9 +18,11 @@
 package org.apache.hadoop.hive.ql.io.orc;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
@@ -43,9 +45,12 @@ import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
+import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.orc.OrcConf;
+import org.apache.orc.StripeInformation;
 import org.apache.orc.TypeDescription;
 import org.junit.Before;
 import org.junit.Test;
@@ -67,6 +72,8 @@ public class TestVectorizedOrcAcidRowBatchReader {
   private Path root;
   private ObjectInspector inspector;
   private ObjectInspector originalInspector;
+  private ObjectInspector bigRowInspector;
+  private ObjectInspector bigOriginalRowInspector;
 
   public static class DummyRow {
     LongWritable field;
@@ -110,6 +117,49 @@ public class TestVectorizedOrcAcidRowBatchReader {
     }
   }
 
+  /**
+   * A larger Dummy row that can be used to write multiple stripes.
+   */
+  public static class BigRow {
+    BytesWritable field;
+    RecordIdentifier rowId;
+
+    BigRow(byte[] val) {
+      field = new BytesWritable(val);
+    }
+
+    BigRow(byte[] val, long rowId, long origTxn, int bucket) {
+      field = new BytesWritable(val);
+      bucket = BucketCodec.V1.encode(new AcidOutputFormat.Options(null).bucket(bucket));
+      this.rowId = new RecordIdentifier(origTxn, bucket, rowId);
+    }
+
+    static String getColumnNamesProperty() {
+      return "field";
+    }
+    static String getColumnTypesProperty() {
+      return "binary";
+    }
+  }
+
+  /**
+   * A larger Dummy row for original files that can be used to write multiple stripes.
+   */
+  public static class BigOriginalRow {
+    BytesWritable field;
+
+    BigOriginalRow(byte[] val) {
+      field = new BytesWritable(val);
+    }
+
+    static String getColumnNamesProperty() {
+      return "field";
+    }
+    static String getColumnTypesProperty() {
+      return "binary";
+    }
+  }
+
   @Before
   public void setup() throws Exception {
     conf = new JobConf();
@@ -122,6 +172,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
     conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, DummyRow.getColumnTypesProperty());
     conf.setBoolean(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED.varname, true);
     conf.set(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "BI");
+    OrcConf.ROWS_BETWEEN_CHECKS.setLong(conf, 1);
 
     Path workDir = new Path(System.getProperty("test.tmp.dir",
         "target" + File.separator + "test" + File.separator + "tmp"));
@@ -135,6 +186,11 @@ public class TestVectorizedOrcAcidRowBatchReader {
     originalInspector = ObjectInspectorFactory.getReflectionObjectInspector(DummyOriginalRow.class,
         ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+
+    bigRowInspector = ObjectInspectorFactory.getReflectionObjectInspector(BigRow.class,
+
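The BigRow fixture encodes its ACID identity the way the reader's min/max pruning expects: a row is identified by (originalTransaction, bucketProperty, rowId) and keys are compared field by field in that order, which is what lets a split's min and max keys bound the delete events that can apply to it. A small sketch of that ordering under the assumption of a simplified key class; Hive's actual classes are RecordIdentifier and the reader's key-interval logic, and the bucketProperty value below is just BucketCodec V1's encoding of bucket 0:

public class AcidKeySketch implements Comparable<AcidKeySketch> {
  final long originalTransaction; // write id the row was created in
  final int bucketProperty;       // encoded bucket (BucketCodec)
  final long rowId;               // row id within (txn, bucket)

  AcidKeySketch(long originalTransaction, int bucketProperty, long rowId) {
    this.originalTransaction = originalTransaction;
    this.bucketProperty = bucketProperty;
    this.rowId = rowId;
  }

  @Override
  public int compareTo(AcidKeySketch o) {
    // Lexicographic order: transaction id, then bucket, then row id.
    int c = Long.compare(originalTransaction, o.originalTransaction);
    if (c != 0) return c;
    c = Integer.compare(bucketProperty, o.bucketProperty);
    return c != 0 ? c : Long.compare(rowId, o.rowId);
  }

  public static void main(String[] args) {
    AcidKeySketch splitMin = new AcidKeySketch(1, 536870912, 0);
    AcidKeySketch deleteKey = new AcidKeySketch(1, 536870912, 7);
    AcidKeySketch splitMax = new AcidKeySketch(1, 536870912, 99);
    // A delete event is only relevant to this split if its key falls in [min, max].
    boolean relevant = deleteKey.compareTo(splitMin) >= 0 && deleteKey.compareTo(splitMax) <= 0;
    System.out.println(relevant); // true
  }
}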
hive git commit: HIVE-20224: ReplChangeManager.java Remove Logging Guards (Morio Ramdenbourg, reviewed by Sergio Pena)
Repository: hive
Updated Branches:
  refs/heads/master 2d2ab674f -> 8b7043626


HIVE-20224: ReplChangeManager.java Remove Logging Guards (Morio Ramdenbourg, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8b704362
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8b704362
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8b704362

Branch: refs/heads/master
Commit: 8b7043626f73fbf5a7c2c3f8db8413a67722fd42
Parents: 2d2ab67
Author: Sergio Pena
Authored: Tue Oct 9 13:44:52 2018 -0500
Committer: Sergio Pena
Committed: Tue Oct 9 13:44:52 2018 -0500

----------------------------------------------------------------------
 .../hive/metastore/ReplChangeManager.java       | 20 +---
 1 file changed, 5 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8b704362/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
index 893c9f4..b5fc994 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ReplChangeManager.java
@@ -250,9 +250,7 @@ public class ReplChangeManager {
           count++;
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("A file with the same content of {} already exists, ignore", path.toString());
-          }
+          LOG.debug("A file with the same content of {} already exists, ignore", path.toString());
           // Need to extend the tenancy if we saw a newer file with the same content
           fs.setTimes(cmPath, now, -1);
         }
@@ -366,9 +364,7 @@ public class ReplChangeManager {
       encodedUri = encodedUri + URI_FRAGMENT_SEPARATOR + URI_FRAGMENT_SEPARATOR;
     }
     encodedUri = encodedUri + URI_FRAGMENT_SEPARATOR + ((encodedSubDir != null) ? encodedSubDir : "");
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Encoded URI: " + encodedUri);
-    }
+    LOG.debug("Encoded URI: " + encodedUri);
     return encodedUri;
   }
 
@@ -391,9 +387,7 @@ public class ReplChangeManager {
     if ((uriAndFragment.length > 3) && !StringUtils.isEmpty(uriAndFragment[3])) {
       result[3] = uriAndFragment[3];
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Reading Encoded URI: " + result[0] + ":: " + result[1] + ":: " + result[2] + ":: " + result[3]);
-    }
+    LOG.debug("Reading Encoded URI: " + result[0] + ":: " + result[1] + ":: " + result[2] + ":: " + result[3]);
     return result;
   }
 
@@ -432,18 +426,14 @@ public class ReplChangeManager {
       if (fs.getXAttrs(file.getPath()).containsKey(REMAIN_IN_TRASH_TAG)) {
         boolean succ = Trash.moveToAppropriateTrash(fs, file.getPath(), conf);
         if (succ) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Move " + file.toString() + " to trash");
-          }
+          LOG.debug("Move " + file.toString() + " to trash");
         } else {
           LOG.warn("Fail to move " + file.toString() + " to trash");
         }
       } else {
         boolean succ = fs.delete(file.getPath(), false);
         if (succ) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Remove " + file.toString());
-          }
+          LOG.debug("Remove " + file.toString());
         } else {
           LOG.warn("Fail to remove " + file.toString());
         }
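The rationale behind the removal: SLF4J's {} placeholders defer message formatting until the level check inside debug() passes, so an explicit isDebugEnabled() guard around such a call is pure noise. The guard only pays off when building the arguments themselves is expensive. A minimal sketch of the trade-off (expensiveChecksum is an illustrative stand-in, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingGuards {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingGuards.class);

  public static void main(String[] args) {
    String path = "/warehouse/tablespace/managed/t1";

    // Redundant guard: with a {} placeholder, debug() already checks the
    // level before formatting, so the wrapper adds nothing.
    if (LOG.isDebugEnabled()) {
      LOG.debug("A file with the same content of {} already exists, ignore", path);
    }
    // Equivalent, and what the patch settles on:
    LOG.debug("A file with the same content of {} already exists, ignore", path);

    // A guard still earns its keep when computing the argument is costly:
    if (LOG.isDebugEnabled()) {
      LOG.debug("Checksum: {}", expensiveChecksum(path)); // only computed at DEBUG
    }
  }

  private static String expensiveChecksum(String path) {
    return Integer.toHexString(path.hashCode()); // stand-in for real work
  }
}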
hive git commit: HIVE-20648: LLAP: Vector group by operator should use memory per executor
Repository: hive
Updated Branches:
  refs/heads/branch-3 b3a424bd0 -> 8703a3229


HIVE-20648: LLAP: Vector group by operator should use memory per executor


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8703a322
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8703a322
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8703a322

Branch: refs/heads/branch-3
Commit: 8703a3229e5a8d4afdd0e2ebd06579df40b01ed8
Parents: b3a424b
Author: Prasanth Jayachandran
Authored: Tue Oct 9 11:05:55 2018 -0700
Committer: Prasanth Jayachandran
Committed: Tue Oct 9 11:06:28 2018 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/VectorGroupByOperator.java   | 24 +++--
 .../exec/vector/TestVectorGroupByOperator.java  | 96
 2 files changed, 112 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8703a322/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
index 43f1162..4dfd179 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
@@ -33,6 +33,8 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.llap.LlapUtil;
+import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.IConfigureJobConf;
@@ -146,6 +148,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
 
   private float memoryThreshold;
 
+  private boolean isLlap = false;
   /**
    * Interface for processing mode: global, hash, unsorted streaming, or group batch
    */
@@ -515,7 +518,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
           aggregationBatchInfo.getAggregatorsFixedSize();
 
       MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
-      maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
+      maxMemory = isLlap ? getConf().getMaxMemoryAvailable() : memoryMXBean.getHeapMemoryUsage().getMax();
       memoryThreshold = conf.getMemoryThreshold();
       // Tests may leave this unitialized, so better set it to 1
       if (memoryThreshold == 0.0f) {
@@ -525,13 +528,14 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
       maxHashTblMemory = (int)(maxMemory * memoryThreshold);
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("maxMemory:%dMb (%d * %f) fixSize:%d (key:%d agg:%d)",
-            maxHashTblMemory/1024/1024,
-            maxMemory/1024/1024,
-            memoryThreshold,
-            fixedHashEntrySize,
-            keyWrappersBatch.getKeysFixedSize(),
-            aggregationBatchInfo.getAggregatorsFixedSize()));
+        LOG.debug("GBY memory limits - isLlap: {} maxMemory: {} ({} * {}) fixSize:{} (key:{} agg:{})",
+            isLlap,
+            LlapUtil.humanReadableByteCount(maxHashTblMemory),
+            LlapUtil.humanReadableByteCount(maxMemory),
+            memoryThreshold,
+            fixedHashEntrySize,
+            keyWrappersBatch.getKeysFixedSize(),
+            aggregationBatchInfo.getAggregatorsFixedSize());
       }
     }
 
@@ -975,6 +979,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
   @Override
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
+    isLlap = LlapProxy.isDaemon();
     VectorExpression.doTransientInit(keyExpressions);
     List<ObjectInspector> objectInspectors = new ArrayList<>();
 
@@ -1231,4 +1236,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
     }
   }
 
+  public long getMaxMemory() {
+    return maxMemory;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8703a322/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
index ffdc410..e2a593f 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
@@ -38,6 +38,8 @@ import java.util.Set;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
hive git commit: HIVE-20648: LLAP: Vector group by operator should use memory per executor
Repository: hive
Updated Branches:
  refs/heads/master db04f3f9a -> 2d2ab674f


HIVE-20648: LLAP: Vector group by operator should use memory per executor


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2d2ab674
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2d2ab674
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2d2ab674

Branch: refs/heads/master
Commit: 2d2ab674f8acb8a4e1d0532790e6c27bd8553018
Parents: db04f3f
Author: Prasanth Jayachandran
Authored: Tue Oct 9 11:05:55 2018 -0700
Committer: Prasanth Jayachandran
Committed: Tue Oct 9 11:05:55 2018 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/VectorGroupByOperator.java   | 24 +++--
 .../exec/vector/TestVectorGroupByOperator.java  | 96
 2 files changed, 112 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2d2ab674/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
index a516d60..0d80c9e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java
@@ -33,6 +33,8 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.llap.LlapUtil;
+import org.apache.hadoop.hive.llap.io.api.LlapProxy;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.IConfigureJobConf;
@@ -148,6 +150,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
 
   private float memoryThreshold;
 
+  private boolean isLlap = false;
   /**
    * Interface for processing mode: global, hash, unsorted streaming, or group batch
    */
@@ -517,7 +520,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
           aggregationBatchInfo.getAggregatorsFixedSize();
 
       MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
-      maxMemory = memoryMXBean.getHeapMemoryUsage().getMax();
+      maxMemory = isLlap ? getConf().getMaxMemoryAvailable() : memoryMXBean.getHeapMemoryUsage().getMax();
       memoryThreshold = conf.getMemoryThreshold();
       // Tests may leave this unitialized, so better set it to 1
       if (memoryThreshold == 0.0f) {
@@ -527,13 +530,14 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
       maxHashTblMemory = (int)(maxMemory * memoryThreshold);
 
       if (LOG.isDebugEnabled()) {
-        LOG.debug(String.format("maxMemory:%dMb (%d * %f) fixSize:%d (key:%d agg:%d)",
-            maxHashTblMemory/1024/1024,
-            maxMemory/1024/1024,
-            memoryThreshold,
-            fixedHashEntrySize,
-            keyWrappersBatch.getKeysFixedSize(),
-            aggregationBatchInfo.getAggregatorsFixedSize()));
+        LOG.debug("GBY memory limits - isLlap: {} maxMemory: {} ({} * {}) fixSize:{} (key:{} agg:{})",
+            isLlap,
+            LlapUtil.humanReadableByteCount(maxHashTblMemory),
+            LlapUtil.humanReadableByteCount(maxMemory),
+            memoryThreshold,
+            fixedHashEntrySize,
+            keyWrappersBatch.getKeysFixedSize(),
+            aggregationBatchInfo.getAggregatorsFixedSize());
       }
     }
 
@@ -977,6 +981,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
   @Override
   protected void initializeOp(Configuration hconf) throws HiveException {
     super.initializeOp(hconf);
+    isLlap = LlapProxy.isDaemon();
     VectorExpression.doTransientInit(keyExpressions);
     List<ObjectInspector> objectInspectors = new ArrayList<>();
 
@@ -1233,4 +1238,7 @@ public class VectorGroupByOperator extends Operator<GroupByDesc>
     }
   }
 
+  public long getMaxMemory() {
+    return maxMemory;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/2d2ab674/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
index fe1375b..278f167 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
@@ -38,6 +38,8 @@ import java.util.Set;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
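The reasoning behind both HIVE-20648 commits: in LLAP many executors share one JVM, so sizing each group-by hash table from the JVM heap maximum over-commits memory by roughly the executor count; the fix sizes it from the per-operator budget instead. A standalone sketch of that decision (the isLlapDaemon and perExecutorBudget parameter names are illustrative; Hive itself uses LlapProxy.isDaemon() and getConf().getMaxMemoryAvailable(), as the diffs show):

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;

public class GroupByMemoryBudget {
  /**
   * Pick the memory ceiling for the vector group-by hash table.
   * Inside an LLAP daemon the heap is shared by many executors, so the
   * per-executor allocation must be used instead of the heap maximum.
   */
  static long maxHashTableMemory(boolean isLlapDaemon, long perExecutorBudget,
      float memoryThreshold) {
    MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
    long maxMemory = isLlapDaemon
        ? perExecutorBudget                           // budget handed to this operator
        : memoryMXBean.getHeapMemoryUsage().getMax(); // standalone container: whole heap
    return (long) (maxMemory * memoryThreshold);
  }

  public static void main(String[] args) {
    long containerStyle = maxHashTableMemory(false, 0, 0.9f);
    long llapStyle = maxHashTableMemory(true, 4L << 30, 0.9f); // e.g. 4 GiB per executor
    System.out.printf("container: %d bytes, llap executor: %d bytes%n", containerStyle, llapStyle);
  }
}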
hive git commit: HIVE-20715: Disable test: udaf_histogram_numeric (Zoltan Haindrich reviewed by Jesus Camacho Rodriguez)
Repository: hive
Updated Branches:
  refs/heads/master 717a15b38 -> db04f3f9a


HIVE-20715: Disable test: udaf_histogram_numeric (Zoltan Haindrich reviewed by Jesus Camacho Rodriguez)

Signed-off-by: Zoltan Haindrich


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/db04f3f9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/db04f3f9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/db04f3f9

Branch: refs/heads/master
Commit: db04f3f9a8ecdacc27b5ddb55e1501e0498a1d9b
Parents: 717a15b
Author: Zoltan Haindrich
Authored: Tue Oct 9 19:57:47 2018 +0200
Committer: Zoltan Haindrich
Committed: Tue Oct 9 19:57:47 2018 +0200

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/db04f3f9/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
index 507076c..11f87f4 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java
@@ -59,6 +59,7 @@ public class CliConfigs {
         excludesFrom(testConfigProps, "erasurecoding.only.query.files");
 
         excludeQuery("fouter_join_ppr.q"); // Disabled in HIVE-19509
+        excludeQuery("udaf_histogram_numeric.q"); // disabled in HIVE-20715
 
         setResultsDir("ql/src/test/results/clientpositive");
         setLogDir("itests/qtest/target/qfile-results/clientpositive");
hive git commit: HIVE-20711: Race Condition when Multi-Threading in SessionState.createRootHDFSDir (Denys Kuzmenko, reviewed by Antal Skinkovics and Peter Vary)
Repository: hive
Updated Branches:
  refs/heads/master 1e048df35 -> 717a15b38


HIVE-20711: Race Condition when Multi-Threading in SessionState.createRootHDFSDir (Denys Kuzmenko, reviewed by Antal Skinkovics and Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/717a15b3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/717a15b3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/717a15b3

Branch: refs/heads/master
Commit: 717a15b383e67c635870ec0369e77454541b994a
Parents: 1e048df
Author: denys kuzmenko
Authored: Tue Oct 9 09:47:58 2018 +0200
Committer: Peter Vary
Committed: Tue Oct 9 09:47:58 2018 +0200

----------------------------------------------------------------------
 .../ql/exec/spark/TestSparkSessionTimeout.java  | 27 +---
 .../apache/hadoop/hive/ql/exec/Utilities.java   | 14 +++---
 2 files changed, 11 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/717a15b3/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java
index c887297..d8dd80a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/exec/spark/TestSparkSessionTimeout.java
@@ -68,32 +68,7 @@ public class TestSparkSessionTimeout {
         HiveConf conf = new HiveConf();
         conf.setBoolVar(HiveConf.ConfVars.SPARK_OPTIMIZE_SHUFFLE_SERDE, false);
         conf.set("spark.local.dir", Paths.get(System.getProperty("test.tmp.dir"),
-            "TestSparkSessionTimeout-testMultiSparkSessionTimeout-local-dir").toString());
-
-        SessionState.start(conf);
-
-        runTestSparkSessionTimeout(conf);
-        return null;
-      }));
-    }
-    for (Future future : futures) {
-      future.get();
-    }
-  }
-
-  @Test
-  public void testMultiSparkSessionTimeout() throws ExecutionException, InterruptedException {
-    List<Future<Void>> futures = new ArrayList<>();
-    ExecutorService es = Executors.newFixedThreadPool(10);
-    for (int i = 0; i < 10; i++) {
-      futures.add(es.submit(() -> {
-        String confDir = "../../data/conf/spark/local/hive-site.xml";
-        HiveConf.setHiveSiteLocation(new File(confDir).toURI().toURL());
-
-        HiveConf conf = new HiveConf();
-        conf.setBoolVar(HiveConf.ConfVars.SPARK_OPTIMIZE_SHUFFLE_SERDE, false);
-        conf.set("spark.local.dir", Paths.get(System.getProperty("test.tmp.dir"),
-            "TestSparkSessionTimeout-testMultiSparkSessionTimeout-local-dir").toString());
+            "TestSparkSessionTimeout-testMultiSessionSparkSessionTimeout-local-dir").toString());
 
         SessionState.start(conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/717a15b3/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 76a30eb..10aa94e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -255,6 +255,9 @@ public final class Utilities {
 
   public static Random randGen = new Random();
 
+  private static final Object INPUT_SUMMARY_LOCK = new Object();
+  private static final Object ROOT_HDFS_DIR_LOCK = new Object();
+
   /**
    * ReduceField:
    * KEY: record key
@@ -2317,8 +2320,6 @@ public final class Utilities {
     }
   }
 
-  private static final Object INPUT_SUMMARY_LOCK = new Object();
-
   /**
    * Returns the maximum number of executors required to get file information from several input locations.
    * It checks whether HIVE_EXEC_INPUT_LISTING_MAX_THREADS or DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX are > 1
@@ -4463,11 +4464,16 @@ public final class Utilities {
   public static void ensurePathIsWritable(Path rootHDFSDirPath, HiveConf conf) throws IOException {
     FsPermission writableHDFSDirPermission = new FsPermission((short)00733);
     FileSystem fs = rootHDFSDirPath.getFileSystem(conf);
+
     if (!fs.exists(rootHDFSDirPath)) {
-      Utilities.createDirsWithPermission(conf, rootHDFSDirPath, writableHDFSDirPermission, true);
+      synchronized (ROOT_HDFS_DIR_LOCK) {
+        if (!fs.exists(rootHDFSDirPath)) {
+          Utilities.createDirsWithPermission(conf, rootHDFSDirPath, writableHDFSDirPermission, true);
+        }
+      }
     }
 
     FsPermission currentHDFSDirPermission =
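The fix is the classic check-lock-recheck idiom: the cheap existence check runs unsynchronized on the hot path, and only a thread that still sees the directory missing after acquiring the lock creates it, so concurrent sessions cannot race the mkdir. A self-contained sketch of the same idiom with java.nio.file standing in for HDFS (the class and method names are illustrative, not Hive's):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class RootDirCreation {
  private static final Object ROOT_DIR_LOCK = new Object();

  static void ensureDirExists(Path dir) throws IOException {
    if (!Files.exists(dir)) {          // fast path: no lock contention once created
      synchronized (ROOT_DIR_LOCK) {
        if (!Files.exists(dir)) {      // recheck: another thread may have won the race
          Files.createDirectories(dir);
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Path dir = Paths.get(System.getProperty("java.io.tmpdir"), "hive-scratch-demo");
    Runnable task = () -> {
      try {
        ensureDirExists(dir);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    };
    Thread t1 = new Thread(task);
    Thread t2 = new Thread(task);
    t1.start(); t2.start();
    t1.join(); t2.join();
    System.out.println("exists: " + Files.exists(dir));
  }
}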