HIVE-12036 : LLAP: merge master into branch (Sergey Shelukhin)
Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b28aec9a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b28aec9a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b28aec9a

Branch: refs/heads/llap
Commit: b28aec9a0f5e2507f05f9bb4c235bb6136bb5df2
Parents: c5ccf66 0ca9ff8
Author: Sergey Shelukhin <[email protected]>
Authored: Mon Oct 5 12:42:39 2015 -0700
Committer: Sergey Shelukhin <[email protected]>
Committed: Mon Oct 5 12:42:39 2015 -0700

----------------------------------------------------------------------
 .../apache/hive/beeline/HiveSchemaHelper.java | 4 +-
 .../hadoop/hive/common/StatsSetupConst.java | 8 -
 .../org/apache/hadoop/hive/conf/HiveConf.java | 20 +-
 .../hive/common/type/TestHiveDecimal.java | 12 +-
 data/files/decimal_1_1.txt | 30 +
 data/files/sample2.json | 2 +
 .../hadoop/hive/hbase/HBaseStatsAggregator.java | 128 -
 .../hadoop/hive/hbase/HBaseStatsPublisher.java | 154 -
 .../hive/hbase/HBaseStatsSetupConstants.java | 34 -
 .../hadoop/hive/hbase/HBaseStatsUtils.java | 135 -
 .../src/test/queries/positive/hbase_stats.q | 30 -
 .../src/test/queries/positive/hbase_stats2.q | 31 -
 .../positive/hbase_stats_empty_partition.q | 13 -
 .../src/test/results/positive/hbase_stats.q.out | 311 -
 .../test/results/positive/hbase_stats2.q.out | 311 -
 .../positive/hbase_stats_empty_partition.q.out | 63 -
 hcatalog/conf/proto-hive-site.xml | 2 +-
 .../templates/conf/hive-site.xml.template | 2 +-
 hcatalog/streaming/pom.xml | 7 +
 .../streaming/AbstractRecordWriter.java | 93 +-
 .../streaming/DelimitedInputWriter.java | 54 +-
 .../hcatalog/streaming/StrictJsonWriter.java | 46 +-
 .../hive/hcatalog/streaming/TestStreaming.java | 698 +-
 .../test/resources/testconfiguration.properties | 1 +
 .../hadoop/hive/metastore/HiveAlterHandler.java | 2 +-
 .../hadoop/hive/metastore/HiveMetaStore.java | 30 +-
 .../hive/metastore/HiveMetaStoreClient.java | 94 +
 .../hadoop/hive/metastore/IMetaStoreClient.java | 20 +
 .../hadoop/hive/metastore/MetaStoreUtils.java | 40 +-
 .../apache/hadoop/hive/metastore/Warehouse.java | 4 +-
 .../hive/metastore/hbase/HBaseReadWrite.java | 9 +-
 .../metastore/txn/CompactionTxnHandler.java | 36 +-
 .../metastore/AlternateFailurePreListener.java | 2 +-
 pom.xml | 3 +-
 ql/pom.xml | 3 +
 .../java/org/apache/hadoop/hive/ql/Driver.java | 2 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 8 +-
 .../hive/ql/exec/tez/HashTableLoader.java | 25 +-
 .../hive/ql/exec/tez/HiveSplitGenerator.java | 4 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java | 366 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java | 3 +-
 .../serde/ArrayWritableObjectInspector.java | 7 +
 .../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 3 +-
 .../hadoop/hive/ql/lockmgr/HiveLockObject.java | 6 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java | 48 +-
 .../apache/hadoop/hive/ql/metadata/Table.java | 5 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java | 4 +
 .../optimizer/SortedDynPartitionOptimizer.java | 7 +-
 .../correlation/CorrelationUtilities.java | 33 -
 .../RewriteQueryUsingAggregateIndexCtx.java | 2 +-
 .../ql/parse/ColumnStatsSemanticAnalyzer.java | 2 +-
 .../hive/ql/parse/LoadSemanticAnalyzer.java | 12 -
 .../hive/ql/txn/AcidHouseKeeperService.java | 5 +-
 .../hadoop/hive/ql/txn/compactor/Cleaner.java | 3 +-
 .../hive/ql/txn/compactor/CompactorMR.java | 10 +-
 .../hive/ql/txn/compactor/CompactorThread.java | 9 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java | 5 +-
 .../hadoop/hive/ql/metadata/StringAppender.java | 2 +-
 .../hadoop/hive/ql/metadata/TestHive.java | 2 +-
 .../special_character_in_tabnames_1.q | 13 +
 ql/src/test/queries/clientpositive/cross_join.q | 8 +
 .../test/queries/clientpositive/decimal_1_1.q | 9 +
 .../dynpart_sort_opt_vectorization.q | 2 -
 .../clientpositive/dynpart_sort_optimization.q | 2 -
 .../clientpositive/dynpart_sort_optimization2.q | 2 -
 .../queries/clientpositive/load_non_hdfs_path.q | 6 +
 .../parquet_mixed_partition_formats2.q | 31 +
 .../special_character_in_tabnames_1.q | 1075 +
 .../special_character_in_tabnames_2.q | 40 +
 .../special_character_in_tabnames_1.q.out | 10 +
 .../results/clientpositive/cross_join.q.out | 196 +
 .../results/clientpositive/decimal_1_1.q.out | 104 +
 .../clientpositive/load_non_hdfs_path.q.out | 16 +
 .../parquet_mixed_partition_formats2.q.out | 99 +
 .../clientpositive/spark/cross_join.q.out | 211 +
 .../clientpositive/spark/decimal_1_1.q.out | 104 +
 .../special_character_in_tabnames_1.q.out | 19550 +++++++++++++++++
 .../special_character_in_tabnames_2.q.out | 304 +
 .../results/clientpositive/tez/auto_join0.q.out | 1 -
 .../tez/auto_sortmerge_join_12.q.out | 1 -
 .../results/clientpositive/tez/cross_join.q.out | 187 +
 .../tez/cross_product_check_2.q.out | 6 -
 .../tez/dynamic_partition_pruning.q.out | 1 -
 .../vectorized_dynamic_partition_pruning.q.out | 1 -
 .../objectinspector/ObjectInspectorUtils.java | 29 +
 service/pom.xml | 6 +
 .../auth/LdapAuthenticationProviderImpl.java | 19 +-
 .../cli/operation/LogDivertAppender.java | 2 +-
 .../auth/TestLdapAtnProviderWithLdapServer.java | 215 +
 .../org/apache/hive/service/auth/ldapdata.ldif | 59 +
 .../hadoop/hive/common/type/HiveDecimal.java | 10 +-
 .../hive/ptest/execution/JIRAService.java | 115 +-
 .../org/apache/hive/ptest/execution/PTest.java | 11 +-
 .../hive/ptest/execution/TestCheckPhase.java | 77 +
 .../ptest/execution/TestTestCheckPhase.java | 91 +
 .../src/test/resources/HIVE-10761.6.patch | 2539 +++
 .../src/test/resources/HIVE-11271.4.patch | 606 +
 .../ptest2/src/test/resources/HIVE-9377.1.patch | 25 +
 .../ptest2/src/test/resources/remove-test.patch | 33 +
 99 files changed, 27175 insertions(+), 1646 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/pom.xml
----------------------------------------------------------------------
diff --cc ql/pom.xml
index 99c22a3,587e2ee..83b9ebf
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@@ -709,7 -704,7 +709,8 @@@
                    <include>org.apache.hive:hive-common</include>
                    <include>org.apache.hive:hive-exec</include>
                    <include>org.apache.hive:hive-serde</include>
 +                  <include>org.apache.hive:hive-llap-client</include>
+                   <include>org.apache.hive:hive-metastore</include>
                    <include>com.esotericsoftware.kryo:kryo</include>
                    <include>org.apache.parquet:parquet-hadoop-bundle</include>
                    <include>org.apache.thrift:libthrift</include>

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 8941db1,ef62a23..6ffec30
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@@ -108,10 -114,10 +114,10 @@@ import com.google.common.util.concurren
   * delete events have null for row.
   */
  public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
-   InputFormatChecker, VectorizedInputFormatInterface,
+   InputFormatChecker, VectorizedInputFormatInterface, LlapWrappableInputFormatInterface,
    AcidInputFormat<NullWritable, OrcStruct>,
    CombineHiveInputFormat.AvoidSplitCombination {
-   static enum SplitStrategyKind{
+   static enum SplitStrategyKind {
      HYBRID,
      BI,
      ETL
@@@ -811,7 -821,7 +823,8 @@@
        this.file = this.fileWithId.getFileStatus();
        this.blockSize = this.file.getBlockSize();
        this.fileInfo = splitInfo.fileInfo;
 -      locations = SHIMS.getLocationsWithOffset(fs, fileWithId.getFileStatus());
- -    locations = SHIMS.getLocationsWithOffset(fs, file); // TODO: potential DFS call
++      // TODO: potential DFS call
++      this.locations = SHIMS.getLocationsWithOffset(fs, fileWithId.getFileStatus());
        this.isOriginal = splitInfo.isOriginal;
        this.deltas = splitInfo.deltas;
        this.hasBase = splitInfo.hasBase;
@@@ -993,41 -1003,51 +1006,51 @@@
      }

      private void populateAndCacheStripeDetails() throws IOException {
-       Reader orcReader = OrcFile.createReader(fileWithId.getFileStatus().getPath(),
-           OrcFile.readerOptions(context.conf).filesystem(fs));
+       // Only create OrcReader if we are missing some information.
-       OrcProto.Footer footer;
++      List<OrcProto.ColumnStatistics> colStatsLocal;
++      List<OrcProto.Type> typesLocal;
        if (fileInfo != null) {
          stripes = fileInfo.stripeInfos;
+         stripeStats = fileInfo.stripeStats;
          fileMetaInfo = fileInfo.fileMetaInfo;
-         types = fileInfo.types;
-         metadata = fileInfo.metadata;
-         types = fileInfo.footer.getTypesList();
++        typesLocal = types = fileInfo.types;
++        colStatsLocal = fileInfo.fileStats;
          writerVersion = fileInfo.writerVersion;
-         footer = fileInfo.footer;
          // For multiple runs, in case sendSplitsInFooter changes
          if (fileMetaInfo == null && context.footerInSplits) {
+           Reader orcReader = createOrcReader();
            fileInfo.fileMetaInfo = ((ReaderImpl) orcReader).getFileMetaInfo();
-           fileInfo.types = orcReader.getTypes();
-           fileInfo.writerVersion = orcReader.getWriterVersion();
-           assert fileInfo.metadata != null && fileInfo.footer != null
++          assert fileInfo.stripeStats != null && fileInfo.types != null
+               && fileInfo.writerVersion != null;
-           footer = fileInfo.footer;
+           // We assume that if we needed to create a reader, we need to cache it to meta cache.
+           // TODO: This will also needlessly overwrite it in local cache for now.
+           Context.footerCache.put(fileWithId.getFileId(), file, fileInfo.fileMetaInfo, orcReader);
          }
        } else {
+         Reader orcReader = createOrcReader();
          stripes = orcReader.getStripes();
-         metadata = orcReader.getMetadata();
--        types = orcReader.getTypes();
++        typesLocal = types = orcReader.getTypes();
++        colStatsLocal = orcReader.getOrcProtoFileStatistics();
          writerVersion = orcReader.getWriterVersion();
+         stripeStats = orcReader.getStripeStatistics();
          fileMetaInfo = context.footerInSplits ?
            ((ReaderImpl) orcReader).getFileMetaInfo() : null;
-         footer = orcReader.getFooter();
          if (context.cacheStripeDetails) {
-           // Populate into cache.
-           Context.footerCache.put(fileWithId.getFileStatus().getPath(),
-               new FileInfo(fileWithId.getFileStatus().getModificationTime(),
-                   fileWithId.getFileStatus().getLen(), stripes,
-                   stripeStats, types, fileMetaInfo, writerVersion));
+           Long fileId = fileWithId.getFileId();
+           Context.footerCache.put(fileId, file, fileMetaInfo, orcReader);
          }
        }
        includedCols = genIncludedColumns(types, context.conf, isOriginal);
-       projColsUncompressedSize = computeProjectionSize(orcReader, includedCols, isOriginal);
-       projColsUncompressedSize = computeProjectionSize(footer, includedCols, isOriginal);
++      projColsUncompressedSize = computeProjectionSize(typesLocal, colStatsLocal, includedCols, isOriginal);
+     }
+
+     private Reader createOrcReader() throws IOException {
+       return OrcFile.createReader(file.getPath(),
+           OrcFile.readerOptions(context.conf).filesystem(fs));
      }

-     private long computeProjectionSize(final Reader orcReader, final boolean[] includedCols,
-         final boolean isOriginal) {
-     private long computeProjectionSize(
-         OrcProto.Footer footer, final boolean[] includedCols, final boolean isOriginal) {
++    private long computeProjectionSize(List<OrcProto.Type> types,
++        List<OrcProto.ColumnStatistics> stats, boolean[] includedCols, boolean isOriginal) {
        final int rootIdx = getRootColumn(isOriginal);
        List<Integer> internalColIds = Lists.newArrayList();
        if (includedCols != null) {
@@@ -1037,7 -1057,7 +1060,7 @@@
          }
        }
      }
-     return orcReader.getRawDataSizeFromColIndices(internalColIds);
-     return ReaderImpl.getRawDataSizeFromColIndices(internalColIds, footer);
++    return ReaderImpl.getRawDataSizeFromColIndices(internalColIds, types, stats);
    }
  }

@@@ -1048,9 -1068,12 +1071,13 @@@
    static List<OrcSplit> generateSplitsInfo(Configuration conf, int numSplits)
        throws IOException {
-     // use threads to resolve directories into splits
+     // Use threads to resolve directories into splits.
+     if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED)) {
+       // Create HiveConf once, since this is expensive.
+       conf = new HiveConf(conf, OrcInputFormat.class);
+     }
      Context context = new Context(conf, numSplits);
+     boolean useFileIds = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS);
      List<OrcSplit> splits = Lists.newArrayList();
      List<Future<AcidDirInfo>> pathFutures = Lists.newArrayList();
      List<Future<List<OrcSplit>>> splitFutures = Lists.newArrayList();
@@@ -1137,26 -1163,28 +1166,29 @@@
     *
     */
    private static class FileInfo {
-     long modificationTime;
-     long size;
-     List<StripeInformation> stripeInfos;
-     FileMetaInfo fileMetaInfo;
-     List<StripeStatistics> stripeStats;
-     List<OrcProto.Type> types;
+     private final long modificationTime;
+     private final long size;
+     private final Long fileId;
+     private final List<StripeInformation> stripeInfos;
-     private ReaderImpl.FileMetaInfo fileMetaInfo;
-     private Metadata metadata;
-     private OrcProto.Footer footer;
++    private FileMetaInfo fileMetaInfo;
++    private List<StripeStatistics> stripeStats;
++    private List<OrcProto.ColumnStatistics> fileStats;
++    private List<OrcProto.Type> types;
      private OrcFile.WriterVersion writerVersion;

--    FileInfo(long modificationTime, long size,
--        List<StripeInformation> stripeInfos,
-         Metadata metadata, OrcProto.Footer footer,
-         ReaderImpl.FileMetaInfo fileMetaInfo,
++    FileInfo(long modificationTime, long size, List<StripeInformation> stripeInfos,
+         List<StripeStatistics> stripeStats, List<OrcProto.Type> types,
-         FileMetaInfo fileMetaInfo,
-         OrcFile.WriterVersion writerVersion) {
++        List<OrcProto.ColumnStatistics> fileStats, FileMetaInfo fileMetaInfo,
+         OrcFile.WriterVersion writerVersion, Long fileId) {
      this.modificationTime = modificationTime;
      this.size = size;
+     this.fileId = fileId;
      this.stripeInfos = stripeInfos;
      this.fileMetaInfo = fileMetaInfo;
-     this.metadata = metadata;
-     this.footer = footer;
+     this.stripeStats = stripeStats;
+     this.types = types;
++    this.fileStats = fileStats;
      this.writerVersion = writerVersion;
    }
  }
@@@ -1516,5 -1544,186 +1548,187 @@@
          bucket, validTxnList, new Reader.Options(), deltaDirectory);
    }

+   /**
+    * Represents footer cache.
+    */
+   public interface FooterCache {
+     FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) throws IOException;
+     void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
+         throws IOException;
+   }
+
+   /** Local footer cache using Guava. Stores convoluted Java objects. */
+   private static class LocalCache implements FooterCache {
+     private Cache<Path, FileInfo> cache;
+
+     public LocalCache(int numThreads, int cacheStripeDetailsSize) {
+       cache = CacheBuilder.newBuilder()
+           .concurrencyLevel(numThreads)
+           .initialCapacity(cacheStripeDetailsSize)
+           .maximumSize(cacheStripeDetailsSize)
+           .softValues()
+           .build();
+     }
+
+     @Override
+     public FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) {
+       // TODO: should local cache also be by fileId? Preserve the original logic for now.
+       FileInfo[] result = new FileInfo[files.size()];
+       int i = -1;
+       for (HdfsFileStatusWithId fileWithId : files) {
+         ++i;
+         FileStatus file = fileWithId.getFileStatus();
+         Path path = file.getPath();
+         Long fileId = fileWithId.getFileId();
+         FileInfo fileInfo = cache.getIfPresent(path);
+         if (isDebugEnabled) {
+           LOG.debug("Info " + (fileInfo == null ?
"not " : "") + "cached for path: " + path); + } + if (fileInfo == null) continue; + if ((fileId != null && fileInfo.fileId != null && fileId == fileInfo.fileId) + || (fileInfo.modificationTime == file.getModificationTime() && + fileInfo.size == file.getLen())) { + result[i] = fileInfo; + continue; + } + // Invalidate + cache.invalidate(path); + if (isDebugEnabled) { + LOG.debug("Meta-Info for : " + path + " changed. CachedModificationTime: " + + fileInfo.modificationTime + ", CurrentModificationTime: " + + file.getModificationTime() + ", CachedLength: " + fileInfo.size + + ", CurrentLength: " + file.getLen()); + } + } + return result; + } + + public void put(Path path, FileInfo fileInfo) { + cache.put(path, fileInfo); + } + + @Override + public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader) + throws IOException { + cache.put(file.getPath(), new FileInfo(file.getModificationTime(), file.getLen(), - orcReader.getStripes(), orcReader.getMetadata(), orcReader.getFooter(), fileMetaInfo, - orcReader.getWriterVersion(), fileId)); ++ orcReader.getStripes(), orcReader.getStripeStatistics(), orcReader.getTypes(), ++ orcReader.getOrcProtoFileStatistics(), fileMetaInfo, orcReader.getWriterVersion(), ++ fileId)); + } + } + + /** Metastore-based footer cache storing serialized footers. Also has a local cache. */ + public static class MetastoreCache implements FooterCache { + private final LocalCache localCache; + private boolean isWarnLogged = false; + private HiveConf conf; + + public MetastoreCache(LocalCache lc) { + localCache = lc; + } + + @Override + public FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) throws IOException { + // First, check the local cache. + FileInfo[] result = localCache.getAndValidate(files); + assert result.length == files.size(); + // This is an unfortunate consequence of batching/iterating thru MS results. + // TODO: maybe have a direct map call for small lists if this becomes a perf issue. 
+       HashMap<Long, Integer> posMap = new HashMap<>(files.size());
+       for (int i = 0; i < result.length; ++i) {
+         if (result[i] != null) continue;
+         HdfsFileStatusWithId file = files.get(i);
+         Long fileId = file.getFileId();
+         if (fileId == null) {
+           if (!isWarnLogged || isDebugEnabled) {
+             LOG.warn("Not using metastore cache because fileId is missing: "
+                 + file.getFileStatus().getPath());
+             isWarnLogged = true;
+           }
+           continue;
+         }
+         posMap.put(fileId, i);
+       }
+       Iterator<Entry<Long, ByteBuffer>> iter = null;
+       Hive hive;
+       try {
+         hive = getHive();
+         iter = hive.getFileMetadata(Lists.newArrayList(posMap.keySet()), conf).iterator();
+       } catch (HiveException ex) {
+         throw new IOException(ex);
+       }
+       List<Long> corruptIds = null;
+       while (iter.hasNext()) {
+         Entry<Long, ByteBuffer> e = iter.next();
+         int ix = posMap.get(e.getKey());
+         assert result[ix] == null;
+         HdfsFileStatusWithId file = files.get(ix);
+         assert file.getFileId() == e.getKey();
+         result[ix] = createFileInfoFromMs(file, e.getValue());
+         if (result[ix] == null) {
+           if (corruptIds == null) {
+             corruptIds = new ArrayList<>();
+           }
+           corruptIds.add(file.getFileId());
+         } else {
+           localCache.put(file.getFileStatus().getPath(), result[ix]);
+         }
+       }
+       if (corruptIds != null) {
+         try {
+           hive.clearFileMetadata(corruptIds);
+         } catch (HiveException ex) {
+           LOG.error("Failed to clear corrupt cache data", ex);
+         }
+       }
+       return result;
+     }
+
+     private Hive getHive() throws HiveException {
+       // TODO: we wish we could cache the Hive object, but it's not thread safe, and each
+       //       threadlocal we "cache" would need to be reinitialized for every query. This is
+       //       a huge PITA. Hive object will be cached internally, but the compat check will be
+       //       done every time inside get().
+       return Hive.getWithFastCheck(conf);
+     }
+
+     private static FileInfo createFileInfoFromMs(
+         HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
+       FileStatus fs = file.getFileStatus();
+       ReaderImpl.FooterInfo fi = null;
+       ByteBuffer original = bb.duplicate();
+       try {
+         fi = ReaderImpl.extractMetaInfoFromFooter(bb, fs.getPath());
+       } catch (Exception ex) {
+         byte[] data = new byte[original.remaining()];
+         System.arraycopy(original.array(), original.arrayOffset() + original.position(),
+             data, 0, data.length);
+         String msg = "Failed to parse the footer stored in cache for file ID "
+             + file.getFileId() + " " + original + " [ " + Hex.encodeHexString(data) + " ]";
+         LOG.error(msg, ex);
+         return null;
+       }
-       return new FileInfo(fs.getModificationTime(), fs.getLen(), fi.getStripes(),
-           fi.getMetadata(), fi.getFooter(), fi.getFileMetaInfo(),
++      return new FileInfo(fs.getModificationTime(), fs.getLen(), fi.getStripes(), fi.getMetadata(),
++          fi.getFooter().getTypesList(), fi.getFooter().getStatisticsList(), fi.getFileMetaInfo(),
+           fi.getFileMetaInfo().writerVersion, file.getFileId());
+     }
+
+     @Override
+     public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
+         throws IOException {
+       localCache.put(fileId, file, fileMetaInfo, orcReader);
+       if (fileId != null) {
+         try {
+           getHive().putFileMetadata(Lists.newArrayList(fileId),
+               Lists.newArrayList(((ReaderImpl) orcReader).getSerializedFileFooter()));
+         } catch (HiveException e) {
+           throw new IOException(e);
+         }
+       }
+     }
+
+     public void configure(HiveConf queryConfig) {
+       this.conf = queryConfig;
+     }
+   }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hive/blob/b28aec9a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
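----------------------------------------------------------------------
Reviewer's sketch (not part of the commit): the core of the OrcInputFormat
change above is a two-tier footer cache. A process-local Guava cache is
consulted first, misses are batch-fetched from the metastore by file ID, and
the fetched entries are backfilled into the local tier (see FooterCache,
LocalCache, and MetastoreCache in the diff). The self-contained Java sketch
below illustrates only that lookup pattern; TwoLevelFooterCache, RemoteStore,
and the cache size are illustrative stand-ins, not Hive classes or settings.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Sketch of a two-tier lookup: hit the local tier first, batch the misses
 * against a shared store, and backfill the local tier with the results.
 */
public class TwoLevelFooterCache<K, V> {

  /** Stand-in for the metastore-backed tier (batch lookup by key). */
  public interface RemoteStore<K, V> {
    Map<K, V> fetch(List<K> keys);
  }

  private final Cache<K, V> local = CacheBuilder.newBuilder()
      .maximumSize(10_000)  // bounded, like cacheStripeDetailsSize above
      .softValues()         // let the GC reclaim entries under memory pressure
      .build();
  private final RemoteStore<K, V> remote;

  public TwoLevelFooterCache(RemoteStore<K, V> remote) {
    this.remote = remote;
  }

  /** Returns the hits for all keys; keys absent from both tiers are omitted. */
  public Map<K, V> getAndValidate(List<K> keys) {
    Map<K, V> hits = new HashMap<>();
    List<K> misses = new ArrayList<>();
    for (K key : keys) {
      V value = local.getIfPresent(key);  // tier 1: process-local cache
      if (value != null) {
        hits.put(key, value);
      } else {
        misses.add(key);
      }
    }
    if (!misses.isEmpty()) {
      // Tier 2: one batched call to the shared store for all misses.
      for (Map.Entry<K, V> e : remote.fetch(misses).entrySet()) {
        local.put(e.getKey(), e.getValue());  // backfill the local tier
        hits.put(e.getKey(), e.getValue());
      }
    }
    return hits;
  }
}

Unlike this sketch, the real MetastoreCache in the diff also validates local
entries against file modification time and length, skips files with no file
ID, and clears metastore entries whose serialized footers fail to parse.
----------------------------------------------------------------------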
