Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java Tue Sep 23 19:30:11 2014
@@ -451,7 +451,11 @@ public class SessionHiveMetaStoreClient
     // Delete table data
     if (deleteData && !MetaStoreUtils.isExternalTable(table)) {
       try {
-        getWh().deleteDir(tablePath, true);
+        boolean ifPurge = false;
+        if (envContext != null){
+          ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+        }
+        getWh().deleteDir(tablePath, true, ifPurge);
       } catch (Exception err) {
         LOG.error("Failed to delete temp table directory: " + tablePath, err);
         // Forgive error
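A note on the hunk above: the "ifPurge" flag is read from the properties map of the
metastore EnvironmentContext. A minimal sketch of how a caller can request a purging
drop -- EnvironmentContext and its Thrift-generated putToProperties are existing
metastore API; the surrounding lines are illustrative only:

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    // Ask the metastore to delete table data outright instead of moving it to trash.
    EnvironmentContext envContext = new EnvironmentContext();
    envContext.putToProperties("ifPurge", "true");
    // Boolean.parseBoolean(null) returns false, so omitting the property (or
    // passing a null context) keeps the default move-to-trash behavior.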
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java Tue Sep 23 19:30:11 2014
@@ -670,10 +670,15 @@ public final class ConstantPropagateProc
       cppCtx.getOpToConstantExprs().put(op, constants);
       foldOperator(op, cppCtx);
       List<ExprNodeDesc> colList = op.getConf().getColList();
+      List<String> columnNames = op.getConf().getOutputColumnNames();
+      Map<String, ExprNodeDesc> columnExprMap = op.getColumnExprMap();
       if (colList != null) {
         for (int i = 0; i < colList.size(); i++) {
           ExprNodeDesc newCol = foldExpr(colList.get(i), constants, cppCtx, op, 0, false);
           colList.set(i, newCol);
+          if (columnExprMap != null) {
+            columnExprMap.put(columnNames.get(i), newCol);
+          }
         }
         LOG.debug("New column list:(" + StringUtils.join(colList, " ") + ")");
       }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java Tue Sep 23 19:30:11 2014
@@ -791,6 +791,7 @@ public class Vectorizer implements Physi
   boolean validateMapWorkOperator(Operator<? extends OperatorDesc> op, boolean isTez) {
     boolean ret = false;
+    LOG.info("Validating MapWork operator " + op.getType().name());
     switch (op.getType()) {
       case MAPJOIN:
         if (op instanceof MapJoinOperator) {
@@ -816,6 +817,7 @@ public class Vectorizer implements Physi
         break;
       case FILESINK:
       case LIMIT:
+      case EVENT:
         ret = true;
         break;
       default:
@@ -827,6 +829,7 @@ public class Vectorizer implements Physi
   boolean validateReduceWorkOperator(Operator<? extends OperatorDesc> op) {
     boolean ret = false;
+    LOG.info("Validating ReduceWork operator " + op.getType().name());
     switch (op.getType()) {
       case EXTRACT:
         ret = validateExtractOperator((ExtractOperator) op);
@@ -840,7 +843,12 @@ public class Vectorizer implements Physi
         }
         break;
       case GROUPBY:
-        ret = validateGroupByOperator((GroupByOperator) op, true, true);
+        if (HiveConf.getBoolVar(physicalContext.getConf(),
+            HiveConf.ConfVars.HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED)) {
+          ret = validateGroupByOperator((GroupByOperator) op, true, true);
+        } else {
+          ret = false;
+        }
         break;
       case FILTER:
         ret = validateFilterOperator((FilterOperator) op);
@@ -855,6 +863,7 @@ public class Vectorizer implements Physi
         ret = validateFileSinkOperator((FileSinkOperator) op);
         break;
       case LIMIT:
+      case EVENT:
         ret = true;
         break;
       default:
@@ -1071,11 +1080,11 @@ public class Vectorizer implements Physi
       VectorizationContext vc = new ValidatorVectorizationContext();
       if (vc.getVectorExpression(desc, mode) == null) {
         // TODO: this cannot happen - VectorizationContext throws in such cases.
-        LOG.info("getVectorExpression returned null");
+        LOG.debug("getVectorExpression returned null");
         return false;
       }
     } catch (Exception e) {
-      LOG.info("Failed to vectorize", e);
+      LOG.debug("Failed to vectorize", e);
       return false;
     }
     return true;
@@ -1098,19 +1107,19 @@ public class Vectorizer implements Physi
     if (!supportedAggregationUdfs.contains(aggDesc.getGenericUDAFName().toLowerCase())) {
       return false;
     }
-    if (aggDesc.getParameters() != null) {
-      return validateExprNodeDesc(aggDesc.getParameters());
+    if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters())) {
+      return false;
     }
     // See if we can vectorize the aggregation.
     try {
       VectorizationContext vc = new ValidatorVectorizationContext();
       if (vc.getAggregatorExpression(aggDesc, isReduce) == null) {
         // TODO: this cannot happen - VectorizationContext throws in such cases.
-        LOG.info("getAggregatorExpression returned null");
+        LOG.debug("getAggregatorExpression returned null");
         return false;
       }
     } catch (Exception e) {
-      LOG.info("Failed to vectorize", e);
+      LOG.debug("Failed to vectorize", e);
       return false;
     }
     return true;
@@ -1196,6 +1205,7 @@ public class Vectorizer implements Physi
       case REDUCESINK:
       case LIMIT:
       case EXTRACT:
+      case EVENT:
        vectorOp = OperatorFactory.getVectorOperator(op.getConf(), vContext);
        break;
      default:

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Sep 23 19:30:11 2014
@@ -847,7 +847,8 @@ public class DDLSemanticAnalyzer extends
       outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE));
     }

-    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists);
+    boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null);
+    DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf));
   }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g Tue Sep 23 19:30:11 2014
@@ -263,7 +263,7 @@ searchCondition
 // INSERT INTO <table> (col1,col2,...) SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as Foo(a,b,c)
 valueRowConstructor
     :
-    LPAREN atomExpression (COMMA atomExpression)* RPAREN -> ^(TOK_VALUE_ROW atomExpression+)
+    LPAREN precedenceUnaryPrefixExpression (COMMA precedenceUnaryPrefixExpression)* RPAREN -> ^(TOK_VALUE_ROW precedenceUnaryPrefixExpression+)
     ;

 valuesTableConstructor

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Tue Sep 23 19:30:11 2014
@@ -479,8 +479,9 @@ import java.util.HashMap;
     xlateMap.put("KW_SUBQUERY", "SUBQUERY");
     xlateMap.put("KW_REWRITE", "REWRITE");
     xlateMap.put("KW_UPDATE", "UPDATE");
-    xlateMap.put("KW_VALUES", "VALUES");
+    xlateMap.put("KW_PURGE", "PURGE");
+
     // Operators
     xlateMap.put("DOT", ".");
@@ -929,7 +930,7 @@ dropIndexStatement
 dropTableStatement
 @init { pushMsg("drop statement", state); }
 @after { popMsg(state); }
-    : KW_DROP KW_TABLE ifExists? tableName -> ^(TOK_DROPTABLE tableName ifExists?)
+    : KW_DROP KW_TABLE ifExists? tableName KW_PURGE? -> ^(TOK_DROPTABLE tableName ifExists? KW_PURGE?)
     ;

 alterStatement
@@ -2237,7 +2238,7 @@ deleteStatement
 /*SET <columName> = (3 + col2)*/
 columnAssignmentClause
    :
-   tableOrColumn EQUAL^ atomExpression
+   tableOrColumn EQUAL^ precedencePlusExpression
    ;

 /*SET col1 = 5, col2 = (4 + col4), ...*/

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Tue Sep 23 19:30:11 2014
@@ -903,7 +903,8 @@ public class SemanticAnalyzer extends Ba
       return PlanUtils.stripQuotes(expr.getText());

     case HiveParser.KW_FALSE:
-      return "FALSE";
+      // UDFToBoolean casts any non-empty string to true, so set this to false
+      return "";

     case HiveParser.KW_TRUE:
       return "TRUE";
@@ -911,6 +912,10 @@ public class SemanticAnalyzer extends Ba
     case HiveParser.MINUS:
       return "-" + unparseExprForValuesClause((ASTNode)expr.getChildren().get(0));

+    case HiveParser.TOK_NULL:
+      // Hive's text input will translate this as a null
+      return "\\N";
+
     default:
       throw new SemanticException("Expression of type " + expr.getText() +
           " not supported in insert/values");
@@ -6353,6 +6358,9 @@ public class SemanticAnalyzer extends Ba
   // Check constraints on acid tables.  This includes
   // * no insert overwrites
   // * no use of vectorization
+  // * turns off reduce deduplication optimization, as that sometimes breaks acid
+  // This method assumes you have already decided that this is an Acid write.  Don't call it if
+  // that isn't true.
   private void checkAcidConstraints(QB qb, TableDesc tableDesc) throws SemanticException {
     String tableName = tableDesc.getTableName();
     if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
@@ -6363,6 +6371,9 @@ public class SemanticAnalyzer extends Ba
       LOG.info("Turning off vectorization for acid write operation");
       conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, false);
     }
+    LOG.info("Modifying config values for ACID write");
+    conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, false);
+    conf.setBoolVar(ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES, true);
   }

   /**
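A note on the unparseExprForValuesClause changes above: the VALUES clause is
materialized through a temporary table read back as text, which is why FALSE must
serialize as the empty string (UDFToBoolean treats any non-empty string as true) and
NULL as the "\N" marker. A sketch of the implied literal-to-text rendering, assuming
the LazySimpleSerDe default \001 field separator; the names here are illustrative,
not Hive's:

    // Hypothetical rendering of one VALUES row for the text-backed temp table.
    String[] fields = {
        "mary had a little lamb", // plain string literal
        "\\N",                    // NULL -> default text-format null marker
        ""                        // FALSE -> empty string; any non-empty string
                                  // would read back as true via UDFToBoolean
    };
    String row = String.join("\u0001", fields); // \001 assumed default separator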
Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java Tue Sep 23 19:30:11 2014
@@ -343,8 +343,10 @@ public class UpdateDeleteSemanticAnalyze

       // Add the setRCols to the input list
       for (String colName : setRCols) {
-        columnAccessInfo.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()),
+        if(columnAccessInfo != null) {//assuming this means we are not doing Auth
+          columnAccessInfo.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()),
             colName);
+        }
       }
     }

@@ -386,7 +388,7 @@ public class UpdateDeleteSemanticAnalyze
         setRCols.add(colName.getText());
       } else if (node.getChildren() != null) {
         for (Node n : node.getChildren()) {
-          addSetRCols(node, setRCols);
+          addSetRCols((ASTNode)n, setRCols);
         }
       }
     }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java Tue Sep 23 19:30:11 2014
@@ -52,6 +52,7 @@ public class DropTableDesc extends DDLDe
   ArrayList<PartSpec> partSpecs;
   boolean expectView;
   boolean ifExists;
+  boolean ifPurge;
   boolean ignoreProtection;

   public DropTableDesc() {
@@ -59,12 +60,14 @@ public class DropTableDesc extends DDLDe

   /**
    * @param tableName
+   * @param ifPurge
    */
-  public DropTableDesc(String tableName, boolean expectView, boolean ifExists) {
+  public DropTableDesc(String tableName, boolean expectView, boolean ifExists, boolean ifPurge) {
     this.tableName = tableName;
     this.partSpecs = null;
     this.expectView = expectView;
     this.ifExists = ifExists;
+    this.ifPurge = ifPurge;
     this.ignoreProtection = false;
   }

@@ -149,4 +152,19 @@ public class DropTableDesc extends DDLDe
   public void setIfExists(boolean ifExists) {
     this.ifExists = ifExists;
   }
+
+  /**
+   * @return whether Purge was specified
+   */
+  public boolean getIfPurge() {
+    return ifPurge;
+  }
+
+  /**
+   * @param ifPurge
+   *          set whether Purge was specified
+   */
+  public void setIfPurge(boolean ifPurge) {
+    this.ifPurge = ifPurge;
+  }
 }

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java Tue Sep 23 19:30:11 2014
@@ -19,12 +19,13 @@ package org.apache.hadoop.hive.ql.plan;

 import java.io.Serializable;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -305,15 +306,23 @@ public class TezWork extends AbstractOpe
       work.configureJobConf(jobConf);
     }
     String[] newTmpJars = jobConf.getStrings(MR_JAR_PROPERTY);
-    if (oldTmpJars != null && (oldTmpJars.length != 0)) {
-      if (newTmpJars != null && (newTmpJars.length != 0)) {
-        String[] combinedTmpJars = new String[newTmpJars.length + oldTmpJars.length];
-        System.arraycopy(oldTmpJars, 0, combinedTmpJars, 0, oldTmpJars.length);
-        System.arraycopy(newTmpJars, 0, combinedTmpJars, oldTmpJars.length, newTmpJars.length);
-        jobConf.setStrings(MR_JAR_PROPERTY, combinedTmpJars);
+    if (oldTmpJars != null || newTmpJars != null) {
+      String[] finalTmpJars;
+      if (oldTmpJars == null || oldTmpJars.length == 0) {
+        // Avoid a copy when oldTmpJars is null or empty
+        finalTmpJars = newTmpJars;
+      } else if (newTmpJars == null || newTmpJars.length == 0) {
+        // Avoid a copy when newTmpJars is null or empty
+        finalTmpJars = oldTmpJars;
       } else {
-        jobConf.setStrings(MR_JAR_PROPERTY, oldTmpJars);
+        // Both are non-empty, only copy now
+        finalTmpJars = new String[oldTmpJars.length + newTmpJars.length];
+        System.arraycopy(oldTmpJars, 0, finalTmpJars, 0, oldTmpJars.length);
+        System.arraycopy(newTmpJars, 0, finalTmpJars, oldTmpJars.length, newTmpJars.length);
       }
+
+      jobConf.setStrings(MR_JAR_PROPERTY, finalTmpJars);
+      return finalTmpJars;
     }
     return newTmpJars;
   }
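The rewritten jar handling above boils down to a copy-avoiding array concatenation;
distilled as a standalone sketch (the name mergeJars is mine, not Hive's):

    static String[] mergeJars(String[] oldJars, String[] newJars) {
      if (oldJars == null || oldJars.length == 0) {
        return newJars; // may still be null; the caller then leaves the conf as-is
      }
      if (newJars == null || newJars.length == 0) {
        return oldJars;
      }
      // Both sides contribute, so allocate exactly once and keep old-before-new order.
      String[] merged = new String[oldJars.length + newJars.length];
      System.arraycopy(oldJars, 0, merged, 0, oldJars.length);
      System.arraycopy(newJars, 0, merged, oldJars.length, newJars.length);
      return merged;
    }

For example, mergeJars(new String[]{"file:///tmp/foo1.jar"}, new String[]{"file:///tmp/foo2.jar"})
yields both entries in order, matching the "file:///tmp/foo1.jar,file:///tmp/foo2.jar"
expectation in the new TestTezWork tests further down.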
Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java Tue Sep 23 19:30:11 2014
@@ -26,6 +26,7 @@ import java.util.Random;

 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;

 import org.apache.hadoop.hive.conf.HiveConf;

 public class TestTezSessionPool {
@@ -157,4 +158,29 @@ public class TestTezSessionPool {
       }
     }
   }
+
+  @Test
+  public void testCloseAndOpenDefault() throws Exception {
+    poolManager = new TestTezSessionPoolManager();
+    TezSessionState session = Mockito.mock(TezSessionState.class);
+    Mockito.when(session.isDefault()).thenReturn(false);
+
+    poolManager.closeAndOpen(session, conf);
+
+    Mockito.verify(session).close(false);
+    Mockito.verify(session).open(conf, null);
+  }
+
+  @Test
+  public void testCloseAndOpenWithResources() throws Exception {
+    poolManager = new TestTezSessionPoolManager();
+    TezSessionState session = Mockito.mock(TezSessionState.class);
+    Mockito.when(session.isDefault()).thenReturn(false);
+    String[] extraResources = new String[] { "file:///tmp/foo.jar" };
+
+    poolManager.closeAndOpen(session, conf, extraResources);
+
+    Mockito.verify(session).close(false);
+    Mockito.verify(session).open(conf, extraResources);
+  }
 }

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java Tue Sep 23 19:30:11 2014
@@ -30,9 +30,11 @@ import static org.mockito.Mockito.when;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.LinkedHashMap;
-import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -204,9 +206,10 @@ public class TestTezTask {
   @Test
   public void testSubmit() throws Exception {
     DAG dag = DAG.create("test");
-    task.submit(conf, dag, path, appLr, sessionState, new LinkedList());
+    task.submit(conf, dag, path, appLr, sessionState, Collections.<LocalResource> emptyList(),
+        new String[0], Collections.<String,LocalResource> emptyMap());
     // validate close/reopen
-    verify(sessionState, times(1)).open(any(HiveConf.class));
+    verify(sessionState, times(1)).open(any(HiveConf.class), any(String[].class));
     verify(sessionState, times(1)).close(eq(false)); // now uses pool after HIVE-7043
     verify(session, times(2)).submitDAG(any(DAG.class));
   }
@@ -216,4 +219,54 @@ public class TestTezTask {
     task.close(work, 0);
     verify(op, times(4)).jobClose(any(Configuration.class), eq(true));
   }
+
+  @Test
+  public void testExistingSessionGetsStorageHandlerResources() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+    when(sessionState.isOpen()).thenReturn(true);
+    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
+    task.updateSession(sessionState, conf, path, inputOutputJars, resMap);
+    verify(session).addAppMasterLocalFiles(resMap);
+  }
+
+  @Test
+  public void testExtraResourcesAddedToDag() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+    DAG dag = mock(DAG.class);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+    when(sessionState.isOpen()).thenReturn(true);
+    when(sessionState.hasResources(inputOutputJars)).thenReturn(false);
+    task.addExtraResourcesToDag(sessionState, dag, inputOutputJars, resMap);
+    verify(dag).addTaskLocalFiles(resMap);
+  }
+
+  @Test
+  public void testGetExtraLocalResources() throws Exception {
+    final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"};
+    LocalResource res = mock(LocalResource.class);
+    final List<LocalResource> resources = Collections.singletonList(res);
+    final Map<String,LocalResource> resMap = new HashMap<String,LocalResource>();
+    resMap.put("foo.jar", res);
+
+    when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars))
+        .thenReturn(resources);
+    when(utils.getBaseName(res)).thenReturn("foo.jar");
+
+    assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars));
+  }
 }

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java Tue Sep 23 19:30:11 2014
@@ -1633,7 +1633,7 @@ public class TestInputOutputFormat {
       assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0",
           combineSplit.getPath(bucket).toString());
       assertEquals(0, combineSplit.getOffset(bucket));
-      assertEquals(227, combineSplit.getLength(bucket));
+      assertEquals(225, combineSplit.getLength(bucket));
     }
     String[] hosts = combineSplit.getLocations();
     assertEquals(2, hosts.length);

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java Tue Sep 23 19:30:11 2014
@@ -335,6 +335,104 @@ public class TestNewIntegerEncoding {
   }

   @Test
+  public void testDeltaOverflow() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{4513343538618202719l, 4513343538618202711l,
+        2911390882471569739l,
+        -9181829309989854913l};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
+  public void testDeltaOverflow2() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{Long.MAX_VALUE, 4513343538618202711l,
+        2911390882471569739l,
+        Long.MIN_VALUE};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
+  public void testDeltaOverflow3() throws Exception {
+    ObjectInspector inspector;
+    synchronized (TestOrcFile.class) {
+      inspector = ObjectInspectorFactory
+          .getReflectionObjectInspector(Long.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+
+    long[] inp = new long[]{-4513343538618202711l, -2911390882471569739l, -2,
+        Long.MAX_VALUE};
+    List<Long> input = Lists.newArrayList(Longs.asList(inp));
+
+    Writer writer = OrcFile.createWriter(
+        testFilePath,
+        OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000)
+            .compress(CompressionKind.NONE).bufferSize(10000));
+    for (Long l : input) {
+      writer.addRow(l);
+    }
+    writer.close();
+
+    Reader reader = OrcFile
+        .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
+    RecordReader rows = reader.rows();
+    int idx = 0;
+    while (rows.hasNext()) {
+      Object row = rows.next(null);
+      assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get());
+    }
+  }
+
+  @Test
   public void testIntegerMin() throws Exception {
     ObjectInspector inspector;
     synchronized (TestOrcFile.class) {

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java Tue Sep 23 19:30:11 2014
@@ -1754,9 +1754,9 @@ public class TestOrcFile {
           stripe.getDataLength() < 5000);
     }
     // with HIVE-7832, the dictionaries will be disabled after writing the first
-    // stripe as there are too many distinct values. Hence only 3 stripes as
+    // stripe as there are too many distinct values. Hence only 4 stripes as
     // compared to 25 stripes in version 0.11 (above test case)
-    assertEquals(3, i);
+    assertEquals(4, i);
     assertEquals(2500, reader.getNumberOfRows());
   }

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java Tue Sep 23 19:30:11 2014
@@ -17,15 +17,18 @@
  */
 package org.apache.hadoop.hive.ql.io.orc;

-import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;

 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
 import java.math.BigInteger;

-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import org.junit.Test;
+
+import com.google.common.math.LongMath;

 public class TestSerializationUtils {
@@ -112,6 +115,47 @@ public class TestSerializationUtils {
         SerializationUtils.readBigInteger(fromBuffer(buffer)));
   }

+  @Test
+  public void testSubtractionOverflow() {
+    // cross check results with Guava results below
+    SerializationUtils utils = new SerializationUtils();
+    assertEquals(false, utils.isSafeSubtract(22222222222L, Long.MIN_VALUE));
+    assertEquals(false, utils.isSafeSubtract(-22222222222L, Long.MAX_VALUE));
+    assertEquals(false, utils.isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE));
+    assertEquals(true, utils.isSafeSubtract(-1553103058346370095L, 6553103058346370095L));
+    assertEquals(true, utils.isSafeSubtract(0, Long.MAX_VALUE));
+    assertEquals(true, utils.isSafeSubtract(Long.MIN_VALUE, 0));
+  }
+
+  @Test
+  public void testSubtractionOverflowGuava() {
+    try {
+      LongMath.checkedSubtract(22222222222L, Long.MIN_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    try {
+      LongMath.checkedSubtract(-22222222222L, Long.MAX_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    try {
+      LongMath.checkedSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
+      fail("expected ArithmeticException for overflow");
+    } catch (ArithmeticException ex) {
+      assertEquals(ex.getMessage(), "overflow");
+    }
+
+    assertEquals(-8106206116692740190L,
+        LongMath.checkedSubtract(-1553103058346370095L, 6553103058346370095L));
+    assertEquals(-Long.MAX_VALUE, LongMath.checkedSubtract(0, Long.MAX_VALUE));
+    assertEquals(Long.MIN_VALUE, LongMath.checkedSubtract(Long.MIN_VALUE, 0));
+  }
+
   public static void main(String[] args) throws Exception {
     TestSerializationUtils test = new TestSerializationUtils();
     test.testDoubles();
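The expected values in testSubtractionOverflow above pin down the overflow predicate;
one way to implement it, mirroring the condition behind Guava's LongMath.checkedSubtract
(the method name matches the SerializationUtils method under test, but this body is an
illustration, not necessarily Hive's implementation):

    static boolean isSafeSubtract(long left, long right) {
      long result = left - right;
      // Overflow is only possible when the operands have different signs and
      // the result's sign differs from the sign of the left operand.
      return (left ^ right) >= 0 || (left ^ result) >= 0;
    }

For instance, isSafeSubtract(22222222222L, Long.MIN_VALUE) returns false (the true
difference exceeds Long.MAX_VALUE), while isSafeSubtract(Long.MIN_VALUE, 0) returns
true, matching the assertions above.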
Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java Tue Sep 23 19:30:11 2014
@@ -18,9 +18,12 @@

 package org.apache.hadoop.hive.ql.metadata;

+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;

 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -28,7 +31,10 @@ import java.util.Map;

 import junit.framework.TestCase;

+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.TrashPolicy;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -63,6 +69,9 @@ public class TestHive extends TestCase {
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
+    // enable trash so it can be tested
+    hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
+    hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
     SessionState.start(hiveConf);
     try {
       hm = Hive.get(hiveConf);
@@ -79,6 +88,9 @@ public class TestHive extends TestCase {
   protected void tearDown() throws Exception {
     try {
       super.tearDown();
+      // disable trash
+      hiveConf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, 30);
+      hiveConf.setFloat(FS_TRASH_INTERVAL_KEY, 30);
       Hive.closeCurrent();
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
@@ -294,7 +306,7 @@ public class TestHive extends TestCase {
     try {
       String dbName = "db_for_testgettables";
       String table1Name = "table1";
-      hm.dropDatabase(dbName, true, true);
+      hm.dropDatabase(dbName, true, true, true);

       Database db = new Database();
       db.setName(dbName);
@@ -330,16 +342,92 @@ public class TestHive extends TestCase {

       // Drop all tables
       for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
         hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
       }
       hm.dropDatabase(dbName);
     } catch (Throwable e) {
       System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testGetTables() failed");
+      System.err.println("testGetAndDropTables() failed");
       throw e;
     }
   }

+  public void testDropTableTrash() throws Throwable {
+    try {
+      String dbName = "db_for_testdroptable";
+      hm.dropDatabase(dbName, true, true, true);
+
+      Database db = new Database();
+      db.setName(dbName);
+      hm.createDatabase(db);
+
+      List<String> ts = new ArrayList<String>(2);
+      String tableBaseName = "droptable";
+      ts.add(tableBaseName + "1");
+      ts.add(tableBaseName + "2");
+      Table tbl1 = createTestTable(dbName, ts.get(0));
+      hm.createTable(tbl1);
+      Table tbl2 = createTestTable(dbName, ts.get(1));
+      hm.createTable(tbl2);
+      // test dropping tables and trash behavior
+      Table table1 = hm.getTable(dbName, ts.get(0));
+      assertNotNull(table1);
+      assertEquals(ts.get(0), table1.getTableName());
+      Path path1 = table1.getPath();
+      FileSystem fs = path1.getFileSystem(hiveConf);
+      assertTrue(fs.exists(path1));
+      // drop table and check that trash works
+      TrashPolicy tp = TrashPolicy.getInstance(hiveConf, fs, fs.getHomeDirectory());
+      assertNotNull("TrashPolicy instance should not be null", tp);
+      assertTrue("TrashPolicy is not enabled for filesystem: " + fs.getUri(), tp.isEnabled());
+      Path trashDir = tp.getCurrentTrashDir();
+      assertNotNull("trash directory should not be null", trashDir);
+      Path trash1 = Path.mergePaths(trashDir, path1);
+      Path pathglob = trash1.suffix("*");
+      FileStatus before[] = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(0));
+      assertFalse(fs.exists(path1));
+      FileStatus after[] = fs.globStatus(pathglob);
+      assertTrue("trash dir before and after DROP TABLE noPURGE are not different",
+          before.length != after.length);
+
+      // drop a table without saving to trash by setting the purge option
+      Table table2 = hm.getTable(dbName, ts.get(1));
+      assertNotNull(table2);
+      assertEquals(ts.get(1), table2.getTableName());
+      Path path2 = table2.getPath();
+      assertTrue(fs.exists(path2));
+      Path trash2 = Path.mergePaths(trashDir, path2);
+      System.out.println("trashDir2 is " + trash2);
+      pathglob = trash2.suffix("*");
+      before = fs.globStatus(pathglob);
+      hm.dropTable(dbName, ts.get(1), true, true, true); // deleteData, ignoreUnknownTable, ifPurge
+      assertFalse(fs.exists(path2));
+      after = fs.globStatus(pathglob);
+      Arrays.sort(before);
+      Arrays.sort(after);
+      assertEquals("trash dir before and after DROP TABLE PURGE are different",
+          before.length, after.length);
+      assertTrue("trash dir before and after DROP TABLE PURGE are different",
+          Arrays.equals(before, after));
+
+      // Drop all tables
+      for (String tableName : hm.getAllTables(dbName)) {
+        Table table = hm.getTable(dbName, tableName);
+        hm.dropTable(dbName, tableName);
+        assertFalse(fs.exists(table.getPath()));
+      }
+      hm.dropDatabase(dbName);
+    } catch (Throwable e) {
+      System.err.println(StringUtils.stringifyException(e));
+      System.err.println("testDropTableTrash() failed");
+      throw e;
+    }
+  }
+
   public void testPartition() throws Throwable {
     try {
       String tableName = "table_for_testpartition";

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java Tue Sep 23 19:30:11 2014
@@ -84,6 +84,13 @@ public class TestHiveRemote extends Test
   }

   /**
+   * Cannot control trash in remote metastore, so skip this test
+   */
+  @Override
+  public void testDropTableTrash() {
+  }
+
+  /**
    * Finds a free port.
    *
    * @return a free port

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java Tue Sep 23 19:30:11 2014
@@ -24,6 +24,9 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;

+/**
+ * various Parser tests for INSERT/UPDATE/DELETE
+ */
 public class TestIUD {
   private static HiveConf conf;
@@ -102,6 +105,18 @@ public class TestIUD {
       ast.toStringTree());
   }
   @Test
+  public void testUpdateWithWhereSingleSetExpr() throws ParseException {
+    ASTNode ast = parse("UPDATE src SET key = -3+(5*9)%8, val = cast(6.1 + c as INT), d = d - 1 WHERE value IS NULL");
+    Assert.assertEquals("AST doesn't match",
+      "(TOK_UPDATE_TABLE (TOK_TABNAME src) " +
+        "(TOK_SET_COLUMNS_CLAUSE " +
+        "(= (TOK_TABLE_OR_COL key) (+ (- 3) (% (* 5 9) 8))) " +
+        "(= (TOK_TABLE_OR_COL val) (TOK_FUNCTION TOK_INT (+ 6.1 (TOK_TABLE_OR_COL c)))) " +
+        "(= (TOK_TABLE_OR_COL d) (- (TOK_TABLE_OR_COL d) 1))) " +
+        "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (TOK_TABLE_OR_COL value))))",
+      ast.toStringTree());
+  }
+  @Test
   public void testUpdateWithWhereMultiSet() throws ParseException {
     ASTNode ast = parse("UPDATE src SET key = 3, value = 8 WHERE VALUE = 1230997");
     Assert.assertEquals("AST doesn't match",
@@ -207,13 +222,13 @@ public class TestIUD {
   }
   @Test
   public void testInsertIntoTableFromAnonymousTable() throws ParseException {
-    ASTNode ast = parse("insert into table page_view values(1,2),(3,4)");
+    ASTNode ast = parse("insert into table page_view values(-1,2),(3,+4)");
     Assert.assertEquals("AST doesn't match",
       "(TOK_QUERY " +
         "(TOK_FROM " +
         "(TOK_VIRTUAL_TABLE " +
         "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " +
-        "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " +
+        "(TOK_VALUES_TABLE (TOK_VALUE_ROW (- 1) 2) (TOK_VALUE_ROW 3 (+ 4))))) " +
       "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " +
       "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))",
       ast.toStringTree());

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java Tue Sep 23 19:30:11 2014
@@ -198,7 +198,7 @@ public class TestUpdateDeleteSemanticAna
   @Test
   public void testInsertValues() throws Exception {
     try {
-      ReturnInfo rc = parseAndAnalyze("insert into table T values ('abc', 3), ('ghi', 5)",
+      ReturnInfo rc = parseAndAnalyze("insert into table T values ('abc', 3), ('ghi', null)",
           "testInsertValues");

       LOG.info(explain((SemanticAnalyzer)rc.sem, rc.plan, rc.ast.dump()));

Modified: hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java (original)
+++ hive/branches/cbo/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java Tue Sep 23 19:30:11 2014
@@ -23,11 +23,16 @@ import java.util.List;

 import junit.framework.Assert;

 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
+import org.apache.hadoop.mapred.JobConf;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;

 public class TestTezWork {

+  private static final String MR_JAR_PROPERTY = "tmpjars";
   private List<BaseWork> nodes;
   private TezWork work;

@@ -156,4 +161,75 @@ public class TestTezWork {
       Assert.assertEquals(sorted.get(i), nodes.get(4-i));
     }
   }
+
+  @Test
+  public void testConfigureJars() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.set(MR_JAR_PROPERTY, "file:///tmp/foo2.jar");
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsNoExtraJars() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsWithNull() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar");
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.unset(MR_JAR_PROPERTY);
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY));
+  }
+
+  @Test
+  public void testConfigureJarsStartingWithNull() throws Exception {
+    final JobConf conf = new JobConf();
+    conf.unset(MR_JAR_PROPERTY);
+    BaseWork baseWork = Mockito.mock(BaseWork.class);
+    Mockito.doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        conf.setStrings(MR_JAR_PROPERTY, "file:///tmp/foo1.jar", "file:///tmp/foo2.jar");
+        return null;
+      }
+
+    }).when(baseWork).configureJobConf(conf);
+
+    work.add(baseWork);
+    work.configureJobConfAndExtractJars(conf);
+    Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY));
+  }
 }

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/acid_vectorization.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/acid_vectorization.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/acid_vectorization.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/acid_vectorization.q Tue Sep 23 19:30:11 2014
@@ -4,7 +4,6 @@ set hive.input.format=org.apache.hadoop.
 set hive.enforce.bucketing=true;
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.vectorized.execution.enabled=true;
-set hive.mapred.supports.subdirectories=true;

 CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC;
 insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.exec.reducers.max = 1;

 create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/delete_all_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/delete_where_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/delete_whole_partition.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/delete_whole_partition.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/delete_whole_partition.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/delete_whole_partition.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_update_delete.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_update_delete.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_update_delete.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_update_delete.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q Tue Sep 23 19:30:11 2014
@@ -3,7 +3,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table ivdp(i int,
                   de decimal(5,2),

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q Tue Sep 23 19:30:11 2014
@@ -12,12 +12,14 @@ create table acid_ivnp(ti tinyint,
                        de decimal(5,2),
                        t timestamp,
                        dt date,
+                       b boolean,
                        s string,
                        vc varchar(128),
                        ch char(12)) clustered by (i) into 2 buckets stored as orc;

 insert into table acid_ivnp values
-    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
-    (3, 25, 6553, 429496729, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', 'its fleece was white as snow', 'a pocket full of posies', 'blue' );
+    (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
+    (null, null, null, null, null, null, null, null, null, null, null, null, null),
+    (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' );

-select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti;
+select * from acid_ivnp order by ti;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.exec.dynamic.partition.mode=nonstrict;

 create table acid_ivp(ti tinyint,
                       si smallint,

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q Tue Sep 23 19:30:11 2014
@@ -7,6 +7,7 @@ create temporary table acid_ivtt(i int,

 insert into table acid_ivtt values
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow');
+    (429496729, 0.14, 'its fleece was white as snow'),
+    (-29496729, -0.14, 'negative values test');

 select i, de, vc from acid_ivtt order by i;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Tue Sep 23 19:30:11 2014
@@ -3,7 +3,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_uami(i int,
                        de decimal(5,2),

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q Tue Sep 23 19:30:11 2014
@@ -53,4 +53,11 @@ update acid_uat set

 select * from acid_uat order by i;

+update acid_uat set
+  ti = ti * 2,
+  si = cast(f as int),
+  d = floor(de)
+  where s = 'aw724t8c5558x2xneC624';
+
+select * from acid_uat order by i;

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q Tue Sep 23 19:30:11 2014
@@ -2,7 +2,6 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
-set hive.mapred.supports.subdirectories=true;

 create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
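One cleanup running through the ACID .q files above: the per-test
`set hive.mapred.supports.subdirectories=true;` lines appear to be removed because the
SemanticAnalyzer hunk earlier in this commit now applies the equivalent settings for
every ACID write. Copied from that hunk for reference:

    // From checkAcidConstraints, applied automatically on ACID writes, which
    // is what seems to make the per-test set-commands above redundant.
    conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, false);
    conf.setBoolVar(ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES, true);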
little lamb', 'ring around the rosie', 'red'), + (null, null, null, null, null, null, null, null, null, null, null, null, null), + (3, 25, 6553, null, 0.14, 1923.141592654, 1.2301, '2014-08-24 17:21:30.0', '2014-08-26', false, 'its fleece was white as snow', 'a pocket full of posies', 'blue' ); -select ti, si, i, bi, f, d, de, t, dt, s, vc, ch from acid_ivnp order by ti; +select * from acid_ivnp order by ti; Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_partitioned.q Tue Sep 23 19:30:11 2014 @@ -2,7 +2,6 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.exec.dynamic.partition.mode=nonstrict; create table acid_ivp(ti tinyint, si smallint, Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/insert_values_tmp_table.q Tue Sep 23 19:30:11 2014 @@ -7,6 +7,7 @@ create temporary table acid_ivtt(i int, insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'), - (429496729, 0.14, 'its fleece was white as snow'); + (429496729, 0.14, 'its fleece was white as snow'), + (-29496729, -0.14, 'negative values test'); select i, de, vc from acid_ivtt order by i; Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q Tue Sep 23 19:30:11 2014 @@ -3,7 +3,6 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; create table acid_uami(i int, de decimal(5,2), Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_partitioned.q Tue Sep 23 19:30:11 2014 @@ -2,7 +2,6 @@ set 
hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_all_types.q Tue Sep 23 19:30:11 2014 @@ -53,4 +53,11 @@ update acid_uat set select * from acid_uat order by i; +update acid_uat set + ti = ti * 2, + si = cast(f as int), + d = floor(de) + where s = 'aw724t8c5558x2xneC624'; + +select * from acid_uat order by i; Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/update_where_partitioned.q Tue Sep 23 19:30:11 2014 @@ -2,7 +2,6 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -set hive.mapred.supports.subdirectories=true; create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q?rev=1627125&r1=1627124&r2=1627125&view=diff ============================================================================== --- hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q (original) +++ hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q Tue Sep 23 19:30:11 2014 @@ -1,4 +1,180 @@ SET hive.vectorized.execution.enabled=true; + +-- Use ORDER BY clauses to generate 2 stages. 
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/vectorization_0.q Tue Sep 23 19:30:11 2014
@@ -1,4 +1,180 @@
 SET hive.vectorized.execution.enabled=true;
+
+-- Use ORDER BY clauses to generate 2 stages.
+EXPLAIN
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(ctinyint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(ctinyint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cbigint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cbigint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cfloat) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cfloat) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT AVG(cbigint),
+       (-(AVG(cbigint))),
+       (-6432 + AVG(cbigint)),
+       STDDEV_POP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))),
+       VAR_SAMP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       (-6432 + (-((-6432 + AVG(cbigint))))),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))),
+       COUNT(*),
+       SUM(cfloat),
+       (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)),
+       (-(VAR_SAMP(cbigint))),
+       ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))),
+       MIN(ctinyint),
+       (-(MIN(ctinyint)))
+FROM alltypesorc
+WHERE (((cstring2 LIKE '%b%')
+        OR ((79.553 != cint)
+            OR (cbigint < cdouble)))
+       OR ((ctinyint >= csmallint)
+           AND ((cboolean2 = 1)
+                AND (3569 = ctinyint))));
+
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_date_funcs.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_date_funcs.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_date_funcs.q Tue Sep 23 19:30:11 2014
@@ -122,4 +122,20 @@ SELECT
 FROM date_udf_flight_orc LIMIT 10;
 
 -- Test extracting the date part of expression that includes time
-SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
\ No newline at end of file
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
+
+EXPLAIN SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
+
+SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
\ No newline at end of file
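Each aggregate above is exercised as an EXPLAIN/SELECT pair: the EXPLAIN pins the plan shape in the expected output, the SELECT checks results, and the trailing ORDER BY forces a second (reduce) stage so reduce-side vectorization is covered as well. A sketch of the same check outside the test harness; the "Execution mode: vectorized" marker is what Hive plans of this era print for vectorized work, though exact plan text varies by version and engine:

  SET hive.vectorized.execution.enabled=true;

  EXPLAIN
  SELECT MIN(fl_date) AS c1
  FROM date_udf_flight_orc
  ORDER BY c1;
  -- Expect the map-side (and, where supported, reduce-side) work in the
  -- printed plan to carry a line like:
  --   Execution mode: vectorized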
Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q Tue Sep 23 19:30:11 2014
@@ -1,6 +1,7 @@
-SET hive.vectorized.execution.enabled = true;
-
 -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end.
+-- Turning on vectorization has been temporarily moved after filling the test table
+-- due to bug HIVE-8197.
+
 CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string)
 STORED AS ORC;
@@ -11,6 +12,8 @@ SELECT
 FROM alltypesorc LIMIT 40;
 
+SET hive.vectorized.execution.enabled = true;
+
 CREATE TABLE alltypesorc_wrong(stimestamp1 string) STORED AS ORC;
 
 INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1;
@@ -122,3 +125,48 @@ SELECT
   second(stimestamp1)
 FROM alltypesorc_wrong
 ORDER BY c1;
+
+EXPLAIN SELECT
+  min(ctimestamp1),
+  max(ctimestamp1),
+  count(ctimestamp1),
+  count(*)
+FROM alltypesorc_string;
+
+SELECT
+  min(ctimestamp1),
+  max(ctimestamp1),
+  count(ctimestamp1),
+  count(*)
+FROM alltypesorc_string;
+
+-- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)...
+EXPLAIN SELECT
+  sum(ctimestamp1)
+FROM alltypesorc_string;
+
+SELECT
+  sum(ctimestamp1)
+FROM alltypesorc_string;
+
+EXPLAIN SELECT
+  avg(ctimestamp1),
+  variance(ctimestamp1),
+  var_pop(ctimestamp1),
+  var_samp(ctimestamp1),
+  std(ctimestamp1),
+  stddev(ctimestamp1),
+  stddev_pop(ctimestamp1),
+  stddev_samp(ctimestamp1)
+FROM alltypesorc_string;
+
+SELECT
+  avg(ctimestamp1),
+  variance(ctimestamp1),
+  var_pop(ctimestamp1),
+  var_samp(ctimestamp1),
+  std(ctimestamp1),
+  stddev(ctimestamp1),
+  stddev_pop(ctimestamp1),
+  stddev_samp(ctimestamp1)
+FROM alltypesorc_string;
\ No newline at end of file

Modified: hive/branches/cbo/ql/src/test/resources/orc-file-dump-dictionary-threshold.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/resources/orc-file-dump-dictionary-threshold.out?rev=1627125&r1=1627124&r2=1627125&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/resources/orc-file-dump-dictionary-threshold.out (original)
+++ hive/branches/cbo/ql/src/test/resources/orc-file-dump-dictionary-threshold.out Tue Sep 23 19:30:11 2014
@@ -38,7 +38,7 @@ File Statistics:
   Column 3: count: 21000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 6910238
 
 Stripes:
-  Stripe: offset: 3 data: 151109 rows: 5000 tail: 68 index: 704
+  Stripe: offset: 3 data: 151108 rows: 5000 tail: 68 index: 704
     Stream: column 0 section ROW_INDEX start: 3 length 15
     Stream: column 1 section ROW_INDEX start: 18 length 156
     Stream: column 2 section ROW_INDEX start: 174 length 172
@@ -46,7 +46,7 @@ Stripes:
     Stream: column 1 section DATA start: 707 length 20029
     Stream: column 2 section DATA start: 20736 length 40035
     Stream: column 3 section DATA start: 60771 length 86757
-    Stream: column 3 section LENGTH start: 147528 length 4288
+    Stream: column 3 section LENGTH start: 147528 length 4287
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -65,19 +65,19 @@ Stripes:
       Entry 4: count: 1000 min: -9216505819108477308 max: 9196474183833079923 positions: 20006,8686,416
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness,-230 max: worst-54-290-346-648-908-996 positions: 0,0,0,0,0
-      Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,696,18
-      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1555,14
-      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2373,90
-      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3355,108
-  Stripe: offset: 151884 data: 336358 rows: 5000 tail: 69 index: 941
-    Stream: column 0 section ROW_INDEX start: 151884 length 15
-    Stream: column 1 section ROW_INDEX start: 151899 length 150
-    Stream: column 2 section ROW_INDEX start: 152049 length 167
-    Stream: column 3 section ROW_INDEX start: 152216 length 609
-    Stream: column 1 section DATA start: 152825 length 20029
-    Stream: column 2 section DATA start: 172854 length 40035
-    Stream: column 3 section DATA start: 212889 length 270789
-    Stream: column 3 section LENGTH start: 483678 length 5505
+      Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,695,18
+      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1554,14
+      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2372,90
+      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3354,108
+  Stripe: offset: 151883 data: 336358 rows: 5000 tail: 69 index: 941
+    Stream: column 0 section ROW_INDEX start: 151883 length 15
+    Stream: column 1 section ROW_INDEX start: 151898 length 150
+    Stream: column 2 section ROW_INDEX start: 152048 length 167
+    Stream: column 3 section ROW_INDEX start: 152215 length 609
+    Stream: column 1 section DATA start: 152824 length 20029
+    Stream: column 2 section DATA start: 172853 length 40035
+    Stream: column 3 section DATA start: 212888 length 270789
+    Stream: column 3 section LENGTH start: 483677 length 5505
     Encoding column 0: DIRECT
     Encoding column 1: DIRECT_V2
     Encoding column 2: DIRECT_V2
@@ -100,15 +100,15 @@ Stripes:
      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988 positions: 80822,9756,0,1945,222
      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984 positions: 137149,4496,0,3268,48
      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938 positions: 197972,6590,0,4064,342
-  Stripe: offset: 489252 data: 558031 rows: 5000 tail: 69 index: 1169
-    Stream: column 0 section ROW_INDEX start: 489252 length 15
-    Stream: column 1 section ROW_INDEX start: 489267 length 159
-    Stream: column 2 section ROW_INDEX start: 489426 length 169
-    Stream: column 3 section ROW_INDEX start: 489595 length 826
-    Stream: column 1 section DATA start: 490421 length 20029
-    Stream: column 2 section DATA start: 510450 length 40035
-    Stream: column 3 section DATA start: 550485 length 492258
-    Stream: column 3 section LENGTH start: 1042743 length 5709
+  Stripe: offset: 489251 data: 558031 rows: 5000 tail: 69 index: 1169
+    Stream: column 0 section ROW_INDEX start: 489251 length 15
+    Stream: column 1 section ROW_INDEX start: 489266 length 159
+    Stream: column 2 section ROW_INDEX start: 489425 length 169
+    Stream: column 3 section ROW_INDEX start: 489594 length 826
+    Stream: column 1 section DATA start: 490420 length 20029
+    Stream: column 2 section DATA start: 510449 length 40035
+    Stream: column 3 section DATA start: 550484 length 492258
+    Stream: column 3 section LENGTH start: 1042742 length 5709
    Encoding column 0: DIRECT
    Encoding column 1: DIRECT_V2
    Encoding column 2: DIRECT_V2
@@ -131,15 +131,15 @@ Stripes:
      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976 positions: 170641,3422,0,2077,162
      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766 positions: 268420,9960,0,3369,16
      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974 positions: 377916,1620,0,4041,470
-  Stripe: offset: 1048521 data: 792850 rows: 5000 tail: 69 index: 1351
-    Stream: column 0 section ROW_INDEX start: 1048521 length 15
-    Stream: column 1 section ROW_INDEX start: 1048536 length 149
-    Stream: column 2 section ROW_INDEX start: 1048685 length 170
-    Stream: column 3 section ROW_INDEX start: 1048855 length 1017
-    Stream: column 1 section DATA start: 1049872 length 20029
-    Stream: column 2 section DATA start: 1069901 length 40035
-    Stream: column 3 section DATA start: 1109936 length 727038
-    Stream: column 3 section LENGTH start: 1836974 length 5748
+  Stripe: offset: 1048520 data: 792850 rows: 5000 tail: 69 index: 1351
+    Stream: column 0 section ROW_INDEX start: 1048520 length 15
+    Stream: column 1 section ROW_INDEX start: 1048535 length 149
+    Stream: column 2 section ROW_INDEX start: 1048684 length 170
+    Stream: column 3 section ROW_INDEX start: 1048854 length 1017
+    Stream: column 1 section DATA start: 1049871 length 20029
+    Stream: column 2 section DATA start: 1069900 length 40035
+    Stream: column 3 section DATA start: 1109935 length 727038
+    Stream: column 3 section LENGTH start: 1836973 length 5748
    Encoding column 0: DIRECT
    Encoding column 1: DIRECT_V2
    Encoding column 2: DIRECT_V2
@@ -162,15 +162,15 @@ Stripes:
      Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878 positions: 263111,206,0,1926,462
      Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788 positions: 407371,8480,0,3444,250
      Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904 positions: 562094,3058,0,4643,292
-  Stripe: offset: 1842791 data: 188033 rows: 1000 tail: 67 index: 832
-    Stream: column 0 section ROW_INDEX start: 1842791 length 10
-    Stream: column 1 section ROW_INDEX start: 1842801 length 36
-    Stream: column 2 section ROW_INDEX start: 1842837 length 39
-    Stream: column 3 section ROW_INDEX start: 1842876 length 747
-    Stream: column 1 section DATA start: 1843623 length 4007
-    Stream: column 2 section DATA start: 1847630 length 8007
-    Stream: column 3 section DATA start: 1855637 length 174759
-    Stream: column 3 section LENGTH start: 2030396 length 1260
+  Stripe: offset: 1842790 data: 188033 rows: 1000 tail: 67 index: 832
+    Stream: column 0 section ROW_INDEX start: 1842790 length 10
+    Stream: column 1 section ROW_INDEX start: 1842800 length 36
+    Stream: column 2 section ROW_INDEX start: 1842836 length 39
+    Stream: column 3 section ROW_INDEX start: 1842875 length 747
+    Stream: column 1 section DATA start: 1843622 length 4007
+    Stream: column 2 section DATA start: 1847629 length 8007
+    Stream: column 3 section DATA start: 1855636 length 174759
+    Stream: column 3 section LENGTH start: 2030395 length 1260
    Encoding column 0: DIRECT
    Encoding column 1: DIRECT_V2
    Encoding column 2: DIRECT_V2
@@ -182,6 +182,6 @@ Stripes:
     Row group index column 3:
       Entry 0: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 positions: 0,0,0,0,0
 
-File length: 2033559 bytes
+File length: 2033557 bytes
 Padding length: 0 bytes
 Padding ratio: 0%
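The churn in this expected-output file is a small cascade: the first stripe's column 3 LENGTH stream shrinks from 4288 to 4287 bytes (stripe data 151109 -> 151108), every subsequent stripe offset and stream start shifts down by one byte, and the recorded file length drops from 2033559 to 2033557. To regenerate a dump like this resource file against a local ORC file, the ORC file-dump tool can be used; the path below is a placeholder, not a file from this commit:

  hive --orcfiledump /tmp/orc/warehouse/some_table/000000_0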
