This is an automated email from the ASF dual-hosted git repository.
kunalkapoor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new f5e35cd [CARBONDATA-4097] ColumnVectors should not be initialized as
ColumnVectorWrapperDirect for alter tables
f5e35cd is described below
commit f5e35cd039109af3bbadd2375205e3c608052027
Author: Karan980 <[email protected]>
AuthorDate: Tue Dec 22 18:51:23 2020 +0530
[CARBONDATA-4097] ColumnVectors should not be initialized as
ColumnVectorWrapperDirect for alter tables
Why is this PR needed?
Direct filling of column vectors is not allowed for altered tables,
but their column vectors were still getting initialized as
ColumnVectorWrapperDirect.
What changes were proposed in this PR?
Changed the initialization of column vectors to ColumnVectorWrapper
for alter tables.
This closes #4062
---
.../scan/executor/impl/AbstractQueryExecutor.java | 4 ++--
.../core/scan/executor/util/RestructureUtil.java | 7 +++++--
.../scan/executor/util/RestructureUtilTest.java | 8 ++++++--
.../vectorreader/AddColumnTestCases.scala | 23 ++++++++++++++++++++++
4 files changed, 36 insertions(+), 6 deletions(-)
diff --git
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 3822f3e..72e3b31 100644
---
a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++
b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -445,7 +445,7 @@ public abstract class AbstractQueryExecutor<E> implements
QueryExecutor<E> {
blockExecutionInfo.getActualQueryDimensions(),
segmentProperties.getDimensions(),
segmentProperties.getComplexDimensions(),
blockExecutionInfo.getActualQueryMeasures().length,
- queryModel.getTable().getTableInfo().isTransactionalTable());
+ queryModel.getTable().getTableInfo().isTransactionalTable(),
queryModel);
boolean isStandardTable =
CarbonUtil.isStandardCarbonTable(queryModel.getTable());
String blockId = CarbonUtil
.getBlockId(queryModel.getAbsoluteTableIdentifier(), filePath,
segment.getSegmentNo(),
@@ -461,7 +461,7 @@ public abstract class AbstractQueryExecutor<E> implements
QueryExecutor<E> {
List<ProjectionMeasure> projectionMeasures = RestructureUtil
.createMeasureInfoAndGetCurrentBlockQueryMeasures(blockExecutionInfo,
blockExecutionInfo.getActualQueryMeasures(),
segmentProperties.getMeasures(),
- queryModel.getTable().getTableInfo().isTransactionalTable());
+ queryModel.getTable().getTableInfo().isTransactionalTable(),
queryModel);
blockExecutionInfo.setProjectionMeasures(
projectionMeasures.toArray(new
ProjectionMeasure[projectionMeasures.size()]));
blockExecutionInfo.setDataBlock(blockIndex);
diff --git
a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
index d08ff1b..2f2cdfe 100644
---
a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
+++
b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
@@ -67,7 +67,7 @@ public class RestructureUtil {
public static List<ProjectionDimension>
createDimensionInfoAndGetCurrentBlockQueryDimension(
BlockExecutionInfo blockExecutionInfo, ProjectionDimension[]
queryDimensions,
List<CarbonDimension> tableBlockDimensions, List<CarbonDimension>
tableComplexDimension,
- int measureCount, boolean isTransactionalTable) {
+ int measureCount, boolean isTransactionalTable, QueryModel queryModel) {
List<ProjectionDimension> presentDimension =
new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
boolean[] isDimensionExists = new boolean[queryDimensions.length];
@@ -133,6 +133,7 @@ public class RestructureUtil {
if (!isDimensionExists[dimIndex]) {
defaultValues[dimIndex] =
validateAndGetDefaultValue(queryDimension.getDimension());
blockExecutionInfo.setRestructuredBlock(true);
+ queryModel.setDirectVectorFill(false);
// set the flag to say whether a new dictionary column or no
dictionary column
// has been added. This will be useful after restructure for
compaction scenarios where
// newly added columns data need to be filled
@@ -401,7 +402,8 @@ public class RestructureUtil {
*/
public static List<ProjectionMeasure>
createMeasureInfoAndGetCurrentBlockQueryMeasures(
BlockExecutionInfo blockExecutionInfo, ProjectionMeasure[] queryMeasures,
- List<CarbonMeasure> currentBlockMeasures, boolean isTransactionalTable) {
+ List<CarbonMeasure> currentBlockMeasures, boolean isTransactionalTable,
+ QueryModel queryModel) {
MeasureInfo measureInfo = new MeasureInfo();
List<ProjectionMeasure> presentMeasure = new
ArrayList<>(queryMeasures.length);
int numberOfMeasureInQuery = queryMeasures.length;
@@ -435,6 +437,7 @@ public class RestructureUtil {
queryMeasure.getMeasure().getDefaultValue());
measureDataTypes[index] = queryMeasure.getMeasure().getDataType();
blockExecutionInfo.setRestructuredBlock(true);
+ queryModel.setDirectVectorFill(false);
}
index++;
}
diff --git
a/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
b/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
index ca44396..3399d76 100644
---
a/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
+++
b/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
@@ -24,6 +24,7 @@ import java.util.UUID;
import org.apache.carbondata.core.metadata.datatype.DataTypes;
import org.apache.carbondata.core.metadata.encoder.Encoding;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
@@ -31,6 +32,7 @@ import
org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
import org.apache.carbondata.core.scan.executor.infos.MeasureInfo;
import org.apache.carbondata.core.scan.model.ProjectionDimension;
import org.apache.carbondata.core.scan.model.ProjectionMeasure;
+import org.apache.carbondata.core.scan.model.QueryModel;
import org.junit.Test;
@@ -93,7 +95,8 @@ public class RestructureUtilTest {
List<ProjectionDimension> result = null;
result = RestructureUtil
.createDimensionInfoAndGetCurrentBlockQueryDimension(blockExecutionInfo,
queryDimensions,
- tableBlockDimensions, tableComplexDimensions,
queryMeasures.size(), true);
+ tableBlockDimensions, tableComplexDimensions,
queryMeasures.size(), true,
+ QueryModel.newInstance(new CarbonTable()));
List<CarbonDimension> resultDimension = new ArrayList<>(result.size());
for (ProjectionDimension queryDimension : result) {
resultDimension.add(queryDimension.getDimension());
@@ -129,7 +132,8 @@ public class RestructureUtilTest {
new ProjectionMeasure[] { queryMeasure1, queryMeasure2, queryMeasure3
};
BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
RestructureUtil.createMeasureInfoAndGetCurrentBlockQueryMeasures(blockExecutionInfo,
- queryMeasures, currentBlockMeasures, true);
+ queryMeasures, currentBlockMeasures, true,
+ QueryModel.newInstance(new CarbonTable()));
MeasureInfo measureInfo = blockExecutionInfo.getMeasureInfo();
boolean[] measuresExist = { true, true, false };
assertThat(measureInfo.getMeasureExists(), is(equalTo(measuresExist)));
diff --git
a/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
b/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
index 0d00c7b..2958446 100644
---
a/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
+++
b/integration/spark/src/test/scala/org/apache/spark/carbondata/restructure/vectorreader/AddColumnTestCases.scala
@@ -240,6 +240,29 @@ class AddColumnTestCases extends QueryTest with
BeforeAndAfterAll {
sql("DROP TABLE IF EXISTS carbon_table")
}
+ test("test querying data after adding new column and performing IUD") {
+ sql("drop table if exists altertable")
+ sql(
+ """
+ | CREATE TABLE altertable (empname String, designation String, doj
Timestamp,
+ | workgroupcategory int, workgroupcategoryname String, deptno int,
deptname String,
+ | projectcode int, projectjoindate Timestamp, projectenddate
Date,attendance int,
+ | utilization int,salary int, empno int)
+ | STORED AS carbondata
+ """.stripMargin)
+ sql(
+ s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE
altertable OPTIONS
+ |('DELIMITER'= ',', 'QUOTECHAR'= '"')""".stripMargin)
+ sql(s"delete from altertable where empno = 13").collect()
+ sql(s"delete from altertable where empno = 14").collect()
+ val ans = sql("SELECT empno FROM altertable").collect()
+ sql(
+ "ALTER TABLE altertable ADD COLUMNS(newField STRING) TBLPROPERTIES" +
+ "('DEFAULT.VALUE.newField'='def')")
+ checkAnswer(sql("SELECT empno FROM altertable"), ans)
+ sql("DROP TABLE altertable")
+ }
+
test("test to check if exception is thrown with wrong char syntax") {
sqlContext.setConf("carbon.enable.vector.reader", "false")
try {