Fixed query issue: use a separate actualOffset for the second no-dictionary key buffer
Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/32ce7910 Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/32ce7910 Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/32ce7910 Branch: refs/heads/branch-0.1 Commit: 32ce7910a7b92586a9c037db63a61fd50819655f Parents: 008bee1 Author: foryou2030 <foryou2...@126.com> Authored: Wed Sep 7 22:46:28 2016 +0800 Committer: ravipesala <ravi.pes...@gmail.com> Committed: Thu Sep 22 09:38:44 2016 +0530 ---------------------------------------------------------------------- .../impl/btree/BTreeDataRefNodeFinder.java | 17 +++++++++++------ .../spark/src/test/resources/dataWithEmptyRows.csv | 2 ++ .../detailquery/NoDictionaryColumnTestCase.scala | 16 ++++++++++++++++ 3 files changed, 29 insertions(+), 6 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/32ce7910/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java ---------------------------------------------------------------------- diff --git a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java index e443182..31273ff 100644 --- a/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java +++ b/core/src/main/java/org/apache/carbondata/core/carbon/datastore/impl/btree/BTreeDataRefNodeFinder.java @@ -223,6 +223,7 @@ public class BTreeDataRefNodeFinder implements DataRefNodeFinder { ByteBuffer firstNoDictionaryKeyBuffer = ByteBuffer.wrap(first.getNoDictionaryKeys()); ByteBuffer secondNoDictionaryKeyBuffer = ByteBuffer.wrap(second.getNoDictionaryKeys()); int actualOffset = 0; + int actualOffset1 = 0; int 
firstNoDcitionaryLength = 0; int secondNodeDictionaryLength = 0; @@ -237,21 +238,25 @@ public class BTreeDataRefNodeFinder implements DataRefNodeFinder { if (processedNoDictionaryColumn > 1) { actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset); firstNoDcitionaryLength = - firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES); + firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES) + - actualOffset; + actualOffset1 = secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset); secondNodeDictionaryLength = - secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES); + secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset + SHORT_SIZE_IN_BYTES) + - actualOffset1; compareResult = ByteUtil.UnsafeComparer.INSTANCE - .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength, - second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength); + .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength, + second.getNoDictionaryKeys(), actualOffset1, secondNodeDictionaryLength); nonDictionaryKeyOffset += SHORT_SIZE_IN_BYTES; processedNoDictionaryColumn--; } else { actualOffset = firstNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset); + actualOffset1 = secondNoDictionaryKeyBuffer.getShort(nonDictionaryKeyOffset); firstNoDcitionaryLength = first.getNoDictionaryKeys().length - actualOffset; - secondNodeDictionaryLength = second.getNoDictionaryKeys().length - actualOffset; + secondNodeDictionaryLength = second.getNoDictionaryKeys().length - actualOffset1; compareResult = ByteUtil.UnsafeComparer.INSTANCE .compareTo(first.getNoDictionaryKeys(), actualOffset, firstNoDcitionaryLength, - second.getNoDictionaryKeys(), actualOffset, secondNodeDictionaryLength); + second.getNoDictionaryKeys(), actualOffset1, secondNodeDictionaryLength); } } if (compareResult != 0) { 
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/32ce7910/integration/spark/src/test/resources/dataWithEmptyRows.csv ---------------------------------------------------------------------- diff --git a/integration/spark/src/test/resources/dataWithEmptyRows.csv b/integration/spark/src/test/resources/dataWithEmptyRows.csv new file mode 100644 index 0000000..0e3a81f --- /dev/null +++ b/integration/spark/src/test/resources/dataWithEmptyRows.csv @@ -0,0 +1,2 @@ +29000,cust_name_2000,active_emui_version_2000,2010-10-04 01:00:01,12345678 +,,,,0 http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/32ce7910/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/NoDictionaryColumnTestCase.scala ---------------------------------------------------------------------- diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/NoDictionaryColumnTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/NoDictionaryColumnTestCase.scala index 1b3a4bc..b36da1f 100644 --- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/NoDictionaryColumnTestCase.scala +++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/NoDictionaryColumnTestCase.scala @@ -33,10 +33,17 @@ class NoDictionaryColumnTestCase extends QueryTest with BeforeAndAfterAll { override def beforeAll { sql("DROP TABLE IF EXISTS carbonTable") sql("DROP TABLE IF EXISTS hiveTable") + sql("DROP TABLE IF EXISTS carbonEmpty") + sql("DROP TABLE IF EXISTS hiveEmpty") sql("CREATE TABLE carbonTable (imei String, age Int, num BigInt) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='age,num')") sql("LOAD DATA LOCAL INPATH './src/test/resources/datawithNegtiveNumber.csv' INTO TABLE carbonTable") sql("CREATE TABLE hiveTable (imei String, age Int, num BigInt) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','") sql("LOAD 
DATA LOCAL INPATH './src/test/resources/datawithNegeativewithoutHeader.csv' INTO TABLE hiveTable") + + sql("CREATE TABLE carbonEmpty (cust_id int, cust_name String, active_emui_version String, bob timestamp, bigint_column bigint) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='cust_name,active_emui_version')") + sql("LOAD DATA LOCAL INPATH './src/test/resources/dataWithEmptyRows.csv' INTO TABLE carbonEmpty OPTIONS('FILEHEADER'='cust_id,cust_name,active_emui_version,bob,bigint_column')") + sql("CREATE TABLE hiveEmpty (cust_id int, cust_name String, active_emui_version String, bob timestamp, bigint_column bigint) ROW FORMAT DELIMITED FIELDS TERMINATED BY ','") + sql("LOAD DATA LOCAL INPATH './src/test/resources/dataWithEmptyRows.csv' INTO TABLE hiveEmpty") } test("SELECT IntType FROM carbonTable") { @@ -53,8 +60,17 @@ class NoDictionaryColumnTestCase extends QueryTest with BeforeAndAfterAll { ) } + test("test load data with one row that all no dictionary column values are empty") { + checkAnswer( + sql("SELECT cust_name,active_emui_version FROM carbonEmpty"), + sql("SELECT cust_name,active_emui_version FROM hiveEmpty") + ) + } + override def afterAll { sql("DROP TABLE IF EXISTS carbonTable") sql("DROP TABLE IF EXISTS hiveTable") + sql("DROP TABLE IF EXISTS carbonEmpty") + sql("DROP TABLE IF EXISTS hiveEmpty") } } \ No newline at end of file