This is an automated email from the ASF dual-hosted git repository.
indhumuthumurugesh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new effa9d2 [CARBONDATA-4023] Create MV failed on table with geospatial
index using carbonsession
effa9d2 is described below
commit effa9d2b1e5e3f4ec9eefaa38a6f94931061d78a
Author: ShreelekhyaG <[email protected]>
AuthorDate: Mon Oct 5 16:47:45 2020 +0530
[CARBONDATA-4023] Create MV failed on table with geospatial index using
carbonsession
Why is this PR needed?
Creating an MV on a table with a geospatial index failed when using CarbonSession.
It failed with java.lang.ClassNotFoundException:
org.apache.carbondata.geo.geohashindex
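For illustration only, a minimal sketch of the scenario (the index name mygeohash is a
placeholder, the remaining geohash properties such as sourcecolumns and gridSize are
omitted, and the implementation class is written out explicitly here just to make the
failing value visible; it is inferred from the exception above, not taken from the
example code):

    spark.sql(
      """CREATE TABLE geoTable(timevalue BIGINT, longitude LONG, latitude LONG)
        | STORED AS carbondata
        | TBLPROPERTIES (
        | 'SPATIAL_INDEX'='mygeohash',
        | 'SPATIAL_INDEX.mygeohash.type'='geohash',
        | 'SPATIAL_INDEX.mygeohash.class'='org.apache.carbondata.geo.GeoHashIndex')
      """.stripMargin)
    // With CarbonSession the class name ended up lower-cased to
    // org.apache.carbondata.geo.geohashindex, so the next statement failed
    // with ClassNotFoundException before this fix.
    spark.sql("CREATE MATERIALIZED VIEW view1 AS SELECT longitude, latitude FROM geoTable")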
What changes were proposed in this PR?
When a geo table is created with CarbonSession, the spatial table properties are
normalized and converted to lower case, which also lower-cases the spatial index
class name. The spatial index class property is now added to noConvertList so
that its value keeps its original case.
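A standalone sketch of the intended behaviour of the updated check (this is not the
project's test code; the literals "SPATIAL_INDEX" and "COMPRESSOR" stand in for
CarbonCommonConstants.SPATIAL_INDEX and CarbonCommonConstants.COMPRESSOR):

    object NeedToConvertToLowerCaseSketch {
      // Mirrors the updated needToConvertToLowerCase: a key that names the spatial
      // index implementation class is added to the no-convert list, so its value
      // (a fully qualified class name) keeps its case.
      def needToConvertToLowerCase(key: String): Boolean = {
        var noConvertList = Array("COMPRESSOR", "PATH", "bad_record_path",
          "timestampformat", "dateformat")
        if (key.startsWith("SPATIAL_INDEX") && key.endsWith(".class")) {
          noConvertList = noConvertList ++ Array(key)
        }
        !noConvertList.exists(x => x.equalsIgnoreCase(key))
      }

      def main(args: Array[String]): Unit = {
        // false: keep the class name's case
        println(needToConvertToLowerCase("SPATIAL_INDEX.mygeohash.class"))
        // true: ordinary properties are still lower-cased
        println(needToConvertToLowerCase("sort_columns"))
      }
    }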
Does this PR introduce any user interface change?
No
Is any new testcase added?
No
This closes #3966
---
.../carbondata/examples/GeoTableExampleWithCarbonSession.scala | 4 ++++
.../scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala | 5 ++++-
2 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/examples/spark/src/main/scala/org/apache/carbondata/examples/GeoTableExampleWithCarbonSession.scala b/examples/spark/src/main/scala/org/apache/carbondata/examples/GeoTableExampleWithCarbonSession.scala
index ee4a4a0..534b127 100644
--- a/examples/spark/src/main/scala/org/apache/carbondata/examples/GeoTableExampleWithCarbonSession.scala
+++ b/examples/spark/src/main/scala/org/apache/carbondata/examples/GeoTableExampleWithCarbonSession.scala
@@ -87,6 +87,10 @@ object GeoTableExampleWithCarbonSession {
     }
     spark.sql(s"""LOAD DATA local inpath '$path' INTO TABLE geoTable OPTIONS
                  |('DELIMITER'= ',')""".stripMargin)
+    // Test for MV creation
+    spark.sql(s"CREATE MATERIALIZED VIEW view1 AS SELECT longitude, latitude FROM geoTable")
+    val result = spark.sql("show materialized views on table geoTable").collectAsList()
+    assert(result.get(0).get(1).toString.equalsIgnoreCase("view1"))
     spark.sql("select *from geoTable").show()
     spark.sql("DROP TABLE IF EXISTS geoTable")
   }
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
index 144e1fb..4f96942 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
@@ -562,8 +562,11 @@ object CarbonSparkSqlParserUtil {
    * @return returns <true> if lower case conversion is needed else <false>
    */
   def needToConvertToLowerCase(key: String): Boolean = {
-    val noConvertList = Array(CarbonCommonConstants.COMPRESSOR, "PATH", "bad_record_path",
+    var noConvertList = Array(CarbonCommonConstants.COMPRESSOR, "PATH", "bad_record_path",
       "timestampformat", "dateformat")
+    if (key.startsWith(CarbonCommonConstants.SPATIAL_INDEX) && key.endsWith(".class")) {
+      noConvertList = noConvertList ++ Array(key)
+    }
     !noConvertList.exists(x => x.equalsIgnoreCase(key))
   }