This is an automated email from the ASF dual-hosted git repository.

ajantha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 8ab12b7  [CARBONDATA-3742] Support spark 2.4.5 integration
8ab12b7 is described below

commit 8ab12b76c797de2140fcdb909f3fdd73ab5d9b63
Author: Jacky Li <jacky.li...@qq.com>
AuthorDate: Tue Mar 17 16:37:58 2020 +0800

    [CARBONDATA-3742] Support spark 2.4.5 integration
    
    Why is this PR needed?
    Currently, CarbonData does not support integration with Spark 2.4.5.
    
    What changes were proposed in this PR?
    Support integration with Spark 2.4.5: upgrade the default Spark
    dependency from 2.4.4 to 2.4.5, update the build and documentation
    accordingly, and adapt CarbonSparkSqlParser and CarbonReflectionUtils
    (the parser now extends SparkSqlParser, and the reflectively created
    AST builder is typed as SparkSqlAstBuilder instead of AstBuilder).
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    No
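
    In essence, the parser change boils down to the sketch below. It is
    condensed from the diff in this commit; the imports are filled in from
    the touched files, and CarbonSpark2SqlParser is CarbonData's own
    statement parser, so treat this as an illustration rather than a
    drop-in compilable unit:

        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.execution.{SparkSqlAstBuilder, SparkSqlParser}
        import org.apache.spark.sql.internal.SQLConf
        import org.apache.spark.util.CarbonReflectionUtils

        // CarbonSparkSqlParser now extends the concrete SparkSqlParser
        // instead of AbstractSqlParser, so astBuilder becomes an override
        // and is typed as SparkSqlAstBuilder rather than AstBuilder.
        class CarbonSparkSqlParser(conf: SQLConf, sparkSession: SparkSession)
          extends SparkSqlParser(conf) {

          val parser = new CarbonSpark2SqlParser

          // Still created reflectively from the class name configured under
          // CARBON_SQLASTBUILDER_CLASSNAME; the helper now returns
          // SparkSqlAstBuilder so the override type-checks.
          override val astBuilder: SparkSqlAstBuilder =
            CarbonReflectionUtils.getAstBuilder(conf, parser, sparkSession)
        }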
    
    This closes #3671
---
 build/README.md                                                    | 2 +-
 docs/prestodb-guide.md                                             | 4 ++--
 docs/prestosql-guide.md                                            | 4 ++--
 docs/streaming-guide.md                                            | 2 +-
 .../scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala   | 7 ++++---
 .../main/scala/org/apache/spark/util/CarbonReflectionUtils.scala   | 7 +++----
 pom.xml                                                            | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/build/README.md b/build/README.md
index 81f9133..56f72e7 100644
--- a/build/README.md
+++ b/build/README.md
@@ -25,7 +25,7 @@
 * [Apache Thrift 0.9.3](http://archive.apache.org/dist/thrift/0.9.3/)
 
 ## Build command
-Build with different supported versions of Spark, by default using Spark 2.4.4
+Build with different supported versions of Spark, by default using Spark 2.4.5
 ```
 mvn -DskipTests -Pspark-2.4 clean package
 ```
diff --git a/docs/prestodb-guide.md b/docs/prestodb-guide.md
index 9bd9a89..b048d9d 100644
--- a/docs/prestodb-guide.md
+++ b/docs/prestodb-guide.md
@@ -238,10 +238,10 @@ Now you can use the Presto CLI on the coordinator to query data sources in the c
   ```
   Replace the spark and hadoop version with the version used in your cluster.
   For example, use prestodb profile and 
-  if you are using Spark 2.4.4, you would like to compile using:
+  if you are using Spark 2.4.5, you would like to compile using:
   
   ```
-  mvn -DskipTests -Pspark-2.4 -Pprestodb -Dspark.version=2.4.4 -Dhadoop.version=2.7.2 clean package
+  mvn -DskipTests -Pspark-2.4 -Pprestodb -Dspark.version=2.4.5 -Dhadoop.version=2.7.2 clean package
   ```
 
   Secondly: Create a folder named 'carbondata' under $PRESTO_HOME$/plugin and
diff --git a/docs/prestosql-guide.md b/docs/prestosql-guide.md
index b42a1b8..8832b7a 100644
--- a/docs/prestosql-guide.md
+++ b/docs/prestosql-guide.md
@@ -238,9 +238,9 @@ Now you can use the Presto CLI on the coordinator to query data sources in the c
   ```
   Replace the spark and hadoop version with the version used in your cluster.
   For example, use prestosql profile and  
-  if you are using Spark 2.4.4, you would like to compile using:
+  if you are using Spark 2.4.5, you would like to compile using:
   ```
-  mvn -DskipTests -Pspark-2.4 -Pprestosql -Dspark.version=2.4.4 -Dhadoop.version=2.7.2 clean package
+  mvn -DskipTests -Pspark-2.4 -Pprestosql -Dspark.version=2.4.5 -Dhadoop.version=2.7.2 clean package
   ```
 
   Secondly: Create a folder named 'carbondata' under $PRESTO_HOME$/plugin and
diff --git a/docs/streaming-guide.md b/docs/streaming-guide.md
index 97618bb..cc5ac49 100644
--- a/docs/streaming-guide.md
+++ b/docs/streaming-guide.md
@@ -37,7 +37,7 @@
     - [CLOSE STREAM](#close-stream)
 
 ## Quick example
-Download and unzip spark-2.4.4-bin-hadoop2.7.tgz, and export $SPARK_HOME
+Download and unzip spark-2.4.5-bin-hadoop2.7.tgz, and export $SPARK_HOME
 
 Package carbon jar, and copy assembly/target/scala-2.11/carbondata_2.11-2.0.0-SNAPSHOT-shade-hadoop2.7.2.jar to $SPARK_HOME/jars
 ```shell
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index f4a524b..f1d8f26 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -23,7 +23,7 @@ import org.apache.spark.sql.{CarbonSession, CarbonUtils, SparkSession}
 import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, SqlBaseParser}
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.execution.SparkSqlAstBuilder
+import org.apache.spark.sql.execution.{SparkSqlAstBuilder, SparkSqlParser}
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
 import org.apache.spark.sql.types.StructField
 import org.apache.spark.sql.util.CarbonException
@@ -36,10 +36,11 @@ import org.apache.carbondata.spark.util.CarbonScalaUtil
  * Concrete parser for Spark SQL statements and carbon specific
  * statements
  */
-class CarbonSparkSqlParser(conf: SQLConf, sparkSession: SparkSession) extends AbstractSqlParser {
+class CarbonSparkSqlParser(conf: SQLConf, sparkSession: SparkSession) extends SparkSqlParser(conf) {
 
   val parser = new CarbonSpark2SqlParser
-  val astBuilder = CarbonReflectionUtils.getAstBuilder(conf, parser, sparkSession)
+
+  override val astBuilder = CarbonReflectionUtils.getAstBuilder(conf, parser, sparkSession)
 
   private val substitutor = new VariableSubstitution(conf)
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala b/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
index caee10d..250469a 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/CarbonReflectionUtils.scala
@@ -28,10 +28,9 @@ import org.apache.spark.sql.catalyst.analysis.Analyzer
 import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
 import org.apache.spark.sql.catalyst.catalog.CatalogTable
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression}
-import org.apache.spark.sql.catalyst.parser.AstBuilder
 import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoTable, LogicalPlan, SubqueryAlias}
 import org.apache.spark.sql.catalyst.plans.physical.Partitioning
-import org.apache.spark.sql.execution.{RowDataSourceScanExec, SparkPlan}
+import org.apache.spark.sql.execution.{RowDataSourceScanExec, SparkPlan, SparkSqlAstBuilder}
 import org.apache.spark.sql.execution.command.AlterTableAddColumnsCommand
 import org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}
 import org.apache.spark.sql.internal.HiveSerDe
@@ -133,13 +132,13 @@ object CarbonReflectionUtils {
 
   def getAstBuilder(conf: Object,
       sqlParser: Object,
-      sparkSession: SparkSession): AstBuilder = {
+      sparkSession: SparkSession): SparkSqlAstBuilder = {
     val className = sparkSession.sparkContext.conf.get(
       CarbonCommonConstants.CARBON_SQLASTBUILDER_CLASSNAME,
       CarbonCommonConstants.CARBON_SQLASTBUILDER_CLASSNAME_DEFAULT)
     createObject(className,
       conf,
-      sqlParser, sparkSession)._1.asInstanceOf[AstBuilder]
+      sqlParser, sparkSession)._1.asInstanceOf[SparkSqlAstBuilder]
   }
 
   def getSessionState(sparkContext: SparkContext,
diff --git a/pom.xml b/pom.xml
index 7b8b8b7..2a89e01 100644
--- a/pom.xml
+++ b/pom.xml
@@ -584,7 +584,7 @@
     <profile>
       <id>spark-2.4</id>
       <properties>
-        <spark.version>2.4.4</spark.version>
+        <spark.version>2.4.5</spark.version>
         <scala.binary.version>2.11</scala.binary.version>
         <scala.version>2.11.8</scala.version>
       </properties>
