This is an automated email from the ASF dual-hosted git repository.

kunalkapoor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 51f474b  [CARBONDATA-3680]Add NI as a function not as udf
51f474b is described below

commit 51f474ba4b8b0f72cc85a32efbe0233c6d49cecf
Author: akashrn5 <[email protected]>
AuthorDate: Fri Apr 17 11:06:19 2020 +0530

    [CARBONDATA-3680]Add NI as a function not as udf
    
    Why is this PR needed?
    NI is registered as a UDF, so it is handled for both cases: ScalaUDF and 
HiveSimpleUDF
    
    What changes were proposed in this PR?
    Register NI as a function and remove the unwanted ScalaUDF case from the NI 
functionality.
    
    This closes #3718
---
 .../spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala | 11 ++++++++++-
 .../apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala   |  8 ++------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala 
b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index 63bc70d..9df5809 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -23,6 +23,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, 
NoSuchTableException}
 import org.apache.spark.sql.catalyst.catalog.SessionCatalog
 import org.apache.spark.sql.events.{MergeBloomIndexEventListener, 
MergeIndexEventListener}
+import org.apache.spark.sql.execution.command.CreateFunctionCommand
 import org.apache.spark.sql.hive._
 import org.apache.spark.sql.listeners._
 import org.apache.spark.sql.profiler.Profiler
@@ -74,7 +75,15 @@ class CarbonEnv {
 
     sparkSession.udf.register("getTupleId", () => "")
     sparkSession.udf.register("getPositionId", () => "")
-    sparkSession.udf.register("NI", (anyRef: AnyRef) => true)
+    // add NI as a temp function, for queries to not hit SI table, it will be 
added as HiveSimpleUDF
+    CreateFunctionCommand(
+      databaseName = None,
+      functionName = "NI",
+      className = "org.apache.spark.sql.hive.NonIndexUDFExpression",
+      resources = Seq(),
+      isTemp = true,
+      ignoreIfExists = false,
+      replace = true).run(sparkSession)
 
     // register for lucene indexSchema
     // TODO: move it to proper place, it should be registered by indexSchema 
implementation
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
index f11f530..57fb05e 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveIndexMetadataUtil.scala
@@ -19,7 +19,7 @@ package org.apache.spark.sql.hive
 import org.apache.hadoop.hive.ql.exec.UDF
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
-import org.apache.spark.sql.catalyst.expressions.{Expression, ScalaUDF}
+import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.index.CarbonIndexUtil
 import org.apache.spark.sql.secondaryindex.util.FileInternalUtil
 
@@ -133,19 +133,15 @@ object CarbonHiveIndexMetadataUtil {
   }
 
   def transformToRemoveNI(expression: Expression): Expression = {
-    val newExpWithoutNI = expression.transform {
+    expression.transform {
       case hiveUDF: HiveSimpleUDF if 
hiveUDF.function.isInstanceOf[NonIndexUDFExpression] =>
         hiveUDF.asInstanceOf[HiveSimpleUDF].children.head
-      case scalaUDF: ScalaUDF if "NI".equalsIgnoreCase(scalaUDF.udfName.get) =>
-        scalaUDF.children.head
     }
-    newExpWithoutNI
   }
 
   def checkNIUDF(condition: Expression): Boolean = {
     condition match {
       case hiveUDF: HiveSimpleUDF if 
hiveUDF.function.isInstanceOf[NonIndexUDFExpression] => true
-      case scalaUDF: ScalaUDF if "NI".equalsIgnoreCase(scalaUDF.udfName.get) 
=> true
       case _ => false
     }
   }

Reply via email to