Repository: incubator-carbondata
Updated Branches:
  refs/heads/master abc807dbd -> 417685704


Problem: "select *" with "order by" and "limit" on a join query is not working.
Solution: updated the logical-plan output handling for join queries.


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: 
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/10d898c8
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/10d898c8
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/10d898c8

Branch: refs/heads/master
Commit: 10d898c86e90d5e51f52fa4a1e8b95b34f506a92
Parents: abc807d
Author: joobisb <joob...@gmail.com>
Authored: Thu Apr 20 21:57:32 2017 +0530
Committer: ravipesala <ravi.pes...@gmail.com>
Committed: Fri Apr 21 22:39:43 2017 +0530

----------------------------------------------------------------------
 .../joinquery/OrderByLimitTestCase.scala        | 90 ++++++++++++++++++++
 .../spark/sql/CarbonCatalystOperators.scala     |  4 +
 2 files changed, 94 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/10d898c8/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/OrderByLimitTestCase.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/OrderByLimitTestCase.scala
 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/OrderByLimitTestCase.scala
new file mode 100644
index 0000000..ecf4542
--- /dev/null
+++ 
b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/joinquery/OrderByLimitTestCase.scala
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.joinquery
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.common.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+/**
+ * Test Class for join query with orderby and limit
+ */
+
+class OrderByLimitTestCase extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql(
+      "CREATE TABLE carbon1 (empno int, empname String, designation String, 
doj Timestamp, " +
+      "workgroupcategory int, workgroupcategoryname String, deptno int, 
deptname String, " +
+      "projectcode int, projectjoindate Timestamp, projectenddate 
Timestamp,attendance int," +
+      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE carbon1 
OPTIONS
+          |('DELIMITER'= ',', 'QUOTECHAR'= '\"')""".stripMargin);
+
+    sql(
+      "CREATE TABLE carbon2 (empno int, empname String, designation String, 
doj Timestamp, " +
+      "workgroupcategory int, workgroupcategoryname String, deptno int, 
deptname String, " +
+      "projectcode int, projectjoindate Timestamp, projectenddate 
Timestamp,attendance int," +
+      "utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql(
+      s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE carbon2 
OPTIONS
+          |('DELIMITER'= ',', 'QUOTECHAR'= '\"')""".stripMargin);
+
+    sql(
+      "CREATE TABLE carbon1_hive (empno int, empname String, designation 
String, doj Timestamp, " +
+      "workgroupcategory int, workgroupcategoryname String, deptno int, 
deptname String, " +
+      "projectcode int, projectjoindate Timestamp, projectenddate 
Timestamp,attendance int," +
+      "utilization int,salary int) row format delimited fields terminated by 
','")
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO 
TABLE carbon1_hive ")
+
+    sql(
+      "CREATE TABLE carbon2_hive (empno int, empname String, designation 
String, doj Timestamp, " +
+      "workgroupcategory int, workgroupcategoryname String, deptno int, 
deptname String, " +
+      "projectcode int, projectjoindate Timestamp, projectenddate 
Timestamp,attendance int," +
+      "utilization int,salary int) row format delimited fields terminated by 
','")
+    sql(
+      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO 
TABLE carbon2_hive ");
+
+
+  }
+
+  test("test join with orderby limit") {
+    checkAnswer(
+      sql(
+        "select a.empno,a.empname,a.workgroupcategoryname from carbon1 a full 
outer join carbon2 " +
+        "b on substr(a.workgroupcategoryname," +
+        "1,3)" +
+        "=substr(b.workgroupcategoryname,1,3) order by a.empname limit 5"),
+      sql(
+        "select a.empno,a.empname,a.workgroupcategoryname from carbon1_hive a 
full outer join " +
+        "carbon2_hive b on " +
+        "substr(a" +
+        ".workgroupcategoryname,1,3)=substr(b.workgroupcategoryname,1,3) order 
by a.empname limit" +
+        " 5")
+    )
+  }
+
+  override def afterAll {
+    sql("drop table carbon1")
+    sql("drop table carbon2")
+    sql("drop table carbon1_hive")
+    sql("drop table carbon2_hive")
+  }
+}

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/10d898c8/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
----------------------------------------------------------------------
diff --git 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index 5917369..5b47fcf 100644
--- 
a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ 
b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -46,6 +46,10 @@ case class CarbonDictionaryCatalystDecoder(
         val logicalOut =
           CarbonDictionaryDecoder.updateAttributes(child.output, relations, 
aliasMap)
         CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, 
aliasMap)
+      case Join(l: LogicalRelation, r: LogicalRelation, _, _) =>
+        val logicalOut =
+          CarbonDictionaryDecoder.updateAttributes(child.output, relations, 
aliasMap)
+        CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, 
aliasMap)
       case _ => CarbonDictionaryDecoder.convertOutput(child.output, relations, 
profile, aliasMap)
     }
   }

Reply via email to