This is an automated email from the ASF dual-hosted git repository.

akashrn5 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 4ccc3dd  [CARBONDATA-3901][CARBONDATA-3903][CARBONDATA-3824] SI 
creation on unknown table and doc changes.
4ccc3dd is described below

commit 4ccc3dd81d54838774363fb8821bea63065abe13
Author: Nihal ojha <[email protected]>
AuthorDate: Tue Oct 13 18:15:02 2020 +0530

    [CARBONDATA-3901][CARBONDATA-3903][CARBONDATA-3824] SI creation on unknown 
table and
    doc changes.
    
    Why is this PR needed?
    1. Currently, the exception message for SI creation on the table which 
doesn't exist
    is not correct.
    2. Some links were not working and also removed unnecessary things.
    3. Included spatial index changes in documentation.
    
    What changes were proposed in this PR?
    1. Changed exception message for SI creation on the table which doesn't 
exist.
    2. Corrected the documentation.
    3. Included spatial index changes in documentation.
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    Yes
    
    This closes #3980
---
 docs/ddl-of-carbondata.md                                  |  2 +-
 docs/spatial-index-guide.md                                |  7 +++++--
 docs/streaming-guide.md                                    |  2 +-
 .../testsuite/secondaryindex/TestCreateIndexTable.scala    | 14 ++++++++++++--
 .../apache/spark/sql/execution/strategy/DDLStrategy.scala  |  3 ++-
 .../org/apache/spark/sql/hive/CarbonFileMetastore.scala    |  1 +
 6 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 56d7e4e..9d68296 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -86,7 +86,7 @@ CarbonData DDL statements are documented here,which includes:
 | [NO_INVERTED_INDEX](#inverted-index-configuration)           | Columns to 
exclude from inverted index generation            |
 | [INVERTED_INDEX](#inverted-index-configuration)              | Columns to 
include for inverted index generation             |
 | [SORT_COLUMNS](#sort-columns-configuration)                  | Columns to 
include in sort and its order of sort             |
-| [SORT_SCOPE](#sort-scope-configuration)                      | Sort scope of 
the load.Options include no sort, local sort ,batch sort and global sort |
+| [SORT_SCOPE](#sort-scope-configuration)                      | Sort scope of 
the load.Options include no sort, local sort and global sort |
 | [TABLE_BLOCKSIZE](#table-block-size-configuration)           | Size of 
blocks to write onto hdfs                            |
 | [TABLE_BLOCKLET_SIZE](#table-blocklet-size-configuration)    | Size of 
blocklet to write in the file                        |
 | [TABLE_PAGE_SIZE_INMB](#table-page-size-configuration)       | Size of page 
in MB; if page size crosses this value before 32000 rows, page will be cut to 
this many rows and remaining rows are processed in the subsequent pages. This 
helps in keeping page size to fit in cpu cache size|
diff --git a/docs/spatial-index-guide.md b/docs/spatial-index-guide.md
index f97ee38..0f9ddcc 100644
--- a/docs/spatial-index-guide.md
+++ b/docs/spatial-index-guide.md
@@ -62,13 +62,16 @@ create table source_index(id BIGINT, latitude long, 
longitude long) stored by 'c
 'SPATIAL_INDEX.mygeohash.maxLatitude'='20.225281',
 'SPATIAL_INDEX.mygeohash.conversionRatio'='1000000');
 ```
-Note: `mygeohash` in the above example represent the index name.
+Note: 
+   * `mygeohash` in the above example represent the index name.
+   * Columns present in spatial_index table properties cannot be altered
+    i.e., sourcecolumns: `longitude, latitude` and index column: `mygeohash` 
in the above example.
 
 #### List of spatial index table properties
 
 |Name|Description|
 
|-----------------------------------|-----------------------------------------------------------------------------------------|
-| SPATIAL_INDEX | Used to configure Spatial Index name. This name is appended 
to `SPATIAL_INDEX` in the subsequent sub-property configurations. `xxx` in the 
below sub-properties refer to index name.|
+| SPATIAL_INDEX | Used to configure Spatial Index name. This name is appended 
to `SPATIAL_INDEX` in the subsequent sub-property configurations. `xxx` in the 
below sub-properties refer to index name. Generated spatial index column is not 
allowed in any properties except in `SORT_COLUMNS` table property.|
 | SPATIAL_INDEX.xxx.type | Type of algorithm for processing spatial data. 
Currently, supports only 'geohash'.|
 | SPATIAL_INDEX.xxx.sourcecolumns | longitude and latitude column names as in 
the table. These columns are used to generate index value for each row.|
 | SPATIAL_INDEX.xxx.gridSize | Grid size of raster data in metres. Currently, 
spatial index supports raster data.|
diff --git a/docs/streaming-guide.md b/docs/streaming-guide.md
index cc5ac49..aec9b3c 100644
--- a/docs/streaming-guide.md
+++ b/docs/streaming-guide.md
@@ -34,7 +34,7 @@
     - [CREATE STREAM](#create-stream)
     - [DROP STREAM](#drop-stream)
     - [SHOW STREAMS](#show-streams)
-    - [CLOSE STREAM](#close-stream)
+    - [CLOSE STREAM](#alter-table-close-stream)
 
 ## Quick example
 Download and unzip spark-2.4.5-bin-hadoop2.7.tgz, and export $SPARK_HOME
diff --git 
a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
 
b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
index 394db5d..00fe37d 100644
--- 
a/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
+++ 
b/index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestCreateIndexTable.scala
@@ -314,7 +314,8 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
       assert(false)
     } catch {
       case e: Exception =>
-        assert(e.getMessage.contains("Operation not allowed on non-carbon 
table"))
+        assert(e.getMessage.contains("Operation not allowed because either 
table " +
+          "createindextemptable doesn't exist or not a carbon table."))
     }
   }
 
@@ -337,7 +338,8 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
       assert(false)
     } catch {
       case e: Exception =>
-        assert(e.getMessage.contains("Operation not allowed on non-carbon 
table"))
+        assert(e.getMessage.contains("Operation not allowed because either 
table " +
+          "createindextemptable1 doesn't exist or not a carbon table."))
     }
     sql("insert into temptablecheckDB.createindextemptable1 select 
1,'string','string',3")
     sql("insert into temptablecheckDB.createindextemptable1 select 
1,'string','string',3")
@@ -450,6 +452,14 @@ class TestCreateIndexTable extends QueryTest with 
BeforeAndAfterAll {
     }
   }
 
+  test("test SI creation on table which doesn't exist") {
+    val exception = intercept[RuntimeException] {
+      sql("""create index indextable on table unknown(c) AS 
'carbondata'""").show()
+    }
+    assert(exception.getMessage.contains("Operation not allowed because either 
table " +
+    "unknown doesn't exist or not a carbon table."))
+  }
+
   object CarbonMetastore {
     import org.apache.carbondata.core.reader.ThriftReader
 
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
index 9a0e12c..e1032e9 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
@@ -204,7 +204,8 @@ class DDLStrategy(sparkSession: SparkSession) extends 
SparkStrategy {
           ExecutedCommandExec(CarbonCreateSecondaryIndexCommand(
             indexModel, tableProperties, ifNotExists, isDeferredRefresh, 
isCreateSIndex)) :: Nil
         } else {
-          sys.error("Operation not allowed on non-carbon table")
+          sys.error(s"Operation not allowed because either table " +
+            s"${indexModel.tableName} doesn't exist or not a carbon table.")
         }
       case showIndex@ShowIndexesCommand(_, _) =>
         try {
diff --git 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
index 932c175..f632189 100644
--- 
a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
+++ 
b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonFileMetastore.scala
@@ -267,6 +267,7 @@ class CarbonFileMetastore extends CarbonMetaStore {
       lookupRelation(tableIdentifier)(sparkSession)
     } catch {
       case _: NoSuchTableException =>
+        LOGGER.debug(s"Table ${tableIdentifier.table} does not exist.")
         return false
       case ex: Exception =>
         throw ex

Reply via email to