GitHub user chenerlu commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/1042#discussion_r122658340
  
    --- Diff: examples/spark2/src/main/scala/org/apache/carbondata/examples/CarbonShowPartitionInfo.scala ---
    @@ -0,0 +1,111 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.carbondata.examples
    +
    +import java.io.File
    +
    +import org.apache.spark.sql.SparkSession
    +
    +object CarbonShowPartitionInfo {
    +  def main(args: Array[String]) {
    +
    +    CarbonShowPartitionInfo.extracted("t3", args)
    +  }
    +  def extracted(tableName: String, args: Array[String]): Unit = {
    +    val rootPath = new File(this.getClass.getResource("/").getPath
    +      + "../../../..").getCanonicalPath
    +    val storeLocation = s"$rootPath/examples/spark2/target/store"
    +    val warehouse = s"$rootPath/examples/spark2/target/warehouse"
    +    val metastoredb = s"$rootPath/examples/spark2/target"
    +    val testData = s"$rootPath/examples/spark2/src/main/resources/bitmaptest2.csv"
    +    import org.apache.spark.sql.CarbonSession._
    +    val spark = SparkSession
    +      .builder()
    +      .master("local")
    +      .appName("CarbonDataLoad")
    +      .config("spark.sql.warehouse.dir", warehouse)
    +      .getOrCreateCarbonSession(storeLocation, metastoredb)
    +
    +    // range partition
    +    spark.sql("DROP TABLE IF EXISTS t1")
    +    // hash partition
    +    spark.sql("DROP TABLE IF EXISTS t3")
    +    // list partition
    +    spark.sql("DROP TABLE IF EXISTS t5")
    +
    +    spark.sql("""
    +                | CREATE TABLE IF NOT EXISTS t1
    +                | (
    +                | vin String,
    +                | phonenumber Long,
    +                | country String,
    +                | area String
    +                | )
    +                | PARTITIONED BY (logdate Timestamp)
    +                | STORED BY 'carbondata'
    +                | TBLPROPERTIES('PARTITION_TYPE'='RANGE',
    +                | 'RANGE_INFO'='20140101, 2015/01/01 ,2016-01-01')
    +              """.stripMargin)
    +
    +    spark.sql("""
    +                | CREATE TABLE IF NOT EXISTS t3
    +                | (
    +                | logdate Timestamp,
    +                | phonenumber Long,
    +                | country String,
    +                | area String
    +                | )
    +                | PARTITIONED BY (vin String)
    +                | STORED BY 'carbondata'
    +                | TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='5')
    +                """.stripMargin)
    +
    +    spark.sql("""
    +       | CREATE TABLE IF NOT EXISTS t5
    +       | (
    +       | vin String,
    +       | logdate Timestamp,
    +       | phonenumber Long,
    +       | area String
    +       |)
    +       | PARTITIONED BY (country string)
    +       | STORED BY 'carbondata'
    +       | TBLPROPERTIES('PARTITION_TYPE'='LIST',
    +       | 'LIST_INFO'='(China,United States),UK ,japan,(Canada,Russia), South Korea ')
    +       """.stripMargin)
    +
    +    spark.sparkContext.setLogLevel("WARN")
    +    spark.sql(s"""
    +      SHOW PARTITIONS t1
    +             """).show()
    +    spark.sql(s"""
    +      SHOW PARTITIONS t3
    +             """).show()
    +    spark.sql(s"""
    +      SHOW PARTITIONS t5
    +             """).show()
    +
    +    // range partition
    +    spark.sql("DROP TABLE IF EXISTS t1")
    +    // hash partition
    +    spark.sql("DROP TABLE IF EXISTS t3")
    +    // list partition
    +    spark.sql("DROP TABLE IF EXISTS t5")
    +
    --- End diff --
    
    Suggest closing the Spark session at the end.
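    
    For example, a minimal sketch of what that could look like (assuming the `spark` CarbonSession created earlier in this example), placed after the final DROP TABLE statements:
    
        // ... final DROP TABLE statements ...
        spark.sql("DROP TABLE IF EXISTS t5")
    
        // Stop the session last so the example releases its resources cleanly.
        spark.stop()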

