Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1805#discussion_r161675648
--- Diff:
examples/spark2/src/main/scala/org/apache/carbondata/examples/S3Example.scala
---
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.examples
+
+import java.io.File
+
+import org.apache.hadoop.fs.s3a.Constants.{ACCESS_KEY, SECRET_KEY}
+import org.apache.spark.sql.SparkSession
+import org.slf4j.{Logger, LoggerFactory}
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+object S3Example {
+
+ /**
+ * This example demonstrates usage of S3 as a store.
+ *
+ * @param args requires four parameters: "Access-key" "Secret-key"
+ * "s3 bucket path" "spark-master"
+ */
+
+ def main(args: Array[String]) {
+ val rootPath = new File(this.getClass.getResource("/").getPath
+ + "../../../..").getCanonicalPath
+ val path = s"$rootPath/examples/spark2/src/main/resources/data1.csv"
+ val logger: Logger = LoggerFactory.getLogger(this.getClass)
+ CarbonProperties.getInstance()
+ .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
"yyyy/MM/dd HH:mm:ss")
+ .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
+
.addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE_LOADING, "true")
+
.addProperty(CarbonCommonConstants.DEFAULT_CARBON_MAJOR_COMPACTION_SIZE, "0.02")
+
+ import org.apache.spark.sql.CarbonSession._
+ if (args.length != 4) {
+ logger.error("Usage: java CarbonS3Example <access-key> <secret-key>"
+
+ "<table-path> <spark-master>")
+ System.exit(0)
+ }
+
+ val (accessKey, secretKey) = getKeyOnPrefix(args(2))
+ val spark = SparkSession
+ .builder()
+ .master(args(3))
+ .appName("S3Example")
+ .config("spark.driver.host", "localhost")
+ .config(accessKey, args(0))
+ .config(secretKey, args(1))
+ .getOrCreateCarbonSession()
+
+ spark.sparkContext.setLogLevel("INFO")
+
+ spark.sql(
+ s"""
+ | CREATE TABLE if not exists carbon_table(
+ | shortField SHORT,
+ | intField INT,
+ | bigintField LONG,
+ | doubleField DOUBLE,
+ | stringField STRING,
+ | timestampField TIMESTAMP,
+ | decimalField DECIMAL(18,2),
+ | dateField DATE,
+ | charField CHAR(5),
+ | floatField FLOAT
+ | )
+ | STORED BY 'carbondata'
+ | LOCATION '${ args(2) }'
+ | TBLPROPERTIES('SORT_COLUMNS'='',
'DICTIONARY_INCLUDE'='dateField, charField')
+ """.stripMargin)
+
+ spark.sql(
+ s"""
+ | LOAD DATA LOCAL INPATH '$path'
+ | INTO TABLE carbon_table
+ | OPTIONS('HEADER'='true')
+ """.stripMargin)
+
+ spark.sql(
+ s"""
+ | LOAD DATA LOCAL INPATH '$path'
+ | INTO TABLE carbon_table
+ | OPTIONS('HEADER'='true')
+ """.stripMargin)
+
+ spark.sql(
+ s"""
+ | LOAD DATA LOCAL INPATH '$path'
+ | INTO TABLE carbon_table
+ | OPTIONS('HEADER'='true')
+ """.stripMargin)
+
+ spark.sql(
+ s"""
+ | LOAD DATA LOCAL INPATH '$path'
+ | INTO TABLE carbon_table
+ | OPTIONS('HEADER'='true')
+ """.stripMargin)
+
+ spark.sql("ALTER table carbon_table compact 'MINOR'")
--- End diff --
Add a comment:
> // Use the compaction command to merge segments or small files in object-based
storage; this can be done periodically.
---