Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1805#discussion_r162090652
--- Diff:
examples/spark2/src/main/scala/org/apache/carbondata/examples/S3Example.scala
---
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.examples
+
+import java.io.File
+
+import org.apache.hadoop.fs.s3a.Constants.{ACCESS_KEY, ENDPOINT,
SECRET_KEY}
+import org.apache.spark.sql.SparkSession
+import org.slf4j.{Logger, LoggerFactory}
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+
+object S3Example {
+
+ /**
+ * This example demonstrates usage of
+ * 1. create carbon table with storage location on object based storage
+ * like AWS S3, Huawei OBS, etc
+ * 2. load data into carbon table, the generated file will be stored on
object based storage,
+ * and query the table.
+ * 3. With the indexing feature of carbondata, the data read from object
based storage is minimized,
+ * thus providing both high performance analytics and low cost storage
+ *
+ * @param args requires four mandatory parameters "Access-key" "Secret-key"
+ * "s3 bucket path" "spark-master" and one optional parameter "s3-endpoint"
+ */
+ def main(args: Array[String]) {
+ val rootPath = new File(this.getClass.getResource("/").getPath
+ + "../../../..").getCanonicalPath
+ val path = s"$rootPath/examples/spark2/src/main/resources/data1.csv"
+ val logger: Logger = LoggerFactory.getLogger(this.getClass)
+
+ import org.apache.spark.sql.CarbonSession._
+ if (args.length < 4 || args.length > 5) {
+ logger.error("Usage: java CarbonS3Example <access-key> <secret-key>"
+
+ "<table-path> <spark-master> <s3-endpoint>")
--- End diff --
Modify to `<table-path-on-S3>`
---