GitHub user kunal642 commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/2951#discussion_r236646951
  
    --- Diff: integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/dli/CreateTableUsingSparkCarbonFileFormatTestCase.scala ---
    @@ -0,0 +1,484 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.carbondata.cluster.sdv.generated.dli
    +
    +import java.io.File
    +import java.text.SimpleDateFormat
    +import java.util.{Date, Random}
    +
    +import scala.collection.JavaConverters._
    +
    +import org.apache.commons.io.FileUtils
    +import org.apache.commons.lang.RandomStringUtils
    +import org.apache.spark.sql.Row
    +import org.apache.spark.sql.common.util.QueryTest
    +import org.apache.spark.util.SparkUtil
    +import org.scalatest.BeforeAndAfterAll
    +
    +import org.apache.carbondata.core.constants.CarbonCommonConstants
    +import org.apache.carbondata.core.datamap.DataMapStoreManager
    +import org.apache.carbondata.core.datastore.filesystem.CarbonFile
    +import org.apache.carbondata.core.datastore.impl.FileFactory
    +import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
    +import org.apache.carbondata.core.metadata.blocklet.DataFileFooter
    +import org.apache.carbondata.core.metadata.datatype.DataTypes
    +import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataFileFooterConverter}
    +import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}
    +
    +class CreateTableUsingSparkCarbonFileFormatTestCase extends QueryTest with BeforeAndAfterAll {
    +
    +  override def beforeAll(): Unit = {
    +    sql("DROP TABLE IF EXISTS sdkOutputTable")
    +  }
    +
    +  override def afterAll(): Unit = {
    +    CarbonProperties.getInstance()
    +      .addProperty(CarbonCommonConstants.CARBON_MINMAX_ALLOWED_BYTE_COUNT,
    +        CarbonCommonConstants.CARBON_MINMAX_ALLOWED_BYTE_COUNT_DEFAULT)
    +    sql("DROP TABLE IF EXISTS sdkOutputTable")
    +  }
    +
    +  var writerPath = new File(this.getClass.getResource("/").getPath +
    +                            "../." +
    +                            "./src/test/resources/SparkCarbonFileFormat/WriterOutput/")
    +    .getCanonicalPath
    +  // getCanonicalPath returns a path with \ on Windows, but the code expects /.
    +  writerPath = writerPath.replace("\\", "/")
    +
    +  def buildTestData(): Unit = {
    +
    +    FileUtils.deleteDirectory(new File(writerPath))
    +
    +    val schema = new StringBuilder()
    +      .append("[ \n")
    +      .append("   {\"name\":\"string\"},\n")
    +      .append("   {\"age\":\"int\"},\n")
    +      .append("   {\"height\":\"double\"}\n")
    +      .append("]")
    +      .toString()
    +
    +    try {
    +      val builder = CarbonWriter.builder()
    +      val writer = builder.outputPath(writerPath)
    +        .withCsvInput(Schema.parseJson(schema))
    +        .writtenBy("CreateTableUsingSparkCarbonFileFormatTestCase")
    +        .build()
    +      var i = 0
    +      while (i < 100) {
    +        writer.write(Array[String]("robot" + i, String.valueOf(i), String.valueOf(i.toDouble / 2)))
    +        i += 1
    +      }
    +      writer.close()
    +    } catch {
    +      // surface writer failures instead of silently swallowing them
    +      case e: Throwable => fail("unable to build test data: " + e.getMessage)
    +    }
    +  }
    +
    +  def cleanTestData(): Unit = {
    +    FileUtils.deleteDirectory(new File(writerPath))
    +  }
    +
    +  def deleteIndexFile(path: String, extension: String): Unit = {
    +    val file: CarbonFile = FileFactory
    +      .getCarbonFile(path, FileFactory.getFileType(path))
    +
    +    for (eachDir <- file.listFiles) {
    +      if (!eachDir.isDirectory) {
    +        if (eachDir.getName.endsWith(extension)) {
    +          CarbonUtil.deleteFoldersAndFilesSilent(eachDir)
    +        }
    +      } else {
    +        deleteIndexFile(eachDir.getPath, extension)
    +      }
    +    }
    +  }
    +
    +  // TODO: remove the segment dependency and the tableIdentifier dependency
    +  test("read carbondata files (sdk Writer Output) using the 
SparkCarbonFileFormat ") {
    +    buildTestData()
    +    assert(new File(writerPath).exists())
    +    sql("DROP TABLE IF EXISTS sdkOutputTable")
    +
    +    // create the table through the data source file format; the DDL syntax differs by Spark version
    +    if (SparkUtil.isSparkVersionEqualTo("2.1")) {
    +      sql(s"""CREATE TABLE sdkOutputTable USING carbon OPTIONS (PATH '$writerPath') """)
    +    } else if (SparkUtil.isSparkVersionXandAbove("2.2")) {
    +      sql(
    +        s"""CREATE TABLE sdkOutputTable USING carbon LOCATION
    +           |'$writerPath' """.stripMargin)
    +    } else {
    +      // TODO: handle other Spark versions
    +    }
    +
    +    sql("Describe formatted sdkOutputTable").show(false)
    --- End diff ---
    
    No need to use .show() here; all of these display-only queries can be removed.
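    
    For illustration, a minimal sketch of the suggested cleanup, assuming the
    usual checkAnswer helper from the QueryTest base class this suite already
    extends, and that buildTestData wrote 100 rows:
    
        // before: display-only query that prints to stdout and verifies nothing
        sql("Describe formatted sdkOutputTable").show(false)
    
        // after: drop the line entirely, or replace it with a real assertion,
        // e.g. (count(*) returns a Long, hence Row(100L))
        checkAnswer(sql("SELECT count(*) FROM sdkOutputTable"), Seq(Row(100L)))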

