Github user jackylk commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/2328#discussion_r196137795
  
    --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/stream/CarbonCreateStreamCommand.scala ---
    @@ -0,0 +1,132 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.sql.execution.command.stream
    +
    +import java.util
    +
    +import scala.collection.JavaConverters._
    +
    +import org.apache.spark.sql._
    +import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
    +import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
    +import org.apache.spark.sql.execution.command.DataCommand
    +import org.apache.spark.sql.execution.datasources.LogicalRelation
    +import org.apache.spark.sql.execution.streaming.StreamingRelation
    +import org.apache.spark.sql.streaming.DataStreamReader
    +import org.apache.spark.sql.types.{StringType, StructType}
    +
    +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
    +import org.apache.carbondata.common.logging.LogServiceFactory
    +import org.apache.carbondata.core.metadata.schema.table.CarbonTable
    +import org.apache.carbondata.spark.StreamingOption
    +import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl
    +import org.apache.carbondata.stream.StreamJobManager
    +
    +/**
    + * This command will start a Spark streaming job to insert rows from source to sink
    + */
    +case class CarbonCreateStreamCommand(
    +    streamName: String,
    +    sinkDbName: Option[String],
    +    sinkTableName: String,
    +    optionMap: Map[String, String],
    +    query: String
    +) extends DataCommand {
    +
    +  override def output: Seq[Attribute] =
    +    Seq(AttributeReference("Stream Name", StringType, nullable = false)(),
    +      AttributeReference("JobId", StringType, nullable = false)(),
    +      AttributeReference("Status", StringType, nullable = false)())
    +
    +  override def processData(sparkSession: SparkSession): Seq[Row] = {
    +    val df = sparkSession.sql(query)
    +    var sourceTable: CarbonTable = null
    +
    +    // find the streaming source table in the query
    +    // and replace it with StreamingRelation
    +    val streamLp = df.logicalPlan transform {
    +      case r: LogicalRelation
    +        if r.relation.isInstanceOf[CarbonDatasourceHadoopRelation] &&
    +           r.relation.asInstanceOf[CarbonDatasourceHadoopRelation].carbonTable.isStreamingSource =>
    +        val (source, streamingRelation) = prepareStreamingRelation(sparkSession, r)
    +        if (sourceTable != null && sourceTable.getTableName != source.getTableName) {
    --- End diff ---
    
    fixed
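
    A side note for readers of this thread: the rewrite above is ordinary Catalyst plan transformation, and the DDL this command backs is roughly CREATE STREAM name ON TABLE sink STMPROPERTIES(...) AS SELECT ... FROM source. Below is a minimal, self-contained sketch of the same transform-and-check pattern. It is illustrative only: a parquet relation stands in for a CarbonData streaming source, and the names and exception are hypothetical, not CarbonData's API.

        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.execution.datasources.LogicalRelation

        object TransformSketch {
          def main(args: Array[String]): Unit = {
            val spark = SparkSession.builder()
              .master("local[2]").appName("transform-sketch").getOrCreate()
            spark.range(10).write.mode("overwrite").parquet("/tmp/transform-sketch")
            val df = spark.read.parquet("/tmp/transform-sketch").filter("id > 5")

            // transform visits every node of the analyzed plan; the partial
            // function rewrites the nodes it matches. A var captured by the
            // closure remembers the first source seen -- the same shape as the
            // sourceTable/getTableName check in the diff above.
            var seen: Option[String] = None
            df.queryExecution.analyzed transform {
              case r: LogicalRelation =>
                val name = r.relation.toString // stand-in for carbonTable.getTableName
                if (seen.exists(_ != name)) {
                  throw new IllegalArgumentException(
                    "stream query must read from exactly one streaming source table")
                }
                seen = Some(name)
                r // CarbonCreateStreamCommand instead returns a StreamingRelation here
            }
            spark.stop()
          }
        }

    In the real command the matched case returns the StreamingRelation built by prepareStreamingRelation, and the captured sourceTable backs the getTableName check that rejects a query reading from more than one streaming source table.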

