Repository: spark
Updated Branches:
  refs/heads/branch-2.0 237ae54c9 -> 1c5697116


http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala
index af2229a..66fb5a4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala
@@ -49,10 +49,10 @@ class StreamExecution(
     override val id: Long,
     override val name: String,
     checkpointRoot: String,
-    private[sql] val logicalPlan: LogicalPlan,
+    val logicalPlan: LogicalPlan,
     val sink: Sink,
     val trigger: Trigger,
-    private[sql] val triggerClock: Clock,
+    val triggerClock: Clock,
     val outputMode: OutputMode)
   extends StreamingQuery with Logging {
 
@@ -74,7 +74,7 @@ class StreamExecution(
    * input source.
    */
   @volatile
-  private[sql] var committedOffsets = new StreamProgress
+  var committedOffsets = new StreamProgress
 
   /**
    * Tracks the offsets that are available to be processed, but have not yet been committed to the
@@ -102,10 +102,10 @@ class StreamExecution(
   private var state: State = INITIALIZED
 
   @volatile
-  private[sql] var lastExecution: QueryExecution = null
+  var lastExecution: QueryExecution = null
 
   @volatile
-  private[sql] var streamDeathCause: StreamingQueryException = null
+  var streamDeathCause: StreamingQueryException = null
 
   /* Get the call site in the caller thread; will pass this into the micro batch thread */
   private val callSite = Utils.getCallSite()
@@ -115,7 +115,7 @@ class StreamExecution(
    * [[org.apache.spark.util.UninterruptibleThread]] to avoid potential deadlocks in using
    * [[HDFSMetadataLog]]. See SPARK-14131 for more details.
    */
-  private[sql] val microBatchThread =
+  val microBatchThread =
     new UninterruptibleThread(s"stream execution thread for $name") {
       override def run(): Unit = {
         // To fix call site like "run at <unknown>:0", we bridge the call site from the caller
@@ -131,8 +131,7 @@ class StreamExecution(
    * processing is done.  Thus, the Nth record in this log indicates data that is currently being
    * processed and the N-1th entry indicates which offsets have been durably committed to the sink.
    */
-  private[sql] val offsetLog =
-    new HDFSMetadataLog[CompositeOffset](sparkSession, checkpointFile("offsets"))
+  val offsetLog = new HDFSMetadataLog[CompositeOffset](sparkSession, checkpointFile("offsets"))
 
   /** Whether the query is currently active or not */
   override def isActive: Boolean = state == ACTIVE
@@ -159,7 +158,7 @@ class StreamExecution(
    * Starts the execution. This returns only after the thread has started and [[QueryStarted]] event
    * has been posted to all the listeners.
    */
-  private[sql] def start(): Unit = {
+  def start(): Unit = {
     microBatchThread.setDaemon(true)
     microBatchThread.start()
     startLatch.await()  // Wait until thread started and QueryStart event has been posted
@@ -518,7 +517,7 @@ class StreamExecution(
   case object TERMINATED extends State
 }
 
-private[sql] object StreamExecution {
+object StreamExecution {
   private val _nextId = new AtomicLong(0)
 
   def nextId: Long = _nextId.getAndIncrement()
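
With these members public, driver-side code can inspect a running query directly. A
minimal sketch, assuming a started query `query: StreamingQuery` whose concrete type
is StreamExecution (the value name is hypothetical):

  import org.apache.spark.sql.execution.streaming.StreamExecution

  val exec = query.asInstanceOf[StreamExecution]
  // Offsets durably committed to the sink so far.
  println(exec.committedOffsets)
  // Newest (batchId, CompositeOffset) entry in the write-ahead offset log, if any;
  // per the comment above, batch N is in flight while batch N-1 is committed.
  exec.offsetLog.getLatest() match {
    case Some((batchId, offsets)) => println(s"batch $batchId reads from $offsets")
    case None => println("no batches recorded yet")
  }
  // Non-null only after the stream has died with an exception.
  if (exec.streamDeathCause != null) exec.streamDeathCause.printStackTrace()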

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala
index 405a5f0..db0bd9e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamProgress.scala
@@ -26,7 +26,7 @@ class StreamProgress(
     val baseMap: immutable.Map[Source, Offset] = new immutable.HashMap[Source, Offset])
   extends scala.collection.immutable.Map[Source, Offset] {
 
-  private[sql] def toCompositeOffset(source: Seq[Source]): CompositeOffset = {
+  def toCompositeOffset(source: Seq[Source]): CompositeOffset = {
     CompositeOffset(source.map(get))
   }
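
Making toCompositeOffset public lets a StreamProgress snapshot be collapsed into the
single CompositeOffset form used by the offset log. A sketch, assuming a
`progress: StreamProgress` and the query's ordered source list `sources: Seq[Source]`
(both names hypothetical):

  // One Option[Offset] slot per source, in source order; sources with no entry in
  // the map contribute None, since get on an immutable.Map returns an Option.
  val composite: CompositeOffset = progress.toCompositeOffset(sources)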
 

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
index 0667653..a67fdce 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStore.scala
@@ -113,7 +113,7 @@ case class KeyRemoved(key: UnsafeRow) extends StoreUpdate
  * the store is the active instance. Accordingly, it either keeps it loaded and performs
  * maintenance, or unloads the store.
  */
-private[sql] object StateStore extends Logging {
+object StateStore extends Logging {
 
   val MAINTENANCE_INTERVAL_CONFIG = "spark.sql.streaming.stateStore.maintenanceInterval"
   val MAINTENANCE_INTERVAL_DEFAULT_SECS = 60
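
With the StateStore companion object public, its configuration keys can be referenced
by name instead of hard-coding the strings. A sketch, assuming the key is honored when
set on the conf at session construction (app name hypothetical):

  import org.apache.spark.sql.SparkSession
  import org.apache.spark.sql.execution.streaming.state.StateStore

  val spark = SparkSession.builder()
    .appName("state-store-tuning")
    // Halve the 60-second default interval between background maintenance runs.
    .config(StateStore.MAINTENANCE_INTERVAL_CONFIG, "30s")
    .getOrCreate()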

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinator.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinator.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinator.scala
index e418217..d945d7a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinator.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinator.scala
@@ -45,7 +45,7 @@ private object StopCoordinator
   extends StateStoreCoordinatorMessage
 
 /** Helper object used to create reference to [[StateStoreCoordinator]]. */
-private[sql] object StateStoreCoordinatorRef extends Logging {
+object StateStoreCoordinatorRef extends Logging {
 
   private val endpointName = "StateStoreCoordinator"
 
@@ -77,7 +77,7 @@ private[sql] object StateStoreCoordinatorRef extends Logging {
  * Reference to a [[StateStoreCoordinator]] that can be used to coordinate instances of
  * [[StateStore]]s across all the executors, and get their locations for job scheduling.
  */
-private[sql] class StateStoreCoordinatorRef private(rpcEndpointRef: RpcEndpointRef) {
+class StateStoreCoordinatorRef private(rpcEndpointRef: RpcEndpointRef) {
 
   private[state] def reportActiveInstance(
       storeId: StateStoreId,

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
index 4b4fa12..23fc0bd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/ExecutionPage.scala
@@ -24,7 +24,7 @@ import scala.xml.Node
 import org.apache.spark.internal.Logging
 import org.apache.spark.ui.{UIUtils, WebUIPage}
 
-private[sql] class ExecutionPage(parent: SQLTab) extends WebUIPage("execution") with Logging {
+class ExecutionPage(parent: SQLTab) extends WebUIPage("execution") with Logging {
 
   private val listener = parent.listener
 

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
index 6e94791..60f1343 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLListener.scala
@@ -46,14 +46,14 @@ case class SparkListenerSQLExecutionEnd(executionId: Long, time: Long)
 case class SparkListenerDriverAccumUpdates(executionId: Long, accumUpdates: Seq[(Long, Long)])
   extends SparkListenerEvent
 
-private[sql] class SQLHistoryListenerFactory extends SparkHistoryListenerFactory {
+class SQLHistoryListenerFactory extends SparkHistoryListenerFactory {
 
   override def createListeners(conf: SparkConf, sparkUI: SparkUI): Seq[SparkListener] = {
     List(new SQLHistoryListener(conf, sparkUI))
   }
 }
 
-private[sql] class SQLListener(conf: SparkConf) extends SparkListener with Logging {
+class SQLListener(conf: SparkConf) extends SparkListener with Logging {
 
   private val retainedExecutions = conf.getInt("spark.sql.ui.retainedExecutions", 1000)
 
@@ -333,7 +333,7 @@ private[sql] class SQLListener(conf: SparkConf) extends SparkListener with Loggi
 /**
  * A [[SQLListener]] for rendering the SQL UI in the history server.
  */
-private[spark] class SQLHistoryListener(conf: SparkConf, sparkUI: SparkUI)
+class SQLHistoryListener(conf: SparkConf, sparkUI: SparkUI)
   extends SQLListener(conf) {
 
   private var sqlTabAttached = false
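
A now-public SQLListener can be constructed and registered like any other
SparkListener, for example to collect SQL execution data outside the UI. A sketch,
assuming an existing SparkContext `sc`:

  import org.apache.spark.sql.execution.ui.SQLListener

  // Retains up to spark.sql.ui.retainedExecutions (default 1000) executions.
  val sqlListener = new SQLListener(sc.getConf)
  sc.addSparkListener(sqlListener)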

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLTab.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLTab.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLTab.scala
index e8675ce..d0376af 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLTab.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SQLTab.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.ui
 import org.apache.spark.internal.Logging
 import org.apache.spark.ui.{SparkUI, SparkUITab}
 
-private[sql] class SQLTab(val listener: SQLListener, sparkUI: SparkUI)
+class SQLTab(val listener: SQLListener, sparkUI: SparkUI)
   extends SparkUITab(sparkUI, "SQL") with Logging {
 
   val parent = sparkUI
@@ -32,6 +32,6 @@ private[sql] class SQLTab(val listener: SQLListener, sparkUI: SparkUI)
   parent.addStaticHandler(SQLTab.STATIC_RESOURCE_DIR, "/static/sql")
 }
 
-private[sql] object SQLTab {
+object SQLTab {
   private val STATIC_RESOURCE_DIR = "org/apache/spark/sql/execution/ui/static"
 }

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
index 8f5681b..4bb9d6f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ui/SparkPlanGraph.scala
@@ -24,7 +24,7 @@ import scala.collection.mutable
 import org.apache.commons.lang3.StringEscapeUtils
 
 import org.apache.spark.sql.execution.{SparkPlanInfo, WholeStageCodegenExec}
-import org.apache.spark.sql.execution.metric.SQLMetrics
+
 
 /**
  * A graph used for storing information about the execution plan of a DataFrame.
@@ -32,7 +32,7 @@ import org.apache.spark.sql.execution.metric.SQLMetrics
  * Each graph is defined with a set of nodes and a set of edges. Each node represents a node in the
  * SparkPlan tree, and each edge represents a parent-child relationship between two nodes.
  */
-private[ui] case class SparkPlanGraph(
+case class SparkPlanGraph(
     nodes: Seq[SparkPlanGraphNode], edges: Seq[SparkPlanGraphEdge]) {
 
   def makeDotFile(metrics: Map[Long, String]): String = {
@@ -55,7 +55,7 @@ private[ui] case class SparkPlanGraph(
   }
 }
 
-private[sql] object SparkPlanGraph {
+object SparkPlanGraph {
 
   /**
    * Build a SparkPlanGraph from the root of a SparkPlan tree.
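
With SparkPlanGraph public, the DOT rendering used by the SQL tab can be produced
directly. A sketch, assuming a DataFrame `df` and that SparkPlanInfo.fromSparkPlan is
reachable from the calling scope:

  import org.apache.spark.sql.execution.SparkPlanInfo
  import org.apache.spark.sql.execution.ui.SparkPlanGraph

  val planInfo = SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan)
  val graph = SparkPlanGraph(planInfo)
  // Metric values are keyed by accumulator id; an empty map renders the bare plan.
  val dot: String = graph.makeDotFile(Map.empty)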

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
index 6c43fe3..54aee5e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.internal
 
-import org.apache.hadoop.conf.Configuration
-
 import org.apache.spark.SparkContext
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.{SparkSession, SQLContext}

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala
index 15a5d79..3a8b0f1 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala
@@ -34,7 +34,6 @@ import org.apache.spark.sql.hive.MetastoreRelation
  * @param ignoreIfExists allow continuing to work if the table already exists, otherwise
  *                       raise an exception
  */
-private[hive]
 case class CreateHiveTableAsSelectCommand(
     tableDesc: CatalogTable,
     query: LogicalPlan,

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
index cc3e74b..a716a3e 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala
@@ -54,7 +54,7 @@ case class HiveTableScanExec(
   require(partitionPruningPred.isEmpty || relation.hiveQlTable.isPartitioned,
     "Partition pruning predicates only supported for partitioned tables.")
 
-  private[sql] override lazy val metrics = Map(
+  override lazy val metrics = Map(
     "numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output 
rows"))
 
   override def producedAttributes: AttributeSet = outputSet ++
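
The numOutputRows metric declared above can be read back from the executed plan once
an action has run. A sketch, assuming `df` is a DataFrame backed by a Hive table scan
and the 2.0 SQLMetric API, where value returns the accumulated Long:

  df.collect()  // run the query so the metric accumulators are populated
  val rowCounts = df.queryExecution.executedPlan.collect {
    case plan if plan.metrics.contains("numOutputRows") =>
      plan.nodeName -> plan.metrics("numOutputRows").value
  }
  rowCounts.foreach { case (node, rows) => println(s"$node produced $rows rows") }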

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
index dfb1251..9747abb 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/ScriptTransformation.scala
@@ -51,7 +51,6 @@ import org.apache.spark.util.{CircularBuffer, RedirectThread, SerializableConfig
  * @param script the command that should be executed.
  * @param output the attributes that are produced by the script.
  */
-private[hive]
 case class ScriptTransformation(
     input: Seq[Expression],
     script: String,
@@ -336,7 +335,6 @@ private class ScriptTransformationWriterThread(
   }
 }
 
-private[hive]
 object HiveScriptIOSchema {
   def apply(input: ScriptInputOutputSchema): HiveScriptIOSchema = {
     HiveScriptIOSchema(
@@ -355,7 +353,6 @@ object HiveScriptIOSchema {
 /**
  * The wrapper class of Hive input and output schema properties
  */
-private[hive]
 case class HiveScriptIOSchema (
     inputRowFormat: Seq[(String, String)],
     outputRowFormat: Seq[(String, String)],
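
ScriptTransformation is the physical operator behind HiveQL's TRANSFORM clause, and
HiveScriptIOSchema carries its row-format properties. A usage sketch, assuming a
Hive-enabled session `spark` and a table `t` with columns key and value (all
hypothetical):

  // Pipes (key, value) rows through /bin/cat and reads them back as strings.
  val transformed = spark.sql(
    "SELECT TRANSFORM (key, value) USING 'cat' AS (k, v) FROM t")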

http://git-wip-us.apache.org/repos/asf/spark/blob/1c569711/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
index a2c8092..9843f07 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala
@@ -47,7 +47,7 @@ import org.apache.spark.util.SerializableConfiguration
  * [[FileFormat]] for reading ORC files. If this is moved or renamed, please update
  * [[DataSource]]'s backwardCompatibilityMap.
  */
-private[sql] class OrcFileFormat
+class OrcFileFormat
   extends FileFormat with DataSourceRegister with Serializable {
 
   override def shortName(): String = "orc"
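
Because OrcFileFormat registers the short name "orc" through DataSourceRegister, the
format resolves by name rather than by fully-qualified class. For example (paths are
illustrative):

  val orcDf = spark.read.format("orc").load("/data/events.orc")
  orcDf.write.format("orc").save("/data/events_copy.orc")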

