Repository: spark
Updated Branches:
  refs/heads/branch-2.0 75dd78130 -> d357ca302


[SPARK-16813][SQL] Remove private[sql] and private[spark] from catalyst package

The catalyst package is meant to be internal, and as a result it does not make
sense to mark things as private[sql] or private[spark]. Those modifiers simply
make debugging harder when Spark developers need to inspect the plans at runtime.

This patch removes all private[sql] and private[spark] visibility modifiers in 
org.apache.spark.sql.catalyst.

How was this patch tested? N/A - just visibility changes.
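
To illustrate the debugging motivation above: with these members no longer
private[sql]/private[spark], they can be called directly from ad hoc sessions
outside the sql package. A minimal sketch (illustrative only, not part of the
patch; the Point case class is made up) using two members touched by this diff:

    // Hypothetical spark-shell / test snippet against a build containing this patch.
    import org.apache.spark.sql.catalyst.ScalaReflection
    import org.apache.spark.sql.catalyst.analysis.TypeCoercion
    import scala.reflect.runtime.universe._

    case class Point(x: Int, y: Int)

    // Formerly private[sql]; handy when inspecting how catalyst derives encoders and coercions.
    ScalaReflection.definedByConstructorParams(typeOf[Point])  // true: a case class is a Product
    TypeCoercion.numericPrecedence                             // the linear numeric widening order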

Author: Reynold Xin <r...@databricks.com>

Closes #14418 from rxin/SPARK-16813.

(cherry picked from commit 064d91ff7342002414d3274694a8e2e37f154986)
Signed-off-by: Reynold Xin <r...@databricks.com>


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d357ca30
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d357ca30
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d357ca30

Branch: refs/heads/branch-2.0
Commit: d357ca3023c84e472927380bed65b1cee33c4e03
Parents: 75dd781
Author: Reynold Xin <r...@databricks.com>
Authored: Sun Jul 31 16:31:06 2016 +0800
Committer: Reynold Xin <r...@databricks.com>
Committed: Sun Jul 31 11:10:07 2016 -0700

----------------------------------------------------------------------
 .../spark/sql/catalyst/CatalystTypeConverters.scala   |  4 ++--
 .../apache/spark/sql/catalyst/ScalaReflection.scala   |  2 +-
 .../apache/spark/sql/catalyst/analysis/Analyzer.scala |  4 ++--
 .../spark/sql/catalyst/analysis/TypeCoercion.scala    |  2 +-
 .../spark/sql/catalyst/catalog/SessionCatalog.scala   |  6 +++---
 .../apache/spark/sql/catalyst/encoders/package.scala  |  2 +-
 .../spark/sql/catalyst/expressions/Expression.scala   |  2 +-
 .../expressions/MonotonicallyIncreasingID.scala       |  2 +-
 .../sql/catalyst/expressions/SparkPartitionID.scala   |  2 +-
 .../catalyst/expressions/aggregate/interfaces.scala   | 14 +++++++-------
 .../spark/sql/catalyst/expressions/arithmetic.scala   |  2 +-
 .../sql/catalyst/expressions/complexTypeCreator.scala |  4 ++--
 .../catalyst/expressions/complexTypeExtractors.scala  |  2 +-
 .../apache/spark/sql/catalyst/expressions/misc.scala  |  2 +-
 .../spark/sql/catalyst/expressions/predicates.scala   |  4 ++--
 .../apache/spark/sql/catalyst/expressions/rows.scala  |  2 +-
 .../plans/logical/basicLogicalOperators.scala         |  6 +++---
 .../sql/catalyst/util/AbstractScalaRowIterator.scala  |  2 +-
 18 files changed, 32 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
index 9cc7b2a..f542f5c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/CatalystTypeConverters.scala
@@ -382,7 +382,7 @@ object CatalystTypeConverters {
    * Typical use case would be converting a collection of rows that have the same schema. You will
    * call this function once to get a converter, and apply it to every row.
    */
-  private[sql] def createToCatalystConverter(dataType: DataType): Any => Any = {
+  def createToCatalystConverter(dataType: DataType): Any => Any = {
     if (isPrimitive(dataType)) {
       // Although the `else` branch here is capable of handling inbound conversion of primitives,
       // we add some special-case handling for those types here. The motivation for this relates to
@@ -409,7 +409,7 @@ object CatalystTypeConverters {
    * Typical use case would be converting a collection of rows that have the same schema. You will
    * call this function once to get a converter, and apply it to every row.
    */
-  private[sql] def createToScalaConverter(dataType: DataType): Any => Any = {
+  def createToScalaConverter(dataType: DataType): Any => Any = {
     if (isPrimitive(dataType)) {
       identity
     } else {
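
As the doc comment above says, the intended pattern is to create the converter once
and apply it to every row. A minimal usage sketch of the now-public methods
(illustrative only; the schema and row are made up):

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.catalyst.CatalystTypeConverters
    import org.apache.spark.sql.types._

    val schema = StructType(Seq(StructField("name", StringType), StructField("age", IntegerType)))
    val toCatalyst = CatalystTypeConverters.createToCatalystConverter(schema)  // build once
    val toScala = CatalystTypeConverters.createToScalaConverter(schema)

    val internal = toCatalyst(Row("alice", 42))  // external Row -> InternalRow
    val external = toScala(internal)             // InternalRow -> external Row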

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
index 8affb03..dd36468 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/ScalaReflection.scala
@@ -720,7 +720,7 @@ object ScalaReflection extends ScalaReflection {
   /**
    * Whether the fields of the given type is defined entirely by its 
constructor parameters.
    */
-  private[sql] def definedByConstructorParams(tpe: Type): Boolean = {
+  def definedByConstructorParams(tpe: Type): Boolean = {
     tpe <:< localTypeOf[Product] || tpe <:< 
localTypeOf[DefinedByConstructorParams]
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 2efa997..660f523 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -246,7 +246,7 @@ class Analyzer(
       }.isDefined
     }
 
-    private[sql] def hasGroupingFunction(e: Expression): Boolean = {
+    private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
       e.collectFirst {
         case g: Grouping => g
         case g: GroupingID => g
@@ -1412,7 +1412,7 @@ class Analyzer(
     * Construct the output attributes for a [[Generator]], given a list of names.  If the list of
      * names is empty names are assigned from field names in generator.
      */
-    private[sql] def makeGeneratorOutput(
+    private[analysis] def makeGeneratorOutput(
         generator: Generator,
         names: Seq[String]): Seq[Attribute] = {
       val elementAttrs = generator.elementSchema.toAttributes

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
index 9a040f8..8503b8d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TypeCoercion.scala
@@ -63,7 +63,7 @@ object TypeCoercion {
 
   // See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types.
   // The conversion for integral and floating point types have a linear widening hierarchy:
-  private[sql] val numericPrecedence =
+  val numericPrecedence =
     IndexedSeq(
       ByteType,
       ShortType,

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index 3a2e574..977426a 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -746,7 +746,7 @@ class SessionCatalog(
    *
    * This performs reflection to decide what type of [[Expression]] to return 
in the builder.
    */
-  private[sql] def makeFunctionBuilder(name: String, functionClassName: 
String): FunctionBuilder = {
+  def makeFunctionBuilder(name: String, functionClassName: String): 
FunctionBuilder = {
     // TODO: at least support UDAFs here
     throw new UnsupportedOperationException("Use sqlContext.udf.register(...) 
instead.")
   }
@@ -790,7 +790,7 @@ class SessionCatalog(
   /**
    * Look up the [[ExpressionInfo]] associated with the specified function, 
assuming it exists.
    */
-  private[spark] def lookupFunctionInfo(name: FunctionIdentifier): 
ExpressionInfo = synchronized {
+  def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = 
synchronized {
     // TODO: just make function registry take in FunctionIdentifier instead of 
duplicating this
     val database = 
name.database.orElse(Some(currentDb)).map(formatDatabaseName)
     val qualifiedName = name.copy(database = database)
@@ -902,7 +902,7 @@ class SessionCatalog(
    *
    * This is mainly used for tests.
    */
-  private[sql] def reset(): Unit = synchronized {
+  def reset(): Unit = synchronized {
     setCurrentDatabase(DEFAULT_DATABASE)
     listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
       dropDatabase(db, ignoreIfNotExists = false, cascade = true)

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
index 03708fb..59f7969 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/encoders/package.scala
@@ -26,7 +26,7 @@ package object encoders {
   * references from a specific schema.)  This requirement allows us to preserve whether a given
   * object type is being bound by name or by ordinal when doing resolution.
   */
-  private[sql] def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
+  def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
     case e: ExpressionEncoder[A] =>
       e.assertUnresolved()
       e
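
A minimal sketch of calling the now-public encoderFor (illustrative only; it
assumes an implicit Encoder in scope, here taken from the built-in
org.apache.spark.sql.Encoders):

    import org.apache.spark.sql.Encoders
    import org.apache.spark.sql.catalyst.encoders.encoderFor

    implicit val stringEncoder = Encoders.STRING
    val exprEncoder = encoderFor[String]  // the underlying, still-unresolved ExpressionEncoder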

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
----------------------------------------------------------------------
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
index 1f37b68..7abbbe2 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/Expression.scala
@@ -526,7 +526,7 @@ abstract class BinaryOperator extends BinaryExpression with 
ExpectsInputTypes {
 }
 
 
-private[sql] object BinaryOperator {
+object BinaryOperator {
   def unapply(e: BinaryOperator): Option[(Expression, Expression)] = 
Some((e.left, e.right))
 }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala
index 75c6bb2..5b4922e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/MonotonicallyIncreasingID.scala
@@ -40,7 +40,7 @@ import org.apache.spark.sql.types.{DataType, LongType}
       represent the record number within each partition. The assumption is that the data frame has
       less than 1 billion partitions, and each partition has less than 8 billion records.""",
   extended = "> SELECT _FUNC_();\n 0")
-private[sql] case class MonotonicallyIncreasingID() extends LeafExpression with Nondeterministic {
+case class MonotonicallyIncreasingID() extends LeafExpression with Nondeterministic {
 
   /**
    * Record ID within each partition. By being transient, count's value is reset to 0 every time

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SparkPartitionID.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SparkPartitionID.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SparkPartitionID.scala
index 71af59a..1f675d5 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SparkPartitionID.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SparkPartitionID.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.types.{DataType, IntegerType}
 @ExpressionDescription(
   usage = "_FUNC_() - Returns the current partition id of the Spark task",
   extended = "> SELECT _FUNC_();\n 0")
-private[sql] case class SparkPartitionID() extends LeafExpression with Nondeterministic {
+case class SparkPartitionID() extends LeafExpression with Nondeterministic {
 
   override def nullable: Boolean = false
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala
index 504cea5..7a39e56 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/interfaces.scala
@@ -24,14 +24,14 @@ import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
 import org.apache.spark.sql.types._
 
 /** The mode of an [[AggregateFunction]]. */
-private[sql] sealed trait AggregateMode
+sealed trait AggregateMode
 
 /**
  * An [[AggregateFunction]] with [[Partial]] mode is used for partial aggregation.
  * This function updates the given aggregation buffer with the original input of this
  * function. When it has processed all input rows, the aggregation buffer is returned.
  */
-private[sql] case object Partial extends AggregateMode
+case object Partial extends AggregateMode
 
 /**
  * An [[AggregateFunction]] with [[PartialMerge]] mode is used to merge aggregation buffers
@@ -39,7 +39,7 @@ private[sql] case object Partial extends AggregateMode
  * This function updates the given aggregation buffer by merging multiple aggregation buffers.
  * When it has processed all input rows, the aggregation buffer is returned.
  */
-private[sql] case object PartialMerge extends AggregateMode
+case object PartialMerge extends AggregateMode
 
 /**
  * An [[AggregateFunction]] with [[Final]] mode is used to merge aggregation buffers
@@ -47,7 +47,7 @@ private[sql] case object PartialMerge extends AggregateMode
  * This function updates the given aggregation buffer by merging multiple aggregation buffers.
  * When it has processed all input rows, the final result of this function is returned.
  */
-private[sql] case object Final extends AggregateMode
+case object Final extends AggregateMode
 
 /**
  * An [[AggregateFunction]] with [[Complete]] mode is used to evaluate this function directly
@@ -55,13 +55,13 @@ private[sql] case object Final extends AggregateMode
  * This function updates the given aggregation buffer with the original input of this
  * function. When it has processed all input rows, the final result of this function is returned.
  */
-private[sql] case object Complete extends AggregateMode
+case object Complete extends AggregateMode
 
 /**
  * A place holder expressions used in code-gen, it does not change the corresponding value
  * in the row.
  */
-private[sql] case object NoOp extends Expression with Unevaluable {
+case object NoOp extends Expression with Unevaluable {
   override def nullable: Boolean = true
   override def dataType: DataType = NullType
   override def children: Seq[Expression] = Nil
@@ -84,7 +84,7 @@ object AggregateExpression {
  * A container for an [[AggregateFunction]] with its [[AggregateMode]] and a field
  * (`isDistinct`) indicating if DISTINCT keyword is specified for this function.
  */
-private[sql] case class AggregateExpression(
+case class AggregateExpression(
     aggregateFunction: AggregateFunction,
     mode: AggregateMode,
     isDistinct: Boolean,
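
Since the comments above spell out when each mode applies, here is a tiny sketch
(a hypothetical helper, not part of this patch) that pattern matches on the
now-public mode objects:

    import org.apache.spark.sql.catalyst.expressions.aggregate._

    // Summarizes the aggregation protocol described in the doc comments above.
    def describe(mode: AggregateMode): String = mode match {
      case Partial      => "update the buffer with original input rows"
      case PartialMerge => "merge partially aggregated buffers"
      case Final        => "merge buffers and produce the final result"
      case Complete     => "consume original input and produce the final result directly"
    }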

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
index 91ffac0..7ff8795 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/arithmetic.scala
@@ -125,7 +125,7 @@ abstract class BinaryArithmetic extends BinaryOperator {
   }
 }
 
-private[sql] object BinaryArithmetic {
+object BinaryArithmetic {
   def unapply(e: BinaryArithmetic): Option[(Expression, Expression)] = Some((e.left, e.right))
 }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
index 0ca715f..09e22aa 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeCreator.scala
@@ -84,8 +84,8 @@ case class CreateArray(children: Seq[Expression]) extends Expression {
 @ExpressionDescription(
   usage = "_FUNC_(key0, value0, key1, value1...) - Creates a map with the given key/value pairs.")
 case class CreateMap(children: Seq[Expression]) extends Expression {
-  private[sql] lazy val keys = children.indices.filter(_ % 2 == 0).map(children)
-  private[sql] lazy val values = children.indices.filter(_ % 2 != 0).map(children)
+  lazy val keys = children.indices.filter(_ % 2 == 0).map(children)
+  lazy val values = children.indices.filter(_ % 2 != 0).map(children)
 
   override def foldable: Boolean = children.forall(_.foldable)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeExtractors.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeExtractors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeExtractors.scala
index 3b4468f..abb5594 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeExtractors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/complexTypeExtractors.scala
@@ -106,7 +106,7 @@ trait ExtractValue extends Expression
 case class GetStructField(child: Expression, ordinal: Int, name: Option[String] = None)
   extends UnaryExpression with ExtractValue {
 
-  private[sql] lazy val childSchema = child.dataType.asInstanceOf[StructType]
+  lazy val childSchema = child.dataType.asInstanceOf[StructType]
 
   override def dataType: DataType = childSchema(ordinal).dataType
   override def nullable: Boolean = child.nullable || childSchema(ordinal).nullable

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
index d2c94ec..3692075 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/misc.scala
@@ -554,7 +554,7 @@ object XxHash64Function extends InterpretedHashFunction {
 @ExpressionDescription(
   usage = "_FUNC_() - Returns the current database.",
   extended = "> SELECT _FUNC_()")
-private[sql] case class CurrentDatabase() extends LeafExpression with Unevaluable {
+case class CurrentDatabase() extends LeafExpression with Unevaluable {
   override def dataType: DataType = StringType
   override def foldable: Boolean = true
   override def nullable: Boolean = false

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
index a3b098a..100087e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala
@@ -393,13 +393,13 @@ abstract class BinaryComparison extends BinaryOperator with Predicate {
 }
 
 
-private[sql] object BinaryComparison {
+object BinaryComparison {
   def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
 }
 
 
 /** An extractor that matches both standard 3VL equality and null-safe equality. */
-private[sql] object Equality {
+object Equality {
   def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
     case EqualTo(l, r) => Some((l, r))
     case EqualNullSafe(l, r) => Some((l, r))
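
The Equality extractor above matches both standard (EqualTo) and null-safe
(EqualNullSafe) equality. A sketch of how code outside the sql package can now
use it (hypothetical helper, not part of this patch):

    import org.apache.spark.sql.catalyst.expressions._

    // True when a comparison relates an expression to itself, whether written as = or <=>.
    def comparesExpressionToItself(e: Expression): Boolean = e match {
      case Equality(l, r) => l.semanticEquals(r)
      case _ => false
    }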

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/rows.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/rows.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/rows.scala
index e036982..73dceb3 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/rows.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/rows.scala
@@ -218,7 +218,7 @@ class GenericRowWithSchema(values: Array[Any], override val schema: StructType)
  * Note that, while the array is not copied, and thus could technically be mutated after creation,
  * this is not allowed.
  */
-class GenericInternalRow(private[sql] val values: Array[Any]) extends BaseGenericInternalRow {
+class GenericInternalRow(val values: Array[Any]) extends BaseGenericInternalRow {
   /** No-arg constructor for serialization. */
   protected def this() = this(null)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index b31f5aa..eb612c4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -127,7 +127,7 @@ abstract class SetOperation(left: LogicalPlan, right: LogicalPlan) extends Binar
   }
 }
 
-private[sql] object SetOperation {
+object SetOperation {
   def unapply(p: SetOperation): Option[(LogicalPlan, LogicalPlan)] = Some((p.left, p.right))
 }
 
@@ -365,7 +365,7 @@ case class InsertIntoTable(
   override def children: Seq[LogicalPlan] = child :: Nil
   override def output: Seq[Attribute] = Seq.empty
 
-  private[spark] lazy val expectedColumns = {
+  lazy val expectedColumns = {
     if (table.output.isEmpty) {
       None
     } else {
@@ -509,7 +509,7 @@ case class Window(
   def windowOutputSet: AttributeSet = AttributeSet(windowExpressions.map(_.toAttribute))
 }
 
-private[sql] object Expand {
+object Expand {
   /**
    * Extract attribute set according to the grouping id.
    *

http://git-wip-us.apache.org/repos/asf/spark/blob/d357ca30/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/AbstractScalaRowIterator.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/AbstractScalaRowIterator.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/AbstractScalaRowIterator.scala
index 6d35f14..0c7205b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/AbstractScalaRowIterator.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/AbstractScalaRowIterator.scala
@@ -23,7 +23,7 @@ package org.apache.spark.sql.catalyst.util
  * `Row` in order to work around a spurious IntelliJ compiler error. This cannot be an abstract
  * class because that leads to compilation errors under Scala 2.11.
  */
-private[spark] class AbstractScalaRowIterator[T] extends Iterator[T] {
+class AbstractScalaRowIterator[T] extends Iterator[T] {
   override def hasNext: Boolean = throw new NotImplementedError
 
   override def next(): T = throw new NotImplementedError

