This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 596a5ff [MINOR][BUILD] Update genjavadoc to 0.13
596a5ff is described below
commit 596a5ff2737e531fbca2f31db1eb9aadd8f08882
Author: Sean Owen <[email protected]>
AuthorDate: Wed Apr 24 13:44:48 2019 +0900
[MINOR][BUILD] Update genjavadoc to 0.13
## What changes were proposed in this pull request?
Kind of related to https://github.com/gatorsmile/spark/pull/5 - let's
update genjavadoc to see if it generates fewer spurious javadoc errors to begin
with.
## How was this patch tested?
Existing docs build
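For context: genjavadoc is the scalac compiler plugin that the sbt-unidoc build plugin uses to emit Java stub sources from Scala code so that javadoc can run over them; the version Spark pins lives in the `unidocGenjavadocVersion` setting changed in `project/SparkBuild.scala` below. As a minimal illustrative sketch only (a hypothetical standalone project, not Spark's actual build definition; the sbt-unidoc coordinates and Scala version are assumptions), pinning the plugin version looks roughly like this:

```scala
// Hypothetical build.sbt; only the genjavadoc version pin mirrors this patch.
// Assumes project/plugins.sbt contains something like:
//   addSbtPlugin("com.eed3si9n" % "sbt-unidoc" % "0.4.2")

lazy val root = (project in file("."))
  // JavaUnidocPlugin pulls in GenJavadocPlugin, which attaches the genjavadoc
  // scalac plugin so javadoc can be run over the generated Java stubs.
  .enablePlugins(JavaUnidocPlugin)
  .settings(
    scalaVersion := "2.12.8",
    // Pin the genjavadoc compiler-plugin version, as this commit does for Spark.
    unidocGenjavadocVersion := "0.13"
  )
```

Rebuilding the docs (e.g. via sbt's `unidoc` task) then surfaces any javadoc errors caused by Scaladoc-only constructs that genjavadoc passes through, which is what the doc-comment tweaks in the diff below work around.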
Closes #24443 from srowen/genjavadoc013.
Authored-by: Sean Owen <[email protected]>
Signed-off-by: HyukjinKwon <[email protected]>
---
.../org/apache/spark/rpc/RpcCallContext.scala | 2 +-
.../spark/status/api/v1/ApiRootResource.scala | 6 +++---
.../org/apache/spark/util/SizeEstimator.scala | 2 +-
.../streaming/kinesis/SparkAWSCredentials.scala | 4 ++--
.../main/scala/org/apache/spark/ml/ann/Layer.scala | 6 +++---
.../org/apache/spark/ml/attribute/attributes.scala | 2 +-
.../org/apache/spark/ml/stat/Correlation.scala | 2 +-
.../org/apache/spark/ml/tree/treeParams.scala | 25 +++++++++++-----------
.../mllib/stat/test/StreamingTestMethod.scala | 2 +-
project/SparkBuild.scala | 2 +-
.../apache/spark/sql/hive/client/HiveClient.scala | 6 +++---
11 files changed, 30 insertions(+), 29 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala b/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
index 117f51c..f6b2059 100644
--- a/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
+++ b/core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala
@@ -24,7 +24,7 @@ package org.apache.spark.rpc
private[spark] trait RpcCallContext {
/**
- * Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
+ * Reply a message to the sender. If the sender is [[RpcEndpoint]], its `RpcEndpoint.receive`
* will be called.
*/
def reply(response: Any): Unit
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
index 84c2ad4..83f76db 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala
@@ -77,7 +77,7 @@ private[spark] trait UIRoot {
/**
* Runs some code with the current SparkUI instance for the app / attempt.
*
- * @throws NoSuchElementException If the app / attempt pair does not exist.
+ * @throws java.util.NoSuchElementException If the app / attempt pair does not exist.
*/
def withSparkUI[T](appId: String, attemptId: Option[String])(fn: SparkUI => T): T
@@ -85,8 +85,8 @@ private[spark] trait UIRoot {
def getApplicationInfo(appId: String): Option[ApplicationInfo]
/**
- * Write the event logs for the given app to the [[ZipOutputStream]] instance. If attemptId is
- * [[None]], event logs for all attempts of this application will be written out.
+ * Write the event logs for the given app to the `ZipOutputStream` instance. If attemptId is
+ * `None`, event logs for all attempts of this application will be written out.
*/
def writeEventLogs(appId: String, attemptId: Option[String], zipStream: ZipOutputStream): Unit = {
Response.serverError()
diff --git a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
index e09f1fc..09c69f5 100644
--- a/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
+++ b/core/src/main/scala/org/apache/spark/util/SizeEstimator.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.collection.OpenHashSet
/**
* A trait that allows a class to give [[SizeEstimator]] more accurate size estimation.
* When a class extends it, [[SizeEstimator]] will query the `estimatedSize` first.
- * If `estimatedSize` does not return [[None]], [[SizeEstimator]] will use the returned size
+ * If `estimatedSize` does not return `None`, [[SizeEstimator]] will use the returned size
* as the size of the object. Otherwise, [[SizeEstimator]] will do the estimation work.
* The difference between a [[KnownSizeEstimation]] and
* [[org.apache.spark.util.collection.SizeTracker]] is that, a
diff --git a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
index dcb60b2..7488971 100644
--- a/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
+++ b/external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/SparkAWSCredentials.scala
@@ -101,8 +101,8 @@ object SparkAWSCredentials {
*
* @note The given AWS keypair will be saved in DStream checkpoints if checkpointing is
* enabled. Make sure that your checkpoint directory is secure. Prefer using the
- * [[http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default default provider chain]]
- * instead if possible.
+ * default provider chain instead if possible
+ * (http://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html#credentials-default).
*
* @param accessKeyId AWS access key ID
* @param secretKey AWS secret key
diff --git a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
index 014ff07..2b4b0fc 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala
@@ -371,7 +371,7 @@ private[ann] trait TopologyModel extends Serializable {
def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]
/**
- * Prediction of the model. See {@link ProbabilisticClassificationModel}
+ * Prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return prediction
@@ -379,7 +379,7 @@ private[ann] trait TopologyModel extends Serializable {
def predict(features: Vector): Vector
/**
- * Raw prediction of the model. See {@link ProbabilisticClassificationModel}
+ * Raw prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return raw prediction
@@ -389,7 +389,7 @@ private[ann] trait TopologyModel extends Serializable {
def predictRaw(features: Vector): Vector
/**
- * Probability of the model. See {@link ProbabilisticClassificationModel}
+ * Probability of the model. See `ProbabilisticClassificationModel`
*
* @param rawPrediction raw prediction vector
* @return probability
diff --git a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
index 1cd2b1a..756dd67 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/attribute/attributes.scala
@@ -121,7 +121,7 @@ sealed abstract class Attribute extends Serializable {
private[attribute] trait AttributeFactory {
/**
- * Creates an [[Attribute]] from a [[Metadata]] instance.
+ * Creates an [[Attribute]] from a `Metadata` instance.
*/
private[attribute] def fromMetadata(metadata: Metadata): Attribute
diff --git a/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala b/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
index 6e885d7..8167ea6 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/stat/Correlation.scala
@@ -49,7 +49,7 @@ object Correlation {
* Supported: `pearson` (default), `spearman`
* @return A dataframe that contains the correlation matrix of the column of vectors. This
* dataframe contains a single row and a single column of name
- * '$METHODNAME($COLUMN)'.
+ * `$METHODNAME($COLUMN)`.
* @throws IllegalArgumentException if the column is not a valid column in the dataset, or if
* the content of this column is not of type Vector.
*
diff --git a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
index df01dc0..c1e44e9 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/tree/treeParams.scala
@@ -40,39 +40,39 @@ private[ml] trait DecisionTreeParams extends PredictorParams
with HasCheckpointInterval with HasSeed with HasWeightCol {
/**
- * Maximum depth of the tree (>= 0).
+ * Maximum depth of the tree (nonnegative).
* E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.
* (default = 5)
* @group param
*/
final val maxDepth: IntParam =
- new IntParam(this, "maxDepth", "Maximum depth of the tree. (>= 0)" +
+ new IntParam(this, "maxDepth", "Maximum depth of the tree. (Nonnegative)" +
" E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2
leaf nodes.",
ParamValidators.gtEq(0))
/**
* Maximum number of bins used for discretizing continuous features and for choosing how to split
* on features at each node. More bins give higher granularity.
- * Must be >= 2 and >= number of categories in any categorical feature.
+ * Must be at least 2 and at least number of categories in any categorical feature.
* (default = 32)
* @group param
*/
final val maxBins: IntParam = new IntParam(this, "maxBins", "Max number of bins for" +
- " discretizing continuous features. Must be >=2 and >= number of
categories for any" +
- " categorical feature.", ParamValidators.gtEq(2))
+ " discretizing continuous features. Must be at least 2 and at least
number of categories" +
+ " for any categorical feature.", ParamValidators.gtEq(2))
/**
* Minimum number of instances each child must have after split.
* If a split causes the left or right child to have fewer than minInstancesPerNode,
* the split will be discarded as invalid.
- * Should be >= 1.
+ * Must be at least 1.
* (default = 1)
* @group param
*/
final val minInstancesPerNode: IntParam = new IntParam(this, "minInstancesPerNode", "Minimum" +
" number of instances each child must have after split. If a split causes the left or right" +
" child to have fewer than minInstancesPerNode, the split will be discarded as invalid." +
- " Should be >= 1.", ParamValidators.gtEq(1))
+ " Must be at least 1.", ParamValidators.gtEq(1))
/**
* Minimum fraction of the weighted sample count that each child must have after split.
@@ -91,7 +91,7 @@ private[ml] trait DecisionTreeParams extends PredictorParams
/**
* Minimum information gain for a split to be considered at a tree node.
- * Should be >= 0.0.
+ * Should be at least 0.0.
* (default = 0.0)
* @group param
*/
@@ -316,7 +316,7 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
* Supported options:
* - "auto": Choose automatically for task:
* If numTrees == 1, set to "all."
- * If numTrees > 1 (forest), set to "sqrt" for classification and
+ * If numTrees greater than 1 (forest), set to "sqrt" for classification and
* to "onethird" for regression.
* - "all": use all features
* - "onethird": use 1/3 of the features
@@ -361,8 +361,8 @@ private[ml] trait TreeEnsembleParams extends DecisionTreeParams {
private[ml] trait RandomForestParams extends TreeEnsembleParams {
/**
- * Number of trees to train (>= 1).
- * If 1, then no bootstrapping is used. If > 1, then bootstrapping is done.
+ * Number of trees to train (at least 1).
+ * If 1, then no bootstrapping is used. If greater than 1, then bootstrapping is done.
* TODO: Change to always do bootstrapping (simpler). SPARK-7130
* (default = 20)
*
@@ -371,7 +371,8 @@ private[ml] trait RandomForestParams extends TreeEnsembleParams {
* are a bit different.
* @group param
*/
- final val numTrees: IntParam = new IntParam(this, "numTrees", "Number of trees to train (>= 1)",
+ final val numTrees: IntParam =
+ new IntParam(this, "numTrees", "Number of trees to train (at least 1)",
ParamValidators.gtEq(1))
setDefault(numTrees -> 20)
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
index 14ac14d..8f3d0f8 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/stat/test/StreamingTestMethod.scala
@@ -33,7 +33,7 @@ import org.apache.spark.util.StatCounter
/**
* Significance testing methods for [[StreamingTest]]. New 2-sample statistical significance tests
* should extend [[StreamingTestMethod]] and introduce a new entry in
- * [[StreamingTestMethod.TEST_NAME_TO_OBJECT]]
+ * `StreamingTestMethod.TEST_NAME_TO_OBJECT`
*/
private[stat] sealed trait StreamingTestMethod extends Serializable {
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index f55f187..83fe904 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -219,7 +219,7 @@ object SparkBuild extends PomBuild {
.map(file),
incOptions := incOptions.value.withNameHashing(true),
publishMavenStyle := true,
- unidocGenjavadocVersion := "0.11",
+ unidocGenjavadocVersion := "0.13",
// Override SBT's default resolvers:
resolvers := Seq(
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
index f697174..e1280d0 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
@@ -41,7 +41,7 @@ private[hive] trait HiveClient {
/**
* Return the associated Hive SessionState of this [[HiveClientImpl]]
- * @return [[Any]] not SessionState to avoid linkage error
+ * @return `Any` not SessionState to avoid linkage error
*/
def getState: Any
@@ -76,7 +76,7 @@ private[hive] trait HiveClient {
/** Return whether a table/view with the specified name exists. */
def tableExists(dbName: String, tableName: String): Boolean
- /** Returns the specified table, or throws [[NoSuchTableException]]. */
+ /** Returns the specified table, or throws `NoSuchTableException`. */
final def getTable(dbName: String, tableName: String): CatalogTable = {
getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
}
@@ -166,7 +166,7 @@ private[hive] trait HiveClient {
table: String,
newParts: Seq[CatalogTablePartition]): Unit
- /** Returns the specified partition, or throws [[NoSuchPartitionException]]. */
+ /** Returns the specified partition, or throws `NoSuchPartitionException`. */
final def getPartition(
dbName: String,
tableName: String,
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]