Repository: spark
Updated Branches:
  refs/heads/master cb09e93c1 -> 9dd635eb5


SPARK-2480: Resolve sbt warnings "NOTE: SPARK_YARN is deprecated, please use 
-Pyarn flag"

Author: witgo <[email protected]>

Closes #1404 from witgo/run-tests and squashes the following commits:

f703aee [witgo] fix Note: implicit method fromPairDStream is not applicable 
here because it comes after the application point and it lacks an explicit 
result type
2944f51 [witgo] Remove "NOTE: SPARK_YARN is deprecated, please use -Pyarn flag"
ef59c70 [witgo] fix Note: implicit method fromPairDStream is not applicable 
here because it comes after the application point and it lacks an explicit 
result type
6cefee5 [witgo] Remove "NOTE: SPARK_YARN is deprecated, please use -Pyarn flag"


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9dd635eb
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9dd635eb
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9dd635eb

Branch: refs/heads/master
Commit: 9dd635eb5df52835b3b7f4f2b9c789da9e813c71
Parents: cb09e93
Author: witgo <[email protected]>
Authored: Tue Jul 15 10:46:17 2014 -0700
Committer: Patrick Wendell <[email protected]>
Committed: Tue Jul 15 10:46:17 2014 -0700

----------------------------------------------------------------------
 dev/run-tests                            | 7 +++----
 dev/scalastyle                           | 6 +++---
 docs/hadoop-third-party-distributions.md | 4 ++--
 docs/sql-programming-guide.md            | 2 +-
 4 files changed, 9 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/9dd635eb/dev/run-tests
----------------------------------------------------------------------
diff --git a/dev/run-tests b/dev/run-tests
index edd17b5..51e4def 100755
--- a/dev/run-tests
+++ b/dev/run-tests
@@ -21,8 +21,7 @@
 FWDIR="$(cd `dirname $0`/..; pwd)"
 cd $FWDIR
 
-export SPARK_HADOOP_VERSION=2.3.0
-export SPARK_YARN=true
+export SBT_MAVEN_PROFILES="-Pyarn -Phadoop-2.3 -Dhadoop.version=2.3.0"
 
 # Remove work directory
 rm -rf ./work
@@ -66,8 +65,8 @@ echo 
"========================================================================="
 # (either resolution or compilation) prompts the user for input either q, r, 
 # etc to quit or retry. This echo is there to make it not block.
 if [ -n "$_RUN_SQL_TESTS" ]; then
-  echo -e "q\n" | SPARK_HIVE=true sbt/sbt clean package assembly/assembly test 
| \
-    grep -v -e "info.*Resolving" -e "warn.*Merging" -e "info.*Including"
+  echo -e "q\n" | SBT_MAVEN_PROFILES="$SBT_MAVEN_PROFILES -Phive" sbt/sbt 
clean package \
+    assembly/assembly test | grep -v -e "info.*Resolving" -e "warn.*Merging" 
-e "info.*Including"
 else
   echo -e "q\n" | sbt/sbt clean package assembly/assembly test | \
     grep -v -e "info.*Resolving" -e "warn.*Merging" -e "info.*Including"

http://git-wip-us.apache.org/repos/asf/spark/blob/9dd635eb/dev/scalastyle
----------------------------------------------------------------------
diff --git a/dev/scalastyle b/dev/scalastyle
index 0e8fd5c..a02d069 100755
--- a/dev/scalastyle
+++ b/dev/scalastyle
@@ -17,12 +17,12 @@
 # limitations under the License.
 #
 
-echo -e "q\n" | SPARK_HIVE=true sbt/sbt scalastyle > scalastyle.txt
+echo -e "q\n" | sbt/sbt -Phive scalastyle > scalastyle.txt
 # Check style with YARN alpha built too
-echo -e "q\n" | SPARK_HADOOP_VERSION=0.23.9 SPARK_YARN=true sbt/sbt 
yarn-alpha/scalastyle \
+echo -e "q\n" | sbt/sbt -Pyarn -Phadoop-0.23 -Dhadoop.version=0.23.9 
yarn-alpha/scalastyle \
   >> scalastyle.txt
 # Check style with YARN built too
-echo -e "q\n" | SPARK_HADOOP_VERSION=2.2.0 SPARK_YARN=true sbt/sbt 
yarn/scalastyle \
+echo -e "q\n" | sbt/sbt -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 
yarn/scalastyle \
   >> scalastyle.txt
 
 ERRORS=$(cat scalastyle.txt | grep -e "\<error\>")

http://git-wip-us.apache.org/repos/asf/spark/blob/9dd635eb/docs/hadoop-third-party-distributions.md
----------------------------------------------------------------------
diff --git a/docs/hadoop-third-party-distributions.md 
b/docs/hadoop-third-party-distributions.md
index 32403bc..ab1023b 100644
--- a/docs/hadoop-third-party-distributions.md
+++ b/docs/hadoop-third-party-distributions.md
@@ -48,9 +48,9 @@ the _exact_ Hadoop version you are running to avoid any 
compatibility errors.
   </tr>
 </table>
 
-In SBT, the equivalent can be achieved by setting the SPARK_HADOOP_VERSION 
flag:
+In SBT, the equivalent can be achieved by setting the `hadoop.version` 
property:
 
-    SPARK_HADOOP_VERSION=1.0.4 sbt/sbt assembly
+    sbt/sbt -Dhadoop.version=1.0.4 assembly
 
 # Linking Applications to the Hadoop Version
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9dd635eb/docs/sql-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 522c838..3872853 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -474,7 +474,7 @@ anotherPeople = sqlContext.jsonRDD(anotherPeopleRDD)
 
 Spark SQL also supports reading and writing data stored in [Apache 
Hive](http://hive.apache.org/).
 However, since Hive has a large number of dependencies, it is not included in 
the default Spark assembly.
-In order to use Hive you must first run '`SPARK_HIVE=true sbt/sbt 
assembly/assembly`' (or use `-Phive` for maven).
+In order to use Hive you must first run '`sbt/sbt -Phive assembly/assembly`' 
(or use `-Phive` for maven).
 This command builds a new assembly jar that includes Hive. Note that this Hive 
assembly jar must also be present
 on all of the worker nodes, as they will need access to the Hive serialization 
and deserialization libraries
 (SerDes) in order to access data stored in Hive.

Reply via email to