Github user pwendell commented on a diff in the pull request:

    https://github.com/apache/spark/pull/772#discussion_r14536931
  
    --- Diff: project/SparkBuild.scala ---
    @@ -434,250 +235,41 @@ object SparkBuild extends Build {
           "-noqualifier", "java.lang"
         )
       )
    +}
     
    -  def replSettings = sharedSettings ++ Seq(
    -    name := "spark-repl",
    -    libraryDependencies <+= scalaVersion(v => "org.scala-lang"  % 
"scala-compiler" % v),
    -    libraryDependencies <+= scalaVersion(v => "org.scala-lang"  % "jline"  
        % v),
    -    libraryDependencies <+= scalaVersion(v => "org.scala-lang"  % 
"scala-reflect"  % v)
    -  )
    -
    -  def examplesSettings = sharedSettings ++ Seq(
    -    name := "spark-examples",
    -    jarName in assembly <<= version map {
    -      v => "spark-examples-" + v + "-hadoop" + hadoopVersion + ".jar" },
    -    libraryDependencies ++= Seq(
    -      "com.twitter"          %% "algebird-core"   % "0.1.11",
    -      "org.apache.hbase" % "hbase" % HBASE_VERSION 
excludeAll(excludeIONetty, excludeJBossNetty, excludeAsm, excludeOldAsm, 
excludeCommonsLogging, excludeJruby),
    -      "org.apache.cassandra" % "cassandra-all" % "1.2.6"
    -        exclude("com.google.guava", "guava")
    -        exclude("com.googlecode.concurrentlinkedhashmap", 
"concurrentlinkedhashmap-lru")
    -        exclude("com.ning","compress-lzf")
    -        exclude("io.netty", "netty")
    -        exclude("jline","jline")
    -        exclude("org.apache.cassandra.deps", "avro")
    -        excludeAll(excludeSLF4J, excludeIONetty),
    -      "com.github.scopt" %% "scopt" % "3.2.0"
    -    )
    -  ) ++ assemblySettings ++ extraAssemblySettings
    -
    -  def toolsSettings = sharedSettings ++ Seq(
    -    name := "spark-tools",
    -    libraryDependencies <+= scalaVersion(v => "org.scala-lang"  % 
"scala-compiler" % v),
    -    libraryDependencies <+= scalaVersion(v => "org.scala-lang"  % 
"scala-reflect"  % v )
    -  ) ++ assemblySettings ++ extraAssemblySettings
    -
    -  def graphxSettings = sharedSettings ++ Seq(
    -    name := "spark-graphx",
    -    previousArtifact := sparkPreviousArtifact("spark-graphx"),
    -    libraryDependencies ++= Seq(
    -      "org.jblas" % "jblas" % jblasVersion
    -    )
    -  )
    -
    -  def bagelSettings = sharedSettings ++ Seq(
    -    name := "spark-bagel",
    -    previousArtifact := sparkPreviousArtifact("spark-bagel")
    -  )
    -
    -  def mllibSettings = sharedSettings ++ Seq(
    -    name := "spark-mllib",
    -    previousArtifact := sparkPreviousArtifact("spark-mllib"),
    -    libraryDependencies ++= Seq(
    -      "org.jblas" % "jblas" % jblasVersion,
    -      "org.scalanlp" %% "breeze" % "0.7" excludeAll(excludeJUnit)
    -    )
    -  )
    -
    -  def catalystSettings = sharedSettings ++ Seq(
    -    name := "catalyst",
    -    // The mechanics of rewriting expression ids to compare trees in some test cases makes
    -    // assumptions about the expression ids being contiguous.  Running tests in parallel breaks
    -    // this non-deterministically.  TODO: FIX THIS.
    -    parallelExecution in Test := false,
    -    libraryDependencies ++= Seq(
    -      "com.typesafe" %% "scalalogging-slf4j" % "1.0.1"
    -    )
    -  )
    +object TestSettings {
    +  import BuildCommons._
     
    -  def sqlCoreSettings = sharedSettings ++ Seq(
    -    name := "spark-sql",
    -    libraryDependencies ++= Seq(
    -      "com.twitter"                  % "parquet-column"             % 
parquetVersion,
    -      "com.twitter"                  % "parquet-hadoop"             % 
parquetVersion,
    -      "com.fasterxml.jackson.core"   % "jackson-databind"           % 
"2.3.0" // json4s-jackson 3.2.6 requires jackson-databind 2.3.0.
    -    ),
    -    initialCommands in console :=
    -      """
    -        |import org.apache.spark.sql.catalyst.analysis._
    -        |import org.apache.spark.sql.catalyst.dsl._
    -        |import org.apache.spark.sql.catalyst.errors._
    -        |import org.apache.spark.sql.catalyst.expressions._
    -        |import org.apache.spark.sql.catalyst.plans.logical._
    -        |import org.apache.spark.sql.catalyst.rules._
    -        |import org.apache.spark.sql.catalyst.types._
    -        |import org.apache.spark.sql.catalyst.util._
    -        |import org.apache.spark.sql.execution
    -        |import org.apache.spark.sql.test.TestSQLContext._
    -        |import org.apache.spark.sql.parquet.ParquetTestData""".stripMargin
    -  )
    +  lazy val settings = Seq (
    +    // Fork new JVMs for tests and set Java options for those
    +    fork := true,
    +    javaOptions in Test += "-Dspark.home=" + sparkHome,
    +    javaOptions in Test += "-Dspark.testing=1",
    +    javaOptions in Test += "-Dsun.io.serialization.extendedDebugInfo=true",
    +    javaOptions in Test ++= System.getProperties.filter(_._1 startsWith "spark").map { case (k,v) => s"-D$k=$v" }.toSeq,
    +    javaOptions in Test ++= "-Xmx3g -XX:PermSize=128M -XX:MaxNewSize=256m 
-XX:MaxPermSize=1g".split(" ").toSeq,
    --- End diff ---
    
    Could we bring this into line with the 100 character limit?
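    
    For example (just a sketch, not compiled against this build), passing the
    options as an explicit Seq instead of splitting one long string would keep
    each line under 100 characters:
    
        // equivalent to the split(" ").toSeq version, but wraps cleanly
        javaOptions in Test ++= Seq("-Xmx3g", "-XX:PermSize=128M",
          "-XX:MaxNewSize=256m", "-XX:MaxPermSize=1g"),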

