http://git-wip-us.apache.org/repos/asf/spark-website/blob/24d32b75/site/docs/1.6.3/api/java/org/apache/spark/SparkContext.html ---------------------------------------------------------------------- diff --git a/site/docs/1.6.3/api/java/org/apache/spark/SparkContext.html b/site/docs/1.6.3/api/java/org/apache/spark/SparkContext.html new file mode 100644 index 0000000..786b34c --- /dev/null +++ b/site/docs/1.6.3/api/java/org/apache/spark/SparkContext.html @@ -0,0 +1,2961 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> +<!-- NewPage --> +<html lang="en"> +<head> +<!-- Generated by javadoc (version 1.7.0_79) on Wed Nov 02 15:16:17 PDT 2016 --> +<title>SparkContext (Spark 1.6.3 JavaDoc)</title> +<meta name="date" content="2016-11-02"> +<link rel="stylesheet" type="text/css" href="../../../stylesheet.css" title="Style"> +</head> +<body> +<script type="text/javascript"><!-- + if (location.href.indexOf('is-external=true') == -1) { + parent.document.title="SparkContext (Spark 1.6.3 JavaDoc)"; + } +//--> +</script> +<noscript> +<div>JavaScript is disabled on your browser.</div> +</noscript> +<!-- ========= START OF TOP NAVBAR ======= --> +<div class="topNav"><a name="navbar_top"> +<!-- --> +</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> +<!-- --> +</a> +<ul class="navList" title="Navigation"> +<li><a href="../../../overview-summary.html">Overview</a></li> +<li><a href="package-summary.html">Package</a></li> +<li class="navBarCell1Rev">Class</li> +<li><a href="package-tree.html">Tree</a></li> +<li><a href="../../../deprecated-list.html">Deprecated</a></li> +<li><a href="../../../index-all.html">Index</a></li> +<li><a href="../../../help-doc.html">Help</a></li> +</ul> +</div> +<div class="subNav"> +<ul class="navList"> +<li><a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark"><span class="strong">Prev Class</span></a></li> +<li><a href="../../../org/apache/spark/SparkContext.DoubleAccumulatorParam$.html" title="class in org.apache.spark"><span class="strong">Next Class</span></a></li> +</ul> +<ul class="navList"> +<li><a href="../../../index.html?org/apache/spark/SparkContext.html" target="_top">Frames</a></li> +<li><a href="SparkContext.html" target="_top">No Frames</a></li> +</ul> +<ul class="navList" id="allclasses_navbar_top"> +<li><a href="../../../allclasses-noframe.html">All Classes</a></li> +</ul> +<div> +<script type="text/javascript"><!-- + allClassesLink = document.getElementById("allclasses_navbar_top"); + if(window==top) { + allClassesLink.style.display = "block"; + } + else { + allClassesLink.style.display = "none"; + } + //--> +</script> +</div> +<div> +<ul class="subNavList"> +<li>Summary: </li> +<li><a href="#nested_class_summary">Nested</a> | </li> +<li>Field | </li> +<li><a href="#constructor_summary">Constr</a> | </li> +<li><a href="#method_summary">Method</a></li> +</ul> +<ul class="subNavList"> +<li>Detail: </li> +<li>Field | </li> +<li><a href="#constructor_detail">Constr</a> | </li> +<li><a href="#method_detail">Method</a></li> +</ul> +</div> +<a name="skip-navbar_top"> +<!-- --> +</a></div> +<!-- ========= END OF TOP NAVBAR ========= --> +<!-- ======== START OF CLASS DATA ======== --> +<div class="header"> +<div class="subTitle">org.apache.spark</div> +<h2 title="Class SparkContext" class="title">Class SparkContext</h2> +</div> +<div class="contentContainer"> +<ul class="inheritance"> +<li>Object</li> +<li> +<ul class="inheritance"> 
+<li>org.apache.spark.SparkContext</li> +</ul> +</li> +</ul> +<div class="description"> +<ul class="blockList"> +<li class="blockList"> +<dl> +<dt>All Implemented Interfaces:</dt> +<dd><a href="../../../org/apache/spark/Logging.html" title="interface in org.apache.spark">Logging</a></dd> +</dl> +<hr> +<br> +<pre>public class <span class="strong">SparkContext</span> +extends Object +implements <a href="../../../org/apache/spark/Logging.html" title="interface in org.apache.spark">Logging</a></pre> +<div class="block">Main entry point for Spark functionality. A SparkContext represents the connection to a Spark + cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster. + <p> + Only one SparkContext may be active per JVM. You must <code>stop()</code> the active SparkContext before + creating a new one. This limitation may eventually be removed; see SPARK-2243 for more details. + <p> + param: config a Spark Config object describing the application configuration. Any settings in + this config override the default configs as well as system properties.</div> +</li> +</ul> +</div> +<div class="summary"> +<ul class="blockList"> +<li class="blockList"> +<!-- ======== NESTED CLASS SUMMARY ======== --> +<ul class="blockList"> +<li class="blockList"><a name="nested_class_summary"> +<!-- --> +</a> +<h3>Nested Class Summary</h3> +<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Nested Class Summary table, listing nested classes, and an explanation"> +<caption><span>Nested Classes</span><span class="tabEnd"> </span></caption> +<tr> +<th class="colFirst" scope="col">Modifier and Type</th> +<th class="colLast" scope="col">Class and Description</th> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static class </code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.DoubleAccumulatorParam$.html" title="class in org.apache.spark">SparkContext.DoubleAccumulatorParam$</a></strong></code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static class </code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.FloatAccumulatorParam$.html" title="class in org.apache.spark">SparkContext.FloatAccumulatorParam$</a></strong></code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static class </code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.IntAccumulatorParam$.html" title="class in org.apache.spark">SparkContext.IntAccumulatorParam$</a></strong></code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static class </code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.LongAccumulatorParam$.html" title="class in org.apache.spark">SparkContext.LongAccumulatorParam$</a></strong></code> </td> +</tr> +</table> +</li> +</ul> +<!-- ======== CONSTRUCTOR SUMMARY ======== --> +<ul class="blockList"> +<li class="blockList"><a name="constructor_summary"> +<!-- --> +</a> +<h3>Constructor Summary</h3> +<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Constructor Summary table, listing constructors, and an explanation"> +<caption><span>Constructors</span><span class="tabEnd"> </span></caption> +<tr> +<th class="colOne" scope="col">Constructor and Description</th> +</tr> +<tr class="altColor"> +<td class="colOne"><code><strong><a
href="../../../org/apache/spark/SparkContext.html#SparkContext()">SparkContext</a></strong>()</code> +<div class="block">Create a SparkContext that loads settings from system properties (for instance, when + launching with ./bin/spark-submit).</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colOne"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SparkContext(org.apache.spark.SparkConf)">SparkContext</a></strong>(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config)</code> </td> +</tr> +<tr class="altColor"> +<td class="colOne"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SparkContext(org.apache.spark.SparkConf,%20scala.collection.Map)">SparkContext</a></strong>(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config, + scala.collection.Map<String,scala.collection.Set<<a href="../../../org/apache/spark/scheduler/SplitInfo.html" title="class in org.apache.spark.scheduler">SplitInfo</a>>> preferredNodeLocationData)</code> +<div class="block">:: DeveloperApi :: + Alternative constructor for setting preferred locations where Spark will create executors.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colOne"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SparkContext(java.lang.String,%20java.lang.String,%20org.apache.spark.SparkConf)">SparkContext</a></strong>(String master, + String appName, + <a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> conf)</code> +<div class="block">Alternative constructor that allows setting common Spark properties directly</div> +</td> +</tr> +<tr class="altColor"> +<td class="colOne"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SparkContext(java.lang.String,%20java.lang.String,%20java.lang.String,%20scala.collection.Seq,%20scala.collection.Map,%20scala.collection.Map)">SparkContext</a></strong>(String master, + String appName, + String sparkHome, + scala.collection.Seq<String> jars, + scala.collection.Map<String,String> environment, + scala.collection.Map<String,scala.collection.Set<<a href="../../../org/apache/spark/scheduler/SplitInfo.html" title="class in org.apache.spark.scheduler">SplitInfo</a>>> preferredNodeLocationData)</code> +<div class="block">Alternative constructor that allows setting common Spark properties directly</div> +</td> +</tr> +</table> +</li> +</ul> +<!-- ========== METHOD SUMMARY =========== --> +<ul class="blockList"> +<li class="blockList"><a name="method_summary"> +<!-- --> +</a> +<h3>Method Summary</h3> +<table class="overviewSummary" border="0" cellpadding="3" cellspacing="0" summary="Method Summary table, listing methods, and an explanation"> +<caption><span>Methods</span><span class="tabEnd"> </span></caption> +<tr> +<th class="colFirst" scope="col">Modifier and Type</th> +<th class="colLast" scope="col">Method and Description</th> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><R,T> <a href="../../../org/apache/spark/Accumulable.html" title="class in org.apache.spark">Accumulable</a><R,T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#accumulable(R,%20org.apache.spark.AccumulableParam)">accumulable</a></strong>(R initialValue, + <a href="../../../org/apache/spark/AccumulableParam.html" title="interface in org.apache.spark">AccumulableParam</a><R,T> param)</code> +<div class="block">Create an <a 
href="../../../org/apache/spark/Accumulable.html" title="class in org.apache.spark"><code>Accumulable</code></a> shared variable, to which tasks can add values + with <code>+=</code>.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><R,T> <a href="../../../org/apache/spark/Accumulable.html" title="class in org.apache.spark">Accumulable</a><R,T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#accumulable(R,%20java.lang.String,%20org.apache.spark.AccumulableParam)">accumulable</a></strong>(R initialValue, + String name, + <a href="../../../org/apache/spark/AccumulableParam.html" title="interface in org.apache.spark">AccumulableParam</a><R,T> param)</code> +<div class="block">Create an <a href="../../../org/apache/spark/Accumulable.html" title="class in org.apache.spark"><code>Accumulable</code></a> shared variable, with a name for display in the + Spark UI.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><R,T> <a href="../../../org/apache/spark/Accumulable.html" title="class in org.apache.spark">Accumulable</a><R,T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#accumulableCollection(R,%20scala.Function1,%20scala.reflect.ClassTag)">accumulableCollection</a></strong>(R initialValue, + scala.Function1<R,scala.collection.generic.Growable<T>> evidence$9, + scala.reflect.ClassTag<R> evidence$10)</code> +<div class="block">Create an accumulator from a "mutable collection" type.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/Accumulator.html" title="class in org.apache.spark">Accumulator</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#accumulator(T,%20org.apache.spark.AccumulatorParam)">accumulator</a></strong>(T initialValue, + <a href="../../../org/apache/spark/AccumulatorParam.html" title="interface in org.apache.spark">AccumulatorParam</a><T> param)</code> +<div class="block">Create an <a href="../../../org/apache/spark/Accumulator.html" title="class in org.apache.spark"><code>Accumulator</code></a> variable of a given type, which tasks can "add" + values to using the <code>+=</code> method.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/Accumulator.html" title="class in org.apache.spark">Accumulator</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#accumulator(T,%20java.lang.String,%20org.apache.spark.AccumulatorParam)">accumulator</a></strong>(T initialValue, + String name, + <a href="../../../org/apache/spark/AccumulatorParam.html" title="interface in org.apache.spark">AccumulatorParam</a><T> param)</code> +<div class="block">Create an <a href="../../../org/apache/spark/Accumulator.html" title="class in org.apache.spark"><code>Accumulator</code></a> variable of a given type, with a name for display + in the Spark UI.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.collection.mutable.HashMap<String,Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#addedFiles()">addedFiles</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.collection.mutable.HashMap<String,Object></code></td> +<td class="colLast"><code><strong><a 
href="../../../org/apache/spark/SparkContext.html#addedJars()">addedJars</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#addFile(java.lang.String)">addFile</a></strong>(String path)</code> +<div class="block">Add a file to be downloaded with this Spark job on every node.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#addFile(java.lang.String,%20boolean)">addFile</a></strong>(String path, + boolean recursive)</code> +<div class="block">Add a file to be downloaded with this Spark job on every node.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#addJar(java.lang.String)">addJar</a></strong>(String path)</code> +<div class="block">Adds a JAR dependency for all tasks to be executed on this SparkContext in the future.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#addSparkListener(org.apache.spark.scheduler.SparkListener)">addSparkListener</a></strong>(<a href="../../../org/apache/spark/scheduler/SparkListener.html" title="interface in org.apache.spark.scheduler">SparkListener</a> listener)</code> +<div class="block">:: DeveloperApi :: + Register a listener to receive up-calls from events that happen during execution.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.Option<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#applicationAttemptId()">applicationAttemptId</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#applicationId()">applicationId</a></strong>()</code> +<div class="block">A unique identifier for the Spark application.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#appName()">appName</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<String,<a href="../../../org/apache/spark/input/PortableDataStream.html" title="class in org.apache.spark.input">PortableDataStream</a>>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#binaryFiles(java.lang.String,%20int)">binaryFiles</a></strong>(String path, + int minPartitions)</code> +<div class="block">Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file + (useful for binary data)</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><byte[]></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#binaryRecords(java.lang.String,%20int,%20org.apache.hadoop.conf.Configuration)">binaryRecords</a></strong>(String path, + int recordLength, + org.apache.hadoop.conf.Configuration conf)</code> +<div 
class="block">Load data from a flat binary file, assuming the length of each record is constant.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#booleanWritableConverter()">booleanWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.BooleanWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#boolToBoolWritable(boolean)">boolToBoolWritable</a></strong>(boolean b)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/broadcast/Broadcast.html" title="class in org.apache.spark.broadcast">Broadcast</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#broadcast(T,%20scala.reflect.ClassTag)">broadcast</a></strong>(T value, + scala.reflect.ClassTag<T> evidence$11)</code> +<div class="block">Broadcast a read-only variable to the cluster, returning a + <a href="../../../org/apache/spark/broadcast/Broadcast.html" title="class in org.apache.spark.broadcast"><code>Broadcast</code></a> object for reading it in distributed functions.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.BytesWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#bytesToBytesWritable(byte[])">bytesToBytesWritable</a></strong>(byte[] aob)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<byte[]></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#bytesWritableConverter()">bytesWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#cancelAllJobs()">cancelAllJobs</a></strong>()</code> +<div class="block">Cancel all jobs that have been scheduled or are running.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#cancelJobGroup(java.lang.String)">cancelJobGroup</a></strong>(String groupId)</code> +<div class="block">Cancel active jobs for the specified group.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.Option<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#checkpointDir()">checkpointDir</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#clearCallSite()">clearCallSite</a></strong>()</code> +<div class="block">Clear the thread-local property for overriding the call sites + of actions and RDDs.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#clearFiles()">clearFiles</a></strong>()</code> +<div class="block">Clear the job's list of files added by <code>addFile</code> so that they do not get downloaded to + any new nodes.</div> +</td> +</tr> +<tr class="altColor"> +<td 
class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#clearJars()">clearJars</a></strong>()</code> +<div class="block">Clear the job's list of JARs added by <code>addJar</code> so that they do not get downloaded to + any new nodes.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#clearJobGroup()">clearJobGroup</a></strong>()</code> +<div class="block">Clear the current thread's job group ID and its description.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>int</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#defaultMinPartitions()">defaultMinPartitions</a></strong>()</code> +<div class="block">Default min number of partitions for Hadoop RDDs when not given by user + Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>int</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#defaultMinSplits()">defaultMinSplits</a></strong>()</code> +<div class="block">Default min number of partitions for Hadoop RDDs when not given by user</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>int</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#defaultParallelism()">defaultParallelism</a></strong>()</code> +<div class="block">Default level of parallelism to use when not given by user (e.g.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static <a href="../../../org/apache/spark/rdd/DoubleRDDFunctions.html" title="class in org.apache.spark.rdd">DoubleRDDFunctions</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#doubleRDDToDoubleRDDFunctions(org.apache.spark.rdd.RDD)">doubleRDDToDoubleRDDFunctions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><Object> rdd)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.DoubleWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#doubleToDoubleWritable(double)">doubleToDoubleWritable</a></strong>(double d)</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#doubleWritableConverter()">doubleWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#DRIVER_IDENTIFIER()">DRIVER_IDENTIFIER</a></strong>()</code> +<div class="block">Executor id for the driver.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T> <any></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#emptyRDD(scala.reflect.ClassTag)">emptyRDD</a></strong>(scala.reflect.ClassTag<T> evidence$8)</code> +<div class="block">Get an RDD that has no partitions or elements.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.collection.mutable.HashMap<String,String></code></td> +<td 
class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#executorEnvs()">executorEnvs</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#externalBlockStoreFolderName()">externalBlockStoreFolderName</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.collection.Seq<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#files()">files</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.FloatWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#floatToFloatWritable(float)">floatToFloatWritable</a></strong>(float f)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#floatWritableConverter()">floatWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.collection.Seq<org.apache.spark.scheduler.Schedulable></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getAllPools()">getAllPools</a></strong>()</code> +<div class="block">:: DeveloperApi :: + Return pools for fair scheduler</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.Option<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getCheckpointDir()">getCheckpointDir</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getConf()">getConf</a></strong>()</code> +<div class="block">Return a copy of this SparkContext's configuration.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.collection.Map<String,scala.Tuple2<Object,Object>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getExecutorMemoryStatus()">getExecutorMemoryStatus</a></strong>()</code> +<div class="block">Return a map from the slave to the max memory available for caching and the remaining + memory available for caching.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/storage/StorageStatus.html" title="class in org.apache.spark.storage">StorageStatus</a>[]</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getExecutorStorageStatus()">getExecutorStorageStatus</a></strong>()</code> +<div class="block">:: DeveloperApi :: + Return information about blocks stored in all of the slaves</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getLocalProperty(java.lang.String)">getLocalProperty</a></strong>(String key)</code> +<div class="block">Get a local property set in this thread, or null if it is missing.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static <a href="../../../org/apache/spark/SparkContext.html" 
title="class in org.apache.spark">SparkContext</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getOrCreate()">getOrCreate</a></strong>()</code> +<div class="block">This function may be used to get or instantiate a SparkContext and register it as a + singleton object.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static <a href="../../../org/apache/spark/SparkContext.html" title="class in org.apache.spark">SparkContext</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getOrCreate(org.apache.spark.SparkConf)">getOrCreate</a></strong>(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config)</code> +<div class="block">This function may be used to get or instantiate a SparkContext and register it as a + singleton object.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.collection.Map<Object,<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><?>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getPersistentRDDs()">getPersistentRDDs</a></strong>()</code> +<div class="block">Returns an immutable map of RDDs that have marked themselves as persistent via cache() call.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.Option<org.apache.spark.scheduler.Schedulable></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getPoolForName(java.lang.String)">getPoolForName</a></strong>(String pool)</code> +<div class="block">:: DeveloperApi :: + Return the pool associated with the given name, if one exists</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/storage/RDDInfo.html" title="class in org.apache.spark.storage">RDDInfo</a>[]</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getRDDStorageInfo()">getRDDStorageInfo</a></strong>()</code> +<div class="block">:: DeveloperApi :: + Return information about what RDDs are cached, if they are in mem or on disk, how much space + they take, etc.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>scala.Enumeration.Value</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#getSchedulingMode()">getSchedulingMode</a></strong>()</code> +<div class="block">Return current scheduling mode</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>org.apache.hadoop.conf.Configuration</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#hadoopConfiguration()">hadoopConfiguration</a></strong>()</code> +<div class="block">A default Hadoop Configuration for the Hadoop code (e.g.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#hadoopFile(java.lang.String,%20java.lang.Class,%20java.lang.Class,%20java.lang.Class,%20int)">hadoopFile</a></strong>(String path, + Class<? 
extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, + Class<K> keyClass, + Class<V> valueClass, + int minPartitions)</code> +<div class="block">Get an RDD for a Hadoop file with an arbitrary InputFormat</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><K,V,F extends org.apache.hadoop.mapred.InputFormat<K,V>> <br><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#hadoopFile(java.lang.String,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag)">hadoopFile</a></strong>(String path, + scala.reflect.ClassTag<K> km, + scala.reflect.ClassTag<V> vm, + scala.reflect.ClassTag<F> fm)</code> +<div class="block">Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, + values and the InputFormat so that users don't need to pass them directly.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V,F extends org.apache.hadoop.mapred.InputFormat<K,V>> <br><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#hadoopFile(java.lang.String,%20int,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag)">hadoopFile</a></strong>(String path, + int minPartitions, + scala.reflect.ClassTag<K> km, + scala.reflect.ClassTag<V> vm, + scala.reflect.ClassTag<F> fm)</code> +<div class="block">Smarter version of hadoopFile() that uses class tags to figure out the classes of keys, + values and the InputFormat so that users don't need to pass them directly.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><K,V> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#hadoopRDD(org.apache.hadoop.mapred.JobConf,%20java.lang.Class,%20java.lang.Class,%20java.lang.Class,%20int)">hadoopRDD</a></strong>(org.apache.hadoop.mapred.JobConf conf, + Class<? 
extends org.apache.hadoop.mapred.InputFormat<K,V>> inputFormatClass, + Class<K> keyClass, + Class<V> valueClass, + int minPartitions)</code> +<div class="block">Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other + necessary info (e.g.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#initLocalProperties()">initLocalProperties</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.IntWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#intToIntWritable(int)">intToIntWritable</a></strong>(int i)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#intWritableConverter()">intWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>boolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#isLocal()">isLocal</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>boolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#isStopped()">isStopped</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static scala.Option<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#jarOfClass(java.lang.Class)">jarOfClass</a></strong>(Class<?> cls)</code> +<div class="block">Find the JAR from which a given class was loaded, to make it easy for users to pass + their JARs to SparkContext.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static scala.Option<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#jarOfObject(java.lang.Object)">jarOfObject</a></strong>(Object obj)</code> +<div class="block">Find the JAR that contains the class of a particular object, to make it easy for users + to pass their JARs to SparkContext.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>scala.collection.Seq<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#jars()">jars</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>boolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#killExecutor(java.lang.String)">killExecutor</a></strong>(String executorId)</code> +<div class="block">:: DeveloperApi :: + Request that the cluster manager kill the specified executor.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>boolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#killExecutors(scala.collection.Seq)">killExecutors</a></strong>(scala.collection.Seq<String> executorIds)</code> +<div class="block">:: DeveloperApi :: + Request that the cluster manager kill the specified executors.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a 
href="../../../org/apache/spark/SparkContext.html#LEGACY_DRIVER_IDENTIFIER()">LEGACY_DRIVER_IDENTIFIER</a></strong>()</code> +<div class="block">Legacy version of DRIVER_IDENTIFIER, retained for backwards-compatibility.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>org.apache.spark.scheduler.LiveListenerBus</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#listenerBus()">listenerBus</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.LongWritable</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#longToLongWritable(long)">longToLongWritable</a></strong>(long l)</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<Object></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#longWritableConverter()">longWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#makeRDD(scala.collection.Seq,%20int,%20scala.reflect.ClassTag)">makeRDD</a></strong>(scala.collection.Seq<T> seq, + int numSlices, + scala.reflect.ClassTag<T> evidence$2)</code> +<div class="block">Distribute a local Scala collection to form an RDD.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#makeRDD(scala.collection.Seq,%20scala.reflect.ClassTag)">makeRDD</a></strong>(scala.collection.Seq<scala.Tuple2<T,scala.collection.Seq<String>>> seq, + scala.reflect.ClassTag<T> evidence$3)</code> +<div class="block">Distribute a local Scala collection to form an RDD, with one or more + location preferences (hostnames of Spark nodes) for each object.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#master()">master</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>org.apache.spark.metrics.MetricsSystem</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#metricsSystem()">metricsSystem</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V,F extends org.apache.hadoop.mapreduce.InputFormat<K,V>> <br><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#newAPIHadoopFile(java.lang.String,%20java.lang.Class,%20java.lang.Class,%20java.lang.Class,%20org.apache.hadoop.conf.Configuration)">newAPIHadoopFile</a></strong>(String path, + Class<F> fClass, + Class<K> kClass, + Class<V> vClass, + org.apache.hadoop.conf.Configuration conf)</code> +<div class="block">Get an RDD for a given Hadoop file with an arbitrary new API InputFormat + and extra configuration options to pass to the input format.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><K,V,F extends 
org.apache.hadoop.mapreduce.InputFormat<K,V>> <br><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#newAPIHadoopFile(java.lang.String,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag)">newAPIHadoopFile</a></strong>(String path, + scala.reflect.ClassTag<K> km, + scala.reflect.ClassTag<V> vm, + scala.reflect.ClassTag<F> fm)</code> +<div class="block">Get an RDD for a Hadoop file with an arbitrary new API InputFormat.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V,F extends org.apache.hadoop.mapreduce.InputFormat<K,V>> <br><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#newAPIHadoopRDD(org.apache.hadoop.conf.Configuration,%20java.lang.Class,%20java.lang.Class,%20java.lang.Class)">newAPIHadoopRDD</a></strong>(org.apache.hadoop.conf.Configuration conf, + Class<F> fClass, + Class<K> kClass, + Class<V> vClass)</code> +<div class="block">Get an RDD for a given Hadoop file with an arbitrary new API InputFormat + and extra configuration options to pass to the input format.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static <T> <a href="../../../org/apache/spark/rdd/DoubleRDDFunctions.html" title="class in org.apache.spark.rdd">DoubleRDDFunctions</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#numericRDDToDoubleRDDFunctions(org.apache.spark.rdd.RDD,%20scala.math.Numeric)">numericRDDToDoubleRDDFunctions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.math.Numeric<T> num)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#objectFile(java.lang.String,%20int,%20scala.reflect.ClassTag)">objectFile</a></strong>(String path, + int minPartitions, + scala.reflect.ClassTag<T> evidence$4)</code> +<div class="block">Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and + BytesWritable values that contain a serialized partition.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#parallelize(scala.collection.Seq,%20int,%20scala.reflect.ClassTag)">parallelize</a></strong>(scala.collection.Seq<T> seq, + int numSlices, + scala.reflect.ClassTag<T> evidence$1)</code> +<div class="block">Distribute a local Scala collection to form an RDD.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><any></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#persistentRdds()">persistentRdds</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><Object></code></td> +<td class="colLast"><code><strong><a 
href="../../../org/apache/spark/SparkContext.html#range(long,%20long,%20long,%20int)">range</a></strong>(long start, + long end, + long step, + int numSlices)</code> +<div class="block">Creates a new RDD[Long] containing elements from <code>start</code> to <code>end</code>(exclusive), increased by + <code>step</code> every element.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#RDD_SCOPE_KEY()">RDD_SCOPE_KEY</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#RDD_SCOPE_NO_OVERRIDE_KEY()">RDD_SCOPE_NO_OVERRIDE_KEY</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static <T> <a href="../../../org/apache/spark/rdd/AsyncRDDActions.html" title="class in org.apache.spark.rdd">AsyncRDDActions</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#rddToAsyncRDDActions(org.apache.spark.rdd.RDD,%20scala.reflect.ClassTag)">rddToAsyncRDDActions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.reflect.ClassTag<T> evidence$22)</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static <K,V> <a href="../../../org/apache/spark/rdd/OrderedRDDFunctions.html" title="class in org.apache.spark.rdd">OrderedRDDFunctions</a><K,V,scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#rddToOrderedRDDFunctions(org.apache.spark.rdd.RDD,%20scala.math.Ordering,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag)">rddToOrderedRDDFunctions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.math.Ordering<K> evidence$27, + scala.reflect.ClassTag<K> evidence$28, + scala.reflect.ClassTag<V> evidence$29)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static <K,V> <a href="../../../org/apache/spark/rdd/PairRDDFunctions.html" title="class in org.apache.spark.rdd">PairRDDFunctions</a><K,V></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#rddToPairRDDFunctions(org.apache.spark.rdd.RDD,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag,%20scala.math.Ordering)">rddToPairRDDFunctions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.reflect.ClassTag<K> kt, + scala.reflect.ClassTag<V> vt, + scala.math.Ordering<K> ord)</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static <K,V> <a href="../../../org/apache/spark/rdd/SequenceFileRDDFunctions.html" title="class in org.apache.spark.rdd">SequenceFileRDDFunctions</a><K,V></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#rddToSequenceFileRDDFunctions(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.reflect.ClassTag,%20scala.Function1,%20scala.reflect.ClassTag)">rddToSequenceFileRDDFunctions</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.Function1<K,org.apache.hadoop.io.Writable> evidence$23, + scala.reflect.ClassTag<K> evidence$24, + 
scala.Function1<V,org.apache.hadoop.io.Writable> evidence$25, + scala.reflect.ClassTag<V> evidence$26)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>boolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#requestExecutors(int)">requestExecutors</a></strong>(int numAdditionalExecutors)</code> +<div class="block">:: DeveloperApi :: + Request an additional number of executors from the cluster manager.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U,R> <a href="../../../org/apache/spark/partial/PartialResult.html" title="class in org.apache.spark.partial">PartialResult</a><R></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runApproximateJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20,%20long)">runApproximateJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + <any> evaluator, + long timeout)</code> +<div class="block">:: DeveloperApi :: + Run a job that can return approximate results.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function1<scala.collection.Iterator<T>,U> func, + scala.reflect.ClassTag<U> evidence$19)</code> +<div class="block">Run a job on all partitions in an RDD and return the results in an array.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U> void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.Function2,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function1<scala.collection.Iterator<T>,U> processPartition, + scala.Function2<Object,U,scala.runtime.BoxedUnit> resultHandler, + scala.reflect.ClassTag<U> evidence$21)</code> +<div class="block">Run a job on all partitions in an RDD and pass the results to a handler function.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.collection.Seq,%20boolean,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function1<scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + boolean allowLocal, + scala.reflect.ClassTag<U> evidence$17)</code> +<div class="block">Run a job on a given set of partitions of an RDD, but take a function of type + <code>Iterator[T] => U</code> instead of <code>(TaskContext, Iterator[T]) => U</code>.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a 
href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.collection.Seq,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function1<scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + scala.reflect.ClassTag<U> evidence$14)</code> +<div class="block">Run a job on a given set of partitions of an RDD, but take a function of type + <code>Iterator[T] => U</code> instead of <code>(TaskContext, Iterator[T]) => U</code>.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + scala.reflect.ClassTag<U> evidence$18)</code> +<div class="block">Run a job on all partitions in an RDD and return the results in an array.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U> void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.Function2,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> processPartition, + scala.Function2<Object,U,scala.runtime.BoxedUnit> resultHandler, + scala.reflect.ClassTag<U> evidence$20)</code> +<div class="block">Run a job on all partitions in an RDD and pass the results to a handler function.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.collection.Seq,%20boolean,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + boolean allowLocal, + scala.reflect.ClassTag<U> evidence$16)</code> +<div class="block">Run a function on a given set of partitions in an RDD and return the results as an array.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U> void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.collection.Seq,%20boolean,%20scala.Function2,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + boolean allowLocal, + 
scala.Function2<Object,U,scala.runtime.BoxedUnit> resultHandler, + scala.reflect.ClassTag<U> evidence$15)</code> +<div class="block">Run a function on a given set of partitions in an RDD and pass the results to the given + handler function.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T,U> Object</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.collection.Seq,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + scala.reflect.ClassTag<U> evidence$13)</code> +<div class="block">Run a function on a given set of partitions in an RDD and return the results as an array.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U> void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#runJob(org.apache.spark.rdd.RDD,%20scala.Function2,%20scala.collection.Seq,%20scala.Function2,%20scala.reflect.ClassTag)">runJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function2<<a href="../../../org/apache/spark/TaskContext.html" title="class in org.apache.spark">TaskContext</a>,scala.collection.Iterator<T>,U> func, + scala.collection.Seq<Object> partitions, + scala.Function2<Object,U,scala.runtime.BoxedUnit> resultHandler, + scala.reflect.ClassTag<U> evidence$12)</code> +<div class="block">Run a function on a given set of partitions in an RDD and pass the results to the given + handler function.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#sequenceFile(java.lang.String,%20java.lang.Class,%20java.lang.Class)">sequenceFile</a></strong>(String path, + Class<K> keyClass, + Class<V> valueClass)</code> +<div class="block">Get an RDD for a Hadoop SequenceFile with given key and value types.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><K,V> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#sequenceFile(java.lang.String,%20java.lang.Class,%20java.lang.Class,%20int)">sequenceFile</a></strong>(String path, + Class<K> keyClass, + Class<V> valueClass, + int minPartitions)</code> +<div class="block">Get an RDD for a Hadoop SequenceFile with given key and value types.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><K,V> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#sequenceFile(java.lang.String,%20int,%20scala.reflect.ClassTag,%20scala.reflect.ClassTag,%20scala.Function0,%20scala.Function0)">sequenceFile</a></strong>(String path, + int minPartitions, + scala.reflect.ClassTag<K> km, + scala.reflect.ClassTag<V> vm, + 
scala.Function0<org.apache.spark.WritableConverter<K>> kcf, + scala.Function0<org.apache.spark.WritableConverter<V>> vcf)</code> +<div class="block">Version of sequenceFile() for types implicitly convertible to Writables through a + WritableConverter.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setCallSite(java.lang.String)">setCallSite</a></strong>(String shortCallSite)</code> +<div class="block">Set the thread-local property for overriding the call sites + of actions and RDDs.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setCheckpointDir(java.lang.String)">setCheckpointDir</a></strong>(String directory)</code> +<div class="block">Set the directory under which RDDs are going to be checkpointed.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setJobDescription(java.lang.String)">setJobDescription</a></strong>(String value)</code> +<div class="block">Set a human-readable description of the current job.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setJobGroup(java.lang.String,%20java.lang.String,%20boolean)">setJobGroup</a></strong>(String groupId, + String description, + boolean interruptOnCancel)</code> +<div class="block">Assigns a group ID to all the jobs started by this thread until the group ID is set to a + different value or cleared.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setLocalProperty(java.lang.String,%20java.lang.String)">setLocalProperty</a></strong>(String key, + String value)</code> +<div class="block">Set a local property that affects jobs submitted from this thread, such as the + Spark fair scheduler pool.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#setLogLevel(java.lang.String)">setLogLevel</a></strong>(String logLevel)</code> +<div class="block">Control the log level.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SPARK_JOB_DESCRIPTION()">SPARK_JOB_DESCRIPTION</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SPARK_JOB_GROUP_ID()">SPARK_JOB_GROUP_ID</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#SPARK_JOB_INTERRUPT_ON_CANCEL()">SPARK_JOB_INTERRUPT_ON_CANCEL</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#sparkUser()">sparkUser</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td 
class="colFirst"><code>long</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#startTime()">startTime</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/SparkStatusTracker.html" title="class in org.apache.spark">SparkStatusTracker</a></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#statusTracker()">statusTracker</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>void</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#stop()">stop</a></strong>()</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>java.util.concurrent.atomic.AtomicBoolean</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#stopped()">stopped</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code>static org.apache.hadoop.io.Text</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#stringToText(java.lang.String)">stringToText</a></strong>(String s)</code> </td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static org.apache.spark.WritableConverter<String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#stringWritableConverter()">stringWritableConverter</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T,U,R> <a href="../../../org/apache/spark/SimpleFutureAction.html" title="class in org.apache.spark">SimpleFutureAction</a><R></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#submitJob(org.apache.spark.rdd.RDD,%20scala.Function1,%20scala.collection.Seq,%20scala.Function2,%20scala.Function0)">submitJob</a></strong>(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.Function1<scala.collection.Iterator<T>,U> processPartition, + scala.collection.Seq<Object> partitions, + scala.Function2<Object,U,scala.runtime.BoxedUnit> resultHandler, + scala.Function0<R> resultFunc)</code> +<div class="block">Submit a job for execution and return a FutureJob holding the result.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#tachyonFolderName()">tachyonFolderName</a></strong>()</code> </td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><String></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#textFile(java.lang.String,%20int)">textFile</a></strong>(String path, + int minPartitions)</code> +<div class="block">Read a text file from HDFS, a local file system (available on all nodes), or any + Hadoop-supported file system URI, and return it as an RDD of Strings.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#union(org.apache.spark.rdd.RDD,%20scala.collection.Seq,%20scala.reflect.ClassTag)">union</a></strong>(<a 
href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> first, + scala.collection.Seq<<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T>> rest, + scala.reflect.ClassTag<T> evidence$7)</code> +<div class="block">Build the union of a list of RDDs passed as variable-length arguments.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><T> <a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#union(scala.collection.Seq,%20scala.reflect.ClassTag)">union</a></strong>(scala.collection.Seq<<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T>> rdds, + scala.reflect.ClassTag<T> evidence$6)</code> +<div class="block">Build the union of a list of RDDs.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>String</code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#version()">version</a></strong>()</code> +<div class="block">The version of Spark on which this application is running.</div> +</td> +</tr> +<tr class="rowColor"> +<td class="colFirst"><code><a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<String,String>></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#wholeTextFiles(java.lang.String,%20int)">wholeTextFiles</a></strong>(String path, + int minPartitions)</code> +<div class="block">Read a directory of text files from HDFS, a local file system (available on all nodes), or any + Hadoop-supported file system URI.</div> +</td> +</tr> +<tr class="altColor"> +<td class="colFirst"><code>static <T extends org.apache.hadoop.io.Writable> <br>org.apache.spark.WritableConverter<T></code></td> +<td class="colLast"><code><strong><a href="../../../org/apache/spark/SparkContext.html#writableWritableConverter()">writableWritableConverter</a></strong>()</code> </td> +</tr> +</table> +<ul class="blockList"> +<li class="blockList"><a name="methods_inherited_from_class_Object"> +<!-- --> +</a> +<h3>Methods inherited from class Object</h3> +<code>equals, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait</code></li> +</ul> +<ul class="blockList"> +<li class="blockList"><a name="methods_inherited_from_class_org.apache.spark.Logging"> +<!-- --> +</a> +<h3>Methods inherited from interface org.apache.spark.<a href="../../../org/apache/spark/Logging.html" title="interface in org.apache.spark">Logging</a></h3> +<code><a href="../../../org/apache/spark/Logging.html#initializeIfNecessary()">initializeIfNecessary</a>, <a href="../../../org/apache/spark/Logging.html#initializeLogging()">initializeLogging</a>, <a href="../../../org/apache/spark/Logging.html#isTraceEnabled()">isTraceEnabled</a>, <a href="../../../org/apache/spark/Logging.html#log_()">log_</a>, <a href="../../../org/apache/spark/Logging.html#log()">log</a>, <a href="../../../org/apache/spark/Logging.html#logDebug(scala.Function0)">logDebug</a>, <a href="../../../org/apache/spark/Logging.html#logDebug(scala.Function0,%20java.lang.Throwable)">logDebug</a>, <a href="../../../org/apache/spark/Logging.html#logError(scala.Function0)">logError</a>, <a href="../../../org/apache/spark/Logging.html#logError(scala.Function0,%20java.lang.Throwable)">logError</a>, <a 
href="../../../org/apache/spark/Logging.html#logInfo(scala.Function0)">logInfo</a>, <a href="../../../org/apache/spark/Logging.html#logInfo(scala.Function0,%20java.lang.Throwable )">logInfo</a>, <a href="../../../org/apache/spark/Logging.html#logName()">logName</a>, <a href="../../../org/apache/spark/Logging.html#logTrace(scala.Function0)">logTrace</a>, <a href="../../../org/apache/spark/Logging.html#logTrace(scala.Function0,%20java.lang.Throwable)">logTrace</a>, <a href="../../../org/apache/spark/Logging.html#logWarning(scala.Function0)">logWarning</a>, <a href="../../../org/apache/spark/Logging.html#logWarning(scala.Function0,%20java.lang.Throwable)">logWarning</a></code></li> +</ul> +</li> +</ul> +</li> +</ul> +</div> +<div class="details"> +<ul class="blockList"> +<li class="blockList"> +<!-- ========= CONSTRUCTOR DETAIL ======== --> +<ul class="blockList"> +<li class="blockList"><a name="constructor_detail"> +<!-- --> +</a> +<h3>Constructor Detail</h3> +<a name="SparkContext(org.apache.spark.SparkConf)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SparkContext</h4> +<pre>public SparkContext(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config)</pre> +</li> +</ul> +<a name="SparkContext()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SparkContext</h4> +<pre>public SparkContext()</pre> +<div class="block">Create a SparkContext that loads settings from system properties (for instance, when + launching with ./bin/spark-submit).</div> +</li> +</ul> +<a name="SparkContext(org.apache.spark.SparkConf, scala.collection.Map)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SparkContext</h4> +<pre>public SparkContext(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config, + scala.collection.Map<String,scala.collection.Set<<a href="../../../org/apache/spark/scheduler/SplitInfo.html" title="class in org.apache.spark.scheduler">SplitInfo</a>>> preferredNodeLocationData)</pre> +<div class="block">:: DeveloperApi :: + Alternative constructor for setting preferred locations where Spark will create executors. + <p></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>config</code> - a <a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark"><code>SparkConf</code></a> object specifying other Spark parameters</dd><dd><code>preferredNodeLocationData</code> - not used. Left for backward compatibility.</dd></dl> +</li> +</ul> +<a name="SparkContext(java.lang.String, java.lang.String, org.apache.spark.SparkConf)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SparkContext</h4> +<pre>public SparkContext(String master, + String appName, + <a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> conf)</pre> +<div class="block">Alternative constructor that allows setting common Spark properties directly + <p></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>master</code> - Cluster URL to connect to (e.g. 
mesos://host:port, spark://host:port, local[4]).</dd><dd><code>appName</code> - A name for your application, to display on the cluster web UI.</dd><dd><code>conf</code> - a <a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark"><code>SparkConf</code></a> object specifying other Spark parameters</dd></dl> +</li> +</ul> +<a name="SparkContext(java.lang.String, java.lang.String, java.lang.String, scala.collection.Seq, scala.collection.Map, scala.collection.Map)"> +<!-- --> +</a> +<ul class="blockListLast"> +<li class="blockList"> +<h4>SparkContext</h4> +<pre>public SparkContext(String master, + String appName, + String sparkHome, + scala.collection.Seq<String> jars, + scala.collection.Map<String,String> environment, + scala.collection.Map<String,scala.collection.Set<<a href="../../../org/apache/spark/scheduler/SplitInfo.html" title="class in org.apache.spark.scheduler">SplitInfo</a>>> preferredNodeLocationData)</pre> +<div class="block">Alternative constructor that allows setting common Spark properties directly. + <p></div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>master</code> - Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).</dd><dd><code>appName</code> - A name for your application, to display on the cluster web UI.</dd><dd><code>sparkHome</code> - Location where Spark is installed on cluster nodes.</dd><dd><code>jars</code> - Collection of JARs to send to the cluster. These can be paths on the local file + system or HDFS, HTTP, HTTPS, or FTP URLs.</dd><dd><code>environment</code> - Environment variables to set on worker nodes.</dd><dd><code>preferredNodeLocationData</code> - not used. Left for backward compatibility.</dd></dl> +</li> +</ul> +</li> +</ul> +<!-- ============ METHOD DETAIL ========== --> +<ul class="blockList"> +<li class="blockList"><a name="method_detail"> +<!-- --> +</a> +<h3>Method Detail</h3> +<a name="getOrCreate(org.apache.spark.SparkConf)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>getOrCreate</h4> +<pre>public static <a href="../../../org/apache/spark/SparkContext.html" title="class in org.apache.spark">SparkContext</a> getOrCreate(<a href="../../../org/apache/spark/SparkConf.html" title="class in org.apache.spark">SparkConf</a> config)</pre> +<div class="block">This function may be used to get or instantiate a SparkContext and register it as a + singleton object. Because we can only have one active SparkContext per JVM, + this is useful when applications may wish to share a SparkContext. + <p> + Note: This function cannot be used to create multiple SparkContext instances + even if multiple contexts are allowed.</div> +<dl><dt><span class="strong">Parameters:</span></dt><dd><code>config</code> - (undocumented)</dd> +<dt><span class="strong">Returns:</span></dt><dd>(undocumented)</dd></dl> +</li> +</ul> +<a name="getOrCreate()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>getOrCreate</h4> +<pre>public static <a href="../../../org/apache/spark/SparkContext.html" title="class in org.apache.spark">SparkContext</a> getOrCreate()</pre> +<div class="block">This function may be used to get or instantiate a SparkContext and register it as a + singleton object. Because we can only have one active SparkContext per JVM, + this is useful when applications may wish to share a SparkContext. + <p> + This method can be called without a SparkConf (useful when simply retrieving an existing context). 
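+ <p>
+ For illustration only (an editor's sketch, not part of the generated Javadoc): minimal Scala
+ usage of the get-or-create pattern. The master URL "local[2]" and the application name are
+ example assumptions, not defaults.
+ <pre>
+ import org.apache.spark.{SparkConf, SparkContext}
+
+ // Example configuration; "local[2]" and the app name are illustrative choices.
+ val conf = new SparkConf().setMaster("local[2]").setAppName("GetOrCreateExample")
+
+ // Returns the active SparkContext if one exists, otherwise creates one from conf.
+ val sc = SparkContext.getOrCreate(conf)
+
+ // A later call in the same JVM, with or without a conf, returns the same instance.
+ assert(SparkContext.getOrCreate() eq sc)
+ </pre>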
+ <p> + Note: This function cannot be used to create multiple SparkContext instances + even if multiple contexts are allowed.</div> +<dl><dt><span class="strong">Returns:</span></dt><dd>(undocumented)</dd></dl> +</li> +</ul> +<a name="SPARK_JOB_DESCRIPTION()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SPARK_JOB_DESCRIPTION</h4> +<pre>public static String SPARK_JOB_DESCRIPTION()</pre> +</li> +</ul> +<a name="SPARK_JOB_GROUP_ID()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SPARK_JOB_GROUP_ID</h4> +<pre>public static String SPARK_JOB_GROUP_ID()</pre> +</li> +</ul> +<a name="SPARK_JOB_INTERRUPT_ON_CANCEL()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>SPARK_JOB_INTERRUPT_ON_CANCEL</h4> +<pre>public static String SPARK_JOB_INTERRUPT_ON_CANCEL()</pre> +</li> +</ul> +<a name="RDD_SCOPE_KEY()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>RDD_SCOPE_KEY</h4> +<pre>public static String RDD_SCOPE_KEY()</pre> +</li> +</ul> +<a name="RDD_SCOPE_NO_OVERRIDE_KEY()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>RDD_SCOPE_NO_OVERRIDE_KEY</h4> +<pre>public static String RDD_SCOPE_NO_OVERRIDE_KEY()</pre> +</li> +</ul> +<a name="DRIVER_IDENTIFIER()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>DRIVER_IDENTIFIER</h4> +<pre>public static String DRIVER_IDENTIFIER()</pre> +<div class="block">Executor id for the driver. In earlier versions of Spark, this was <code><driver></code>, but this was + changed to <code>driver</code> because the angle brackets caused escaping issues in URLs and XML (see + SPARK-6716 for more details).</div> +<dl><dt><span class="strong">Returns:</span></dt><dd>(undocumented)</dd></dl> +</li> +</ul> +<a name="LEGACY_DRIVER_IDENTIFIER()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>LEGACY_DRIVER_IDENTIFIER</h4> +<pre>public static String LEGACY_DRIVER_IDENTIFIER()</pre> +<div class="block">Legacy version of DRIVER_IDENTIFIER, retained for backwards-compatibility.</div> +<dl><dt><span class="strong">Returns:</span></dt><dd>(undocumented)</dd></dl> +</li> +</ul> +<a name="rddToPairRDDFunctions(org.apache.spark.rdd.RDD, scala.reflect.ClassTag, scala.reflect.ClassTag, scala.math.Ordering)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>rddToPairRDDFunctions</h4> +<pre>public static <K,V> <a href="../../../org/apache/spark/rdd/PairRDDFunctions.html" title="class in org.apache.spark.rdd">PairRDDFunctions</a><K,V> rddToPairRDDFunctions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.reflect.ClassTag<K> kt, + scala.reflect.ClassTag<V> vt, + scala.math.Ordering<K> ord)</pre> +</li> +</ul> +<a name="rddToAsyncRDDActions(org.apache.spark.rdd.RDD, scala.reflect.ClassTag)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>rddToAsyncRDDActions</h4> +<pre>public static <T> <a href="../../../org/apache/spark/rdd/AsyncRDDActions.html" title="class in org.apache.spark.rdd">AsyncRDDActions</a><T> rddToAsyncRDDActions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.reflect.ClassTag<T> evidence$22)</pre> +</li> +</ul> +<a name="rddToSequenceFileRDDFunctions(org.apache.spark.rdd.RDD, scala.Function1, scala.reflect.ClassTag, scala.Function1, scala.reflect.ClassTag)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> 
+<h4>rddToSequenceFileRDDFunctions</h4> +<pre>public static <K,V> <a href="../../../org/apache/spark/rdd/SequenceFileRDDFunctions.html" title="class in org.apache.spark.rdd">SequenceFileRDDFunctions</a><K,V> rddToSequenceFileRDDFunctions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.Function1<K,org.apache.hadoop.io.Writable> evidence$23, + scala.reflect.ClassTag<K> evidence$24, + scala.Function1<V,org.apache.hadoop.io.Writable> evidence$25, + scala.reflect.ClassTag<V> evidence$26)</pre> +</li> +</ul> +<a name="rddToOrderedRDDFunctions(org.apache.spark.rdd.RDD, scala.math.Ordering, scala.reflect.ClassTag, scala.reflect.ClassTag)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>rddToOrderedRDDFunctions</h4> +<pre>public static <K,V> <a href="../../../org/apache/spark/rdd/OrderedRDDFunctions.html" title="class in org.apache.spark.rdd">OrderedRDDFunctions</a><K,V,scala.Tuple2<K,V>> rddToOrderedRDDFunctions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><scala.Tuple2<K,V>> rdd, + scala.math.Ordering<K> evidence$27, + scala.reflect.ClassTag<K> evidence$28, + scala.reflect.ClassTag<V> evidence$29)</pre> +</li> +</ul> +<a name="doubleRDDToDoubleRDDFunctions(org.apache.spark.rdd.RDD)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>doubleRDDToDoubleRDDFunctions</h4> +<pre>public static <a href="../../../org/apache/spark/rdd/DoubleRDDFunctions.html" title="class in org.apache.spark.rdd">DoubleRDDFunctions</a> doubleRDDToDoubleRDDFunctions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><Object> rdd)</pre> +</li> +</ul> +<a name="numericRDDToDoubleRDDFunctions(org.apache.spark.rdd.RDD, scala.math.Numeric)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>numericRDDToDoubleRDDFunctions</h4> +<pre>public static <T> <a href="../../../org/apache/spark/rdd/DoubleRDDFunctions.html" title="class in org.apache.spark.rdd">DoubleRDDFunctions</a> numericRDDToDoubleRDDFunctions(<a href="../../../org/apache/spark/rdd/RDD.html" title="class in org.apache.spark.rdd">RDD</a><T> rdd, + scala.math.Numeric<T> num)</pre> +</li> +</ul> +<a name="intToIntWritable(int)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>intToIntWritable</h4> +<pre>public static org.apache.hadoop.io.IntWritable intToIntWritable(int i)</pre> +</li> +</ul> +<a name="longToLongWritable(long)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>longToLongWritable</h4> +<pre>public static org.apache.hadoop.io.LongWritable longToLongWritable(long l)</pre> +</li> +</ul> +<a name="floatToFloatWritable(float)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>floatToFloatWritable</h4> +<pre>public static org.apache.hadoop.io.FloatWritable floatToFloatWritable(float f)</pre> +</li> +</ul> +<a name="doubleToDoubleWritable(double)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>doubleToDoubleWritable</h4> +<pre>public static org.apache.hadoop.io.DoubleWritable doubleToDoubleWritable(double d)</pre> +</li> +</ul> +<a name="boolToBoolWritable(boolean)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>boolToBoolWritable</h4> +<pre>public static org.apache.hadoop.io.BooleanWritable boolToBoolWritable(boolean b)</pre> +</li> +</ul> +<a name="bytesToBytesWritable(byte[])"> +<!-- --> +</a> +<ul class="blockList"> +<li 
class="blockList"> +<h4>bytesToBytesWritable</h4> +<pre>public static org.apache.hadoop.io.BytesWritable bytesToBytesWritable(byte[] aob)</pre> +</li> +</ul> +<a name="stringToText(java.lang.String)"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>stringToText</h4> +<pre>public static org.apache.hadoop.io.Text stringToText(String s)</pre> +</li> +</ul> +<a name="intWritableConverter()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>intWritableConverter</h4> +<pre>public static org.apache.spark.WritableConverter<Object> intWritableConverter()</pre> +</li> +</ul> +<a name="longWritableConverter()"> +<!-- --> +</a> +<ul class="blockList"> +<li class="blockList"> +<h4>longW
<TRUNCATED>