Github user tdas commented on a diff in the pull request:
https://github.com/apache/spark/pull/2538#discussion_r18193637
--- Diff: python/pyspark/streaming/dstream.py ---
@@ -0,0 +1,633 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from itertools import chain, ifilter, imap
+import operator
+import time
+from datetime import datetime
+
+from pyspark import RDD
+from pyspark.storagelevel import StorageLevel
+from pyspark.streaming.util import rddToFileName, RDDFunction
+from pyspark.rdd import portable_hash
+from pyspark.resultiterable import ResultIterable
+
+__all__ = ["DStream"]
+
+
+class DStream(object):
+ def __init__(self, jdstream, ssc, jrdd_deserializer):
+ self._jdstream = jdstream
+ self._ssc = ssc
+ self.ctx = ssc._sc
+ self._jrdd_deserializer = jrdd_deserializer
+ self.is_cached = False
+ self.is_checkpointed = False
+
+ def context(self):
+ """
+ Return the StreamingContext associated with this DStream
+ """
+ return self._ssc
+
+ def count(self):
+ """
+ Return a new DStream which contains the number of elements in this DStream.
+ """
+ return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
+
+ def sum(self):
+ """
+ Add up the elements in this DStream.
+ """
+ return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
+
+ def filter(self, f):
+ """
+ Return a new DStream containing only the elements that satisfy the predicate.
+ """
+ def func(iterator):
+ return ifilter(f, iterator)
+ return self.mapPartitions(func, True)
+
+ def flatMap(self, f, preservesPartitioning=False):
+ """
+ Return a new DStream by applying a function to all elements of this DStream,
+ and then flattening the results.
+ """
+ def func(s, iterator):
+ return chain.from_iterable(imap(f, iterator))
+ return self.mapPartitionsWithIndex(func, preservesPartitioning)
+
+ def map(self, f, preservesPartitioning=False):
+ """
+ Return a new DStream by applying a function to each element of this DStream.
+ """
+ def func(iterator):
+ return imap(f, iterator)
+ return self.mapPartitions(func, preservesPartitioning)
+
+ def mapPartitions(self, f, preservesPartitioning=False):
+ """
+ Return a new DStream by applying a function to each partition of this DStream.
+ """
+ def func(s, iterator):
+ return f(iterator)
+ return self.mapPartitionsWithIndex(func, preservesPartitioning)
+
+ def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
+ """
+ Return a new DStream by applying a function to each partition of this DStream,
+ while tracking the index of the original partition.
+ """
+ return self.transform(lambda rdd: rdd.mapPartitionsWithIndex(f, preservesPartitioning))
+
+ def reduce(self, func):
+ """
+ Return a new DStream by reducing the elements of each RDD in this DStream
+ using the specified commutative and associative binary operator.
+ """
+ return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
+
+ def reduceByKey(self, func, numPartitions=None):
+ """
+ Merge the value for each key using an associative reduce function.
+
+ This will also perform the merging locally on each mapper before
+ sending results to reducer, similarly to a "combiner" in MapReduce.
+
+ Output will be hash-partitioned with C{numPartitions} partitions, or
+ the default parallelism level if C{numPartitions} is not specified.
+ """
+ return self.combineByKey(lambda x: x, func, func, numPartitions)
+
+ def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
+ numPartitions=None):
+ """
+ Return a new DStream by combining the elements for each key using a custom
+ set of aggregation functions.
+ """
+ def func(rdd):
+ return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
--- End diff ---
There is a slight inconsistency in how this approach sets the default number of
partitions. The Scala/Java DStream API uses an older way of determining the number
of partitions, based on the default parallelism configuration (see the code in
PairDStreamFunctions). This, however, uses the smarter default based on the number
of partitions of the pre-shuffle RDD. There may be a significant performance
difference. It may be a good idea to keep things consistent here, and consider
improving this in a different JIRA+PR.
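
For illustration only, here is a minimal sketch of what the consistent behavior
could look like on the Python side: fall back to the SparkContext's default
parallelism, as PairDStreamFunctions does, rather than letting the pre-shuffle
RDD choose the partition count. The body is adapted from the diff above
(`self.ctx` is the SparkContext stored in DStream.__init__); the final
transform call is an assumption, since the quoted hunk is truncated before the
method returns.

    # Sketch only (not part of the PR): mirror the Scala/Java default by
    # falling back to the SparkContext's default parallelism.
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None):
        """
        Return a new DStream by combining the elements for each key using a
        custom set of aggregation functions.
        """
        if numPartitions is None:
            # PairDStreamFunctions uses the default parallelism setting rather
            # than the partition count of the pre-shuffle RDD.
            numPartitions = self.ctx.defaultParallelism
        def func(rdd):
            return rdd.combineByKey(createCombiner, mergeValue,
                                    mergeCombiners, numPartitions)
        # Assumption: the truncated hunk ends by transforming each RDD.
        return self.transform(func)

Whether to hardwire the default-parallelism fallback here, or keep the
RDD-based default and improve the Scala/Java side instead, is exactly the
trade-off that could be settled in the separate JIRA.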