<SYSTEM_TASK:> Return a list that contains all of the elements in this RDD. <END_TASK> <USER_TASK:> Description: def collect(self): """ Return a list that contains all of the elements in this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. """
with SCCallSiteSync(self.context) as css:
    sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
<SYSTEM_TASK:> Reduces the elements of this RDD using the specified commutative and <END_TASK> <USER_TASK:> Description: def reduce(self, f): """ Reduces the elements of this RDD using the specified commutative and associative binary operator. Currently reduces partitions locally. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add) 15 >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add) 10 >>> sc.parallelize([]).reduce(add) Traceback (most recent call last): ... ValueError: Can not reduce() empty RDD """
f = fail_on_stopiteration(f)

def func(iterator):
    iterator = iter(iterator)
    try:
        initial = next(iterator)
    except StopIteration:
        return
    yield reduce(f, iterator, initial)

vals = self.mapPartitions(func).collect()
if vals:
    return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
<SYSTEM_TASK:> Aggregate the elements of each partition, and then the results for all <END_TASK> <USER_TASK:> Description: def fold(self, zeroValue, op): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given associative function and a neutral "zero value." The function C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. This behaves somewhat differently from fold operations implemented for non-distributed collections in functional languages like Scala. This fold operation may be applied to partitions individually, and then fold those results into the final result, rather than apply the fold to each element sequentially in some defined ordering. For functions that are not commutative, the result may differ from that of a fold applied to a non-distributed collection. >>> from operator import add >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add) 15 """
op = fail_on_stopiteration(op)

def func(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = op(acc, obj)
    yield acc

# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
<SYSTEM_TASK:> Aggregate the elements of each partition, and then the results for all <END_TASK> <USER_TASK:> Description: def aggregate(self, zeroValue, seqOp, combOp): """ Aggregate the elements of each partition, and then the results for all the partitions, using a given combine functions and a neutral "zero value." The functions C{op(t1, t2)} is allowed to modify C{t1} and return it as its result value to avoid object allocation; however, it should not modify C{t2}. The first function (seqOp) can return a different result type, U, than the type of this RDD. Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0) """
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)

def func(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = seqOp(acc, obj)
    yield acc

# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
<SYSTEM_TASK:> Aggregates the elements of this RDD in a multi-level tree <END_TASK> <USER_TASK:> Description: def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): """ Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5 """
if depth < 1:
    raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

if self.getNumPartitions() == 0:
    return zeroValue

def aggregatePartition(iterator):
    acc = zeroValue
    for obj in iterator:
        acc = seqOp(acc, obj)
    yield acc

partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
    numPartitions /= scale
    curNumPartitions = int(numPartitions)

    def mapPartition(i, iterator):
        for obj in iterator:
            yield (i % curNumPartitions, obj)

    partiallyAggregated = partiallyAggregated \
        .mapPartitionsWithIndex(mapPartition) \
        .reduceByKey(combOp, curNumPartitions) \
        .values()

return partiallyAggregated.reduce(combOp)
<SYSTEM_TASK:> Find the maximum item in this RDD. <END_TASK> <USER_TASK:> Description: def max(self, key=None): """ Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0 """
if key is None:
    return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
<SYSTEM_TASK:> Find the minimum item in this RDD. <END_TASK> <USER_TASK:> Description: def min(self, key=None): """ Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 """
if key is None:
    return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
<SYSTEM_TASK:> Add up the elements in this RDD. <END_TASK> <USER_TASK:> Description: def sum(self): """ Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0 """
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
<SYSTEM_TASK:> Get the top N elements from an RDD. <END_TASK> <USER_TASK:> Description: def top(self, num, key=None): """ Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2] """
def topIterator(iterator):
    yield heapq.nlargest(num, iterator, key=key)

def merge(a, b):
    return heapq.nlargest(num, a + b, key=key)

return self.mapPartitions(topIterator).reduce(merge)
<SYSTEM_TASK:> Get the N elements from an RDD ordered in ascending order or as <END_TASK> <USER_TASK:> Description: def takeOrdered(self, num, key=None): """ Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4] """
def merge(a, b):
    return heapq.nsmallest(num, a + b, key)

return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
<SYSTEM_TASK:> Take the first num elements of the RDD. <END_TASK> <USER_TASK:> Description: def take(self, num): """ Take the first num elements of the RDD. It works by first scanning one partition, and use the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93] """
items = []
totalParts = self.getNumPartitions()
partsScanned = 0

while len(items) < num and partsScanned < totalParts:
    # The number of partitions to try in this iteration.
    # It is ok for this number to be greater than totalParts because
    # we actually cap it at totalParts in runJob.
    numPartsToTry = 1
    if partsScanned > 0:
        # If we didn't find any rows after the previous iteration,
        # quadruple and retry. Otherwise, interpolate the number of
        # partitions we need to try, but overestimate it by 50%.
        # We also cap the estimation in the end.
        if len(items) == 0:
            numPartsToTry = partsScanned * 4
        else:
            # the first parameter of max is >=1 whenever partsScanned >= 2
            numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
            numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

    left = num - len(items)

    def takeUpToNumLeft(iterator):
        iterator = iter(iterator)
        taken = 0
        while taken < left:
            try:
                yield next(iterator)
            except StopIteration:
                return
            taken += 1

    p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
    res = self.context.runJob(self, takeUpToNumLeft, p)

    items += res
    partsScanned += numPartsToTry

return items[:num]
<SYSTEM_TASK:> Merge the values for each key using an associative and commutative reduce function. <END_TASK> <USER_TASK:> Description: def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash): """ Merge the values for each key using an associative and commutative reduce function. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. Output will be partitioned with C{numPartitions} partitions, or the default parallelism level if C{numPartitions} is not specified. Default partitioner is hash-partition. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKey(add).collect()) [('a', 2), ('b', 1)] """
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
<SYSTEM_TASK:> Merge the values for each key using an associative and commutative reduce function, but <END_TASK> <USER_TASK:> Description: def reduceByKeyLocally(self, func): """ Merge the values for each key using an associative and commutative reduce function, but return the results immediately to the master as a dictionary. This will also perform the merging locally on each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce. >>> from operator import add >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.reduceByKeyLocally(add).items()) [('a', 2), ('b', 1)] """
func = fail_on_stopiteration(func)

def reducePartition(iterator):
    m = {}
    for k, v in iterator:
        m[k] = func(m[k], v) if k in m else v
    yield m

def mergeMaps(m1, m2):
    for k, v in m2.items():
        m1[k] = func(m1[k], v) if k in m1 else v
    return m1

return self.mapPartitions(reducePartition).reduce(mergeMaps)
<SYSTEM_TASK:> Return a copy of the RDD partitioned using the specified partitioner. <END_TASK> <USER_TASK:> Description: def partitionBy(self, numPartitions, partitionFunc=portable_hash): """ Return a copy of the RDD partitioned using the specified partitioner. >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x)) >>> sets = pairs.partitionBy(2).glom().collect() >>> len(set(sets[0]).intersection(set(sets[1]))) 0 """
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
    return self

# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
    "spark.python.worker.memory", "512m")) / 2)

def add_shuffle_key(split, iterator):
    buckets = defaultdict(list)
    c, batch = 0, min(10 * numPartitions, 1000)

    for k, v in iterator:
        buckets[partitionFunc(k) % numPartitions].append((k, v))
        c += 1

        # check used memory and avg size of chunk of objects
        if (c % 1000 == 0 and get_used_memory() > limit
                or c > batch):
            n, size = len(buckets), 0
            for split in list(buckets.keys()):
                yield pack_long(split)
                d = outputSerializer.dumps(buckets[split])
                del buckets[split]
                yield d
                size += len(d)

            avg = int(size / n) >> 20
            # let 1M < avg < 10M
            if avg < 1:
                batch *= 1.5
            elif avg > 10:
                batch = max(int(batch / 1.5), 1)
            c = 0

    for split, items in buckets.items():
        yield pack_long(split)
        yield outputSerializer.dumps(items)

keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
    pairRDD = self.ctx._jvm.PairwiseRDD(
        keyed._jrdd.rdd()).asJavaPairRDD()
    jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                   id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
<SYSTEM_TASK:> Generic function to combine the elements for each key using a custom <END_TASK> <USER_TASK:> Description: def combineByKey(self, createCombiner, mergeValue, mergeCombiners, numPartitions=None, partitionFunc=portable_hash): """ Generic function to combine the elements for each key using a custom set of aggregation functions. Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined type" C. Users provide three functions: - C{createCombiner}, which turns a V into a C (e.g., creates a one-element list) - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of a list) - C{mergeCombiners}, to combine two C's into a single one (e.g., merges the lists) To avoid memory allocation, both mergeValue and mergeCombiners are allowed to modify and return their first argument instead of creating a new C. In addition, users can control the partitioning of the output RDD. .. note:: V and C can be different -- for example, one might group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]). >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)]) >>> def to_list(a): ... return [a] ... >>> def append(a, b): ... a.append(b) ... return a ... >>> def extend(a, b): ... a.extend(b) ... return a ... >>> sorted(x.combineByKey(to_list, append, extend).collect()) [('a', [1, 2]), ('b', [1])] """
if numPartitions is None:
    numPartitions = self._defaultReducePartitions()

serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

def combineLocally(iterator):
    merger = ExternalMerger(agg, memory * 0.9, serializer)
    merger.mergeValues(iterator)
    return merger.items()

locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

def _mergeCombiners(iterator):
    merger = ExternalMerger(agg, memory, serializer)
    merger.mergeCombiners(iterator)
    return merger.items()

return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
<SYSTEM_TASK:> Aggregate the values of each key, using given combine functions and a neutral <END_TASK> <USER_TASK:> Description: def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None, partitionFunc=portable_hash): """ Aggregate the values of each key, using given combine functions and a neutral "zero value". This function can return a different result type, U, than the type of the values in this RDD, V. Thus, we need one operation for merging a V into a U and one operation for merging two U's, The former operation is used for merging values within a partition, and the latter is used for merging values between partitions. To avoid memory allocation, both of these functions are allowed to modify and return their first argument instead of creating a new U. """
def createZero():
    return copy.deepcopy(zeroValue)

return self.combineByKey(
    lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
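The docstring above has no example, so here is a minimal usage sketch (not from the source; it assumes an active SparkContext named sc) that computes a per-key (sum, count) pair:

rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
# zeroValue (0, 0) holds the running (sum, count) for each key
sums_counts = rdd.aggregateByKey(
    (0, 0),
    lambda acc, v: (acc[0] + v, acc[1] + 1),    # seqFunc: fold one value into the pair
    lambda a, b: (a[0] + b[0], a[1] + b[1]))    # combFunc: merge two partial pairs
sorted(sums_counts.collect())  # expected: [('a', (3, 2)), ('b', (1, 1))]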
<SYSTEM_TASK:> Group the values for each key in the RDD into a single sequence. <END_TASK> <USER_TASK:> Description: def groupByKey(self, numPartitions=None, partitionFunc=portable_hash): """ Group the values for each key in the RDD into a single sequence. Hash-partitions the resulting RDD with numPartitions partitions. .. note:: If you are grouping in order to perform an aggregation (such as a sum or average) over each key, using reduceByKey or aggregateByKey will provide much better performance. >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)]) >>> sorted(rdd.groupByKey().mapValues(len).collect()) [('a', 2), ('b', 1)] >>> sorted(rdd.groupByKey().mapValues(list).collect()) [('a', [1, 1]), ('b', [1])] """
def createCombiner(x):
    return [x]

def mergeValue(xs, x):
    xs.append(x)
    return xs

def mergeCombiners(a, b):
    a.extend(b)
    return a

memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

def combine(iterator):
    merger = ExternalMerger(agg, memory * 0.9, serializer)
    merger.mergeValues(iterator)
    return merger.items()

locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

def groupByKey(it):
    merger = ExternalGroupBy(agg, memory, serializer)
    merger.mergeCombiners(it)
    return merger.items()

return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
<SYSTEM_TASK:> Pass each value in the key-value pair RDD through a flatMap function <END_TASK> <USER_TASK:> Description: def flatMapValues(self, f): """ Pass each value in the key-value pair RDD through a flatMap function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])]) >>> def f(x): return x >>> x.flatMapValues(f).collect() [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')] """
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
<SYSTEM_TASK:> Pass each value in the key-value pair RDD through a map function <END_TASK> <USER_TASK:> Description: def mapValues(self, f): """ Pass each value in the key-value pair RDD through a map function without changing the keys; this also retains the original RDD's partitioning. >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])]) >>> def f(x): return len(x) >>> x.mapValues(f).collect() [('a', 3), ('b', 1)] """
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
<SYSTEM_TASK:> Zips this RDD with its element indices. <END_TASK> <USER_TASK:> Description: def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """
starts = [0]
if self.getNumPartitions() > 1:
    nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
    for i in range(len(nums) - 1):
        starts.append(starts[-1] + nums[i])

def func(k, it):
    for i, v in enumerate(it, starts[k]):
        yield v, i

return self.mapPartitionsWithIndex(func)
<SYSTEM_TASK:> Zips this RDD with generated unique Long ids. <END_TASK> <USER_TASK:> Description: def zipWithUniqueId(self): """ Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] """
n = self.getNumPartitions()

def func(k, it):
    for i, v in enumerate(it):
        yield v, i * n + k

return self.mapPartitionsWithIndex(func)
<SYSTEM_TASK:> Get the RDD's current storage level. <END_TASK> <USER_TASK:> Description: def getStorageLevel(self): """ Get the RDD's current storage level. >>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated """
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
                             java_storage_level.useMemory(),
                             java_storage_level.useOffHeap(),
                             java_storage_level.deserialized(),
                             java_storage_level.replication())
return storage_level
<SYSTEM_TASK:> Return the list of values in the RDD for key `key`. This operation <END_TASK> <USER_TASK:> Description: def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """
values = self.filter(lambda kv: kv[0] == key).values()

if self.partitioner is not None:
    return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])

return values.collect()
<SYSTEM_TASK:> Return an iterator that contains all of the elements in this RDD. <END_TASK> <USER_TASK:> Description: def toLocalIterator(self): """ Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """
with SCCallSiteSync(self.context) as css:
    sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(sock_info, self._jrdd_deserializer)
<SYSTEM_TASK:> Create a method for given unary operator <END_TASK> <USER_TASK:> Description: def _unary_op(name, doc="unary operator"): """ Create a method for given unary operator """
def _(self):
    jc = getattr(self._jc, name)()
    return Column(jc)
_.__doc__ = doc
return _
<SYSTEM_TASK:> Create a method for given binary operator <END_TASK> <USER_TASK:> Description: def _bin_op(name, doc="binary operator"): """ Create a method for given binary operator """
def _(self, other):
    jc = other._jc if isinstance(other, Column) else other
    njc = getattr(self._jc, name)(jc)
    return Column(njc)
_.__doc__ = doc
return _
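For context, a sketch of how these factories are typically applied to define operators on Column; this is not from the excerpt, and the Java-side method names ("plus", "equalTo", "isNull") are assumptions:

class Column(object):
    # each operator simply forwards to the corresponding JVM Column method
    __add__ = _bin_op("plus", "binary + operator")
    __eq__ = _bin_op("equalTo", "equality test")
    isNull = _unary_op("isNull", "True if the current expression is null.")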
<SYSTEM_TASK:> A boolean expression that is evaluated to true if the value of this <END_TASK> <USER_TASK:> Description: def isin(self, *cols): """ A boolean expression that is evaluated to true if the value of this expression is contained by the evaluated values of the arguments. >>> df[df.name.isin("Bob", "Mike")].collect() [Row(age=5, name=u'Bob')] >>> df[df.age.isin([1, 2, 3])].collect() [Row(age=2, name=u'Alice')] """
if len(cols) == 1 and isinstance(cols[0], (list, set)):
    cols = cols[0]
cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
sc = SparkContext._active_spark_context
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
<SYSTEM_TASK:> Convert the column into type ``dataType``. <END_TASK> <USER_TASK:> Description: def cast(self, dataType): """ Convert the column into type ``dataType``. >>> df.select(df.age.cast("string").alias('ages')).collect() [Row(ages=u'2'), Row(ages=u'5')] >>> df.select(df.age.cast(StringType()).alias('ages')).collect() [Row(ages=u'2'), Row(ages=u'5')] """
if isinstance(dataType, basestring):
    jc = self._jc.cast(dataType)
elif isinstance(dataType, DataType):
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    jdt = spark._jsparkSession.parseDataType(dataType.json())
    jc = self._jc.cast(jdt)
else:
    raise TypeError("unexpected type: %s" % type(dataType))
return Column(jc)
<SYSTEM_TASK:> Computes the mean and variance and stores as a model to be used <END_TASK> <USER_TASK:> Description: def fit(self, dataset): """ Computes the mean and variance and stores as a model to be used for later scaling. :param dataset: The data used to compute the mean and variance to build the transformation model. :return: a StandardScalarModel """
dataset = dataset.map(_convert_to_vector)
jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
return StandardScalerModel(jmodel)
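A brief usage sketch, not part of the source, assuming an active SparkContext sc:

from pyspark.mllib.feature import StandardScaler
from pyspark.mllib.linalg import Vectors

data = sc.parallelize([Vectors.dense([1.0, 2.0]), Vectors.dense([3.0, 6.0])])
scaler = StandardScaler(withMean=True, withStd=True)
model = scaler.fit(data)          # computes column means and variances
scaled = model.transform(data)    # RDD of centered and scaled vectors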
<SYSTEM_TASK:> Returns a ChiSquared feature selector. <END_TASK> <USER_TASK:> Description: def fit(self, data): """ Returns a ChiSquared feature selector. :param data: an `RDD[LabeledPoint]` containing the labeled dataset with categorical features. Real-valued features will be treated as categorical for each distinct value. Apply feature discretizer before using this function. """
jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
                       self.percentile, self.fpr, self.fdr, self.fwe, data)
return ChiSqSelectorModel(jmodel)
<SYSTEM_TASK:> Computes the inverse document frequency. <END_TASK> <USER_TASK:> Description: def fit(self, dataset): """ Computes the inverse document frequency. :param dataset: an RDD of term frequency vectors """
if not isinstance(dataset, RDD):
    raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel)
<SYSTEM_TASK:> Find synonyms of a word <END_TASK> <USER_TASK:> Description: def findSynonyms(self, word, num): """ Find synonyms of a word :param word: a word or a vector representation of word :param num: number of synonyms to find :return: array of (word, cosineSimilarity) .. note:: Local use only """
if not isinstance(word, basestring):
    word = _convert_to_vector(word)
words, similarity = self.call("findSynonyms", word, num)
return zip(words, similarity)
<SYSTEM_TASK:> Computes the Hadamard product of the vector. <END_TASK> <USER_TASK:> Description: def transform(self, vector): """ Computes the Hadamard product of the vector. """
if isinstance(vector, RDD):
    vector = vector.map(_convert_to_vector)
else:
    vector = _convert_to_vector(vector)
return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
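A small sketch of both call forms handled above (single vector and RDD), assuming an active SparkContext sc; not part of the source:

from pyspark.mllib.feature import ElementwiseProduct
from pyspark.mllib.linalg import Vectors

ep = ElementwiseProduct(Vectors.dense([2.0, 3.0]))
ep.transform(Vectors.dense([1.0, 1.0]))                    # -> DenseVector([2.0, 3.0])
ep.transform(sc.parallelize([Vectors.dense([1.0, 1.0])]))  # RDD form, same per-element scaling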
<SYSTEM_TASK:> Train a decision tree model for classification. <END_TASK> <USER_TASK:> Description: def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for classification. :param data: Training data: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from numpy import array >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(1.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {}) >>> print(model) DecisionTreeModel classifier of depth 1 with 3 nodes >>> print(model.toDebugString()) DecisionTreeModel classifier of depth 1 with 3 nodes If (feature 0 <= 0.5) Predict: 0.0 Else (feature 0 > 0.5) Predict: 1.0 <BLANKLINE> >>> model.predict(array([1.0])) 1.0 >>> model.predict(array([0.0])) 0.0 >>> rdd = sc.parallelize([[1.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """
return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
<SYSTEM_TASK:> Train a decision tree model for regression. <END_TASK> <USER_TASK:> Description: def trainRegressor(cls, data, categoricalFeaturesInfo, impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0): """ Train a decision tree model for regression. :param data: Training data: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 5) :param maxBins: Number of bins used for finding splits at each node. (default: 32) :param minInstancesPerNode: Minimum number of instances required at child nodes to create the parent split. (default: 1) :param minInfoGain: Minimum info gain required to create a split. (default: 0.0) :return: DecisionTreeModel. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import DecisionTree >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 0.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {}) >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {1: 0.0})) 0.0 >>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """
return cls._train(data, "regression", 0, categoricalFeaturesInfo, impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
<SYSTEM_TASK:> Train a random forest model for binary or multiclass <END_TASK> <USER_TASK:> Description: def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for binary or multiclass classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1, ..., numClasses-1}. :param numClasses: Number of classes for classification. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "sqrt". (default: "auto") :param impurity: Criterion used for information gain calculation. Supported values: "gini" or "entropy". (default: "gini") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42) >>> model.numTrees() 3 >>> model.totalNumNodes() 7 >>> print(model) TreeEnsembleModel classifier with 3 trees <BLANKLINE> >>> print(model.toDebugString()) TreeEnsembleModel classifier with 3 trees <BLANKLINE> Tree 0: Predict: 1.0 Tree 1: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 Tree 2: If (feature 0 <= 1.5) Predict: 0.0 Else (feature 0 > 1.5) Predict: 1.0 <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[3.0], [1.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """
return cls._train(data, "classification", numClasses, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
<SYSTEM_TASK:> Train a random forest model for regression. <END_TASK> <USER_TASK:> Description: def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto", impurity="variance", maxDepth=4, maxBins=32, seed=None): """ Train a random forest model for regression. :param data: Training dataset: RDD of LabeledPoint. Labels are real numbers. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param numTrees: Number of trees in the random forest. :param featureSubsetStrategy: Number of features to consider for splits at each node. Supported values: "auto", "all", "sqrt", "log2", "onethird". If "auto" is set, this parameter is set based on numTrees: if numTrees == 1, set to "all"; if numTrees > 1 (forest) set to "onethird" for regression. (default: "auto") :param impurity: Criterion used for information gain calculation. The only supported value for regression is "variance". (default: "variance") :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 4) :param maxBins: Maximum number of bins used for splitting features. (default: 32) :param seed: Random seed for bootstrapping and choosing feature subsets. Set as None to generate seed based on system time. (default: None) :return: RandomForestModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import RandomForest >>> from pyspark.mllib.linalg import SparseVector >>> >>> sparse_data = [ ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 1.0})), ... LabeledPoint(0.0, SparseVector(2, {0: 1.0})), ... LabeledPoint(1.0, SparseVector(2, {1: 2.0})) ... ] >>> >>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42) >>> model.numTrees() 2 >>> model.totalNumNodes() 4 >>> model.predict(SparseVector(2, {1: 1.0})) 1.0 >>> model.predict(SparseVector(2, {0: 1.0})) 0.5 >>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]]) >>> model.predict(rdd).collect() [1.0, 0.5] """
return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
<SYSTEM_TASK:> Train a gradient-boosted trees model for classification. <END_TASK> <USER_TASK:> Description: def trainClassifier(cls, data, categoricalFeaturesInfo, loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3, maxBins=32): """ Train a gradient-boosted trees model for classification. :param data: Training dataset: RDD of LabeledPoint. Labels should take values {0, 1}. :param categoricalFeaturesInfo: Map storing arity of categorical features. An entry (n -> k) indicates that feature n is categorical with k categories indexed from 0: {0, 1, ..., k-1}. :param loss: Loss function used for minimization during gradient boosting. Supported values: "logLoss", "leastSquaresError", "leastAbsoluteError". (default: "logLoss") :param numIterations: Number of iterations of boosting. (default: 100) :param learningRate: Learning rate for shrinking the contribution of each estimator. The learning rate should be between in the interval (0, 1]. (default: 0.1) :param maxDepth: Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1 means 1 internal node + 2 leaf nodes). (default: 3) :param maxBins: Maximum number of bins used for splitting features. DecisionTree requires maxBins >= max categories. (default: 32) :return: GradientBoostedTreesModel that can be used for prediction. Example usage: >>> from pyspark.mllib.regression import LabeledPoint >>> from pyspark.mllib.tree import GradientBoostedTrees >>> >>> data = [ ... LabeledPoint(0.0, [0.0]), ... LabeledPoint(0.0, [1.0]), ... LabeledPoint(1.0, [2.0]), ... LabeledPoint(1.0, [3.0]) ... ] >>> >>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10) >>> model.numTrees() 10 >>> model.totalNumNodes() 30 >>> print(model) # it already has newline TreeEnsembleModel classifier with 10 trees <BLANKLINE> >>> model.predict([2.0]) 1.0 >>> model.predict([0.0]) 0.0 >>> rdd = sc.parallelize([[2.0], [0.0]]) >>> model.predict(rdd).collect() [1.0, 0.0] """
return cls._train(data, "classification", categoricalFeaturesInfo, loss, numIterations, learningRate, maxDepth, maxBins)
<SYSTEM_TASK:> Set a configuration property. <END_TASK> <USER_TASK:> Description: def set(self, key, value): """Set a configuration property."""
# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
if self._jconf is not None:
    self._jconf.set(key, unicode(value))
else:
    self._conf[key] = unicode(value)
return self
<SYSTEM_TASK:> Set a configuration property, if not already set. <END_TASK> <USER_TASK:> Description: def setIfMissing(self, key, value): """Set a configuration property, if not already set."""
if self.get(key) is None:
    self.set(key, value)
return self
<SYSTEM_TASK:> Set an environment variable to be passed to executors. <END_TASK> <USER_TASK:> Description: def setExecutorEnv(self, key=None, value=None, pairs=None): """Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
    raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
    self.set("spark.executorEnv." + key, value)
elif pairs is not None:
    for (k, v) in pairs:
        self.set("spark.executorEnv." + k, v)
return self
<SYSTEM_TASK:> Set multiple parameters, passed as a list of key-value pairs. <END_TASK> <USER_TASK:> Description: def setAll(self, pairs): """ Set multiple parameters, passed as a list of key-value pairs. :param pairs: list of key-value pairs to set """
for (k, v) in pairs:
    self.set(k, v)
return self
<SYSTEM_TASK:> Get the configured value for some key, or return a default otherwise. <END_TASK> <USER_TASK:> Description: def get(self, key, defaultValue=None): """Get the configured value for some key, or return a default otherwise."""
if defaultValue is None:   # Py4J doesn't call the right get() if we pass None
    if self._jconf is not None:
        if not self._jconf.contains(key):
            return None
        return self._jconf.get(key)
    else:
        if key not in self._conf:
            return None
        return self._conf[key]
else:
    if self._jconf is not None:
        return self._jconf.get(key, defaultValue)
    else:
        return self._conf.get(key, defaultValue)
<SYSTEM_TASK:> Get all values as a list of key-value pairs. <END_TASK> <USER_TASK:> Description: def getAll(self): """Get all values as a list of key-value pairs."""
if self._jconf is not None:
    return [(elem._1(), elem._2()) for elem in self._jconf.getAll()]
else:
    return self._conf.items()
<SYSTEM_TASK:> Does this configuration contain a given key? <END_TASK> <USER_TASK:> Description: def contains(self, key): """Does this configuration contain a given key?"""
if self._jconf is not None:
    return self._jconf.contains(key)
else:
    return key in self._conf
<SYSTEM_TASK:> Returns a printable version of the configuration, as a list of <END_TASK> <USER_TASK:> Description: def toDebugString(self): """ Returns a printable version of the configuration, as a list of key=value pairs, one per line. """
if self._jconf is not None:
    return self._jconf.toDebugString()
else:
    return '\n'.join('%s=%s' % (k, v) for k, v in self._conf.items())
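A short sketch tying the SparkConf methods above together (illustrative only, run before a SparkContext is created):

from pyspark import SparkConf

conf = SparkConf()
conf.set("spark.app.name", "demo")
conf.setIfMissing("spark.master", "local[2]")   # only applied if not already set
conf.setAll([("spark.executor.memory", "1g"), ("spark.ui.enabled", "false")])
conf.get("spark.app.name")        # 'demo'
conf.contains("spark.unknown")    # False
print(conf.toDebugString())       # one key=value pair per line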
<SYSTEM_TASK:> Returns a list of databases available across all sessions. <END_TASK> <USER_TASK:> Description: def listDatabases(self): """Returns a list of databases available across all sessions."""
iter = self._jcatalog.listDatabases().toLocalIterator()
databases = []
while iter.hasNext():
    jdb = iter.next()
    databases.append(Database(
        name=jdb.name(),
        description=jdb.description(),
        locationUri=jdb.locationUri()))
return databases
<SYSTEM_TASK:> Returns a list of functions registered in the specified database. <END_TASK> <USER_TASK:> Description: def listFunctions(self, dbName=None): """Returns a list of functions registered in the specified database. If no database is specified, the current database is used. This includes all temporary functions. """
if dbName is None:
    dbName = self.currentDatabase()
iter = self._jcatalog.listFunctions(dbName).toLocalIterator()
functions = []
while iter.hasNext():
    jfunction = iter.next()
    functions.append(Function(
        name=jfunction.name(),
        description=jfunction.description(),
        className=jfunction.className(),
        isTemporary=jfunction.isTemporary()))
return functions
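A usage sketch for the two catalog listings above, assuming an active SparkSession named spark (not part of the source):

for db in spark.catalog.listDatabases():
    print(db.name, db.locationUri)

for fn in spark.catalog.listFunctions("default"):   # or omit the argument for the current database
    print(fn.name, fn.className, fn.isTemporary)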
<SYSTEM_TASK:> Load data from a given socket, this is a blocking method thus only return when the socket <END_TASK> <USER_TASK:> Description: def _load_from_socket(port, auth_secret): """ Load data from a given socket, this is a blocking method thus only return when the socket connection has been closed. """
(sockfile, sock) = local_connect_and_auth(port, auth_secret)
# The barrier() call may block forever, so no timeout
sock.settimeout(None)
# Make a barrier() function call.
write_int(BARRIER_FUNCTION, sockfile)
sockfile.flush()

# Collect result.
res = UTF8Deserializer().loads(sockfile)

# Release resources.
sockfile.close()
sock.close()

return res
<SYSTEM_TASK:> Internal function to get or create global BarrierTaskContext. We need to make sure <END_TASK> <USER_TASK:> Description: def _getOrCreate(cls): """ Internal function to get or create global BarrierTaskContext. We need to make sure BarrierTaskContext is returned from here because it is needed in python worker reuse scenario, see SPARK-25921 for more details. """
if not isinstance(cls._taskContext, BarrierTaskContext):
    cls._taskContext = object.__new__(cls)
return cls._taskContext
<SYSTEM_TASK:> Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called <END_TASK> <USER_TASK:> Description: def _initialize(cls, port, secret): """ Initialize BarrierTaskContext, other methods within BarrierTaskContext can only be called after BarrierTaskContext is initialized. """
cls._port = port
cls._secret = secret
<SYSTEM_TASK:> A decorator that annotates a function to append the version of Spark the function was added. <END_TASK> <USER_TASK:> Description: def since(version): """ A decorator that annotates a function to append the version of Spark the function was added. """
import re
indent_p = re.compile(r'\n( +)')

def deco(f):
    indents = indent_p.findall(f.__doc__)
    indent = ' ' * (min(len(m) for m in indents) if indents else 0)
    f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
    return f
return deco
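A sketch of how the decorator is applied (the version string is illustrative):

@since("1.3")
def collect(self):
    """Return a list that contains all of the elements in this RDD."""
    ...

# collect.__doc__ now has a ".. versionadded:: 1.3" directive appended,
# indented to match the docstring body.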
<SYSTEM_TASK:> A decorator that forces keyword arguments in the wrapped method <END_TASK> <USER_TASK:> Description: def keyword_only(func): """ A decorator that forces keyword arguments in the wrapped method and saves actual input keyword arguments in `_input_kwargs`. .. note:: Should only be used to wrap a method where first arg is `self` """
@wraps(func)
def wrapper(self, *args, **kwargs):
    if len(args) > 0:
        raise TypeError("Method %s forces keyword arguments." % func.__name__)
    self._input_kwargs = kwargs
    return func(self, **kwargs)
return wrapper
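A minimal sketch of the intended use (class and parameter names are made up for illustration):

class MyEstimator(object):
    @keyword_only
    def __init__(self, maxIter=10, regParam=0.0):
        kwargs = self._input_kwargs   # only the keyword arguments actually passed
        print(kwargs)

MyEstimator(maxIter=5)   # prints {'maxIter': 5}
MyEstimator(5)           # raises TypeError: Method __init__ forces keyword arguments.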
<SYSTEM_TASK:> Generates Python code for a shared param class. <END_TASK> <USER_TASK:> Description: def _gen_param_code(name, doc, defaultValueStr): """ Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string """
# TODO: How to correctly inherit instance attributes?
template = '''
    def set$Name(self, value):
        """
        Sets the value of :py:attr:`$name`.
        """
        return self._set($name=value)

    def get$Name(self):
        """
        Gets the value of $name or its default value.
        """
        return self.getOrDefault(self.$name)'''

Name = name[0].upper() + name[1:]
return template \
    .replace("$name", name) \
    .replace("$Name", Name) \
    .replace("$doc", doc) \
    .replace("$defaultValueStr", str(defaultValueStr))
<SYSTEM_TASK:> Runs the bisecting k-means algorithm return the model. <END_TASK> <USER_TASK:> Description: def train(self, rdd, k=4, maxIterations=20, minDivisibleClusterSize=1.0, seed=-1888008604): """ Runs the bisecting k-means algorithm return the model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: The desired number of leaf clusters. The actual number could be smaller if there are no divisible leaf clusters. (default: 4) :param maxIterations: Maximum number of iterations allowed to split clusters. (default: 20) :param minDivisibleClusterSize: Minimum number of points (if >= 1.0) or the minimum proportion of points (if < 1.0) of a divisible cluster. (default: 1) :param seed: Random seed value for cluster initialization. (default: -1888008604 from classOf[BisectingKMeans].getName.##) """
java_model = callMLlibFunc(
    "trainBisectingKMeans", rdd.map(_convert_to_vector),
    k, maxIterations, minDivisibleClusterSize, seed)
return BisectingKMeansModel(java_model)
<SYSTEM_TASK:> Train a k-means clustering model. <END_TASK> <USER_TASK:> Description: def train(cls, rdd, k, maxIterations=100, runs=1, initializationMode="k-means||", seed=None, initializationSteps=2, epsilon=1e-4, initialModel=None): """ Train a k-means clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of clusters to create. :param maxIterations: Maximum number of iterations allowed. (default: 100) :param runs: This param has no effect since Spark 2.0.0. :param initializationMode: The initialization algorithm. This can be either "random" or "k-means||". (default: "k-means||") :param seed: Random seed value for cluster initialization. Set as None to generate seed based on system time. (default: None) :param initializationSteps: Number of steps for the k-means|| initialization mode. This is an advanced setting -- the default of 2 is almost always enough. (default: 2) :param epsilon: Distance threshold within which a center will be considered to have converged. If all centers move less than this Euclidean distance, iterations are stopped. (default: 1e-4) :param initialModel: Initial cluster centers can be provided as a KMeansModel object rather than using the random or k-means|| initializationModel. (default: None) """
if runs != 1:
    warnings.warn("The param `runs` has no effect since Spark 2.0.0.")
clusterInitialModel = []
if initialModel is not None:
    if not isinstance(initialModel, KMeansModel):
        raise Exception("initialModel is of "+str(type(initialModel))+". It needs "
                        "to be of <type 'KMeansModel'>")
    clusterInitialModel = [_convert_to_vector(c) for c in initialModel.clusterCenters]
model = callMLlibFunc("trainKMeansModel", rdd.map(_convert_to_vector), k, maxIterations,
                      runs, initializationMode, seed, initializationSteps, epsilon,
                      clusterInitialModel)
centers = callJavaFunc(rdd.context, model.clusterCenters)
return KMeansModel([c.toArray() for c in centers])
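Since the docstring above has no example, here is a small training sketch (assumes an active SparkContext sc; exact cluster assignments depend on initialization):

from numpy import array
from pyspark.mllib.clustering import KMeans

points = sc.parallelize([array([0.0, 0.0]), array([1.0, 1.0]),
                         array([9.0, 8.0]), array([8.0, 9.0])])
model = KMeans.train(points, k=2, maxIterations=10, initializationMode="k-means||")
model.clusterCenters                 # two centers, one near each group
model.predict(array([0.5, 0.5]))     # cluster id of the group near the origin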
<SYSTEM_TASK:> Train a Gaussian Mixture clustering model. <END_TASK> <USER_TASK:> Description: def train(cls, rdd, k, convergenceTol=1e-3, maxIterations=100, seed=None, initialModel=None): """ Train a Gaussian Mixture clustering model. :param rdd: Training points as an `RDD` of `Vector` or convertible sequence types. :param k: Number of independent Gaussians in the mixture model. :param convergenceTol: Maximum change in log-likelihood at which convergence is considered to have occurred. (default: 1e-3) :param maxIterations: Maximum number of iterations allowed. (default: 100) :param seed: Random seed for initial Gaussian distribution. Set as None to generate seed based on system time. (default: None) :param initialModel: Initial GMM starting point, bypassing the random initialization. (default: None) """
initialModelWeights = None
initialModelMu = None
initialModelSigma = None
if initialModel is not None:
    if initialModel.k != k:
        raise Exception("Mismatched cluster count, initialModel.k = %s, however k = %s"
                        % (initialModel.k, k))
    initialModelWeights = list(initialModel.weights)
    initialModelMu = [initialModel.gaussians[i].mu for i in range(initialModel.k)]
    initialModelSigma = [initialModel.gaussians[i].sigma for i in range(initialModel.k)]
java_model = callMLlibFunc("trainGaussianMixtureModel", rdd.map(_convert_to_vector),
                           k, convergenceTol, maxIterations, seed,
                           initialModelWeights, initialModelMu, initialModelSigma)
return GaussianMixtureModel(java_model)
<SYSTEM_TASK:> Update the centroids, according to data <END_TASK> <USER_TASK:> Description: def update(self, data, decayFactor, timeUnit): """Update the centroids, according to data :param data: RDD with new data for the model update. :param decayFactor: Forgetfulness of the previous centroids. :param timeUnit: Can be "batches" or "points". If points, then the decay factor is raised to the power of number of new points and if batches, then decay factor will be used as is. """
if not isinstance(data, RDD):
    raise TypeError("Data should be of an RDD, got %s." % type(data))
data = data.map(_convert_to_vector)
decayFactor = float(decayFactor)
if timeUnit not in ["batches", "points"]:
    raise ValueError(
        "timeUnit should be 'batches' or 'points', got %s." % timeUnit)
vectorCenters = [_convert_to_vector(center) for center in self.centers]
updatedModel = callMLlibFunc(
    "updateStreamingKMeansModel", vectorCenters, self._clusterWeights,
    data, decayFactor, timeUnit)
self.centers = array(updatedModel[0])
self._clusterWeights = list(updatedModel[1])
return self
<SYSTEM_TASK:> Set number of batches after which the centroids of that <END_TASK> <USER_TASK:> Description: def setHalfLife(self, halfLife, timeUnit): """ Set number of batches after which the centroids of that particular batch has half the weightage. """
self._timeUnit = timeUnit
self._decayFactor = exp(log(0.5) / halfLife)
return self
<SYSTEM_TASK:> Set initial centers. Should be set before calling trainOn. <END_TASK> <USER_TASK:> Description: def setInitialCenters(self, centers, weights): """ Set initial centers. Should be set before calling trainOn. """
self._model = StreamingKMeansModel(centers, weights)
return self
<SYSTEM_TASK:> Set the initial centres to be random samples from <END_TASK> <USER_TASK:> Description: def setRandomCenters(self, dim, weight, seed): """ Set the initial centres to be random samples from a gaussian population with constant weights. """
rng = random.RandomState(seed)
clusterCenters = rng.randn(self._k, dim)
clusterWeights = tile(weight, self._k)
self._model = StreamingKMeansModel(clusterCenters, clusterWeights)
return self
<SYSTEM_TASK:> Train the model on the incoming dstream. <END_TASK> <USER_TASK:> Description: def trainOn(self, dstream): """Train the model on the incoming dstream."""
self._validate(dstream)

def update(rdd):
    self._model.update(rdd, self._decayFactor, self._timeUnit)

dstream.foreachRDD(update)
<SYSTEM_TASK:> Make predictions on a keyed dstream. <END_TASK> <USER_TASK:> Description: def predictOnValues(self, dstream): """ Make predictions on a keyed dstream. Returns a transformed dstream object. """
self._validate(dstream)
return dstream.mapValues(lambda x: self._model.predict(x))
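An end-to-end sketch of the streaming k-means pieces above; ssc, trainingStream, and labeledStream are assumed to exist and are not defined here:

from pyspark.mllib.clustering import StreamingKMeans

model = (StreamingKMeans(k=2, decayFactor=1.0)
         .setRandomCenters(dim=3, weight=1.0, seed=42))
model.trainOn(trainingStream)                       # DStream of feature vectors
predictions = model.predictOnValues(labeledStream)  # DStream of (key, feature vector) pairs
predictions.pprint()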
<SYSTEM_TASK:> Return the topics described by weighted terms. <END_TASK> <USER_TASK:> Description: def describeTopics(self, maxTermsPerTopic=None): """Return the topics described by weighted terms. WARNING: If vocabSize and k are large, this can return a large object! :param maxTermsPerTopic: Maximum number of terms to collect for each topic. (default: vocabulary size) :return: Array over topics. Each topic is represented as a pair of matching arrays: (term indices, term weights in topic). Each topic's terms are sorted in order of decreasing weight. """
if maxTermsPerTopic is None:
    topics = self.call("describeTopics")
else:
    topics = self.call("describeTopics", maxTermsPerTopic)
return topics
<SYSTEM_TASK:> Load the LDAModel from disk. <END_TASK> <USER_TASK:> Description: def load(cls, sc, path): """Load the LDAModel from disk. :param sc: SparkContext. :param path: Path to where the model is stored. """
if not isinstance(sc, SparkContext):
    raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
    raise TypeError("path should be a basestring, got type %s" % type(path))
model = callMLlibFunc("loadLDAModel", sc, path)
return LDAModel(model)
<SYSTEM_TASK:> Train a LDA model. <END_TASK> <USER_TASK:> Description: def train(cls, rdd, k=10, maxIterations=20, docConcentration=-1.0, topicConcentration=-1.0, seed=None, checkpointInterval=10, optimizer="em"): """Train a LDA model. :param rdd: RDD of documents, which are tuples of document IDs and term (word) count vectors. The term count vectors are "bags of words" with a fixed-size vocabulary (where the vocabulary size is the length of the vector). Document IDs must be unique and >= 0. :param k: Number of topics to infer, i.e., the number of soft cluster centers. (default: 10) :param maxIterations: Maximum number of iterations allowed. (default: 20) :param docConcentration: Concentration parameter (commonly named "alpha") for the prior placed on documents' distributions over topics ("theta"). (default: -1.0) :param topicConcentration: Concentration parameter (commonly named "beta" or "eta") for the prior placed on topics' distributions over terms. (default: -1.0) :param seed: Random seed for cluster initialization. Set as None to generate seed based on system time. (default: None) :param checkpointInterval: Period (in iterations) between checkpoints. (default: 10) :param optimizer: LDAOptimizer used to perform the actual calculation. Currently "em", "online" are supported. (default: "em") """
model = callMLlibFunc("trainLDAModel", rdd, k, maxIterations, docConcentration,
                      topicConcentration, seed, checkpointInterval, optimizer)
return LDAModel(model)
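A compact training sketch (assumes an active SparkContext sc; document IDs and term counts are toy values, not from the source):

from pyspark.mllib.linalg import Vectors
from pyspark.mllib.clustering import LDA

corpus = sc.parallelize([
    [0, Vectors.dense([1.0, 2.0, 6.0])],
    [1, Vectors.dense([1.0, 3.0, 0.0])],
    [2, Vectors.dense([1.0, 4.0, 1.0])],
])
model = LDA.train(corpus, k=2, maxIterations=20, seed=1)
model.describeTopics(maxTermsPerTopic=2)   # per topic: (term indices, term weights)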
<SYSTEM_TASK:> Convert Python object into Java <END_TASK> <USER_TASK:> Description: def _py2java(sc, obj): """ Convert Python object into Java """
if isinstance(obj, RDD):
    obj = _to_java_object_rdd(obj)
elif isinstance(obj, DataFrame):
    obj = obj._jdf
elif isinstance(obj, SparkContext):
    obj = obj._jsc
elif isinstance(obj, list):
    obj = [_py2java(sc, x) for x in obj]
elif isinstance(obj, JavaObject):
    pass
elif isinstance(obj, (int, long, float, bool, bytes, unicode)):
    pass
else:
    data = bytearray(PickleSerializer().dumps(obj))
    obj = sc._jvm.org.apache.spark.mllib.api.python.SerDe.loads(data)
return obj
<SYSTEM_TASK:> A decorator that makes a class inherit documentation from its parents. <END_TASK> <USER_TASK:> Description: def inherit_doc(cls): """ A decorator that makes a class inherit documentation from its parents. """
for name, func in vars(cls).items():
    # only inherit docstring for public functions
    if name.startswith("_"):
        continue
    if not func.__doc__:
        for parent in cls.__bases__:
            parent_func = getattr(parent, name, None)
            if parent_func and getattr(parent_func, "__doc__", None):
                func.__doc__ = parent_func.__doc__
                break
return cls
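A minimal sketch of the decorator's effect (class names are made up for illustration):

class Base(object):
    def transform(self, dataset):
        """Transforms the input dataset."""
        raise NotImplementedError()

@inherit_doc
class Child(Base):
    def transform(self, dataset):   # no docstring of its own
        return dataset

Child.transform.__doc__   # 'Transforms the input dataset.', copied from Base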
<SYSTEM_TASK:> Return a new DStream in which each RDD has a single element <END_TASK> <USER_TASK:> Description: def count(self): """ Return a new DStream in which each RDD has a single element generated by counting each RDD of this DStream. """
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
<SYSTEM_TASK:> Return a new DStream containing only the elements that satisfy predicate. <END_TASK> <USER_TASK:> Description: def filter(self, f): """ Return a new DStream containing only the elements that satisfy predicate. """
def func(iterator): return filter(f, iterator) return self.mapPartitions(func, True)
<SYSTEM_TASK:> Return a new DStream by applying a function to each element of DStream. <END_TASK> <USER_TASK:> Description: def map(self, f, preservesPartitioning=False): """ Return a new DStream by applying a function to each element of DStream. """
def func(iterator): return map(f, iterator) return self.mapPartitions(func, preservesPartitioning)
<SYSTEM_TASK:> Return a new DStream in which each RDD has a single element <END_TASK> <USER_TASK:> Description: def reduce(self, func): """ Return a new DStream in which each RDD has a single element generated by reducing each RDD of this DStream. """
return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
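The elementwise operations above (count, filter, map, reduce) compose in the usual way. A minimal sketch driven by a queueStream, assuming a local SparkContext `sc`; the 1-second batch interval and the three pre-built RDDs are chosen only for illustration.

import time
from pyspark.streaming import StreamingContext

ssc = StreamingContext(sc, 1)
# Three pre-built RDDs are fed in, one per batch.
stream = ssc.queueStream([sc.parallelize(range(1, 6)) for _ in range(3)])

stream.count().pprint()                       # 5 per batch
stream.filter(lambda x: x % 2 == 0).pprint()  # 2, 4
stream.map(lambda x: x * 10).pprint()         # 10, 20, 30, 40, 50
stream.reduce(lambda a, b: a + b).pprint()    # 15

ssc.start()
time.sleep(4)                                 # let a few batches run
ssc.stop(stopSparkContext=False, stopGraceFully=True)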
<SYSTEM_TASK:> Return a copy of the DStream in which each RDD are partitioned <END_TASK> <USER_TASK:> Description: def partitionBy(self, numPartitions, partitionFunc=portable_hash): """ Return a copy of the DStream in which each RDD are partitioned using the specified partitioner. """
return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
<SYSTEM_TASK:> Apply a function to each RDD in this DStream. <END_TASK> <USER_TASK:> Description: def foreachRDD(self, func): """ Apply a function to each RDD in this DStream. """
if func.__code__.co_argcount == 1: old_func = func func = lambda t, rdd: old_func(rdd) jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer) api = self._ssc._jvm.PythonDStream api.callForeachRDD(self._jdstream, jfunc)
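A sketch of the two-argument form, assuming `stream` is an existing DStream such as the one built in the queueStream sketch above; calling rdd.count() here is reasonable because the handler runs on the driver once per batch.

def report(batch_time, rdd):
    print("batch at %s contained %d records" % (batch_time, rdd.count()))

stream.foreachRDD(report)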
<SYSTEM_TASK:> Print the first num elements of each RDD generated in this DStream. <END_TASK> <USER_TASK:> Description: def pprint(self, num=10): """ Print the first num elements of each RDD generated in this DStream. @param num: the number of elements from the start of each RDD that will be printed. """
def takeAndPrint(time, rdd): taken = rdd.take(num + 1) print("-------------------------------------------") print("Time: %s" % time) print("-------------------------------------------") for record in taken[:num]: print(record) if len(taken) > num: print("...") print("") self.foreachRDD(takeAndPrint)
<SYSTEM_TASK:> Persist the RDDs of this DStream with the given storage level <END_TASK> <USER_TASK:> Description: def persist(self, storageLevel): """ Persist the RDDs of this DStream with the given storage level """
self.is_cached = True javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel) self._jdstream.persist(javaStorageLevel) return self
<SYSTEM_TASK:> Enable periodic checkpointing of RDDs of this DStream <END_TASK> <USER_TASK:> Description: def checkpoint(self, interval): """ Enable periodic checkpointing of RDDs of this DStream @param interval: time in seconds; after each such interval, the generated RDDs will be checkpointed """
self.is_checkpointed = True self._jdstream.checkpoint(self._ssc._jduration(interval)) return self
<SYSTEM_TASK:> Return a new DStream in which each RDD contains the counts of each <END_TASK> <USER_TASK:> Description: def countByValue(self): """ Return a new DStream in which each RDD contains the counts of each distinct value in each RDD of this DStream. """
return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y)
<SYSTEM_TASK:> Save each RDD in this DStream as a text file, using string <END_TASK> <USER_TASK:> Description: def saveAsTextFiles(self, prefix, suffix=None): """ Save each RDD in this DStream as a text file, using string representation of elements. """
def saveAsTextFile(t, rdd): path = rddToFileName(prefix, suffix, t) try: rdd.saveAsTextFile(path) except Py4JJavaError as e: # after recovered from checkpointing, the foreachRDD may # be called twice if 'FileAlreadyExistsException' not in str(e): raise return self.foreachRDD(saveAsTextFile)
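A one-line sketch, assuming the `stream` DStream from the earlier sketch; the output location is hypothetical, and each batch is written under a name of roughly the form <prefix>-<batch time in ms>.<suffix>.

stream.saveAsTextFiles("/tmp/stream-output", "txt")   # hypothetical path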
<SYSTEM_TASK:> Return a new DStream in which each RDD is generated by applying a function <END_TASK> <USER_TASK:> Description: def transform(self, func): """ Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream. `func` can take a single argument (`rdd`) or two arguments (`time`, `rdd`) """
if func.__code__.co_argcount == 1: oldfunc = func func = lambda t, rdd: oldfunc(rdd) assert func.__code__.co_argcount == 2, "func should take one or two arguments" return TransformedDStream(self, func)
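A sketch showing an arbitrary RDD-to-RDD function applied to every batch, assuming the `stream` DStream from the earlier sketch; any RDD operation not exposed directly on DStream can be used this way.

sorted_stream = stream.transform(lambda rdd: rdd.sortBy(lambda x: x))
sorted_stream.pprint()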
<SYSTEM_TASK:> Return a new DStream in which each RDD is generated by applying a function <END_TASK> <USER_TASK:> Description: def transformWith(self, func, other, keepSerializer=False): """ Return a new DStream in which each RDD is generated by applying a function on each RDD of this DStream and 'other' DStream. `func` can take two arguments (`rdd_a`, `rdd_b`) or three arguments (`time`, `rdd_a`, `rdd_b`) """
if func.__code__.co_argcount == 2: oldfunc = func func = lambda t, a, b: oldfunc(a, b) assert func.__code__.co_argcount == 3, "func should take two or three arguments" jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer) dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(), other._jdstream.dstream(), jfunc) jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
<SYSTEM_TASK:> Return a new DStream by unifying data of another DStream with this DStream. <END_TASK> <USER_TASK:> Description: def union(self, other): """ Return a new DStream by unifying data of another DStream with this DStream. @param other: Another DStream having the same interval (i.e., slideDuration) as this DStream. """
if self._slideDuration != other._slideDuration: raise ValueError("the two DStream should have same slide duration") return self.transformWith(lambda a, b: a.union(b), other, True)
<SYSTEM_TASK:> Return a new DStream by applying 'cogroup' between RDDs of this <END_TASK> <USER_TASK:> Description: def cogroup(self, other, numPartitions=None): """ Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions. """
if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
<SYSTEM_TASK:> Convert datetime or unix_timestamp into Time <END_TASK> <USER_TASK:> Description: def _jtime(self, timestamp): """ Convert datetime or unix_timestamp into Time """
if isinstance(timestamp, datetime): timestamp = time.mktime(timestamp.timetuple()) return self._sc._jvm.Time(long(timestamp * 1000))
<SYSTEM_TASK:> Return a new DStream in which each RDD contains all the elements seen in a <END_TASK> <USER_TASK:> Description: def window(self, windowDuration, slideDuration=None): """ Return a new DStream in which each RDD contains all the elements seen in a sliding window of time over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval """
self._validate_window_param(windowDuration, slideDuration) d = self._ssc._jduration(windowDuration) if slideDuration is None: return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer) s = self._ssc._jduration(slideDuration) return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
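A sketch, assuming the `stream` DStream and the 1-second batch interval from the earlier sketch, so both durations are multiples of it.

# Every 10 seconds, an RDD covering the last 30 seconds of data.
last_30s = stream.window(30, 10)
last_30s.count().pprint()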
<SYSTEM_TASK:> Return a new DStream in which each RDD has a single element generated by reducing all <END_TASK> <USER_TASK:> Description: def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration): """ Return a new DStream in which each RDD has a single element generated by reducing all elements in a sliding window over this DStream. If `invReduceFunc` is not None, the reduction is done incrementally using the old window's reduced value: 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) This is more efficient than when `invReduceFunc` is None. @param reduceFunc: associative and commutative reduce function @param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y, and invertible x: `invReduceFunc(reduceFunc(x, y), x) = y` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval """
keyed = self.map(lambda x: (1, x)) reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, 1) return reduced.map(lambda kv: kv[1])
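A sketch of a sliding sum over the `stream` and `ssc` from the earlier sketch, assuming numeric elements; subtraction is the inverse of addition, which is what makes the incremental path valid, and the incremental path keeps state, so a checkpoint directory (hypothetical here) is enabled.

ssc.checkpoint("/tmp/streaming-checkpoint")   # hypothetical checkpoint dir
sliding_sum = stream.reduceByWindow(
    lambda a, b: a + b,   # fold in values entering the window
    lambda a, b: a - b,   # "inverse reduce" values leaving the window
    30, 10)
sliding_sum.pprint()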
<SYSTEM_TASK:> Return a new DStream in which each RDD contains the count of distinct elements in <END_TASK> <USER_TASK:> Description: def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None): """ Return a new DStream in which each RDD contains the count of distinct elements in RDDs in a sliding window over this DStream. @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. """
keyed = self.map(lambda x: (x, 1)) counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub, windowDuration, slideDuration, numPartitions) return counted.filter(lambda kv: kv[1] > 0)
<SYSTEM_TASK:> Return a new DStream by applying incremental `reduceByKey` over a sliding window. <END_TASK> <USER_TASK:> Description: def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None, numPartitions=None, filterFunc=None): """ Return a new DStream by applying incremental `reduceByKey` over a sliding window. The reduced value over a new window is calculated using the old window's reduced value: 1. reduce the new values that entered the window (e.g., adding new counts) 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts) `invFunc` can be None; in that case all the RDDs in the window are reduced from scratch, which can be slower than providing `invFunc`. @param func: associative and commutative reduce function @param invFunc: inverse function of `func` @param windowDuration: width of the window; must be a multiple of this DStream's batching interval @param slideDuration: sliding interval of the window (i.e., the interval after which the new DStream will generate RDDs); must be a multiple of this DStream's batching interval @param numPartitions: number of partitions of each RDD in the new DStream. @param filterFunc: function to filter expired key-value pairs; only pairs that satisfy the function are retained; set this to None if you do not want to filter """
self._validate_window_param(windowDuration, slideDuration) if numPartitions is None: numPartitions = self._sc.defaultParallelism reduced = self.reduceByKey(func, numPartitions) if invFunc: def reduceFunc(t, a, b): b = b.reduceByKey(func, numPartitions) r = a.union(b).reduceByKey(func, numPartitions) if a else b if filterFunc: r = r.filter(filterFunc) return r def invReduceFunc(t, a, b): b = b.reduceByKey(func, numPartitions) joined = a.leftOuterJoin(b, numPartitions) return joined.mapValues(lambda kv: invFunc(kv[0], kv[1]) if kv[1] is not None else kv[0]) jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer) jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer) if slideDuration is None: slideDuration = self._slideDuration dstream = self._sc._jvm.PythonReducedWindowedDStream( reduced._jdstream.dstream(), jreduceFunc, jinvReduceFunc, self._ssc._jduration(windowDuration), self._ssc._jduration(slideDuration)) return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer) else: return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
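A sketch of the classic windowed word count; `lines` is assumed to be a DStream of text lines (for example from a socket source), and checkpointing is assumed to be enabled as in the previous sketch.

pairs = lines.flatMap(lambda line: line.split(" ")).map(lambda w: (w, 1))
windowed_counts = pairs.reduceByKeyAndWindow(
    lambda a, b: a + b,   # add counts entering the window
    lambda a, b: a - b,   # subtract counts leaving the window
    windowDuration=30, slideDuration=10)
windowed_counts.pprint()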
<SYSTEM_TASK:> Return a new "state" DStream where the state for each key is updated by applying <END_TASK> <USER_TASK:> Description: def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None): """ Return a new "state" DStream where the state for each key is updated by applying the given function on the previous state of the key and the new values of the key. @param updateFunc: State update function. If this function returns None, then corresponding state key-value pair will be eliminated. """
if numPartitions is None: numPartitions = self._sc.defaultParallelism if initialRDD and not isinstance(initialRDD, RDD): initialRDD = self._sc.parallelize(initialRDD) def reduceFunc(t, a, b): if a is None: g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None)) else: g = a.cogroup(b.partitionBy(numPartitions), numPartitions) g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None)) state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1])) return state.filter(lambda k_v: k_v[1] is not None) jreduceFunc = TransformFunction(self._sc, reduceFunc, self._sc.serializer, self._jrdd_deserializer) if initialRDD: initialRDD = initialRDD._reserialize(self._jrdd_deserializer) dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc, initialRDD._jrdd) else: dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc) return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
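A sketch of a running per-key count; the update function receives the list of new values for a key and that key's previous state (None the first time). It assumes the `pairs` DStream and `ssc` from the previous sketches, and a hypothetical checkpoint directory, which stateful operations require.

def update_count(new_values, last_count):
    return sum(new_values) + (last_count or 0)

ssc.checkpoint("/tmp/streaming-checkpoint")   # hypothetical checkpoint dir
running_counts = pairs.updateStateByKey(update_count)
running_counts.pprint()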
<SYSTEM_TASK:> Return a CallSite representing the first Spark call in the current call stack. <END_TASK> <USER_TASK:> Description: def first_spark_call(): """ Return a CallSite representing the first Spark call in the current call stack. """
tb = traceback.extract_stack() if len(tb) == 0: return None file, line, module, what = tb[len(tb) - 1] sparkpath = os.path.dirname(file) first_spark_frame = len(tb) - 1 for i in range(0, len(tb)): file, line, fun, what = tb[i] if file.startswith(sparkpath): first_spark_frame = i break if first_spark_frame == 0: file, line, fun, what = tb[0] return CallSite(function=fun, file=file, linenum=line) sfile, sline, sfun, swhat = tb[first_spark_frame] ufile, uline, ufun, uwhat = tb[first_spark_frame - 1] return CallSite(function=sfun, file=ufile, linenum=uline)
<SYSTEM_TASK:> Parse a line of text into an MLlib LabeledPoint object. <END_TASK> <USER_TASK:> Description: def parsePoint(line): """ Parse a line of text into an MLlib LabeledPoint object. """
values = [float(s) for s in line.split(' ')] if values[0] == -1: # Convert -1 labels to 0 for MLlib values[0] = 0 return LabeledPoint(values[0], values[1:])
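A quick check with a made-up input line in the expected "<label> <feature1> <feature2> ..." layout; the -1 label is mapped to 0 for MLlib.

point = parsePoint("-1 1.5 0.0 2.25")
print(point)   # LabeledPoint with label 0.0 and features [1.5, 0.0, 2.25]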
<SYSTEM_TASK:> When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. <END_TASK> <USER_TASK:> Description: def _to_corrected_pandas_type(dt): """ When converting Spark SQL records to Pandas DataFrame, the inferred data type may be wrong. This method gets the corrected data type for Pandas if that type may be inferred incorrectly. """
import numpy as np if type(dt) == ByteType: return np.int8 elif type(dt) == ShortType: return np.int16 elif type(dt) == IntegerType: return np.int32 elif type(dt) == FloatType: return np.float32 else: return None
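A brief check of the mapping; types outside the table fall through to None, meaning pandas' own inferred dtype is left untouched.

from pyspark.sql.types import IntegerType, StringType

print(_to_corrected_pandas_type(IntegerType()))   # <class 'numpy.int32'>
print(_to_corrected_pandas_type(StringType()))    # None -> no correction applied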
<SYSTEM_TASK:> Prints the first ``n`` rows to the console. <END_TASK> <USER_TASK:> Description: def show(self, n=20, truncate=True, vertical=False): """Prints the first ``n`` rows to the console. :param n: Number of rows to show. :param truncate: If set to True, truncates strings longer than 20 chars by default. If set to a number greater than one, truncates long strings to length ``truncate`` and aligns cells right. :param vertical: If set to True, print output rows vertically (one line per column value). >>> df DataFrame[age: int, name: string] >>> df.show() +---+-----+ |age| name| +---+-----+ | 2|Alice| | 5| Bob| +---+-----+ >>> df.show(truncate=3) +---+----+ |age|name| +---+----+ | 2| Ali| | 5| Bob| +---+----+ >>> df.show(vertical=True) -RECORD 0----- age | 2 name | Alice -RECORD 1----- age | 5 name | Bob """
if isinstance(truncate, bool) and truncate: print(self._jdf.showString(n, 20, vertical)) else: print(self._jdf.showString(n, int(truncate), vertical))
<SYSTEM_TASK:> Returns the DataFrame rendered as HTML when eager evaluation is enabled <END_TASK> <USER_TASK:> Description: def _repr_html_(self): """Returns the DataFrame rendered as HTML when eager evaluation is enabled via 'spark.sql.repl.eagerEval.enabled'. This is only called by REPLs that support eager evaluation with HTML. """
import cgi if not self._support_repr_html: self._support_repr_html = True if self.sql_ctx._conf.isReplEagerEvalEnabled(): max_num_rows = max(self.sql_ctx._conf.replEagerEvalMaxNumRows(), 0) sock_info = self._jdf.getRowsToPython( max_num_rows, self.sql_ctx._conf.replEagerEvalTruncate()) rows = list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer()))) head = rows[0] row_data = rows[1:] has_more_data = len(row_data) > max_num_rows row_data = row_data[:max_num_rows] html = "<table border='1'>\n" # generate table head html += "<tr><th>%s</th></tr>\n" % "</th><th>".join(map(lambda x: cgi.escape(x), head)) # generate table rows for row in row_data: html += "<tr><td>%s</td></tr>\n" % "</td><td>".join( map(lambda x: cgi.escape(x), row)) html += "</table>\n" if has_more_data: html += "only showing top %d %s\n" % ( max_num_rows, "row" if max_num_rows == 1 else "rows") return html else: return None
<SYSTEM_TASK:> Returns a locally checkpointed version of this Dataset. Checkpointing can be used to <END_TASK> <USER_TASK:> Description: def localCheckpoint(self, eager=True): """Returns a locally checkpointed version of this Dataset. Checkpointing can be used to truncate the logical plan of this DataFrame, which is especially useful in iterative algorithms where the plan may grow exponentially. Local checkpoints are stored in the executors using the caching subsystem and therefore they are not reliable. :param eager: Whether to checkpoint this DataFrame immediately .. note:: Experimental """
jdf = self._jdf.localCheckpoint(eager) return DataFrame(jdf, self.sql_ctx)
<SYSTEM_TASK:> Specifies some hint on the current DataFrame. <END_TASK> <USER_TASK:> Description: def hint(self, name, *parameters): """Specifies some hint on the current DataFrame. :param name: A name of the hint. :param parameters: Optional parameters. :return: :class:`DataFrame` >>> df.join(df2.hint("broadcast"), "name").show() +----+---+------+ |name|age|height| +----+---+------+ | Bob| 5| 85| +----+---+------+ """
if len(parameters) == 1 and isinstance(parameters[0], list): parameters = parameters[0] if not isinstance(name, str): raise TypeError("name should be provided as str, got {0}".format(type(name))) allowed_types = (basestring, list, float, int) for p in parameters: if not isinstance(p, allowed_types): raise TypeError( "all parameters should be in {0}, got {1} of type {2}".format( allowed_types, p, type(p))) jdf = self._jdf.hint(name, self._jseq(parameters)) return DataFrame(jdf, self.sql_ctx)
<SYSTEM_TASK:> Limits the result count to the number specified. <END_TASK> <USER_TASK:> Description: def limit(self, num): """Limits the result count to the number specified. >>> df.limit(1).collect() [Row(age=2, name=u'Alice')] >>> df.limit(0).collect() [] """
jdf = self._jdf.limit(num) return DataFrame(jdf, self.sql_ctx)
<SYSTEM_TASK:> Returns a stratified sample without replacement based on the <END_TASK> <USER_TASK:> Description: def sampleBy(self, col, fractions, seed=None): """ Returns a stratified sample without replacement based on the fraction given on each stratum. :param col: column that defines strata :param fractions: sampling fraction for each stratum. If a stratum is not specified, we treat its fraction as zero. :param seed: random seed :return: a new DataFrame that represents the stratified sample >>> from pyspark.sql.functions import col >>> dataset = sqlContext.range(0, 100).select((col("id") % 3).alias("key")) >>> sampled = dataset.sampleBy("key", fractions={0: 0.1, 1: 0.2}, seed=0) >>> sampled.groupBy("key").count().orderBy("key").show() +---+-----+ |key|count| +---+-----+ | 0| 3| | 1| 6| +---+-----+ >>> dataset.sampleBy(col("key"), fractions={2: 1.0}, seed=0).count() 33 .. versionchanged:: 3.0 Added sampling by a column of :class:`Column` """
if isinstance(col, basestring): col = Column(col) elif not isinstance(col, Column): raise ValueError("col must be a string or a column, but got %r" % type(col)) if not isinstance(fractions, dict): raise ValueError("fractions must be a dict but got %r" % type(fractions)) for k, v in fractions.items(): if not isinstance(k, (float, int, long, basestring)): raise ValueError("key must be float, int, long, or string, but got %r" % type(k)) fractions[k] = float(v) col = col._jc seed = seed if seed is not None else random.randint(0, sys.maxsize) return DataFrame(self._jdf.stat().sampleBy(col, self._jmap(fractions), seed), self.sql_ctx)
<SYSTEM_TASK:> Returns all column names and their data types as a list. <END_TASK> <USER_TASK:> Description: def dtypes(self): """Returns all column names and their data types as a list. >>> df.dtypes [('age', 'int'), ('name', 'string')] """
return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
<SYSTEM_TASK:> Return a JVM Seq of Columns from a list of Column or names <END_TASK> <USER_TASK:> Description: def _jseq(self, cols, converter=None): """Return a JVM Seq of Columns from a list of Column or names"""
return _to_seq(self.sql_ctx._sc, cols, converter)