text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Return a JVM Seq of Columns from a list of Column or column names
<END_TASK>
<USER_TASK:>
Description:
def _jcols(self, *cols):
"""Return a JVM Seq of Columns from a list of Column or column names
If `cols` has only one list in it, cols[0] will be used as the list.
""" |
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return self._jseq(cols, _to_java_column) |
<SYSTEM_TASK:>
Return a JVM Seq of Columns that describes the sort order
<END_TASK>
<USER_TASK:>
Description:
def _sort_cols(self, cols, kwargs):
""" Return a JVM Seq of Columns that describes the sort order
""" |
if not cols:
raise ValueError("should sort by at least one column")
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jcols = [_to_java_column(c) for c in cols]
ascending = kwargs.get('ascending', True)
if isinstance(ascending, (bool, int)):
if not ascending:
jcols = [jc.desc() for jc in jcols]
elif isinstance(ascending, list):
jcols = [jc if asc else jc.desc()
for asc, jc in zip(ascending, jcols)]
else:
raise TypeError("ascending can only be boolean or list, but got %s" % type(ascending))
return self._jseq(jcols) |
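For context, a hedged usage sketch of the public sort API that goes through this helper (assuming the example `df` with `age` and `name` columns used elsewhere in these snippets):
# Ascending by default; a Column with .desc() or an `ascending` list flips the order.
df.sort("age").collect()
df.sort(df.age.desc()).collect()
df.orderBy(["age", "name"], ascending=[0, 1]).collect()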
<SYSTEM_TASK:>
Computes basic statistics for numeric and string columns.
<END_TASK>
<USER_TASK:>
Description:
def describe(self, *cols):
"""Computes basic statistics for numeric and string columns.
This includes count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
""" |
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
jdf = self._jdf.describe(self._jseq(cols))
return DataFrame(jdf, self.sql_ctx) |
<SYSTEM_TASK:>
Returns the first ``n`` rows.
<END_TASK>
<USER_TASK:>
Description:
def head(self, n=None):
"""Returns the first ``n`` rows.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
:param n: int, default None. Number of rows to return.
:return: If n is None, return a single :class:`Row` (or None for an empty DataFrame).
Otherwise, return a list of at most ``n`` :class:`Row` objects.
>>> df.head()
Row(age=2, name=u'Alice')
>>> df.head(1)
[Row(age=2, name=u'Alice')]
""" |
if n is None:
rs = self.head(1)
return rs[0] if rs else None
return self.take(n) |
<SYSTEM_TASK:>
Filters rows using the given condition.
<END_TASK>
<USER_TASK:>
Description:
def filter(self, condition):
"""Filters rows using the given condition.
:func:`where` is an alias for :func:`filter`.
:param condition: a :class:`Column` of :class:`types.BooleanType`
or a string of SQL expression.
>>> df.filter(df.age > 3).collect()
[Row(age=5, name=u'Bob')]
>>> df.where(df.age == 2).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter("age > 3").collect()
[Row(age=5, name=u'Bob')]
>>> df.where("age = 2").collect()
[Row(age=2, name=u'Alice')]
""" |
if isinstance(condition, basestring):
jdf = self._jdf.filter(condition)
elif isinstance(condition, Column):
jdf = self._jdf.filter(condition._jc)
else:
raise TypeError("condition should be string or Column")
return DataFrame(jdf, self.sql_ctx) |
<SYSTEM_TASK:>
Calculates the approximate quantiles of numerical columns of a
<END_TASK>
<USER_TASK:>
Description:
def approxQuantile(self, col, probabilities, relativeError):
"""
Calculates the approximate quantiles of numerical columns of a
DataFrame.
The result of this algorithm has the following deterministic bound:
If the DataFrame has N elements and if we request the quantile at
probability `p` up to error `err`, then the algorithm will return
a sample `x` from the DataFrame so that the *exact* rank of `x` is
close to (p * N). More precisely,
floor((p - err) * N) <= rank(x) <= ceil((p + err) * N).
This method implements a variation of the Greenwald-Khanna
algorithm (with some speed optimizations). The algorithm was first
presented in [[https://doi.org/10.1145/375663.375670
Space-efficient Online Computation of Quantile Summaries]]
by Greenwald and Khanna.
Note that null values will be ignored in numerical columns before calculation.
For columns only containing null values, an empty list is returned.
:param col: str, list.
Can be a single column name, or a list of names for multiple columns.
:param probabilities: a list of quantile probabilities
Each number must belong to [0, 1].
For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
:param relativeError: The relative target precision to achieve
(>= 0). If set to zero, the exact quantiles are computed, which
could be very expensive. Note that values greater than 1 are
accepted but give the same result as 1.
:return: the approximate quantiles at the given probabilities. If
the input `col` is a string, the output is a list of floats. If the
input `col` is a list or tuple of strings, the output is also a
list, but each element in it is a list of floats, i.e., the output
is a list of list of floats.
.. versionchanged:: 2.2
Added support for multiple columns.
""" |
if not isinstance(col, (basestring, list, tuple)):
raise ValueError("col should be a string, list or tuple, but got %r" % type(col))
isStr = isinstance(col, basestring)
if isinstance(col, tuple):
col = list(col)
elif isStr:
col = [col]
for c in col:
if not isinstance(c, basestring):
raise ValueError("columns should be strings, but got %r" % type(c))
col = _to_list(self._sc, col)
if not isinstance(probabilities, (list, tuple)):
raise ValueError("probabilities should be a list or tuple")
if isinstance(probabilities, tuple):
probabilities = list(probabilities)
for p in probabilities:
if not isinstance(p, (float, int, long)) or p < 0 or p > 1:
raise ValueError("probabilities should be numerical (float, int, long) in [0,1].")
probabilities = _to_list(self._sc, probabilities)
if not isinstance(relativeError, (float, int, long)) or relativeError < 0:
raise ValueError("relativeError should be numerical (float, int, long) >= 0.")
relativeError = float(relativeError)
jaq = self._jdf.stat().approxQuantile(col, probabilities, relativeError)
jaq_list = [list(j) for j in jaq]
return jaq_list[0] if isStr else jaq_list |
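Since `approxQuantile` has no doctest here, a hedged usage sketch (assuming the two-row example `df` with ages 2 and 5 used in the other snippets):
# Single column: returns a list of floats, one per requested probability.
df.approxQuantile("age", [0.25, 0.5, 0.75], 0.05)
# Multiple columns: returns a list of lists of floats.
df.approxQuantile(["age"], [0.5], 0.0)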
<SYSTEM_TASK:>
Returns all records as a list of ArrowRecordBatches, pyarrow must be installed
<END_TASK>
<USER_TASK:>
Description:
def _collectAsArrow(self):
"""
Returns all records as a list of ArrowRecordBatches, pyarrow must be installed
and available on driver and worker Python environments.
.. note:: Experimental.
""" |
with SCCallSiteSync(self._sc) as css:
sock_info = self._jdf.collectAsArrowToPython()
# Collect the list of unordered batches, where the last element is a list of correct order indices
results = list(_load_from_socket(sock_info, ArrowCollectSerializer()))
batches = results[:-1]
batch_order = results[-1]
# Re-order the batch list using the correct order
return [batches[i] for i in batch_order] |
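`_collectAsArrow` is internal; a hedged sketch of how the returned batches could be reassembled on the driver with pyarrow (roughly what `toPandas()` does when Arrow-based conversion is enabled; not a public API):
import pyarrow as pa

batches = df._collectAsArrow()
if batches:
    # Stitch the correctly ordered record batches back into one table, then into pandas.
    pdf = pa.Table.from_batches(batches).to_pandas()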
<SYSTEM_TASK:>
Returns a list of function information via JVM. Sorts wrapped expression infos by name
<END_TASK>
<USER_TASK:>
Description:
def _list_function_infos(jvm):
"""
Returns a list of function information via JVM. Sorts wrapped expression infos by name
and returns them.
""" |
jinfos = jvm.org.apache.spark.sql.api.python.PythonSQLUtils.listBuiltinFunctionInfos()
infos = []
for jinfo in jinfos:
name = jinfo.getName()
usage = jinfo.getUsage()
usage = usage.replace("_FUNC_", name) if usage is not None else usage
infos.append(ExpressionInfo(
className=jinfo.getClassName(),
name=name,
usage=usage,
arguments=jinfo.getArguments().replace("_FUNC_", name),
examples=jinfo.getExamples().replace("_FUNC_", name),
note=jinfo.getNote(),
since=jinfo.getSince(),
deprecated=jinfo.getDeprecated()))
return sorted(infos, key=lambda i: i.name) |
<SYSTEM_TASK:>
Makes the usage description pretty and returns a formatted string if `usage`
<END_TASK>
<USER_TASK:>
Description:
def _make_pretty_usage(usage):
"""
Makes the usage description pretty and returns a formatted string if `usage`
is not an empty string. Otherwise, returns None.
""" |
if usage is not None and usage.strip() != "":
usage = "\n".join(map(lambda u: u.strip(), usage.split("\n")))
return "%s\n\n" % usage |
<SYSTEM_TASK:>
Makes the arguments description pretty and returns a formatted string if `arguments`
<END_TASK>
<USER_TASK:>
Description:
def _make_pretty_arguments(arguments):
"""
Makes the arguments description pretty and returns a formatted string if `arguments`
starts with the argument prefix. Otherwise, returns None.
Expected input:
Arguments:
* arg0 - ...
...
* arg0 - ...
...
Expected output:
**Arguments:**
* arg0 - ...
...
* arg0 - ...
...
""" |
if arguments.startswith("\n Arguments:"):
arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:]))
return "**Arguments:**\n\n%s\n\n" % arguments |
<SYSTEM_TASK:>
Makes the examples description pretty and returns a formatted string if `examples`
<END_TASK>
<USER_TASK:>
Description:
def _make_pretty_examples(examples):
"""
Makes the examples description pretty and returns a formatted string if `examples`
starts with the example prefix. Otherwise, returns None.
Expected input:
Examples:
> SELECT ...;
...
> SELECT ...;
...
Expected output:
**Examples:**
```
> SELECT ...;
...
> SELECT ...;
...
```
""" |
if examples.startswith("\n Examples:"):
examples = "\n".join(map(lambda u: u[6:], examples.strip().split("\n")[1:]))
return "**Examples:**\n\n```\n%s\n```\n\n" % examples |
<SYSTEM_TASK:>
Makes the note description pretty and returns a formatted string if `note` is not
<END_TASK>
<USER_TASK:>
Description:
def _make_pretty_note(note):
"""
Makes the note description pretty and returns a formatted string if `note` is not
an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Note:**
...
""" |
if note != "":
note = "\n".join(map(lambda n: n[4:], note.split("\n")))
return "**Note:**\n%s\n" % note |
<SYSTEM_TASK:>
Makes the deprecated description pretty and returns a formatted string if `deprecated`
<END_TASK>
<USER_TASK:>
Description:
def _make_pretty_deprecated(deprecated):
"""
Makes the deprecated description pretty and returns a formatted string if `deprecated`
is not an empty string. Otherwise, returns None.
Expected input:
...
Expected output:
**Deprecated:**
...
""" |
if deprecated != "":
deprecated = "\n".join(map(lambda n: n[4:], deprecated.split("\n")))
return "**Deprecated:**\n%s\n" % deprecated |
<SYSTEM_TASK:>
Train a logistic regression model on the given data.
<END_TASK>
<USER_TASK:>
Description:
def train(cls, data, iterations=100, initialWeights=None, regParam=0.0, regType="l2",
intercept=False, corrections=10, tolerance=1e-6, validateData=True, numClasses=2):
"""
Train a logistic regression model on the given data.
:param data:
The training data, an RDD of LabeledPoint.
:param iterations:
The number of iterations.
(default: 100)
:param initialWeights:
The initial weights.
(default: None)
:param regParam:
The regularizer parameter.
(default: 0.0)
:param regType:
The type of regularizer used for training our model.
Supported values:
- "l1" for using L1 regularization
- "l2" for using L2 regularization (default)
- None for no regularization
:param intercept:
Boolean parameter which indicates the use or not of the
augmented representation for training data (i.e., whether bias
features are activated or not).
(default: False)
:param corrections:
The number of corrections used in the LBFGS update.
If a known updater is used for binary classification,
it calls the ml implementation and this parameter will
have no effect. (default: 10)
:param tolerance:
The convergence tolerance of iterations for L-BFGS.
(default: 1e-6)
:param validateData:
Boolean parameter which indicates if the algorithm should
validate data before training.
(default: True)
:param numClasses:
The number of classes (i.e., outcomes) a label can take in
Multinomial Logistic Regression.
(default: 2)
>>> data = [
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> lrm = LogisticRegressionWithLBFGS.train(sc.parallelize(data), iterations=10)
>>> lrm.predict([1.0, 0.0])
1
>>> lrm.predict([0.0, 1.0])
0
""" |
def train(rdd, i):
return callMLlibFunc("trainLogisticRegressionModelWithLBFGS", rdd, int(iterations), i,
float(regParam), regType, bool(intercept), int(corrections),
float(tolerance), bool(validateData), int(numClasses))
if initialWeights is None:
if numClasses == 2:
initialWeights = [0.0] * len(data.first().features)
else:
if intercept:
initialWeights = [0.0] * (len(data.first().features) + 1) * (numClasses - 1)
else:
initialWeights = [0.0] * len(data.first().features) * (numClasses - 1)
return _regression_train_wrapper(train, LogisticRegressionModel, data, initialWeights) |
<SYSTEM_TASK:>
Push item onto heap, maintaining the heap invariant.
<END_TASK>
<USER_TASK:>
Description:
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant.""" |
heap.append(item)
_siftdown(heap, 0, len(heap)-1) |
<SYSTEM_TASK:>
Pop the smallest item off the heap, maintaining the heap invariant.
<END_TASK>
<USER_TASK:>
Description:
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant.""" |
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt |
<SYSTEM_TASK:>
Pop and return the current smallest value, and add the new item.
<END_TASK>
<USER_TASK:>
Description:
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
""" |
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem |
<SYSTEM_TASK:>
Fast version of a heappush followed by a heappop.
<END_TASK>
<USER_TASK:>
Description:
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop.""" |
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item |
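A short sketch tying the min-heap primitives above together (standard heapq semantics):
heap = []
for x in [5, 1, 4, 2]:
    heappush(heap, x)                  # build the heap incrementally
assert heappop(heap) == 1              # smallest element comes off first
assert heappushpop(heap, 0) == 0       # 0 <= current min, so it is returned directly
assert heapreplace(heap, 7) == 2       # pops the current min (2), then pushes 7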
<SYSTEM_TASK:>
Maxheap version of a heappop.
<END_TASK>
<USER_TASK:>
Description:
def _heappop_max(heap):
"""Maxheap version of a heappop.""" |
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt |
<SYSTEM_TASK:>
Maxheap version of a heappop followed by a heappush.
<END_TASK>
<USER_TASK:>
Description:
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush.""" |
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem |
<SYSTEM_TASK:>
Find the n smallest elements in a dataset.
<END_TASK>
<USER_TASK:>
Description:
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
""" |
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is None, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top = result[0][0]
order += 1
result.sort()
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order += 1
result.sort()
return [r[2] for r in result] |
<SYSTEM_TASK:>
Find the n largest elements in a dataset.
<END_TASK>
<USER_TASK:>
Description:
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
""" |
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is None, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[0] for r in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top = result[0][0]
order -= 1
result.sort(reverse=True)
return [r[2] for r in result] |
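A hedged usage sketch for `nsmallest`/`nlargest`, including the `key` path:
nums = [5, 1, 4, 2, 3]
assert nsmallest(2, nums) == [1, 2]
assert nlargest(2, nums) == [5, 4]
assert nlargest(1, ["fig", "banana", "kiwi"], key=len) == ["banana"]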
<SYSTEM_TASK:>
Compute the correlation matrix with specified method using dataset.
<END_TASK>
<USER_TASK:>
Description:
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
""" |
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args)) |
<SYSTEM_TASK:>
Given a list of metrics, provides a builder that computes the metrics from a column.
<END_TASK>
<USER_TASK:>
Description:
def metrics(*metrics):
"""
Given a list of metrics, provides a builder that computes the metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- variance: a vector that contains the coefficient-wise variance.
- count: the count of all vectors seen.
- numNonzeros: a vector with the number of non-zeros for each coefficient.
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
interface.
""" |
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js) |
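A hedged usage sketch (assuming an active SparkSession `spark`; the toy data is illustrative):
from pyspark.ml.linalg import Vectors
from pyspark.ml.stat import Summarizer

df = spark.createDataFrame(
    [(Vectors.dense(1.0, 2.0), 1.0), (Vectors.dense(3.0, 4.0), 2.0)],
    ["features", "weight"])
summarizer = Summarizer.metrics("mean", "count")
# A single aggregate column holding the requested statistics as a struct.
df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)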
<SYSTEM_TASK:>
Returns an aggregate object that contains the summary of the column with the requested
<END_TASK>
<USER_TASK:>
Description:
def summary(self, featuresCol, weightCol=None):
"""
Returns an aggregate object that contains the summary of the column with the requested
metrics.
:param featuresCol:
a column that contains the features as a Vector object.
:param weightCol:
a column that contains the weight value. The default weight is 1.0.
:return:
an aggregate column that contains the statistics. The exact content of this
structure is determined during the creation of the builder.
""" |
featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc)) |
<SYSTEM_TASK:>
Builds and returns all combinations of parameters specified
<END_TASK>
<USER_TASK:>
Description:
def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
""" |
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)] |
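A hedged usage sketch of the grid builder (the estimator and parameter values are illustrative):
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.tuning import ParamGridBuilder

lr = LogisticRegression()
grid = (ParamGridBuilder()
        .addGrid(lr.maxIter, [10, 20])
        .addGrid(lr.regParam, [0.01, 0.1])
        .build())
len(grid)  # 4 param maps: the cross product of both value lists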
<SYSTEM_TASK:>
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
<END_TASK>
<USER_TASK:>
Description:
def _to_java_impl(self):
"""
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
""" |
gateway = SparkContext._gateway
cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
for idx, epm in enumerate(self.getEstimatorParamMaps()):
java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
java_estimator = self.getEstimator()._to_java()
java_evaluator = self.getEvaluator()._to_java()
return java_estimator, java_epms, java_evaluator |
<SYSTEM_TASK:>
Given a Java CrossValidator, create and return a Python wrapper of it.
<END_TASK>
<USER_TASK:>
Description:
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
""" |
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage |
<SYSTEM_TASK:>
Transfer this instance to a Java CrossValidator. Used for ML persistence.
<END_TASK>
<USER_TASK:>
Description:
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
:return: Java object equivalent to this instance.
""" |
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
return _java_obj |
<SYSTEM_TASK:>
Creates a copy of this instance with a randomly generated uid
<END_TASK>
<USER_TASK:>
Description:
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
""" |
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = self.avgMetrics
subModels = self.subModels
return CrossValidatorModel(bestModel, avgMetrics, subModels) |
<SYSTEM_TASK:>
Creates a copy of this instance with a randomly generated uid
<END_TASK>
<USER_TASK:>
Description:
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
""" |
if extra is None:
extra = dict()
newTVS = Params.copy(self, extra)
if self.isSet(self.estimator):
newTVS.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newTVS.setEvaluator(self.getEvaluator().copy(extra))
return newTVS |
<SYSTEM_TASK:>
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
<END_TASK>
<USER_TASK:>
Description:
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
Used for ML persistence.
""" |
estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
trainRatio = java_stage.getTrainRatio()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
trainRatio=trainRatio, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage |
<SYSTEM_TASK:>
Creates a copy of this instance with a randomly generated uid
<END_TASK>
<USER_TASK:>
Description:
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It also creates a shallow copy of the validationMetrics.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
""" |
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
validationMetrics = list(self.validationMetrics)
subModels = self.subModels
return TrainValidationSplitModel(bestModel, validationMetrics, subModels) |
<SYSTEM_TASK:>
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
<END_TASK>
<USER_TASK:>
Description:
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
Used for ML persistence.
""" |
# Load information from java_stage to the instance.
bestModel = JavaParams._from_java(java_stage.bestModel())
estimator, epms, evaluator = super(TrainValidationSplitModel,
cls)._from_java_impl(java_stage)
# Create a new instance of this stage.
py_stage = cls(bestModel=bestModel).setEstimator(estimator)
py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
if java_stage.hasSubModels():
py_stage.subModels = [JavaParams._from_java(sub_model)
for sub_model in java_stage.subModels()]
py_stage._resetUid(java_stage.uid())
return py_stage |
<SYSTEM_TASK:>
Returns the value of Spark runtime configuration property for the given key,
<END_TASK>
<USER_TASK:>
Description:
def get(self, key, default=_NoValue):
"""Returns the value of Spark runtime configuration property for the given key,
assuming it is set.
""" |
self._checkType(key, "key")
if default is _NoValue:
return self._jconf.get(key)
else:
if default is not None:
self._checkType(default, "default")
return self._jconf.get(key, default) |
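A hedged usage sketch through the `spark.conf` accessor (assuming an active SparkSession `spark`; the unset key is illustrative):
spark.conf.set("spark.sql.shuffle.partitions", "50")
spark.conf.get("spark.sql.shuffle.partitions")        # '50'
spark.conf.get("some.unset.key", "fallback")          # default returned when the key is unset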
<SYSTEM_TASK:>
Assert that an object is of type str.
<END_TASK>
<USER_TASK:>
Description:
def _checkType(self, obj, identifier):
"""Assert that an object is of type str.""" |
if not isinstance(obj, basestring):
raise TypeError("expected %s '%s' to be a string (was '%s')" %
(identifier, obj, type(obj).__name__)) |
<SYSTEM_TASK:>
Wrap the deprecated function to print out deprecation warnings
<END_TASK>
<USER_TASK:>
Description:
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings""" |
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_) |
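A hedged illustration of applying the wrapper (assuming `degrees` is the non-deprecated function defined elsewhere in this module; the alias is illustrative):
# The alias behaves like `degrees` but emits a DeprecationWarning on every call.
toDegrees = _wrap_deprecated_function(degrees, "Deprecated in 2.1, use degrees instead.")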
<SYSTEM_TASK:>
Marks a DataFrame as small enough for use in broadcast joins.
<END_TASK>
<USER_TASK:>
Description:
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins.""" |
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx) |
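A hedged usage sketch, assuming a large `df_large` and a small `df_small` that share a `key` column (both names are illustrative):
# Hint the optimizer to replicate the small side to every executor,
# avoiding a shuffle of the large side.
joined = df_large.join(broadcast(df_small), on="key", how="inner")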
<SYSTEM_TASK:>
Returns col1 if it is not NaN, or col2 if col1 is NaN.
<END_TASK>
<USER_TASK:>
Description:
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))) |
<SYSTEM_TASK:>
Parses the expression string into the column that it represents
<END_TASK>
<USER_TASK:>
Description:
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str)) |
<SYSTEM_TASK:>
Returns the first argument-based logarithm of the second argument.
<END_TASK>
<USER_TASK:>
Description:
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
""" |
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc) |
<SYSTEM_TASK:>
Convert a number in a string column from one base to another.
<END_TASK>
<USER_TASK:>
Description:
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase)) |
<SYSTEM_TASK:>
Returns the date that is `days` days after `start`
<END_TASK>
<USER_TASK:>
Description:
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days)) |
<SYSTEM_TASK:>
Returns the number of days from `start` to `end`.
<END_TASK>
<USER_TASK:>
Description:
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))) |
<SYSTEM_TASK:>
Returns the date that is `months` months after `start`
<END_TASK>
<USER_TASK:>
Description:
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months)) |
<SYSTEM_TASK:>
Returns timestamp truncated to the unit specified by the format.
<END_TASK>
<USER_TASK:>
Description:
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp))) |
<SYSTEM_TASK:>
Returns the first date which is later than the value of the date column.
<END_TASK>
<USER_TASK:>
Description:
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek)) |
<SYSTEM_TASK:>
Returns the last day of the month which the given date belongs to.
<END_TASK>
<USER_TASK:>
Description:
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date))) |
<SYSTEM_TASK:>
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
<END_TASK>
<USER_TASK:>
Description:
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents the number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
time zone to the given time zone.
This function may return a confusing result if the input is a string with a time zone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the time zone in the string, and finally displays the result by converting the
timestamp to a string according to the session local time zone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
.. note:: Deprecated in 3.0. See SPARK-25496
""" |
warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz)) |
<SYSTEM_TASK:>
Calculates the hash code of given columns, and returns the result as an int column.
<END_TASK>
<USER_TASK:>
Description:
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc) |
<SYSTEM_TASK:>
Concatenates multiple input string columns together into a single string column,
<END_TASK>
<USER_TASK:>
Description:
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column))) |
<SYSTEM_TASK:>
Formats the arguments in printf-style and returns the result as a string column.
<END_TASK>
<USER_TASK:>
Description:
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and is used as the result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column))) |
<SYSTEM_TASK:>
Locate the position of the first occurrence of substr column in the given string.
<END_TASK>
<USER_TASK:>
Description:
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments are null.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr)) |
<SYSTEM_TASK:>
Substring starts at `pos` and is of length `len` when str is String type or
<END_TASK>
<USER_TASK:>
Description:
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero based, but 1 based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len)) |
<SYSTEM_TASK:>
Computes the Levenshtein distance of the two given strings.
<END_TASK>
<USER_TASK:>
Description:
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc) |
<SYSTEM_TASK:>
Locate the position of the first occurrence of substr in a string column, after position pos.
<END_TASK>
<USER_TASK:>
Description:
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero based, but 1 based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (1 based)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos)) |
<SYSTEM_TASK:>
Repeats a string column n times, and returns it as a new string column.
<END_TASK>
<USER_TASK:>
Description:
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n)) |
<SYSTEM_TASK:>
Splits str around matches of the given pattern.
<END_TASK>
<USER_TASK:>
Description:
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit)) |
<SYSTEM_TASK:>
r"""Extract a specific group matched by a Java regex, from the specified string column.
<END_TASK>
<USER_TASK:>
Description:
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc) |
<SYSTEM_TASK:>
r"""Replace all substrings of the specified string value that match regexp with rep.
<END_TASK>
<USER_TASK:>
Description:
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc) |
<SYSTEM_TASK:>
A function that translates any character in the `srcCol` by a character in `matching`.
<END_TASK>
<USER_TASK:>
Description:
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace)) |
<SYSTEM_TASK:>
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
<END_TASK>
<USER_TASK:>
Description:
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
""" |
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement)) |
<SYSTEM_TASK:>
Concatenates multiple input columns together into a single column.
<END_TASK>
<USER_TASK:>
Description:
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
""" |
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column))) |
<SYSTEM_TASK:>
Returns a new row for each element in the given array or map.
<END_TASK>
<USER_TASK:>
Description:
def explode(col):
"""
Returns a new row for each element in the given array or map.
Uses the default column name `col` for elements in the array and
`key` and `value` for elements in the map unless specified otherwise.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc) |
<SYSTEM_TASK:>
Extracts json object from a json string based on json path specified, and returns json string
<END_TASK>
<USER_TASK:>
Description:
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc) |
<SYSTEM_TASK:>
Creates a new row for a json column according to the given field names.
<END_TASK>
<USER_TASK:>
Description:
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
""" |
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc) |
<SYSTEM_TASK:>
Parses a JSON string and infers its schema in DDL format.
<END_TASK>
<USER_TASK:>
Description:
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
""" |
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, options)
return Column(jc) |
<SYSTEM_TASK:>
Parses a CSV string and infers its schema in DDL format.
<END_TASK>
<USER_TASK:>
Description:
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
""" |
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, options)
return Column(jc) |
<SYSTEM_TASK:>
Returns the union of all the given maps.
<END_TASK>
<USER_TASK:>
Description:
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
""" |
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc) |
<SYSTEM_TASK:>
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
<END_TASK>
<USER_TASK:>
Description:
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
If `step` is not set, the sequence increments by 1 when `start` is less than or equal to `stop`,
otherwise by -1.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
""" |
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step))) |
<SYSTEM_TASK:>
Parses a column containing a CSV string to a row with the specified schema.
<END_TASK>
<USER_TASK:>
Description:
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
""" |
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc) |
<SYSTEM_TASK:>
Specifies the input data source format.
<END_TASK>
<USER_TASK:>
Description:
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
""" |
self._jreader = self._jreader.format(source)
return self |
<SYSTEM_TASK:>
Specifies the input schema.
<END_TASK>
<USER_TASK:>
Description:
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
""" |
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self |
<SYSTEM_TASK:>
Adds an input option for the underlying data source.
<END_TASK>
<USER_TASK:>
Description:
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
""" |
self._jreader = self._jreader.option(key, to_str(value))
return self |
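A hedged usage sketch chaining reader options (the path is illustrative):
df = (spark.read.format("csv")
      .option("header", "true")       # treat the first line as column names
      .option("timeZone", "UTC")      # time zone used to parse timestamps
      .load("python/test_support/sql/ages.csv"))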
<SYSTEM_TASK:>
Adds input options for the underlying data source.
<END_TASK>
<USER_TASK:>
Description:
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
""" |
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self |
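The reader methods above each return the reader itself, so they compose into a single chain; a sketch reusing the test JSON path from the earlier doctest.

people = (spark.read
          .format("json")
          .schema("age BIGINT, name STRING")
          .options(timeZone="UTC")
          .load("python/test_support/sql/people.json"))
people.dtypes
# [('age', 'bigint'), ('name', 'string')]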
<SYSTEM_TASK:>
Specifies the behavior when data or table already exists.
<END_TASK>
<USER_TASK:>
Description:
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
""" |
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self |
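A short sketch contrasting the save modes on a throwaway parquet path; it assumes a DataFrame `df` as in the doctest.

import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), "data")
df.write.mode("overwrite").parquet(path)  # replaces anything already at `path`
df.write.mode("append").parquet(path)     # adds new files alongside the existing ones
df.write.mode("ignore").parquet(path)     # silently does nothing, since data now exists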
<SYSTEM_TASK:>
Specifies the underlying output data source.
<END_TASK>
<USER_TASK:>
Description:
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
""" |
self._jwrite = self._jwrite.format(source)
return self |
<SYSTEM_TASK:>
Adds an output option for the underlying data source.
<END_TASK>
<USER_TASK:>
Description:
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
""" |
self._jwrite = self._jwrite.option(key, to_str(value))
return self |
<SYSTEM_TASK:>
Adds output options for the underlying data source.
<END_TASK>
<USER_TASK:>
Description:
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
""" |
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self |
<SYSTEM_TASK:>
Partitions the output by the given columns on the file system.
<END_TASK>
<USER_TASK:>
Description:
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
""" |
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self |
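A sketch of the resulting layout, assuming `df` actually has `year` and `month` columns: each distinct (year, month) pair becomes a Hive-style subdirectory.

import os
import tempfile

out = os.path.join(tempfile.mkdtemp(), "data")
df.write.partitionBy("year", "month").parquet(out)
# Files land under paths such as:
#   <out>/year=2015/month=7/part-....parquet
#   <out>/year=2015/month=8/part-....parquet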
<SYSTEM_TASK:>
Sorts the output in each bucket by the given columns on the file system.
<END_TASK>
<USER_TASK:>
Description:
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
""" |
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self |
<SYSTEM_TASK:>
Saves the content of the DataFrame in a text file at the specified path.
<END_TASK>
<USER_TASK:>
Description:
def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
The text files will be encoded as UTF-8.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
""" |
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path) |
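A minimal sketch of the single-string-column requirement, writing gzip-compressed text; it assumes an active SparkSession named `spark`.

import os
import tempfile

lines = spark.createDataFrame([("hello",), ("world",)], ["value"])
lines.write.text(os.path.join(tempfile.mkdtemp(), "text-out"), compression="gzip")
# Each row becomes one line in a compressed part file.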
<SYSTEM_TASK:>
Prompt the user to choose who to assign the issue to in jira, given a list of candidates,
<END_TASK>
<USER_TASK:>
Description:
def choose_jira_assignee(issue, asf_jira):
"""
    Prompt the user to choose who to assign the issue to in JIRA, given a list of candidates,
    including the original reporter and all commenters.
""" |
while True:
try:
reporter = issue.fields.reporter
            commentors = list(map(lambda x: x.author, issue.fields.comment.comments))
candidates = set(commentors)
candidates.add(reporter)
candidates = list(candidates)
print("JIRA is unassigned, choose assignee")
for idx, author in enumerate(candidates):
if author.key == "apachespark":
continue
annotations = ["Reporter"] if author == reporter else []
if author in commentors:
annotations.append("Commentor")
print("[%d] %s (%s)" % (idx, author.displayName, ",".join(annotations)))
raw_assignee = input(
"Enter number of user, or userid, to assign to (blank to leave unassigned):")
if raw_assignee == "":
return None
else:
try:
id = int(raw_assignee)
assignee = candidates[id]
except:
# assume it's a user id, and try to assign (might fail, we just prompt again)
assignee = asf_jira.user(raw_assignee)
asf_jira.assign_issue(issue.key, assignee.key)
return assignee
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
print("Error assigning JIRA, try again (or leave blank and fix manually)") |
<SYSTEM_TASK:>
Converts a LabeledPoint to a string in LIBSVM format.
<END_TASK>
<USER_TASK:>
Description:
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format.""" |
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items) |
<SYSTEM_TASK:>
Load labeled points saved using RDD.saveAsTextFile.
<END_TASK>
<USER_TASK:>
Description:
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
        :return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
""" |
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions) |
<SYSTEM_TASK:>
Train an isotonic regression model on the given data.
<END_TASK>
<USER_TASK:>
Description:
def train(cls, data, isotonic=True):
"""
Train an isotonic regression model on the given data.
:param data:
RDD of (label, feature, weight) tuples.
:param isotonic:
Whether this is isotonic (which is default) or antitonic.
(default: True)
""" |
boundaries, predictions = callMLlibFunc("trainIsotonicRegressionModel",
data.map(_convert_to_vector), bool(isotonic))
return IsotonicRegressionModel(boundaries.toArray(), predictions.toArray(), isotonic) |
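A minimal sketch of calling the trainer; it assumes an active SparkContext `sc` and that each tuple is (label, feature, weight).

from pyspark.mllib.regression import IsotonicRegression

data = sc.parallelize([(1.0, 1.0, 1.0), (2.0, 2.0, 1.0), (1.8, 3.0, 1.0)])
model = IsotonicRegression.train(data, isotonic=True)
model.predict(2.5)  # linearly interpolates between the fitted boundaries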
<SYSTEM_TASK:>
Compute similarities between columns of this matrix.
<END_TASK>
<USER_TASK:>
Description:
def columnSimilarities(self, threshold=0.0):
"""
Compute similarities between columns of this matrix.
The threshold parameter is a trade-off knob between estimate
quality and computational cost.
The default threshold setting of 0 guarantees deterministically
correct results, but uses the brute-force approach of computing
normalized dot products.
Setting the threshold to positive values uses a sampling
approach and incurs strictly less computational cost than the
brute-force approach. However the similarities computed will
be estimates.
The sampling guarantees relative-error correctness for those
pairs of columns that have similarity greater than the given
similarity threshold.
To describe the guarantee, we set some notation:
* Let A be the smallest in magnitude non-zero element of
this matrix.
* Let B be the largest in magnitude non-zero element of
this matrix.
* Let L be the maximum number of non-zeros per row.
For example, for {0,1} matrices: A=B=1.
        Another example, for the Netflix matrix: A=1, B=5.
        For those column pairs that are above the threshold, the
        computed similarity is correct to within 20% relative error
        with probability at least 1 - (0.981)^(10/B).
The shuffle size is bounded by the *smaller* of the following
two expressions:
* O(n log(n) L / (threshold * A))
        * O(m L^2)
The latter is the cost of the brute-force approach, so for
non-zero thresholds, the cost is always cheaper than the
brute-force approach.
        :param threshold: Set to 0 for deterministic guaranteed
correctness. Similarities above this
threshold are estimated with the cost vs
estimate quality trade-off described above.
:return: An n x n sparse upper-triangular CoordinateMatrix of
cosine similarities between columns of this matrix.
>>> rows = sc.parallelize([[1, 2], [1, 5]])
>>> mat = RowMatrix(rows)
>>> sims = mat.columnSimilarities()
>>> sims.entries.first().value
0.91914503...
""" |
java_sims_mat = self._java_matrix_wrapper.call("columnSimilarities", float(threshold))
return CoordinateMatrix(java_sims_mat) |
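A sketch of the sampling path: with a positive threshold a sampling-based estimate is returned instead of the exact result, and entries whose true similarity falls below the threshold may be missing or inexact. It assumes an active SparkContext `sc`.

from pyspark.mllib.linalg.distributed import RowMatrix

mat = RowMatrix(sc.parallelize([[1, 2], [1, 5]]))
approx = mat.columnSimilarities(threshold=0.5)
approx.entries.collect()  # entries above the threshold, with relative-error guarantees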
<SYSTEM_TASK:>
Compute the QR decomposition of this RowMatrix.
<END_TASK>
<USER_TASK:>
Description:
def tallSkinnyQR(self, computeQ=False):
"""
Compute the QR decomposition of this RowMatrix.
The implementation is designed to optimize the QR decomposition
(factorization) for the RowMatrix of a tall and skinny shape.
Reference:
Paul G. Constantine, David F. Gleich. "Tall and skinny QR
factorizations in MapReduce architectures"
        (https://doi.org/10.1145/1996092.1996103)
        :param computeQ: whether to compute Q
:return: QRDecomposition(Q: RowMatrix, R: Matrix), where
Q = None if computeQ = false.
>>> rows = sc.parallelize([[3, -6], [4, -8], [0, 1]])
>>> mat = RowMatrix(rows)
>>> decomp = mat.tallSkinnyQR(True)
>>> Q = decomp.Q
>>> R = decomp.R
>>> # Test with absolute values
>>> absQRows = Q.rows.map(lambda row: abs(row.toArray()).tolist())
>>> absQRows.collect()
[[0.6..., 0.0], [0.8..., 0.0], [0.0, 1.0]]
>>> # Test with absolute values
>>> abs(R.toArray()).tolist()
[[5.0, 10.0], [0.0, 1.0]]
""" |
decomp = JavaModelWrapper(self._java_matrix_wrapper.call("tallSkinnyQR", computeQ))
if computeQ:
java_Q = decomp.call("Q")
Q = RowMatrix(java_Q)
else:
Q = None
R = decomp.call("R")
return QRDecomposition(Q, R) |
<SYSTEM_TASK:>
Computes the singular value decomposition of the RowMatrix.
<END_TASK>
<USER_TASK:>
Description:
def computeSVD(self, k, computeU=False, rCond=1e-9):
"""
Computes the singular value decomposition of the RowMatrix.
The given row matrix A of dimension (m X n) is decomposed into
        U * s * V^T, where
* U: (m X k) (left singular vectors) is a RowMatrix whose
columns are the eigenvectors of (A X A')
* s: DenseVector consisting of square root of the eigenvalues
(singular values) in descending order.
        * V: (n X k) (right singular vectors) is a Matrix whose columns
are the eigenvectors of (A' X A)
For more specific details on implementation, please refer
the Scala documentation.
:param k: Number of leading singular values to keep (`0 < k <= n`).
It might return less than k if there are numerically zero singular values
or there are not enough Ritz values converged before the maximum number of
Arnoldi update iterations is reached (in case that matrix A is ill-conditioned).
:param computeU: Whether or not to compute U. If set to be
True, then U is computed by A * V * s^-1
:param rCond: Reciprocal condition number. All singular values
smaller than rCond * s[0] are treated as zero
where s[0] is the largest singular value.
:returns: :py:class:`SingularValueDecomposition`
>>> rows = sc.parallelize([[3, 1, 1], [-1, 3, 1]])
>>> rm = RowMatrix(rows)
>>> svd_model = rm.computeSVD(2, True)
>>> svd_model.U.rows.collect()
[DenseVector([-0.7071, 0.7071]), DenseVector([-0.7071, -0.7071])]
>>> svd_model.s
DenseVector([3.4641, 3.1623])
>>> svd_model.V
DenseMatrix(3, 2, [-0.4082, -0.8165, -0.4082, 0.8944, -0.4472, 0.0], 0)
""" |
j_model = self._java_matrix_wrapper.call(
"computeSVD", int(k), bool(computeU), float(rCond))
return SingularValueDecomposition(j_model) |
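A small local sanity check of the factorization, rebuilding A from the returned factors with numpy; it assumes an active SparkContext `sc`.

import numpy as np
from pyspark.mllib.linalg.distributed import RowMatrix

rm = RowMatrix(sc.parallelize([[3, 1, 1], [-1, 3, 1]]))
svd = rm.computeSVD(2, computeU=True)
U = np.array([row.toArray() for row in svd.U.rows.collect()])
approx = U.dot(np.diag(svd.s.toArray())).dot(svd.V.toArray().T)
# approx is numerically close to [[3, 1, 1], [-1, 3, 1]]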
<SYSTEM_TASK:>
Returns a distributed matrix whose columns are the left
<END_TASK>
<USER_TASK:>
Description:
def U(self):
"""
Returns a distributed matrix whose columns are the left
singular vectors of the SingularValueDecomposition if computeU was set to be True.
""" |
u = self.call("U")
if u is not None:
mat_name = u.getClass().getSimpleName()
if mat_name == "RowMatrix":
return RowMatrix(u)
elif mat_name == "IndexedRowMatrix":
return IndexedRowMatrix(u)
else:
raise TypeError("Expected RowMatrix/IndexedRowMatrix got %s" % mat_name) |
<SYSTEM_TASK:>
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
<END_TASK>
<USER_TASK:>
Description:
def rows(self):
"""
Rows of the IndexedRowMatrix stored as an RDD of IndexedRows.
>>> mat = IndexedRowMatrix(sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(1, [4, 5, 6])]))
>>> rows = mat.rows
>>> rows.first()
IndexedRow(0, [1.0,2.0,3.0])
""" |
# We use DataFrames for serialization of IndexedRows from
# Java, so we first convert the RDD of rows to a DataFrame
# on the Scala/Java side. Then we map each Row in the
# DataFrame back to an IndexedRow on this side.
rows_df = callMLlibFunc("getIndexedRows", self._java_matrix_wrapper._java_model)
rows = rows_df.rdd.map(lambda row: IndexedRow(row[0], row[1]))
return rows |
<SYSTEM_TASK:>
Convert this matrix to a BlockMatrix.
<END_TASK>
<USER_TASK:>
Description:
def toBlockMatrix(self, rowsPerBlock=1024, colsPerBlock=1024):
"""
Convert this matrix to a BlockMatrix.
:param rowsPerBlock: Number of rows that make up each block.
The blocks forming the final rows are not
required to have the given number of rows.
:param colsPerBlock: Number of columns that make up each block.
The blocks forming the final columns are not
required to have the given number of columns.
>>> rows = sc.parallelize([IndexedRow(0, [1, 2, 3]),
... IndexedRow(6, [4, 5, 6])])
>>> mat = IndexedRowMatrix(rows).toBlockMatrix()
>>> # This IndexedRowMatrix will have 7 effective rows, due to
>>> # the highest row index being 6, and the ensuing
>>> # BlockMatrix will have 7 rows as well.
>>> print(mat.numRows())
7
>>> print(mat.numCols())
3
""" |
java_block_matrix = self._java_matrix_wrapper.call("toBlockMatrix",
rowsPerBlock,
colsPerBlock)
return BlockMatrix(java_block_matrix, rowsPerBlock, colsPerBlock) |
<SYSTEM_TASK:>
Persists the underlying RDD with the specified storage level.
<END_TASK>
<USER_TASK:>
Description:
def persist(self, storageLevel):
"""
Persists the underlying RDD with the specified storage level.
""" |
if not isinstance(storageLevel, StorageLevel):
raise TypeError("`storageLevel` should be a StorageLevel, got %s" % type(storageLevel))
javaStorageLevel = self._java_matrix_wrapper._sc._getJavaStorageLevel(storageLevel)
self._java_matrix_wrapper.call("persist", javaStorageLevel)
return self |
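A one-line sketch, assuming `mat` is an existing distributed matrix such as a BlockMatrix.

from pyspark import StorageLevel

mat.persist(StorageLevel.MEMORY_AND_DISK)  # keep the underlying blocks cached across repeated uses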
<SYSTEM_TASK:>
Adds two block matrices together. The matrices must have the
<END_TASK>
<USER_TASK:>
Description:
def add(self, other):
"""
Adds two block matrices together. The matrices must have the
same size and matching `rowsPerBlock` and `colsPerBlock` values.
If one of the sub matrix blocks that are being added is a
SparseMatrix, the resulting sub matrix block will also be a
SparseMatrix, even if it is being added to a DenseMatrix. If
two dense sub matrix blocks are added, the output block will
also be a DenseMatrix.
>>> dm1 = Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])
>>> dm2 = Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12])
>>> sm = Matrices.sparse(3, 2, [0, 1, 3], [0, 1, 2], [7, 11, 12])
>>> blocks1 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks2 = sc.parallelize([((0, 0), dm1), ((1, 0), dm2)])
>>> blocks3 = sc.parallelize([((0, 0), sm), ((1, 0), dm2)])
>>> mat1 = BlockMatrix(blocks1, 3, 2)
>>> mat2 = BlockMatrix(blocks2, 3, 2)
>>> mat3 = BlockMatrix(blocks3, 3, 2)
>>> mat1.add(mat2).toLocalMatrix()
DenseMatrix(6, 2, [2.0, 4.0, 6.0, 14.0, 16.0, 18.0, 8.0, 10.0, 12.0, 20.0, 22.0, 24.0], 0)
>>> mat1.add(mat3).toLocalMatrix()
DenseMatrix(6, 2, [8.0, 2.0, 3.0, 14.0, 16.0, 18.0, 4.0, 16.0, 18.0, 20.0, 22.0, 24.0], 0)
""" |
if not isinstance(other, BlockMatrix):
raise TypeError("Other should be a BlockMatrix, got %s" % type(other))
other_java_block_matrix = other._java_matrix_wrapper._java_model
java_block_matrix = self._java_matrix_wrapper.call("add", other_java_block_matrix)
return BlockMatrix(java_block_matrix, self.rowsPerBlock, self.colsPerBlock) |
<SYSTEM_TASK:>
Transpose this BlockMatrix. Returns a new BlockMatrix
<END_TASK>
<USER_TASK:>
Description:
def transpose(self):
"""
Transpose this BlockMatrix. Returns a new BlockMatrix
instance sharing the same underlying data. Is a lazy operation.
>>> blocks = sc.parallelize([((0, 0), Matrices.dense(3, 2, [1, 2, 3, 4, 5, 6])),
... ((1, 0), Matrices.dense(3, 2, [7, 8, 9, 10, 11, 12]))])
>>> mat = BlockMatrix(blocks, 3, 2)
>>> mat_transposed = mat.transpose()
>>> mat_transposed.toLocalMatrix()
DenseMatrix(2, 6, [1.0, 4.0, 2.0, 5.0, 3.0, 6.0, 7.0, 10.0, 8.0, 11.0, 9.0, 12.0], 0)
""" |
java_transposed_matrix = self._java_matrix_wrapper.call("transpose")
return BlockMatrix(java_transposed_matrix, self.colsPerBlock, self.rowsPerBlock) |
<SYSTEM_TASK:>
Returns the size of the vector.
<END_TASK>
<USER_TASK:>
Description:
def _vector_size(v):
"""
Returns the size of the vector.
>>> _vector_size([1., 2., 3.])
3
>>> _vector_size((1., 2., 3.))
3
>>> _vector_size(array.array('d', [1., 2., 3.]))
3
>>> _vector_size(np.zeros(3))
3
>>> _vector_size(np.zeros((3, 1)))
3
>>> _vector_size(np.zeros((1, 3)))
Traceback (most recent call last):
...
ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
""" |
if isinstance(v, Vector):
return len(v)
elif type(v) in (array.array, list, tuple, xrange):
return len(v)
elif type(v) == np.ndarray:
if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
return len(v)
else:
raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
elif _have_scipy and scipy.sparse.issparse(v):
assert v.shape[1] == 1, "Expected column vector"
return v.shape[0]
else:
raise TypeError("Cannot treat type %s as a vector" % type(v)) |
<SYSTEM_TASK:>
Parse string representation back into the DenseVector.
<END_TASK>
<USER_TASK:>
Description:
def parse(s):
"""
Parse string representation back into the DenseVector.
>>> DenseVector.parse(' [ 0.0,1.0,2.0, 3.0]')
DenseVector([0.0, 1.0, 2.0, 3.0])
""" |
start = s.find('[')
if start == -1:
raise ValueError("Array should start with '['.")
end = s.find(']')
if end == -1:
raise ValueError("Array should end with ']'.")
s = s[start + 1: end]
try:
values = [float(val) for val in s.split(',') if val]
except ValueError:
raise ValueError("Unable to parse values from %s" % s)
return DenseVector(values) |
<SYSTEM_TASK:>
Squared distance of two Vectors.
<END_TASK>
<USER_TASK:>
Description:
def squared_distance(self, other):
"""
Squared distance of two Vectors.
>>> dense1 = DenseVector(array.array('d', [1., 2.]))
>>> dense1.squared_distance(dense1)
0.0
>>> dense2 = np.array([2., 1.])
>>> dense1.squared_distance(dense2)
2.0
>>> dense3 = [2., 1.]
>>> dense1.squared_distance(dense3)
2.0
>>> sparse1 = SparseVector(2, [0, 1], [2., 1.])
>>> dense1.squared_distance(sparse1)
2.0
>>> dense1.squared_distance([1.,])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> dense1.squared_distance(SparseVector(1, [0,], [1.,]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
""" |
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, SparseVector):
return other.squared_distance(self)
elif _have_scipy and scipy.sparse.issparse(other):
return _convert_to_vector(other).squared_distance(self)
if isinstance(other, Vector):
other = other.toArray()
elif not isinstance(other, np.ndarray):
other = np.array(other)
diff = self.toArray() - other
return np.dot(diff, diff) |
<SYSTEM_TASK:>
Parse string representation back into the SparseVector.
<END_TASK>
<USER_TASK:>
Description:
def parse(s):
"""
Parse string representation back into the SparseVector.
>>> SparseVector.parse(' (4, [0,1 ],[ 4.0,5.0] )')
SparseVector(4, {0: 4.0, 1: 5.0})
""" |
start = s.find('(')
if start == -1:
raise ValueError("Tuple should start with '('")
end = s.find(')')
if end == -1:
raise ValueError("Tuple should end with ')'")
s = s[start + 1: end].strip()
size = s[: s.find(',')]
try:
size = int(size)
except ValueError:
raise ValueError("Cannot parse size %s." % size)
ind_start = s.find('[')
if ind_start == -1:
raise ValueError("Indices array should start with '['.")
ind_end = s.find(']')
if ind_end == -1:
raise ValueError("Indices array should end with ']'")
new_s = s[ind_start + 1: ind_end]
ind_list = new_s.split(',')
try:
indices = [int(ind) for ind in ind_list if ind]
except ValueError:
raise ValueError("Unable to parse indices from %s." % new_s)
s = s[ind_end + 1:].strip()
val_start = s.find('[')
if val_start == -1:
raise ValueError("Values array should start with '['.")
val_end = s.find(']')
if val_end == -1:
raise ValueError("Values array should end with ']'.")
val_list = s[val_start + 1: val_end].split(',')
try:
values = [float(val) for val in val_list if val]
except ValueError:
raise ValueError("Unable to parse values from %s." % s)
return SparseVector(size, indices, values) |
<SYSTEM_TASK:>
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
<END_TASK>
<USER_TASK:>
Description:
def dot(self, other):
"""
Dot product with a SparseVector or 1- or 2-dimensional Numpy array.
>>> a = SparseVector(4, [1, 3], [3.0, 4.0])
>>> a.dot(a)
25.0
>>> a.dot(array.array('d', [1., 2., 3., 4.]))
22.0
>>> b = SparseVector(4, [2], [1.0])
>>> a.dot(b)
0.0
>>> a.dot(np.array([[1, 1], [2, 2], [3, 3], [4, 4]]))
array([ 22., 22.])
>>> a.dot([1., 2., 3.])
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.array([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(DenseVector([1., 2.]))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
>>> a.dot(np.zeros((3, 2)))
Traceback (most recent call last):
...
AssertionError: dimension mismatch
""" |
if isinstance(other, np.ndarray):
if other.ndim not in [2, 1]:
raise ValueError("Cannot call dot with %d-dimensional array" % other.ndim)
assert len(self) == other.shape[0], "dimension mismatch"
return np.dot(self.values, other[self.indices])
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, DenseVector):
return np.dot(other.array[self.indices], self.values)
elif isinstance(other, SparseVector):
# Find out common indices.
self_cmind = np.in1d(self.indices, other.indices, assume_unique=True)
self_values = self.values[self_cmind]
if self_values.size == 0:
return 0.0
else:
other_cmind = np.in1d(other.indices, self.indices, assume_unique=True)
return np.dot(self_values, other.values[other_cmind])
else:
return self.dot(_convert_to_vector(other)) |