Columns: text_prompt (string, lengths 100 to 17.7k characters), code_prompt (string, lengths 7 to 9.86k characters)
<SYSTEM_TASK:> Squared distance from a SparseVector or 1-dimensional NumPy array. <END_TASK> <USER_TASK:> Description: def squared_distance(self, other): """ Squared distance from a SparseVector or 1-dimensional NumPy array. >>> a = SparseVector(4, [1, 3], [3.0, 4.0]) >>> a.squared_distance(a) 0.0 >>> a.squared_distance(array.array('d', [1., 2., 3., 4.])) 11.0 >>> a.squared_distance(np.array([1., 2., 3., 4.])) 11.0 >>> b = SparseVector(4, [2], [1.0]) >>> a.squared_distance(b) 26.0 >>> b.squared_distance(a) 26.0 >>> b.squared_distance([1., 2.]) Traceback (most recent call last): ... AssertionError: dimension mismatch >>> b.squared_distance(SparseVector(3, [1,], [1.0,])) Traceback (most recent call last): ... AssertionError: dimension mismatch """
assert len(self) == _vector_size(other), "dimension mismatch"
if isinstance(other, np.ndarray) or isinstance(other, DenseVector):
    if isinstance(other, np.ndarray) and other.ndim != 1:
        raise Exception("Cannot call squared_distance with %d-dimensional array" % other.ndim)
    if isinstance(other, DenseVector):
        other = other.array
    sparse_ind = np.zeros(other.size, dtype=bool)
    sparse_ind[self.indices] = True
    dist = other[sparse_ind] - self.values
    result = np.dot(dist, dist)
    other_ind = other[~sparse_ind]
    result += np.dot(other_ind, other_ind)
    return result
elif isinstance(other, SparseVector):
    result = 0.0
    i, j = 0, 0
    while i < len(self.indices) and j < len(other.indices):
        if self.indices[i] == other.indices[j]:
            diff = self.values[i] - other.values[j]
            result += diff * diff
            i += 1
            j += 1
        elif self.indices[i] < other.indices[j]:
            result += self.values[i] * self.values[i]
            i += 1
        else:
            result += other.values[j] * other.values[j]
            j += 1
    while i < len(self.indices):
        result += self.values[i] * self.values[i]
        i += 1
    while j < len(other.indices):
        result += other.values[j] * other.values[j]
        j += 1
    return result
else:
    return self.squared_distance(_convert_to_vector(other))
<SYSTEM_TASK:> Returns a copy of this SparseVector as a 1-dimensional NumPy array. <END_TASK> <USER_TASK:> Description: def toArray(self): """ Returns a copy of this SparseVector as a 1-dimensional NumPy array. """
arr = np.zeros((self.size,), dtype=np.float64)
arr[self.indices] = self.values
return arr
<SYSTEM_TASK:> Convert this vector to the new mllib-local representation. <END_TASK> <USER_TASK:> Description: def asML(self): """ Convert this vector to the new mllib-local representation. This does NOT copy the data; it copies references. :return: :py:class:`pyspark.ml.linalg.SparseVector` .. versionadded:: 2.0.0 """
return newlinalg.SparseVector(self.size, self.indices, self.values)
<SYSTEM_TASK:> Create a dense vector of 64-bit floats from a Python list or numbers. <END_TASK> <USER_TASK:> Description: def dense(*elements): """ Create a dense vector of 64-bit floats from a Python list or numbers. >>> Vectors.dense([1, 2, 3]) DenseVector([1.0, 2.0, 3.0]) >>> Vectors.dense(1.0, 2.0) DenseVector([1.0, 2.0]) """
if len(elements) == 1 and not isinstance(elements[0], (float, int, long)):
    # it's a list, numpy.array or other iterable object.
    elements = elements[0]
return DenseVector(elements)
<SYSTEM_TASK:> Convert a vector from the new mllib-local representation. <END_TASK> <USER_TASK:> Description: def fromML(vec): """ Convert a vector from the new mllib-local representation. This does NOT copy the data; it copies references. :param vec: a :py:class:`pyspark.ml.linalg.Vector` :return: a :py:class:`pyspark.mllib.linalg.Vector` .. versionadded:: 2.0.0 """
if isinstance(vec, newlinalg.DenseVector):
    return DenseVector(vec.array)
elif isinstance(vec, newlinalg.SparseVector):
    return SparseVector(vec.size, vec.indices, vec.values)
else:
    raise TypeError("Unsupported vector type %s" % type(vec))
<SYSTEM_TASK:> Squared distance between two vectors. <END_TASK> <USER_TASK:> Description: def squared_distance(v1, v2): """ Squared distance between two vectors. a and b can be of type SparseVector, DenseVector, np.ndarray or array.array. >>> a = Vectors.sparse(4, [(0, 1), (3, 4)]) >>> b = Vectors.dense([2, 5, 4, 1]) >>> a.squared_distance(b) 51.0 """
v1, v2 = _convert_to_vector(v1), _convert_to_vector(v2)
return v1.squared_distance(v2)
<SYSTEM_TASK:> Parse a string representation back into the Vector. <END_TASK> <USER_TASK:> Description: def parse(s): """Parse a string representation back into the Vector. >>> Vectors.parse('[2,1,2 ]') DenseVector([2.0, 1.0, 2.0]) >>> Vectors.parse(' ( 100, [0], [2])') SparseVector(100, {0: 2.0}) """
if s.find('(') == -1 and s.find('[') != -1:
    return DenseVector.parse(s)
elif s.find('(') != -1:
    return SparseVector.parse(s)
else:
    raise ValueError(
        "Cannot find tokens '[' or '(' from the input string.")
<SYSTEM_TASK:> Convert Matrix attributes which are array-like or buffer to array. <END_TASK> <USER_TASK:> Description: def _convert_to_array(array_like, dtype): """ Convert Matrix attributes which are array-like or buffer to array. """
if isinstance(array_like, bytes):
    return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype)
<SYSTEM_TASK:> Convert a matrix from the new mllib-local representation. <END_TASK> <USER_TASK:> Description: def fromML(mat): """ Convert a matrix from the new mllib-local representation. This does NOT copy the data; it copies references. :param mat: a :py:class:`pyspark.ml.linalg.Matrix` :return: a :py:class:`pyspark.mllib.linalg.Matrix` .. versionadded:: 2.0.0 """
if isinstance(mat, newlinalg.DenseMatrix):
    return DenseMatrix(mat.numRows, mat.numCols, mat.values, mat.isTransposed)
elif isinstance(mat, newlinalg.SparseMatrix):
    return SparseMatrix(mat.numRows, mat.numCols, mat.colPtrs, mat.rowIndices,
                        mat.values, mat.isTransposed)
else:
    raise TypeError("Unsupported matrix type %s" % type(mat))
<SYSTEM_TASK:> Construct the model directly from an array of label strings, <END_TASK> <USER_TASK:> Description: def from_labels(cls, labels, inputCol, outputCol=None, handleInvalid=None): """ Construct the model directly from an array of label strings, requires an active SparkContext. """
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(labels, java_class)
model = StringIndexerModel._create_from_java_class(
    "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCol(inputCol)
if outputCol is not None:
    model.setOutputCol(outputCol)
if handleInvalid is not None:
    model.setHandleInvalid(handleInvalid)
return model
<SYSTEM_TASK:> Construct the model directly from an array of array of label strings, <END_TASK> <USER_TASK:> Description: def from_arrays_of_labels(cls, arrayOfLabels, inputCols, outputCols=None, handleInvalid=None): """ Construct the model directly from an array of array of label strings, requires an active SparkContext. """
sc = SparkContext._active_spark_context
java_class = sc._gateway.jvm.java.lang.String
jlabels = StringIndexerModel._new_java_array(arrayOfLabels, java_class)
model = StringIndexerModel._create_from_java_class(
    "org.apache.spark.ml.feature.StringIndexerModel", jlabels)
model.setInputCols(inputCols)
if outputCols is not None:
    model.setOutputCols(outputCols)
if handleInvalid is not None:
    model.setHandleInvalid(handleInvalid)
return model
<SYSTEM_TASK:> Hook an exception handler into Py4j, which could capture some SQL exceptions in Java. <END_TASK> <USER_TASK:> Description: def install_exception_handler(): """ Hook an exception handler into Py4j, which can capture some SQL exceptions in Java. When calling the Java API, Py4J uses `get_return_value` to parse the returned object. If an exception is thrown in the JVM, the result is a Java exception object and py4j.protocol.Py4JJavaError is raised. We replace the original `get_return_value` with one that captures the Java exception and raises a Python one (with the same error message). It's idempotent and can be called multiple times. """
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
<SYSTEM_TASK:> Raise ImportError if minimum version of Pandas is not installed <END_TASK> <USER_TASK:> Description: def require_minimum_pandas_version(): """ Raise ImportError if minimum version of Pandas is not installed """
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pandas_version = "0.19.2"

from distutils.version import LooseVersion
try:
    import pandas
    have_pandas = True
except ImportError:
    have_pandas = False
if not have_pandas:
    raise ImportError("Pandas >= %s must be installed; however, "
                      "it was not found." % minimum_pandas_version)
if LooseVersion(pandas.__version__) < LooseVersion(minimum_pandas_version):
    raise ImportError("Pandas >= %s must be installed; however, "
                      "your version was %s." % (minimum_pandas_version, pandas.__version__))
<SYSTEM_TASK:> Raise ImportError if minimum version of pyarrow is not installed <END_TASK> <USER_TASK:> Description: def require_minimum_pyarrow_version(): """ Raise ImportError if minimum version of pyarrow is not installed """
# TODO(HyukjinKwon): Relocate and deduplicate the version specification.
minimum_pyarrow_version = "0.12.1"

from distutils.version import LooseVersion
try:
    import pyarrow
    have_arrow = True
except ImportError:
    have_arrow = False
if not have_arrow:
    raise ImportError("PyArrow >= %s must be installed; however, "
                      "it was not found." % minimum_pyarrow_version)
if LooseVersion(pyarrow.__version__) < LooseVersion(minimum_pyarrow_version):
    raise ImportError("PyArrow >= %s must be installed; however, "
                      "your version was %s." % (minimum_pyarrow_version, pyarrow.__version__))
<SYSTEM_TASK:> Performs the authentication protocol defined by the SocketAuthHelper class on the given <END_TASK> <USER_TASK:> Description: def _do_server_auth(conn, auth_secret): """ Performs the authentication protocol defined by the SocketAuthHelper class on the given file-like object 'conn'. """
write_with_length(auth_secret.encode("utf-8"), conn)
conn.flush()
reply = UTF8Deserializer().loads(conn)
if reply != "ok":
    conn.close()
    raise Exception("Unexpected reply from iterator server.")
<SYSTEM_TASK:> Start callback server if not already started. The callback server is needed if the Java <END_TASK> <USER_TASK:> Description: def ensure_callback_server_started(gw): """ Start callback server if not already started. The callback server is needed if the Java driver process needs to callback into the Python driver process to execute Python code. """
# getattr will fallback to JVM, so we cannot test by hasattr()
if "_callback_server" not in gw.__dict__ or gw._callback_server is None:
    gw.callback_server_parameters.eager_load = True
    gw.callback_server_parameters.daemonize = True
    gw.callback_server_parameters.daemonize_connections = True
    gw.callback_server_parameters.port = 0
    gw.start_callback_server(gw.callback_server_parameters)
    cbport = gw._callback_server.server_socket.getsockname()[1]
    gw._callback_server.port = cbport
    # gateway with real port
    gw._python_proxy_port = gw._callback_server.port
    # get the GatewayServer object in JVM by ID
    jgws = JavaObject("GATEWAY_SERVER", gw._gateway_client)
    # update the port of CallbackClient with real port
    jgws.resetCallbackClient(jgws.getCallbackClient().getAddress(), gw._python_proxy_port)
<SYSTEM_TASK:> Calculates URL contributions to the rank of other URLs. <END_TASK> <USER_TASK:> Description: def computeContribs(urls, rank): """Calculates URL contributions to the rank of other URLs."""
num_urls = len(urls)
for url in urls:
    yield (url, rank / num_urls)
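For context, a sketch (adapted, not verbatim from the source) of how this generator is typically driven in a PageRank iteration; `links` (url to neighbor list) and `ranks` (url to rank) are assumed to be pre-built pair RDDs:

contribs = links.join(ranks).flatMap(
    lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
# fold the contributions back into ranks with the usual damping factor
ranks = contribs.reduceByKey(lambda a, b: a + b).mapValues(lambda r: 0.15 + 0.85 * r)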
<SYSTEM_TASK:> Returns the image schema. <END_TASK> <USER_TASK:> Description: def imageSchema(self): """ Returns the image schema. :return: a :class:`StructType` with a single column of images named "image" (nullable) and having the same type returned by :meth:`columnSchema`. .. versionadded:: 2.3.0 """
if self._imageSchema is None:
    ctx = SparkContext._active_spark_context
    jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageSchema()
    self._imageSchema = _parse_datatype_json_string(jschema.json())
return self._imageSchema
<SYSTEM_TASK:> Returns the OpenCV type mapping supported. <END_TASK> <USER_TASK:> Description: def ocvTypes(self): """ Returns the OpenCV type mapping supported. :return: a dictionary containing the OpenCV type mapping supported. .. versionadded:: 2.3.0 """
if self._ocvTypes is None:
    ctx = SparkContext._active_spark_context
    self._ocvTypes = dict(ctx._jvm.org.apache.spark.ml.image.ImageSchema.javaOcvTypes())
return self._ocvTypes
<SYSTEM_TASK:> Returns the schema for the image column. <END_TASK> <USER_TASK:> Description: def columnSchema(self): """ Returns the schema for the image column. :return: a :class:`StructType` for image column, ``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``. .. versionadded:: 2.4.0 """
if self._columnSchema is None:
    ctx = SparkContext._active_spark_context
    jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
    self._columnSchema = _parse_datatype_json_string(jschema.json())
return self._columnSchema
<SYSTEM_TASK:> Returns field names of image columns. <END_TASK> <USER_TASK:> Description: def imageFields(self): """ Returns field names of image columns. :return: a list of field names. .. versionadded:: 2.3.0 """
if self._imageFields is None:
    ctx = SparkContext._active_spark_context
    self._imageFields = list(ctx._jvm.org.apache.spark.ml.image.ImageSchema.imageFields())
return self._imageFields
<SYSTEM_TASK:> Returns the name of undefined image type for the invalid image. <END_TASK> <USER_TASK:> Description: def undefinedImageType(self): """ Returns the name of undefined image type for the invalid image. .. versionadded:: 2.3.0 """
if self._undefinedImageType is None:
    ctx = SparkContext._active_spark_context
    self._undefinedImageType = \
        ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType
<SYSTEM_TASK:> Converts an image to an array with metadata. <END_TASK> <USER_TASK:> Description: def toNDArray(self, image): """ Converts an image to an array with metadata. :param `Row` image: A row that contains the image to be converted. It should have the attributes specified in `ImageSchema.imageSchema`. :return: a `numpy.ndarray` that is an image. .. versionadded:: 2.3.0 """
if not isinstance(image, Row):
    raise TypeError(
        "image argument should be pyspark.sql.types.Row; however, "
        "it got [%s]." % type(image))

if any(not hasattr(image, f) for f in self.imageFields):
    raise ValueError(
        "image argument should have attributes specified in "
        "ImageSchema.imageSchema [%s]." % ", ".join(self.imageFields))

height = image.height
width = image.width
nChannels = image.nChannels
return np.ndarray(
    shape=(height, width, nChannels),
    dtype=np.uint8,
    buffer=image.data,
    strides=(width * nChannels, nChannels, 1))
<SYSTEM_TASK:> Converts an array with metadata to a two-dimensional image. <END_TASK> <USER_TASK:> Description: def toImage(self, array, origin=""): """ Converts an array with metadata to a two-dimensional image. :param `numpy.ndarray` array: The array to convert to image. :param str origin: Path to the image, optional. :return: a :class:`Row` that is a two dimensional image. .. versionadded:: 2.3.0 """
if not isinstance(array, np.ndarray):
    raise TypeError(
        "array argument should be numpy.ndarray; however, it got [%s]." % type(array))

if array.ndim != 3:
    raise ValueError("Invalid array shape")

height, width, nChannels = array.shape
ocvTypes = ImageSchema.ocvTypes
if nChannels == 1:
    mode = ocvTypes["CV_8UC1"]
elif nChannels == 3:
    mode = ocvTypes["CV_8UC3"]
elif nChannels == 4:
    mode = ocvTypes["CV_8UC4"]
else:
    raise ValueError("Invalid number of channels")

# Running `bytearray(numpy.array([1]))` fails in specific Python versions
# with a specific NumPy version, for example in Python 3.6.0 with NumPy 1.13.3.
# Here, it avoids the issue by converting the array to bytes first.
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
    data = bytearray(array.astype(dtype=np.uint8).ravel().tobytes())
else:
    # NumPy prior to 1.9 doesn't have the `tobytes` method.
    data = bytearray(array.astype(dtype=np.uint8).ravel())

# Creating new Row with _create_row(), because Row(name = value, ...)
# orders fields by name, which conflicts with expected schema order
# when the new DataFrame is created by UDF
return _create_row(self.imageFields,
                   [origin, height, width, nChannels, mode, data])
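A small round-trip sketch (not from the source) tying toImage and toNDArray together; it assumes an active SparkContext, since the OpenCV mode mapping is fetched from the JVM:

import numpy as np
from pyspark.ml.image import ImageSchema

arr = np.zeros((4, 3, 3), dtype=np.uint8)        # height x width x channels
row = ImageSchema.toImage(arr, origin="memory")  # Row matching columnSchema
restored = ImageSchema.toNDArray(row)            # back to a (4, 3, 3) uint8 array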
<SYSTEM_TASK:> Reads the directory of images from the local or remote source. <END_TASK> <USER_TASK:> Description: def readImages(self, path, recursive=False, numPartitions=-1, dropImageFailures=False, sampleRatio=1.0, seed=0): """ Reads the directory of images from the local or remote source. .. note:: If multiple jobs are run in parallel with different sampleRatio or recursive flag, there may be a race condition where one job overwrites the hadoop configs of another. .. note:: If sample ratio is less than 1, sampling uses a PathFilter that is efficient but potentially non-deterministic. .. note:: Deprecated in 2.4.0. Use `spark.read.format("image").load(path)` instead and this `readImages` will be removed in 3.0.0. :param str path: Path to the image directory. :param bool recursive: Recursive search flag. :param int numPartitions: Number of DataFrame partitions. :param bool dropImageFailures: Drop the files that are not valid images. :param float sampleRatio: Fraction of the images loaded. :param int seed: Random number seed. :return: a :class:`DataFrame` with a single column of "images", see ImageSchema for details. >>> df = ImageSchema.readImages('data/mllib/images/origin/kittens', recursive=True) >>> df.count() 5 .. versionadded:: 2.3.0 """
warnings.warn("`ImageSchema.readImage` is deprecated. " + "Use `spark.read.format(\"image\").load(path)` instead.", DeprecationWarning) spark = SparkSession.builder.getOrCreate() image_schema = spark._jvm.org.apache.spark.ml.image.ImageSchema jsession = spark._jsparkSession jresult = image_schema.readImages(path, jsession, recursive, numPartitions, dropImageFailures, float(sampleRatio), seed) return DataFrame(jresult, spark._wrapped)
<SYSTEM_TASK:> Construct this object from given Java classname and arguments <END_TASK> <USER_TASK:> Description: def _create_from_java_class(cls, java_class, *args): """ Construct this object from given Java classname and arguments """
java_obj = JavaWrapper._new_java_obj(java_class, *args)
return cls(java_obj)
<SYSTEM_TASK:> Create a Java array of given java_class type. Useful for <END_TASK> <USER_TASK:> Description: def _new_java_array(pylist, java_class): """ Create a Java array of given java_class type. Useful for calling a method with a Scala Array from Python with Py4J. If the param pylist is a 2D array, then a 2D java array will be returned. The returned 2D java array is a square, non-jagged 2D array that is big enough for all elements. The empty slots in the inner Java arrays will be filled with null to make the non-jagged 2D array. :param pylist: Python list to convert to a Java Array. :param java_class: Java class to specify the type of Array. Should be in the form of sc._gateway.jvm.* (sc is a valid Spark Context). :return: Java Array of converted pylist. Example primitive Java classes: - basestring -> sc._gateway.jvm.java.lang.String - int -> sc._gateway.jvm.java.lang.Integer - float -> sc._gateway.jvm.java.lang.Double - bool -> sc._gateway.jvm.java.lang.Boolean """
sc = SparkContext._active_spark_context
java_array = None
if len(pylist) > 0 and isinstance(pylist[0], list):
    # If pylist is a 2D array, then a 2D java array will be created.
    # The 2D array is a square, non-jagged 2D array that is big enough for all elements.
    inner_array_length = 0
    for i in xrange(len(pylist)):
        inner_array_length = max(inner_array_length, len(pylist[i]))
    java_array = sc._gateway.new_array(java_class, len(pylist), inner_array_length)
    for i in xrange(len(pylist)):
        for j in xrange(len(pylist[i])):
            java_array[i][j] = pylist[i][j]
else:
    java_array = sc._gateway.new_array(java_class, len(pylist))
    for i in xrange(len(pylist)):
        java_array[i] = pylist[i]
return java_array
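A short usage sketch (not from the source) of the same pattern seen in from_labels above; it assumes an active SparkContext so the gateway JVM classes are reachable:

sc = SparkContext._active_spark_context
jarr = JavaWrapper._new_java_array(["a", "b", "c"], sc._gateway.jvm.java.lang.String)
# jarr can now be passed to any JVM-side method expecting a String[]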
<SYSTEM_TASK:> Print the profile stats to stdout, id is the RDD id <END_TASK> <USER_TASK:> Description: def show(self, id): """ Print the profile stats to stdout, id is the RDD id """
stats = self.stats()
if stats:
    print("=" * 60)
    print("Profile of RDD<id=%d>" % id)
    print("=" * 60)
    stats.sort_stats("time", "cumulative").print_stats()
<SYSTEM_TASK:> Dump the profile into path, id is the RDD id <END_TASK> <USER_TASK:> Description: def dump(self, id, path): """ Dump the profile into path, id is the RDD id """
if not os.path.exists(path):
    os.makedirs(path)
stats = self.stats()
if stats:
    p = os.path.join(path, "rdd_%d.pstats" % id)
    stats.dump_stats(p)
<SYSTEM_TASK:> Runs and profiles the method to_profile passed in. A profile object is returned. <END_TASK> <USER_TASK:> Description: def profile(self, func): """ Runs and profiles the method to_profile passed in. A profile object is returned. """
pr = cProfile.Profile()
pr.runcall(func)
st = pstats.Stats(pr)
st.stream = None  # make it picklable
st.strip_dirs()

# Adds a new profile to the existing accumulated value
self._accumulator.add(st)
<SYSTEM_TASK:> Get the existing SQLContext or create a new one with given SparkContext. <END_TASK> <USER_TASK:> Description: def getOrCreate(cls, sc): """ Get the existing SQLContext or create a new one with given SparkContext. :param sc: SparkContext """
if cls._instantiatedContext is None:
    jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
    sparkSession = SparkSession(sc, jsqlContext.sparkSession())
    cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
<SYSTEM_TASK:> Returns the value of Spark SQL configuration property for the given key. <END_TASK> <USER_TASK:> Description: def getConf(self, key, defaultValue=_NoValue): """Returns the value of Spark SQL configuration property for the given key. If the key is not set and defaultValue is set, return defaultValue. If the key is not set and defaultValue is not set, return the system default value. >>> sqlContext.getConf("spark.sql.shuffle.partitions") u'200' >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10") u'10' >>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50") >>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10") u'50' """
return self.sparkSession.conf.get(key, defaultValue)
<SYSTEM_TASK:> Creates an external table based on the dataset in a data source. <END_TASK> <USER_TASK:> Description: def createExternalTable(self, tableName, path=None, source=None, schema=None, **options): """Creates an external table based on the dataset in a data source. It returns the DataFrame associated with the external table. The data source is specified by the ``source`` and a set of ``options``. If ``source`` is not specified, the default data source configured by ``spark.sql.sources.default`` will be used. Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and created external table. :return: :class:`DataFrame` """
return self.sparkSession.catalog.createExternalTable( tableName, path, source, schema, **options)
<SYSTEM_TASK:> Returns a list of names of tables in the database ``dbName``. <END_TASK> <USER_TASK:> Description: def tableNames(self, dbName=None): """Returns a list of names of tables in the database ``dbName``. :param dbName: string, name of the database to use. Default to the current database. :return: list of table names, in string >>> sqlContext.registerDataFrameAsTable(df, "table1") >>> "table1" in sqlContext.tableNames() True >>> "table1" in sqlContext.tableNames("default") True """
if dbName is None:
    return [name for name in self._ssql_ctx.tableNames()]
else:
    return [name for name in self._ssql_ctx.tableNames(dbName)]
<SYSTEM_TASK:> Creates a copy of this instance with a randomly generated uid <END_TASK> <USER_TASK:> Description: def copy(self, extra=None): """ Creates a copy of this instance with a randomly generated uid and some extra params. This creates a deep copy of the embedded paramMap, and copies the embedded and extra parameters over. :param extra: Extra parameters to copy to the new instance :return: Copy of this instance """
if extra is None:
    extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
<SYSTEM_TASK:> Given a Java OneVsRestModel, create and return a Python wrapper of it. <END_TASK> <USER_TASK:> Description: def _from_java(cls, java_stage): """ Given a Java OneVsRestModel, create and return a Python wrapper of it. Used for ML persistence. """
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
    .setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
<SYSTEM_TASK:> Transfer this instance to a Java OneVsRestModel. Used for ML persistence. <END_TASK> <USER_TASK:> Description: def _to_java(self): """ Transfer this instance to a Java OneVsRestModel. Used for ML persistence. :return: Java object equivalent to this instance. """
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
    java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
                                     self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
<SYSTEM_TASK:> Return the message from an exception as either a str or unicode object. Supports both <END_TASK> <USER_TASK:> Description: def _exception_message(excp): """Return the message from an exception as either a str or unicode object. Supports both Python 2 and Python 3. >>> msg = "Exception message" >>> excp = Exception(msg) >>> msg == _exception_message(excp) True >>> msg = u"unicöde" >>> excp = Exception(msg) >>> msg == _exception_message(excp) True """
if isinstance(excp, Py4JJavaError):
    # 'Py4JJavaError' doesn't contain the stack trace available on the Java side in 'message'
    # attribute in Python 2. We should call 'str' function on this exception in general but
    # 'Py4JJavaError' has an issue about addressing non-ascii strings. So, here we work
    # around by the direct call, '__str__()'. Please see SPARK-23517.
    return excp.__str__()
if hasattr(excp, "message"):
    return excp.message
return str(excp)
<SYSTEM_TASK:> Get argspec of a function. Supports both Python 2 and Python 3. <END_TASK> <USER_TASK:> Description: def _get_argspec(f): """ Get argspec of a function. Supports both Python 2 and Python 3. """
if sys.version_info[0] < 3:
    argspec = inspect.getargspec(f)
else:
    # `getargspec` is deprecated since python3.0 (incompatible with function annotations).
    # See SPARK-23569.
    argspec = inspect.getfullargspec(f)
return argspec
<SYSTEM_TASK:> Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError' <END_TASK> <USER_TASK:> Description: def fail_on_stopiteration(f): """ Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError' prevents silent loss of data when 'f' is used in a for loop in Spark code """
def wrapper(*args, **kwargs):
    try:
        return f(*args, **kwargs)
    except StopIteration as exc:
        raise RuntimeError(
            "Caught StopIteration thrown from user's code; failing the task",
            exc
        )

return wrapper
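A brief illustration (not part of the source) of what the wrapper buys you; `risky` is a made-up function:

def risky(x):
    if x > 2:
        raise StopIteration("oops")   # would silently end an enclosing for loop
    return x

safe = fail_on_stopiteration(risky)
safe(1)    # returns 1
# safe(3) now raises RuntimeError instead of quietly truncating a partition's data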
<SYSTEM_TASK:> Checks whether a SparkContext is initialized or not. <END_TASK> <USER_TASK:> Description: def _ensure_initialized(cls, instance=None, gateway=None, conf=None): """ Checks whether a SparkContext is initialized or not. Throws error if a SparkContext is already running. """
with SparkContext._lock:
    if not SparkContext._gateway:
        SparkContext._gateway = gateway or launch_gateway(conf)
        SparkContext._jvm = SparkContext._gateway.jvm

    if instance:
        if (SparkContext._active_spark_context and
                SparkContext._active_spark_context != instance):
            currentMaster = SparkContext._active_spark_context.master
            currentAppName = SparkContext._active_spark_context.appName
            callsite = SparkContext._active_spark_context._callsite

            # Raise error if there is already a running Spark context
            raise ValueError(
                "Cannot run multiple SparkContexts at once; "
                "existing SparkContext(app=%s, master=%s)"
                " created by %s at %s:%s "
                % (currentAppName, currentMaster,
                   callsite.function, callsite.file, callsite.linenum))
        else:
            SparkContext._active_spark_context = instance
<SYSTEM_TASK:> Get or instantiate a SparkContext and register it as a singleton object. <END_TASK> <USER_TASK:> Description: def getOrCreate(cls, conf=None): """ Get or instantiate a SparkContext and register it as a singleton object. :param conf: SparkConf (optional) """
with SparkContext._lock:
    if SparkContext._active_spark_context is None:
        SparkContext(conf=conf or SparkConf())
    return SparkContext._active_spark_context
<SYSTEM_TASK:> Set a Java system property, such as spark.executor.memory. This must <END_TASK> <USER_TASK:> Description: def setSystemProperty(cls, key, value): """ Set a Java system property, such as spark.executor.memory. This must be invoked before instantiating SparkContext. """
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
<SYSTEM_TASK:> Distribute a local Python collection to form an RDD. Using xrange <END_TASK> <USER_TASK:> Description: def parallelize(self, c, numSlices=None): """ Distribute a local Python collection to form an RDD. Using xrange is recommended if the input represents a range for performance. >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect() [[0], [2], [3], [4], [6]] >>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect() [[], [0], [], [2], [4]] """
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
    size = len(c)
    if size == 0:
        return self.parallelize([], numSlices)
    step = c[1] - c[0] if size > 1 else 1
    start0 = c[0]

    def getStart(split):
        return start0 + int((split * size / numSlices)) * step

    def f(split, iterator):
        # it's an empty iterator here but we need this line for triggering the
        # logic of signal handling in FramedSerializer.load_stream, for instance,
        # SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
        # FramedSerializer.load_stream produces a generator, the control should
        # at least be in that function once. Here we do it by explicitly converting
        # the empty iterator to a list, thus make sure worker reuse takes effect.
        # See more details in SPARK-26549.
        assert len(list(iterator)) == 0
        return xrange(getStart(split), getStart(split + 1), step)

    return self.parallelize([], numSlices).mapPartitionsWithIndex(f)

# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
    c = list(c)    # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)

def reader_func(temp_filename):
    return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)

def createRDDServer():
    return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)

jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
<SYSTEM_TASK:> Build the union of a list of RDDs. <END_TASK> <USER_TASK:> Description: def union(self, rdds): """ Build the union of a list of RDDs. This supports unions() of RDDs with different serialized formats, although this forces them to be reserialized using the default serializer: >>> path = os.path.join(tempdir, "union-text.txt") >>> with open(path, "w") as testFile: ... _ = testFile.write("Hello") >>> textFile = sc.textFile(path) >>> textFile.collect() [u'Hello'] >>> parallelized = sc.parallelize(["World!"]) >>> sorted(sc.union([textFile, parallelized]).collect()) [u'Hello', 'World!'] """
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
    rdds = [x._reserialize() for x in rdds]
cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
jrdds = SparkContext._gateway.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
    jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
<SYSTEM_TASK:> Assigns a group ID to all the jobs started by this thread until the group ID is set to a <END_TASK> <USER_TASK:> Description: def setJobGroup(self, groupId, description, interruptOnCancel=False): """ Assigns a group ID to all the jobs started by this thread until the group ID is set to a different value or cleared. Often, a unit of execution in an application consists of multiple Spark actions or jobs. Application programmers can use this method to group all those jobs together and give a group description. Once set, the Spark web UI will associate such jobs with this group. The application can use L{SparkContext.cancelJobGroup} to cancel all running jobs in this group. >>> import threading >>> from time import sleep >>> result = "Not Set" >>> lock = threading.Lock() >>> def map_func(x): ... sleep(100) ... raise Exception("Task should have been cancelled") >>> def start_job(x): ... global result ... try: ... sc.setJobGroup("job_to_cancel", "some description") ... result = sc.parallelize(range(x)).map(map_func).collect() ... except Exception as e: ... result = "Cancelled" ... lock.release() >>> def stop_job(): ... sleep(5) ... sc.cancelJobGroup("job_to_cancel") >>> suppress = lock.acquire() >>> suppress = threading.Thread(target=start_job, args=(10,)).start() >>> suppress = threading.Thread(target=stop_job).start() >>> suppress = lock.acquire() >>> print(result) Cancelled If interruptOnCancel is set to true for the job group, then job cancellation will result in Thread.interrupt() being called on the job's executor threads. This is useful to help ensure that the tasks are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead. """
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
<SYSTEM_TASK:> Executes the given partitionFunc on the specified set of partitions, <END_TASK> <USER_TASK:> Description: def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False): """ Executes the given partitionFunc on the specified set of partitions, returning the result as an array of elements. If 'partitions' is not specified, this will run over all partitions. >>> myRDD = sc.parallelize(range(6), 3) >>> sc.runJob(myRDD, lambda part: [x * x for x in part]) [0, 1, 4, 9, 16, 25] >>> myRDD = sc.parallelize(range(6), 3) >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True) [0, 1, 16, 25] """
if partitions is None:
    partitions = range(rdd._jrdd.partitions().size())

# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
<SYSTEM_TASK:> Computes an FP-Growth model that contains frequent itemsets. <END_TASK> <USER_TASK:> Description: def train(cls, data, minSupport=0.3, numPartitions=-1): """ Computes an FP-Growth model that contains frequent itemsets. :param data: The input data set, each element contains a transaction. :param minSupport: The minimal support level. (default: 0.3) :param numPartitions: The number of partitions used by parallel FP-growth. A value of -1 will use the same number as input data. (default: -1) """
model = callMLlibFunc("trainFPGrowthModel", data, float(minSupport), int(numPartitions)) return FPGrowthModel(model)
<SYSTEM_TASK:> Finds the complete set of frequent sequential patterns in the <END_TASK> <USER_TASK:> Description: def train(cls, data, minSupport=0.1, maxPatternLength=10, maxLocalProjDBSize=32000000): """ Finds the complete set of frequent sequential patterns in the input sequences of itemsets. :param data: The input data set, each element contains a sequence of itemsets. :param minSupport: The minimal support level of the sequential pattern, any pattern that appears more than (minSupport * size-of-the-dataset) times will be output. (default: 0.1) :param maxPatternLength: The maximal length of the sequential pattern, any pattern that appears less than maxPatternLength will be output. (default: 10) :param maxLocalProjDBSize: The maximum number of items (including delimiters used in the internal storage format) allowed in a projected database before local processing. If a projected database exceeds this size, another iteration of distributed prefix growth is run. (default: 32000000) """
model = callMLlibFunc("trainPrefixSpanModel", data, minSupport, maxPatternLength, maxLocalProjDBSize) return PrefixSpanModel(model)
<SYSTEM_TASK:> Set sample points from the population. Should be a RDD <END_TASK> <USER_TASK:> Description: def setSample(self, sample): """Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
    raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
<SYSTEM_TASK:> Estimate the probability density at points <END_TASK> <USER_TASK:> Description: def estimate(self, points): """Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
    "estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
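A hedged usage sketch tying setSample and estimate together, assuming the enclosing class is pyspark.mllib.stat.KernelDensity with a setBandwidth setter and an active SparkContext `sc`:

from pyspark.mllib.stat import KernelDensity

kd = KernelDensity()
kd.setSample(sc.parallelize([1.0, 2.0, 3.0, 4.0]))   # sample must be an RDD
kd.setBandwidth(0.5)
densities = kd.estimate([2.0, 3.5])                  # numpy array of density estimates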
<SYSTEM_TASK:> Start a TCP server to receive accumulator updates in a daemon thread, and returns it <END_TASK> <USER_TASK:> Description: def _start_update_server(auth_token): """Start a TCP server to receive accumulator updates in a daemon thread, and returns it"""
server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler, auth_token)
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
return server
<SYSTEM_TASK:> Adds a term to this accumulator's value <END_TASK> <USER_TASK:> Description: def add(self, term): """Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
<SYSTEM_TASK:> Generates an RDD comprised of i.i.d. samples from the standard normal <END_TASK> <USER_TASK:> Description: def normalRDD(sc, size, numPartitions=None, seed=None): """ Generates an RDD comprised of i.i.d. samples from the standard normal distribution. To transform the distribution in the generated RDD from standard normal to some other normal N(mean, sigma^2), use C{RandomRDDs.normal(sc, n, p, seed)\ .map(lambda v: mean + sigma * v)} :param sc: SparkContext used to create the RDD. :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ N(0.0, 1.0). >>> x = RandomRDDs.normalRDD(sc, 1000, seed=1) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - 0.0) < 0.1 True >>> abs(stats.stdev() - 1.0) < 0.1 True """
return callMLlibFunc("normalRDD", sc._jsc, size, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of i.i.d. samples from the log normal <END_TASK> <USER_TASK:> Description: def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None): """ Generates an RDD comprised of i.i.d. samples from the log normal distribution with the input mean and standard distribution. :param sc: SparkContext used to create the RDD. :param mean: mean for the log Normal distribution :param std: std for the log Normal distribution :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ log N(mean, std). >>> from math import sqrt, exp >>> mean = 0.0 >>> std = 1.0 >>> expMean = exp(mean + 0.5 * std * std) >>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std)) >>> x = RandomRDDs.logNormalRDD(sc, mean, std, 1000, seed=2) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - expMean) < 0.5 True >>> from math import sqrt >>> abs(stats.stdev() - expStd) < 0.5 True """
return callMLlibFunc("logNormalRDD", sc._jsc, float(mean), float(std), size, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of i.i.d. samples from the Exponential <END_TASK> <USER_TASK:> Description: def exponentialRDD(sc, mean, size, numPartitions=None, seed=None): """ Generates an RDD comprised of i.i.d. samples from the Exponential distribution with the input mean. :param sc: SparkContext used to create the RDD. :param mean: Mean, or 1 / lambda, for the Exponential distribution. :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ Exp(mean). >>> mean = 2.0 >>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - mean) < 0.5 True >>> from math import sqrt >>> abs(stats.stdev() - sqrt(mean)) < 0.5 True """
return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of i.i.d. samples from the Gamma <END_TASK> <USER_TASK:> Description: def gammaRDD(sc, shape, scale, size, numPartitions=None, seed=None): """ Generates an RDD comprised of i.i.d. samples from the Gamma distribution with the input shape and scale. :param sc: SparkContext used to create the RDD. :param shape: shape (> 0) parameter for the Gamma distribution :param scale: scale (> 0) parameter for the Gamma distribution :param size: Size of the RDD. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of float comprised of i.i.d. samples ~ Gamma(shape, scale). >>> from math import sqrt >>> shape = 1.0 >>> scale = 2.0 >>> expMean = shape * scale >>> expStd = sqrt(shape * scale * scale) >>> x = RandomRDDs.gammaRDD(sc, shape, scale, 1000, seed=2) >>> stats = x.stats() >>> stats.count() 1000 >>> abs(stats.mean() - expMean) < 0.5 True >>> abs(stats.stdev() - expStd) < 0.5 True """
return callMLlibFunc("gammaRDD", sc._jsc, float(shape), float(scale), size, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of vectors containing i.i.d. samples drawn <END_TASK> <USER_TASK:> Description: def normalVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the standard normal distribution. :param sc: SparkContext used to create the RDD. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of Vector with vectors containing i.i.d. samples ~ `N(0.0, 1.0)`. >>> import numpy as np >>> mat = np.matrix(RandomRDDs.normalVectorRDD(sc, 100, 100, seed=1).collect()) >>> mat.shape (100, 100) >>> abs(mat.mean() - 0.0) < 0.1 True >>> abs(mat.std() - 1.0) < 0.1 True """
return callMLlibFunc("normalVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of vectors containing i.i.d. samples drawn <END_TASK> <USER_TASK:> Description: def logNormalVectorRDD(sc, mean, std, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the log normal distribution. :param sc: SparkContext used to create the RDD. :param mean: Mean of the log normal distribution :param std: Standard Deviation of the log normal distribution :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of Vector with vectors containing i.i.d. samples ~ log `N(mean, std)`. >>> import numpy as np >>> from math import sqrt, exp >>> mean = 0.0 >>> std = 1.0 >>> expMean = exp(mean + 0.5 * std * std) >>> expStd = sqrt((exp(std * std) - 1.0) * exp(2.0 * mean + std * std)) >>> m = RandomRDDs.logNormalVectorRDD(sc, mean, std, 100, 100, seed=1).collect() >>> mat = np.matrix(m) >>> mat.shape (100, 100) >>> abs(mat.mean() - expMean) < 0.1 True >>> abs(mat.std() - expStd) < 0.1 True """
return callMLlibFunc("logNormalVectorRDD", sc._jsc, float(mean), float(std), numRows, numCols, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of vectors containing i.i.d. samples drawn <END_TASK> <USER_TASK:> Description: def poissonVectorRDD(sc, mean, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the Poisson distribution with the input mean. :param sc: SparkContext used to create the RDD. :param mean: Mean, or lambda, for the Poisson distribution. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`) :param seed: Random seed (default: a random long integer). :return: RDD of Vector with vectors containing i.i.d. samples ~ Pois(mean). >>> import numpy as np >>> mean = 100.0 >>> rdd = RandomRDDs.poissonVectorRDD(sc, mean, 100, 100, seed=1) >>> mat = np.mat(rdd.collect()) >>> mat.shape (100, 100) >>> abs(mat.mean() - mean) < 0.5 True >>> from math import sqrt >>> abs(mat.std() - sqrt(mean)) < 0.5 True """
return callMLlibFunc("poissonVectorRDD", sc._jsc, float(mean), numRows, numCols, numPartitions, seed)
<SYSTEM_TASK:> Generates an RDD comprised of vectors containing i.i.d. samples drawn <END_TASK> <USER_TASK:> Description: def gammaVectorRDD(sc, shape, scale, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the Gamma distribution. :param sc: SparkContext used to create the RDD. :param shape: Shape (> 0) of the Gamma distribution :param scale: Scale (> 0) of the Gamma distribution :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`). :param seed: Random seed (default: a random long integer). :return: RDD of Vector with vectors containing i.i.d. samples ~ Gamma(shape, scale). >>> import numpy as np >>> from math import sqrt >>> shape = 1.0 >>> scale = 2.0 >>> expMean = shape * scale >>> expStd = sqrt(shape * scale * scale) >>> mat = np.matrix(RandomRDDs.gammaVectorRDD(sc, shape, scale, 100, 100, seed=1).collect()) >>> mat.shape (100, 100) >>> abs(mat.mean() - expMean) < 0.1 True >>> abs(mat.std() - expStd) < 0.1 True """
return callMLlibFunc("gammaVectorRDD", sc._jsc, float(shape), float(scale), numRows, numCols, numPartitions, seed)
<SYSTEM_TASK:> Runtime configuration interface for Spark. <END_TASK> <USER_TASK:> Description: def conf(self): """Runtime configuration interface for Spark. This is the interface through which the user can get and set all Spark and Hadoop configurations that are relevant to Spark SQL. When getting the value of a config, this defaults to the value set in the underlying :class:`SparkContext`, if any. """
if not hasattr(self, "_conf"):
    self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
<SYSTEM_TASK:> Interface through which the user may create, drop, alter or query underlying <END_TASK> <USER_TASK:> Description: def catalog(self): """Interface through which the user may create, drop, alter or query underlying databases, tables, functions etc. :return: :class:`Catalog` """
from pyspark.sql.catalog import Catalog
if not hasattr(self, "_catalog"):
    self._catalog = Catalog(self)
return self._catalog
<SYSTEM_TASK:> Infer schema from list of Row or tuple. <END_TASK> <USER_TASK:> Description: def _inferSchemaFromList(self, data, names=None): """ Infer schema from list of Row or tuple. :param data: list of Row or tuple :param names: list of column names :return: :class:`pyspark.sql.types.StructType` """
if not data:
    raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
    warnings.warn("inferring schema from dict is deprecated, "
                  "please use pyspark.sql.Row instead")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
if _has_nulltype(schema):
    raise ValueError("Some of types cannot be determined after inferring")
return schema
<SYSTEM_TASK:> Infer schema from an RDD of Row or tuple. <END_TASK> <USER_TASK:> Description: def _inferSchema(self, rdd, samplingRatio=None, names=None): """ Infer schema from an RDD of Row or tuple. :param rdd: an RDD of Row or tuple :param samplingRatio: sampling ratio, or no sampling (default) :return: :class:`pyspark.sql.types.StructType` """
first = rdd.first()
if not first:
    raise ValueError("The first row in RDD is empty, "
                     "can not infer schema")
if type(first) is dict:
    warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                  "Use pyspark.sql.Row instead")

if samplingRatio is None:
    schema = _infer_schema(first, names=names)
    if _has_nulltype(schema):
        for row in rdd.take(100)[1:]:
            schema = _merge_type(schema, _infer_schema(row, names=names))
            if not _has_nulltype(schema):
                break
        else:
            raise ValueError("Some of types cannot be determined by the "
                             "first 100 rows, please try again with sampling")
else:
    if samplingRatio < 0.99:
        rdd = rdd.sample(False, float(samplingRatio))
    schema = rdd.map(lambda row: _infer_schema(row, names)).reduce(_merge_type)
return schema
<SYSTEM_TASK:> Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. <END_TASK> <USER_TASK:> Description: def _createFromRDD(self, rdd, schema, samplingRatio): """ Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. """
if schema is None or isinstance(schema, (list, tuple)):
    struct = self._inferSchema(rdd, samplingRatio, names=schema)
    converter = _create_converter(struct)
    rdd = rdd.map(converter)
    if isinstance(schema, (list, tuple)):
        for i, name in enumerate(schema):
            struct.fields[i].name = name
            struct.names[i] = name
    schema = struct

elif not isinstance(schema, StructType):
    raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
<SYSTEM_TASK:> Create an RDD for DataFrame from a list or pandas.DataFrame, returns <END_TASK> <USER_TASK:> Description: def _createFromLocal(self, data, schema): """ Create an RDD for DataFrame from a list or pandas.DataFrame, returns the RDD and schema. """
# make sure data can be consumed multiple times
if not isinstance(data, list):
    data = list(data)

if schema is None or isinstance(schema, (list, tuple)):
    struct = self._inferSchemaFromList(data, names=schema)
    converter = _create_converter(struct)
    data = map(converter, data)
    if isinstance(schema, (list, tuple)):
        for i, name in enumerate(schema):
            struct.fields[i].name = name
            struct.names[i] = name
    schema = struct

elif not isinstance(schema, StructType):
    raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
<SYSTEM_TASK:> Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting <END_TASK> <USER_TASK:> Description: def _create_from_pandas_with_arrow(self, pdf, schema, timezone): """ Create a DataFrame from a given pandas.DataFrame by slicing it into partitions, converting to Arrow data, then sending to the JVM to parallelize. If a schema is passed in, the data types will be used to coerce the data in Pandas to Arrow conversion. """
from pyspark.serializers import ArrowStreamPandasSerializer
from pyspark.sql.types import from_arrow_type, to_arrow_type, TimestampType
from pyspark.sql.utils import require_minimum_pandas_version, \
    require_minimum_pyarrow_version

require_minimum_pandas_version()
require_minimum_pyarrow_version()

from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
import pyarrow as pa

# Create the Spark schema from list of names passed in with Arrow types
if isinstance(schema, (list, tuple)):
    arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
    struct = StructType()
    for name, field in zip(schema, arrow_schema):
        struct.add(name, from_arrow_type(field.type), nullable=field.nullable)
    schema = struct

# Determine arrow types to coerce data when creating batches
if isinstance(schema, StructType):
    arrow_types = [to_arrow_type(f.dataType) for f in schema.fields]
elif isinstance(schema, DataType):
    raise ValueError("Single data type %s is not supported with Arrow" % str(schema))
else:
    # Any timestamps must be coerced to be compatible with Spark
    arrow_types = [to_arrow_type(TimestampType())
                   if is_datetime64_dtype(t) or is_datetime64tz_dtype(t) else None
                   for t in pdf.dtypes]

# Slice the DataFrame to be batched
step = -(-len(pdf) // self.sparkContext.defaultParallelism)  # round int up
pdf_slices = (pdf[start:start + step] for start in xrange(0, len(pdf), step))

# Create list of Arrow (columns, type) for serializer dump_stream
arrow_data = [[(c, t) for (_, c), t in zip(pdf_slice.iteritems(), arrow_types)]
              for pdf_slice in pdf_slices]

jsqlContext = self._wrapped._jsqlContext

safecheck = self._wrapped._conf.arrowSafeTypeConversion()
col_by_name = True  # col by name only applies to StructType columns, can't happen here
ser = ArrowStreamPandasSerializer(timezone, safecheck, col_by_name)

def reader_func(temp_filename):
    return self._jvm.PythonSQLUtils.readArrowStreamFromFile(jsqlContext, temp_filename)

def create_RDD_server():
    return self._jvm.ArrowRDDServer(jsqlContext)

# Create Spark DataFrame from Arrow stream file, using one batch per partition
jrdd = self._sc._serialize_to_jvm(arrow_data, ser, reader_func, create_RDD_server)
jdf = self._jvm.PythonSQLUtils.toDataFrame(jrdd, schema.json(), jsqlContext)
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
<SYSTEM_TASK:> Initialize a SparkSession for a pyspark shell session. This is called from shell.py <END_TASK> <USER_TASK:> Description: def _create_shell_session(): """ Initialize a SparkSession for a pyspark shell session. This is called from shell.py to make error handling simpler without needing to declare local variables in that script, which would expose those to users. """
import py4j
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
try:
    # Try to access HiveConf, it will raise exception if Hive is not added
    conf = SparkConf()
    if conf.get('spark.sql.catalogImplementation', 'hive').lower() == 'hive':
        SparkContext._jvm.org.apache.hadoop.hive.conf.HiveConf()
        return SparkSession.builder\
            .enableHiveSupport()\
            .getOrCreate()
    else:
        return SparkSession.builder.getOrCreate()
except (py4j.protocol.Py4JError, TypeError):
    if conf.get('spark.sql.catalogImplementation', '').lower() == 'hive':
        warnings.warn("Fall back to non-hive support because failing to access HiveConf, "
                      "please make sure you build spark with hive")

return SparkSession.builder.getOrCreate()
<SYSTEM_TASK:> Restore an object of namedtuple <END_TASK> <USER_TASK:> Description: def _restore(name, fields, value): """ Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
    cls = collections.namedtuple(name, fields)
    __cls[k] = cls
return cls(*value)
<SYSTEM_TASK:> Make class generated by namedtuple picklable <END_TASK> <USER_TASK:> Description: def _hack_namedtuple(cls): """ Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields

def __reduce__(self):
    return (_restore, (name, fields, tuple(self)))

cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
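Why the hack matters, as a rough sketch (assuming `_restore` and the module-level `__cls` cache from this file are importable on the unpickling side): a namedtuple class defined on the driver is not importable by name on workers, so the injected __reduce__ rebuilds it from (name, fields, values).

import collections
import pickle

Point = _hack_namedtuple(collections.namedtuple("Point", "x y"))
p = pickle.loads(pickle.dumps(Point(1, 2)))   # reconstructed via _restore, no module-level Point needed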
<SYSTEM_TASK:> Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields <END_TASK> <USER_TASK:> Description: def load_stream(self, stream): """ Load a stream of un-ordered Arrow RecordBatches, where the last iteration yields a list of indices that can be used to put the RecordBatches in the correct order. """
# load the batches for batch in self.serializer.load_stream(stream): yield batch # load the batch order indices num = read_int(stream) batch_order = [] for i in xrange(num): index = read_int(stream) batch_order.append(index) yield batch_order
<SYSTEM_TASK:> Create an Arrow record batch from the given pandas.Series or list of Series, <END_TASK> <USER_TASK:> Description: def _create_batch(self, series): """ Create an Arrow record batch from the given pandas.Series or list of Series, with optional type. :param series: A single pandas.Series, list of Series, or list of (series, arrow_type) :return: Arrow RecordBatch """
import pandas as pd import pyarrow as pa from pyspark.sql.types import _check_series_convert_timestamps_internal # Make input conform to [(series1, type1), (series2, type2), ...] if not isinstance(series, (list, tuple)) or \ (len(series) == 2 and isinstance(series[1], pa.DataType)): series = [series] series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series) def create_array(s, t): mask = s.isnull() # Ensure timestamp series are in expected form for Spark internal representation if t is not None and pa.types.is_timestamp(t): s = _check_series_convert_timestamps_internal(s.fillna(0), self._timezone) # TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2 return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False) try: array = pa.Array.from_pandas(s, mask=mask, type=t, safe=self._safecheck) except pa.ArrowException as e: error_msg = "Exception thrown when converting pandas.Series (%s) to Arrow " + \ "Array (%s). It can be caused by overflows or other unsafe " + \ "conversions warned by Arrow. Arrow safe type check can be " + \ "disabled by using SQL config " + \ "`spark.sql.execution.pandas.arrowSafeTypeConversion`." raise RuntimeError(error_msg % (s.dtype, t), e) return array arrs = [] for s, t in series: if t is not None and pa.types.is_struct(t): if not isinstance(s, pd.DataFrame): raise ValueError("A field of type StructType expects a pandas.DataFrame, " "but got: %s" % str(type(s))) # Input partition and result pandas.DataFrame empty, make empty Arrays with struct if len(s) == 0 and len(s.columns) == 0: arrs_names = [(pa.array([], type=field.type), field.name) for field in t] # Assign result columns by schema name if user labeled with strings elif self._assign_cols_by_name and any(isinstance(name, basestring) for name in s.columns): arrs_names = [(create_array(s[field.name], field.type), field.name) for field in t] # Assign result columns by position else: arrs_names = [(create_array(s[s.columns[i]], field.type), field.name) for i, field in enumerate(t)] struct_arrs, struct_names = zip(*arrs_names) arrs.append(pa.StructArray.from_arrays(struct_arrs, struct_names)) else: arrs.append(create_array(s, t)) return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
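Stripped of the timestamp and struct handling, the core of _create_batch is the pandas-to-Arrow array conversion below; a minimal sketch that only assumes pandas and pyarrow (no Spark needed):

# Minimal sketch of the core conversion performed in _create_batch above.
import pandas as pd
import pyarrow as pa

s = pd.Series([1.0, None, 3.0])
# The null mask comes from pandas; the target type plays the role of the coerced arrow type.
arr = pa.Array.from_pandas(s, mask=s.isnull(), type=pa.float64())
batch = pa.RecordBatch.from_arrays([arr], ["_0"])   # columns named "_0", "_1", ... as above
print(batch.num_rows, batch.schema)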
<SYSTEM_TASK:> Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or <END_TASK> <USER_TASK:> Description: def dump_stream(self, iterator, stream): """ Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or a list of series accompanied by an optional pyarrow type to coerce the data to. """
batches = (self._create_batch(series) for series in iterator) super(ArrowStreamPandasSerializer, self).dump_stream(batches, stream)
<SYSTEM_TASK:> Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. <END_TASK> <USER_TASK:> Description: def load_stream(self, stream): """ Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series. """
batches = super(ArrowStreamPandasSerializer, self).load_stream(stream) import pyarrow as pa for batch in batches: yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
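The reverse direction is close to what load_stream does above (the real serializer additionally applies timezone and date fixups via arrow_to_pandas); again a pandas/pyarrow-only sketch:

# Sketch of turning Arrow record batches back into pandas Series, mirroring load_stream.
import pyarrow as pa

batch = pa.RecordBatch.from_arrays(
    [pa.array([1, 2, 3]), pa.array(["a", "b", "c"])], ["_0", "_1"])
series = [col.to_pandas() for col in pa.Table.from_batches([batch]).itercolumns()]
print([s.tolist() for s in series])   # [[1, 2, 3], ['a', 'b', 'c']]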
<SYSTEM_TASK:> Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent. <END_TASK> <USER_TASK:> Description: def dump_stream(self, iterator, stream): """ Override because Pandas UDFs require a START_ARROW_STREAM before the Arrow stream is sent. This should be sent after creating the first record batch so in case of an error, it can be sent back to the JVM before the Arrow stream starts. """
def init_stream_yield_batches(): should_write_start_length = True for series in iterator: batch = self._create_batch(series) if should_write_start_length: write_int(SpecialLengths.START_ARROW_STREAM, stream) should_write_start_length = False yield batch return ArrowStreamSerializer.dump_stream(self, init_stream_yield_batches(), stream)
<SYSTEM_TASK:> Set the trigger for the stream query. If this is not set it will run the query as fast <END_TASK> <USER_TASK:> Description:
def trigger(self, processingTime=None, once=None, continuous=None):
    """Set the trigger for the stream query. If this is not set it will run the query as fast
    as possible, which is equivalent to setting the trigger to ``processingTime='0 seconds'``.

    .. note:: Evolving.

    :param processingTime: a processing time interval as a string, e.g. '5 seconds', '1 minute'.
                           Set a trigger that runs a query periodically based on the processing
                           time. Only one trigger can be set.
    :param once: if set to True, set a trigger that processes only one batch of data in a
                 streaming query then terminates the query. Only one trigger can be set.
    :param continuous: a time interval as a string, e.g. '5 seconds', '1 minute'.
                       Set a trigger that runs a continuous query with a given checkpoint
                       interval. Only one trigger can be set.

    >>> # trigger the query for execution every 5 seconds
    >>> writer = sdf.writeStream.trigger(processingTime='5 seconds')
    >>> # trigger the query for just one batch of data
    >>> writer = sdf.writeStream.trigger(once=True)
    >>> # trigger the continuous query with a checkpoint interval of 5 seconds
    >>> writer = sdf.writeStream.trigger(continuous='5 seconds')
    """
params = [processingTime, once, continuous] if params.count(None) == 3: raise ValueError('No trigger provided') elif params.count(None) < 2: raise ValueError('Multiple triggers not allowed.') jTrigger = None if processingTime is not None: if type(processingTime) != str or len(processingTime.strip()) == 0: raise ValueError('Value for processingTime must be a non empty string. Got: %s' % processingTime) interval = processingTime.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.ProcessingTime( interval) elif once is not None: if once is not True: raise ValueError('Value for once must be True. Got: %s' % once) jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Once() else: if type(continuous) != str or len(continuous.strip()) == 0: raise ValueError('Value for continuous must be a non empty string. Got: %s' % continuous) interval = continuous.strip() jTrigger = self._spark._sc._jvm.org.apache.spark.sql.streaming.Trigger.Continuous( interval) self._jwrite = self._jwrite.trigger(jTrigger) return self
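As a complement to the doctests in the docstring, a hedged sketch of the mutual-exclusion check enforced above (assumes ``sdf`` is a streaming DataFrame, as in those doctests):

# Exactly one of processingTime / once / continuous may be supplied per call.
try:
    sdf.writeStream.trigger(processingTime='5 seconds', once=True)
except ValueError as e:
    print(e)   # Multiple triggers not allowed.

try:
    sdf.writeStream.trigger()
except ValueError as e:
    print(e)   # No trigger provided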
<SYSTEM_TASK:> Sets the output of the streaming query to be processed using the provided writer ``f``. <END_TASK> <USER_TASK:> Description:
def foreach(self, f):
    """
    Sets the output of the streaming query to be processed using the provided writer ``f``.
    This is often used to write the output of a streaming query to arbitrary storage systems.
    The processing logic can be specified in two ways.

    #. A **function** that takes a row as input.
        This is a simple way to express your processing logic. Note that this does
        not allow you to deduplicate generated data when failures cause reprocessing of
        some input data. That would require you to specify the processing logic in the next
        way.

    #. An **object** with a ``process`` method and optional ``open`` and ``close`` methods.
        The object can have the following methods.

        * ``open(partition_id, epoch_id)``: *Optional* method that initializes the processing
            (for example, open a connection, start a transaction, etc). Additionally, you can
            use the `partition_id` and `epoch_id` to deduplicate regenerated data
            (discussed later).

        * ``process(row)``: *Non-optional* method that processes each :class:`Row`.

        * ``close(error)``: *Optional* method that finalizes and cleans up (for example,
            close connection, commit transaction, etc.) after all rows have been processed.

        The object will be used by Spark in the following way.

        * A single copy of this object is responsible for all the data generated by
            a single task in a query. In other words, one instance is responsible for
            processing one partition of the data generated in a distributed manner.

        * This object must be serializable because each task will get a fresh
            serialized-deserialized copy of the provided object. Hence, it is strongly
            recommended that any initialization for writing data (e.g. opening a
            connection or starting a transaction) is done after the `open(...)`
            method has been called, which signifies that the task is ready to generate data.

        * The lifecycle of the methods is as follows.

            For each partition with ``partition_id``:

            ... For each batch/epoch of streaming data with ``epoch_id``:

            ....... Method ``open(partitionId, epochId)`` is called.

            ....... If ``open(...)`` returns true, for each row in the partition and
                    batch/epoch, method ``process(row)`` is called.

            ....... Method ``close(errorOrNull)`` is called with error (if any) seen while
                    processing rows.

    Important points to note:

    * The `partitionId` and `epochId` can be used to deduplicate generated data when failures
        cause reprocessing of some input data. This depends on the execution mode of the query.
        If the streaming query is being executed in the micro-batch mode, then every partition
        represented by a unique tuple (partition_id, epoch_id) is guaranteed to have the same
        data. Hence, (partition_id, epoch_id) can be used to deduplicate and/or transactionally
        commit data and achieve exactly-once guarantees. However, if the streaming query is
        being executed in the continuous mode, then this guarantee does not hold and therefore
        should not be used for deduplication.

    * The ``close()`` method (if it exists) will be called if the ``open()`` method exists and
        returns successfully (irrespective of the return value), except if the Python process
        crashes in the middle.

    .. note:: Evolving.

    >>> # Print every row using a function
    >>> def print_row(row):
    ...     print(row)
    ...
    >>> writer = sdf.writeStream.foreach(print_row)
    >>> # Print every row using an object with a process() method
    >>> class RowPrinter:
    ...     def open(self, partition_id, epoch_id):
    ...         print("Opened %d, %d" % (partition_id, epoch_id))
    ...         return True
    ...     def process(self, row):
    ...         print(row)
    ...     def close(self, error):
    ...         print("Closed with error: %s" % str(error))
    ...
    >>> writer = sdf.writeStream.foreach(RowPrinter())
    """
from pyspark.rdd import _wrap_function from pyspark.serializers import PickleSerializer, AutoBatchedSerializer from pyspark.taskcontext import TaskContext if callable(f): # The provided object is a callable function that is supposed to be called on each row. # Construct a function that takes an iterator and calls the provided function on each # row. def func_without_process(_, iterator): for x in iterator: f(x) return iter([]) func = func_without_process else: # The provided object is not a callable function. Then it is expected to have a # 'process(row)' method, and optional 'open(partition_id, epoch_id)' and # 'close(error)' methods. if not hasattr(f, 'process'): raise Exception("Provided object does not have a 'process' method") if not callable(getattr(f, 'process')): raise Exception("Attribute 'process' in provided object is not callable") def doesMethodExist(method_name): exists = hasattr(f, method_name) if exists and not callable(getattr(f, method_name)): raise Exception( "Attribute '%s' in provided object is not callable" % method_name) return exists open_exists = doesMethodExist('open') close_exists = doesMethodExist('close') def func_with_open_process_close(partition_id, iterator): epoch_id = TaskContext.get().getLocalProperty('streaming.sql.batchId') if epoch_id: epoch_id = int(epoch_id) else: raise Exception("Could not get batch id from TaskContext") # Check if the data should be processed should_process = True if open_exists: should_process = f.open(partition_id, epoch_id) error = None try: if should_process: for x in iterator: f.process(x) except Exception as ex: error = ex finally: if close_exists: f.close(error) if error: raise error return iter([]) func = func_with_open_process_close serializer = AutoBatchedSerializer(PickleSerializer()) wrapped_func = _wrap_function(self._spark._sc, func, serializer, serializer) jForeachWriter = \ self._spark._sc._jvm.org.apache.spark.sql.execution.python.PythonForeachWriter( wrapped_func, self._df._jdf.schema()) self._jwrite.foreach(jForeachWriter) return self
<SYSTEM_TASK:> Serialize obj as a string of bytes allocated in memory <END_TASK> <USER_TASK:> Description: def dumps(obj, protocol=None): """Serialize obj as a string of bytes allocated in memory protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to pickle.HIGHEST_PROTOCOL. This setting favors maximum communication speed between processes running the same Python version. Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure compatibility with older versions of Python. """
file = StringIO() try: cp = CloudPickler(file, protocol=protocol) cp.dump(obj) return file.getvalue() finally: file.close()
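A minimal round-trip sketch: dumps produces ordinary pickle bytes, so the standard-library unpickler can load them, while unlike the stdlib pickler it can handle a lambda (assumes the cloudpickle copy vendored with pyspark is importable):

# Round-trip a lambda through cloudpickle; plain pickle.dumps(square) would raise PicklingError.
import pickle
from pyspark import cloudpickle   # the vendored copy documented above

square = lambda x: x * x
payload = cloudpickle.dumps(square)
restored = pickle.loads(payload)  # the stdlib unpickler is enough on the receiving side
assert restored(4) == 16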
<SYSTEM_TASK:> Fills in the rest of function data into the skeleton function object <END_TASK> <USER_TASK:> Description: def _fill_function(*args): """Fills in the rest of function data into the skeleton function object The skeleton itself is create by _make_skel_func(). """
if len(args) == 2: func = args[0] state = args[1] elif len(args) == 5: # Backwards compat for cloudpickle v0.4.0, after which the `module` # argument was introduced func = args[0] keys = ['globals', 'defaults', 'dict', 'closure_values'] state = dict(zip(keys, args[1:])) elif len(args) == 6: # Backwards compat for cloudpickle v0.4.1, after which the function # state was passed as a dict to the _fill_function it-self. func = args[0] keys = ['globals', 'defaults', 'dict', 'module', 'closure_values'] state = dict(zip(keys, args[1:])) else: raise ValueError('Unexpected _fill_value arguments: %r' % (args,)) # - At pickling time, any dynamic global variable used by func is # serialized by value (in state['globals']). # - At unpickling time, func's __globals__ attribute is initialized by # first retrieving an empty isolated namespace that will be shared # with other functions pickled from the same original module # by the same CloudPickler instance and then updated with the # content of state['globals'] to populate the shared isolated # namespace with all the global variables that are specifically # referenced for this function. func.__globals__.update(state['globals']) func.__defaults__ = state['defaults'] func.__dict__ = state['dict'] if 'annotations' in state: func.__annotations__ = state['annotations'] if 'doc' in state: func.__doc__ = state['doc'] if 'name' in state: func.__name__ = state['name'] if 'module' in state: func.__module__ = state['module'] if 'qualname' in state: func.__qualname__ = state['qualname'] cells = func.__closure__ if cells is not None: for cell, value in zip(cells, state['closure_values']): if value is not _empty_cell_value: cell_set(cell, value) return func
<SYSTEM_TASK:> Return True if the module is a special module that cannot be imported by its <END_TASK> <USER_TASK:> Description:
def _is_dynamic(module):
    """
    Return True if the module is a special module that cannot be imported by its
    name.
    """
# Quick check: module that have __file__ attribute are not dynamic modules. if hasattr(module, '__file__'): return False if hasattr(module, '__spec__'): return module.__spec__ is None else: # Backward compat for Python 2 import imp try: path = None for part in module.__name__.split('.'): if path is not None: path = [path] f, path, description = imp.find_module(part, path) if f is not None: f.close() except ImportError: return True return False
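For intuition, a "dynamic" module in this sense is one built at runtime rather than loaded from a file. The tiny sketch below (standard library only) shows the attributes the checks above key on:

# A module created on the fly has no __file__ and a None __spec__, so the checks in
# _is_dynamic classify it as dynamic (i.e. it must be pickled by value, not by name).
import types

dyn = types.ModuleType("made_up_module")
print(hasattr(dyn, "__file__"))           # False
print(getattr(dyn, "__spec__", None))     # None

import os                                  # a regular, file-backed module for contrast
print(hasattr(os, "__file__"))            # True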
<SYSTEM_TASK:> Registered with the dispatch to handle all function types. <END_TASK> <USER_TASK:> Description: def save_function(self, obj, name=None): """ Registered with the dispatch to handle all function types. Determines what kind of function obj is (e.g. lambda, defined at interactive prompt, etc) and handles the pickling appropriately. """
try: should_special_case = obj in _BUILTIN_TYPE_CONSTRUCTORS except TypeError: # Methods of builtin types aren't hashable in python 2. should_special_case = False if should_special_case: # We keep a special-cased cache of built-in type constructors at # global scope, because these functions are structured very # differently in different python versions and implementations (for # example, they're instances of types.BuiltinFunctionType in # CPython, but they're ordinary types.FunctionType instances in # PyPy). # # If the function we've received is in that cache, we just # serialize it as a lookup into the cache. return self.save_reduce(_BUILTIN_TYPE_CONSTRUCTORS[obj], (), obj=obj) write = self.write if name is None: name = obj.__name__ try: # whichmodule() could fail, see # https://bitbucket.org/gutworth/six/issues/63/importing-six-breaks-pickling modname = pickle.whichmodule(obj, name) except Exception: modname = None # print('which gives %s %s %s' % (modname, obj, name)) try: themodule = sys.modules[modname] except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__ modname = '__main__' if modname == '__main__': themodule = None try: lookedup_by_name = getattr(themodule, name, None) except Exception: lookedup_by_name = None if themodule: if lookedup_by_name is obj: return self.save_global(obj, name) # a builtin_function_or_method which comes in as an attribute of some # object (e.g., itertools.chain.from_iterable) will end # up with modname "__main__" and so end up here. But these functions # have no __code__ attribute in CPython, so the handling for # user-defined functions below will fail. # So we pickle them here using save_reduce; have to do it differently # for different python versions. if not hasattr(obj, '__code__'): if PY3: # pragma: no branch rv = obj.__reduce_ex__(self.proto) else: if hasattr(obj, '__self__'): rv = (getattr, (obj.__self__, name)) else: raise pickle.PicklingError("Can't pickle %r" % obj) return self.save_reduce(obj=obj, *rv) # if func is lambda, def'ed at prompt, is in main, or is nested, then # we'll pickle the actual function object rather than simply saving a # reference (as is done in default pickler), via save_function_tuple. if (islambda(obj) or getattr(obj.__code__, 'co_filename', None) == '<stdin>' or themodule is None): self.save_function_tuple(obj) return else: # func is nested if lookedup_by_name is None or lookedup_by_name is not obj: self.save_function_tuple(obj) return if obj.__dict__: # essentially save_reduce, but workaround needed to avoid recursion self.save(_restore_attr) write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj) self.save(obj.__dict__) write(pickle.TUPLE + pickle.REDUCE) else: write(pickle.GLOBAL + modname + '\n' + name + '\n') self.memoize(obj)
<SYSTEM_TASK:> Inner logic to save instance. Based off pickle.save_inst <END_TASK> <USER_TASK:> Description: def save_inst(self, obj): """Inner logic to save instance. Based off pickle.save_inst"""
cls = obj.__class__ # Try the dispatch table (pickle module doesn't do it) f = self.dispatch.get(cls) if f: f(self, obj) # Call unbound method with explicit self return memo = self.memo write = self.write save = self.save if hasattr(obj, '__getinitargs__'): args = obj.__getinitargs__() len(args) # XXX Assert it's a sequence pickle._keep_alive(args, memo) else: args = () write(pickle.MARK) if self.bin: save(cls) for arg in args: save(arg) write(pickle.OBJ) else: for arg in args: save(arg) write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n') self.memoize(obj) try: getstate = obj.__getstate__ except AttributeError: stuff = obj.__dict__ else: stuff = getstate() pickle._keep_alive(stuff, memo) save(stuff) write(pickle.BUILD)
<SYSTEM_TASK:> Copy the current param to a new parent; the current param must be a dummy param. <END_TASK> <USER_TASK:> Description:
def _copy_new_parent(self, parent):
    """Copy the current param to a new parent; the current param must be a dummy param
    (i.e. its parent is still "undefined")."""
if self.parent == "undefined": param = copy.copy(self) param.parent = parent.uid return param else: raise ValueError("Cannot copy from non-dummy parent %s." % parent)
<SYSTEM_TASK:> Convert a value to a list, if possible. <END_TASK> <USER_TASK:> Description: def toList(value): """ Convert a value to a list, if possible. """
if type(value) == list: return value elif type(value) in [np.ndarray, tuple, xrange, array.array]: return list(value) elif isinstance(value, Vector): return list(value.toArray()) else: raise TypeError("Could not convert %s to list" % value)
<SYSTEM_TASK:> Convert a value to list of floats, if possible. <END_TASK> <USER_TASK:> Description: def toListFloat(value): """ Convert a value to list of floats, if possible. """
if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return [float(v) for v in value] raise TypeError("Could not convert %s to list of floats" % value)
<SYSTEM_TASK:> Convert a value to list of ints, if possible. <END_TASK> <USER_TASK:> Description: def toListInt(value): """ Convert a value to list of ints, if possible. """
if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_integer(v), value)): return [int(v) for v in value] raise TypeError("Could not convert %s to list of ints" % value)
<SYSTEM_TASK:> Convert a value to list of strings, if possible. <END_TASK> <USER_TASK:> Description: def toListString(value): """ Convert a value to list of strings, if possible. """
if TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)): return [TypeConverters.toString(v) for v in value] raise TypeError("Could not convert %s to list of strings" % value)
<SYSTEM_TASK:> Convert a value to a MLlib Vector, if possible. <END_TASK> <USER_TASK:> Description: def toVector(value): """ Convert a value to a MLlib Vector, if possible. """
if isinstance(value, Vector): return value elif TypeConverters._can_convert_to_list(value): value = TypeConverters.toList(value) if all(map(lambda v: TypeConverters._is_numeric(v), value)): return DenseVector(value) raise TypeError("Could not convert %s to vector" % value)
<SYSTEM_TASK:> Convert a value to a string, if possible. <END_TASK> <USER_TASK:> Description: def toString(value): """ Convert a value to a string, if possible. """
if isinstance(value, basestring): return value elif type(value) in [np.string_, np.str_]: return str(value) elif type(value) == np.unicode_: return unicode(value) else: raise TypeError("Could not convert %s to string type" % type(value))
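A small usage sketch for the converters above; importing pyspark.ml.param needs numpy but should not require a running SparkContext (an assumption worth checking against your pyspark version):

# Exercise a few of the TypeConverters shown above.
import numpy as np
from pyspark.ml.param import TypeConverters

print(TypeConverters.toList(np.array([1, 2, 3])))    # [1, 2, 3]
print(TypeConverters.toListFloat((1, 2, 3)))         # [1.0, 2.0, 3.0]
print(TypeConverters.toVector([0.0, 1.5]))           # DenseVector([0.0, 1.5])
print(TypeConverters.toString(np.str_("abc")))       # abc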
<SYSTEM_TASK:> Copy all params defined on the class to current object. <END_TASK> <USER_TASK:> Description: def _copy_params(self): """ Copy all params defined on the class to current object. """
cls = type(self) src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)] src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs)) for name, param in src_params: setattr(self, name, param._copy_new_parent(self))
<SYSTEM_TASK:> Explains a single param and returns its name, doc, and optional <END_TASK> <USER_TASK:> Description: def explainParam(self, param): """ Explains a single param and returns its name, doc, and optional default value and user-supplied value in a string. """
param = self._resolveParam(param) values = [] if self.isDefined(param): if param in self._defaultParamMap: values.append("default: %s" % self._defaultParamMap[param]) if param in self._paramMap: values.append("current: %s" % self._paramMap[param]) else: values.append("undefined") valueStr = "(" + ", ".join(values) + ")" return "%s: %s %s" % (param.name, param.doc, valueStr)
<SYSTEM_TASK:> Gets a param by its name. <END_TASK> <USER_TASK:> Description: def getParam(self, paramName): """ Gets a param by its name. """
param = getattr(self, paramName) if isinstance(param, Param): return param else: raise ValueError("Cannot find param with name %s." % paramName)
<SYSTEM_TASK:> Checks whether a param is explicitly set by user. <END_TASK> <USER_TASK:> Description: def isSet(self, param): """ Checks whether a param is explicitly set by user. """
param = self._resolveParam(param) return param in self._paramMap
<SYSTEM_TASK:> Checks whether a param has a default value. <END_TASK> <USER_TASK:> Description: def hasDefault(self, param): """ Checks whether a param has a default value. """
param = self._resolveParam(param) return param in self._defaultParamMap
<SYSTEM_TASK:> Gets the value of a param in the user-supplied param map or its <END_TASK> <USER_TASK:> Description: def getOrDefault(self, param): """ Gets the value of a param in the user-supplied param map or its default value. Raises an error if neither is set. """
param = self._resolveParam(param) if param in self._paramMap: return self._paramMap[param] else: return self._defaultParamMap[param]
<SYSTEM_TASK:> Sets a parameter in the embedded param map. <END_TASK> <USER_TASK:> Description: def set(self, param, value): """ Sets a parameter in the embedded param map. """
self._shouldOwn(param) try: value = param.typeConverter(value) except ValueError as e: raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e)) self._paramMap[param] = value
<SYSTEM_TASK:> Validates that the input param belongs to this Params instance. <END_TASK> <USER_TASK:> Description: def _shouldOwn(self, param): """ Validates that the input param belongs to this Params instance. """
if not (self.uid == param.parent and self.hasParam(param.name)): raise ValueError("Param %r does not belong to %r." % (param, self))
<SYSTEM_TASK:> Resolves a param and validates the ownership. <END_TASK> <USER_TASK:> Description: def _resolveParam(self, param): """ Resolves a param and validates the ownership. :param param: param name or the param instance, which must belong to this Params instance :return: resolved param instance """
if isinstance(param, Param): self._shouldOwn(param) return param elif isinstance(param, basestring): return self.getParam(param) else: raise ValueError("Cannot resolve %r as a param." % param)
<SYSTEM_TASK:> Copies param values from this instance to another instance for <END_TASK> <USER_TASK:> Description: def _copyValues(self, to, extra=None): """ Copies param values from this instance to another instance for params shared by them. :param to: the target instance :param extra: extra params to be copied :return: the target instance with param values copied """
paramMap = self._paramMap.copy() if extra is not None: paramMap.update(extra) for param in self.params: # copy default params if param in self._defaultParamMap and to.hasParam(param.name): to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param] # copy explicitly set params if param in paramMap and to.hasParam(param.name): to._set(**{param.name: paramMap[param]}) return to
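To see the param bookkeeping above end to end, a hedged sketch using the shared HasMaxIter mixin as a concrete Params subclass (this stays in pure Python, so no JVM should be needed; the mixin's exact location and doc text may vary between Spark versions):

# Exercise set / isSet / hasDefault / getOrDefault / explainParam / copy from above.
from pyspark.ml.param.shared import HasMaxIter   # a Params mixin that declares `maxIter`

class MyParams(HasMaxIter):
    pass

m = MyParams()
print(m.isSet(m.maxIter))          # False: nothing set explicitly yet
print(m.hasDefault(m.maxIter))     # False: the bare mixin registers no default
m.set(m.maxIter, 10)               # goes through the toInt type converter
print(m.getOrDefault(m.maxIter))   # 10, now found in the user-supplied param map
print(m.explainParam("maxIter"))   # e.g. "maxIter: max number of iterations (>= 0). (current: 10)"

m2 = m.copy()                      # copy() relies on _copyValues shown above
print(m2.getOrDefault(m2.maxIter)) # 10, carried over to the new instance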