text_prompt | code_prompt
---|---|
<SYSTEM_TASK:>
Initialize the rabit module; call this once before using anything.
<END_TASK>
<USER_TASK:>
Description:
def init(args=None, lib='standard'):
"""Intialize the rabit module, call this once before using anything.
Parameters
----------
args: list of str, optional
The list of arguments used to initialize rabit;
usually you need to pass in sys.argv.
Defaults to sys.argv when it is None.
lib: {'standard', 'mock', 'mpi'}
Type of library we want to load
""" |
if args is None:
args = sys.argv
_loadlib(lib)
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(args), arr) |
<SYSTEM_TASK:>
Perform allreduce, return the result.
<END_TASK>
<USER_TASK:>
Description:
def allreduce(data, op, prepare_fun=None):
"""Perform allreduce, return the result.
Parameters
----------
data: numpy array
Input data.
op: int
Reduction operator; can be MIN, MAX, SUM, or BITOR.
prepare_fun: function
Lazy preprocessing function; if it is not None, prepare_fun(data)
will be called before performing allreduce, to initialize the data.
If the result of allreduce can be recovered directly,
then prepare_fun will NOT be called.
Returns
-------
result : array_like
The result of allreduce, with the same shape as data.
Notes
-----
This function is not thread-safe.
""" |
if not isinstance(data, np.ndarray):
raise Exception('allreduce only takes in numpy.ndarray')
buf = data.ravel()
if buf.base is data.base:
buf = buf.copy()
if buf.dtype not in DTYPE_ENUM__:
raise Exception('data type %s not supported' % str(buf.dtype))
if prepare_fun is None:
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, None, None)
else:
func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def pfunc(args):
"""prepare function."""
prepare_fun(data)
_LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
buf.size, DTYPE_ENUM__[buf.dtype],
op, func_ptr(pfunc), None)
return buf |
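For context, a minimal usage sketch of the two calls above, assuming the `rabit` Python bindings are importable as `rabit` and the workers were launched by a rabit tracker; `get_rank`, `finalize`, and the `SUM` constant are assumptions based on the usual rabit API rather than something shown in this file:

```python
import numpy as np
import rabit  # assumed module name for the bindings documented above

rabit.init()                           # args defaults to sys.argv
x = np.ones(3, dtype=np.float64)       # each worker contributes this array
total = rabit.allreduce(x, rabit.SUM)  # every worker receives the elementwise sum
print(rabit.get_rank(), total)         # get_rank/finalize assumed from the same module
rabit.finalize()
```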
<SYSTEM_TASK:>
Load the latest checkpoint.
<END_TASK>
<USER_TASK:>
Description:
def load_checkpoint(with_local=False):
"""Load latest check point.
Parameters
----------
with_local: bool, optional
whether the checkpoint contains local model
Returns
-------
tuple : tuple
if with_local: return (version, global_model, local_model)
else return (version, global_model)
if the returned version == 0, no model has been checkpointed
and the returned global_model and local_model will be None
""" |
gptr = ctypes.POINTER(ctypes.c_char)()
global_len = ctypes.c_ulong()
if with_local:
lptr = ctypes.POINTER(ctypes.c_char)()
local_len = ctypes.c_ulong()
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
ctypes.byref(lptr),
ctypes.byref(local_len))
if version == 0:
return (version, None, None)
return (version,
_load_model(gptr, global_len.value),
_load_model(lptr, local_len.value))
else:
version = _LIB.RabitLoadCheckPoint(
ctypes.byref(gptr),
ctypes.byref(global_len),
None, None)
if version == 0:
return (version, None)
return (version,
_load_model(gptr, global_len.value)) |
<SYSTEM_TASK:>
Checkpoint the model.
<END_TASK>
<USER_TASK:>
Description:
def checkpoint(global_model, local_model=None):
"""Checkpoint the model.
This means we finished a stage of execution.
Every time we call checkpoint, the version number increases by one.
Parameters
----------
global_model: any type that can be pickled
globally shared model/state when calling this function;
the caller must guarantee that global_model is the same on all nodes
local_model: any type that can be pickled
Local model, that is specific to current node/rank.
This can be None when no local state is needed.
Notes
-----
local_model requires explicit replication of the model for fault tolerance,
which adds replication cost to the checkpoint call,
while global_model does not need explicit replication.
It is recommended to use global_model if possible.
""" |
sglobal = pickle.dumps(global_model)
if local_model is None:
_LIB.RabitCheckPoint(sglobal, len(sglobal), None, 0)
del sglobal
else:
slocal = pickle.dumps(local_model)
_LIB.RabitCheckPoint(sglobal, len(sglobal), slocal, len(slocal))
del slocal
del sglobal |
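A sketch of the recovery loop that `load_checkpoint` and `checkpoint` are designed for, with the same assumptions about the `rabit` module as above; the training step is a placeholder:

```python
import rabit  # assumed module name

rabit.init()
version, model = rabit.load_checkpoint()  # returns (0, None) on a fresh start
if version == 0:
    model = {"weights": [0.0] * 10}       # placeholder for fresh global state
for it in range(version, 100):
    # ... one round of allreduce-based training that updates `model` ...
    rabit.checkpoint(model)               # version number increases by one
rabit.finalize()
```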
<SYSTEM_TASK:>
Create a RankingFactorizationRecommender that learns latent factors for each
<END_TASK>
<USER_TASK:>
Description:
def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
num_factors=32,
regularization=1e-9,
linear_regularization=1e-9,
side_data_factorization=True,
ranking_regularization=0.25,
unobserved_rating_value=None,
num_sampled_negative_examples=4,
max_iterations=25,
sgd_step_size=0,
random_seed=0,
binary_target = False,
solver = 'auto',
verbose=True,
**kwargs):
"""Create a RankingFactorizationRecommender that learns latent factors for each
user and item and uses them to make rating predictions.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified as `target`.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
num_factors : int, optional
Number of latent factors.
regularization : float, optional
L2 regularization for interaction terms. Default: 1e-9; a typical range
for this parameter is between 1e-12 and 1. Setting this to 0 may cause
numerical issues.
linear_regularization : float, optional
L2 regularization for the linear term. Default: 1e-9; a typical range for this
parameter is between 1e-12 and 1. Setting this to 0 may cause numerical issues.
side_data_factorization : boolean, optional
Use factorization for modeling any additional features beyond the user
and item columns. If True, and side features or any additional columns are
present, then a Factorization Machine model is trained. Otherwise, only
the linear terms are fit to these features. See
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
for more information. Default: True.
ranking_regularization : float, optional
Penalize the predicted value of user-item pairs not in the
training set. Larger values increase this penalization.
Suggested values: 0, 0.1, 0.5, 1. NOTE: if no target column
is present, this parameter is ignored.
unobserved_rating_value : float, optional
Penalize unobserved items with a larger predicted score than this value.
By default, the estimated 5% quantile is used (mean - 1.96*std_dev).
num_sampled_negative_examples : integer, optional
For each (user, item) pair in the data, the ranking sgd solver evaluates
this many randomly chosen unseen items for the negative example step.
Increasing this can give better performance at the expense of speed,
particularly when the number of items is large. Default is 4.
binary_target : boolean, optional
Assume the target column is composed of 0's and 1's. If True, use
logistic loss to fit the model.
max_iterations : int, optional
The training algorithm will make at most this many iterations through
the observed data. Default: 25.
sgd_step_size : float, optional
Step size for stochastic gradient descent. Smaller values generally
lead to more accurate models that take more time to train. The
default setting of 0 means that the step size is chosen by trying
several options on a small subset of the data.
random_seed : int, optional
The random seed used to choose the initial starting point for
model training. Note that some randomness in the training is
unavoidable, so models trained with the same random seed may still
differ. Default: 0.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. The available solvers for
this model are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *ials*: Implicit Alternating Least Squares [1].
- *adagrad*: Adaptive Gradient Stochastic Gradient Descent.
- *sgd*: Stochastic Gradient Descent
verbose : bool, optional
Enables verbose output.
kwargs : optional
Optional advanced keyword arguments passed in to the model
optimization procedure. These parameters do not typically
need to be changed.
Examples
--------
**Basic usage**
When given just user and item pairs, one can create a RankingFactorizationRecommender
as follows.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> from turicreate.recommender import ranking_factorization_recommender
>>> m1 = ranking_factorization_recommender.create(sf)
When a target column is present, one can include this to try and recommend
items that are rated highly.
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m1 = ranking_factorization_recommender.create(sf, target='rating')
**Including side features**
>>> user_info = turicreate.SFrame({'user_id': ["0", "1", "2"],
... 'name': ["Alice", "Bob", "Charlie"],
... 'numeric_feature': [0.1, 12, 22]})
>>> item_info = turicreate.SFrame({'item_id': ["a", "b", "c", "d"],
... 'name': ["item1", "item2", "item3", "item4"],
... 'dict_feature': [{'a' : 23}, {'a' : 13},
... {'b' : 1},
... {'a' : 23, 'b' : 32}]})
>>> m2 = ranking_factorization_recommender.create(sf, target='rating',
... user_data=user_info,
... item_data=item_info)
**Customizing ranking regularization**
Create a model that pushes predicted ratings of unobserved user-item
pairs toward 1 or below.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
... ranking_regularization = 0.1,
... unobserved_rating_value = 1)
**Using the implicit alternating least squares model**
Ranking factorization also implements implicit alternating least squares [1] as
an alternative solver. This is enabled using ``solver = 'ials'``.
>>> m3 = ranking_factorization_recommender.create(sf, target='rating',
solver = 'ials')
See Also
--------
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`,
:class:`turicreate.recommender.ranking_factorization_recommender.RankingFactorizationRecommender`
References
-----------
[1] Collaborative Filtering for Implicit Feedback Datasets Hu, Y.; Koren,
Y.; Volinsky, C. IEEE International Conference on Data Mining
(ICDM 2008), IEEE (2008).
""" |
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.ranking_factorization_recommender()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
if target is None:
binary_target = True
opts = {'user_id' : user_id,
'item_id' : item_id,
'target' : target,
'random_seed' : random_seed,
'num_factors' : num_factors,
'regularization' : regularization,
'linear_regularization' : linear_regularization,
'ranking_regularization' : ranking_regularization,
'binary_target' : binary_target,
'max_iterations' : max_iterations,
'side_data_factorization' : side_data_factorization,
'num_sampled_negative_examples' : num_sampled_negative_examples,
'solver' : solver,
# Has no effect here.
'sgd_step_size' : sgd_step_size}
if unobserved_rating_value is not None:
opts["unobserved_rating_value"] = unobserved_rating_value
if kwargs:
try:
possible_args = set(_get_default_options()["name"])
except (RuntimeError, KeyError):
possible_args = set()
bad_arguments = set(kwargs.keys()).difference(possible_args)
if bad_arguments:
raise TypeError("Bad Keyword Arguments: " + ', '.join(bad_arguments))
opts.update(kwargs)
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return RankingFactorizationRecommender(model_proxy) |
<SYSTEM_TASK:>
Returns the module holding the conversion functions for a
<END_TASK>
<USER_TASK:>
Description:
def _get_converter_module(sk_obj):
"""
Returns the module holding the conversion functions for a
particular model.
""" |
try:
cv_idx = _converter_lookup[sk_obj.__class__]
except KeyError:
raise ValueError(
"Transformer '%s' not supported; supported transformers are %s."
% (repr(sk_obj),
",".join(k.__name__ for k in _converter_module_list)))
return _converter_module_list[cv_idx] |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def set_post_evaluation_transform(self, value):
r"""
Set the post processing transform applied after the prediction value
from the tree ensemble.
Parameters
----------
value: str
A value denoting the transform applied. Possible values are:
- "NoTransform" (default). Do not apply a transform.
- "Classification_SoftMax".
Apply a softmax function to the outcome to produce normalized,
non-negative scores that sum to 1. The transformation applied to
dimension `i` is equivalent to:
.. math::
\frac{e^{x_i}}{\sum_j e^{x_j}}
Note: This is the output transformation applied by the XGBoost package
with multiclass classification.
- "Regression_Logistic".
Applies a logistic transform to the predicted value, specifically:
.. math::
(1 + e^{-v})^{-1}
This is the transformation used in binary classification.
""" |
self.tree_spec.postEvaluationTransform = \
_TreeEnsemble_pb2.TreeEnsemblePostEvaluationTransform.Value(value) |
<SYSTEM_TASK:>
Add a branch node to the tree ensemble.
<END_TASK>
<USER_TASK:>
Description:
def add_branch_node(self, tree_id, node_id, feature_index, feature_value,
branch_mode, true_child_id, false_child_id, relative_hit_rate = None,
missing_value_tracks_true_child = False):
"""
Add a branch node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
feature_index: int
Index of the feature in the input being split on.
feature_value: double or int
The value used in the feature comparison determining the traversal
direction from this node.
branch_mode: str
Branch mode of the node, specifying the condition under which the node
referenced by `true_child_id` is called next.
Must be one of the following:
- `"BranchOnValueLessThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] <= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueLessThan"`. Traverse to node `true_child_id`
if `input[feature_index] < feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThanEqual"`. Traverse to node `true_child_id`
if `input[feature_index] >= feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueGreaterThan"`. Traverse to node `true_child_id`
if `input[feature_index] > feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueEqual"`. Traverse to node `true_child_id`
if `input[feature_index] == feature_value`, and `false_child_id`
otherwise.
- `"BranchOnValueNotEqual"`. Traverse to node `true_child_id`
if `input[feature_index] != feature_value`, and `false_child_id`
otherwise.
true_child_id: int
ID of the child under the true condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
false_child_id: int
ID of the child under the false condition of the split. An error will
be raised at model validation if this does not match the `node_id`
of a node instantiated by `add_branch_node` or `add_leaf_node` within
this `tree_id`.
relative_hit_rate: float [optional]
When the model is compiled, this gives hints to
Core ML about which node is more likely to be hit on evaluation,
allowing for additional optimizations. The values can be on any scale,
with the values between child nodes being compared relative to each
other.
missing_value_tracks_true_child: bool [optional]
If the training data contains NaN values or missing values, then this
flag determines which direction a NaN value traverses.
""" |
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.branchFeatureIndex = feature_index
spec_node.branchFeatureValue = feature_value
spec_node.trueChildNodeId = true_child_id
spec_node.falseChildNodeId = false_child_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value(branch_mode)
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
spec_node.missingValueTracksTrueChild = missing_value_tracks_true_child |
<SYSTEM_TASK:>
Add a leaf node to the tree ensemble.
<END_TASK>
<USER_TASK:>
Description:
def add_leaf_node(self, tree_id, node_id, values, relative_hit_rate = None):
"""
Add a leaf node to the tree ensemble.
Parameters
----------
tree_id: int
ID of the tree to add the node to.
node_id: int
ID of the node within the tree.
values: [float | int | list | dict]
Value(s) at the leaf node to add to the prediction when this node is
activated. If the prediction dimension of the tree is 1, then the
value is specified as a float or integer value.
For multidimensional predictions, the values can be a list of numbers
with length matching the dimension of the predictions or a dictionary
mapping index to value added to that dimension.
Note that the dimension of any tree must match the dimension given
when :py:meth:`set_default_prediction_value` is called.
""" |
spec_node = self.tree_parameters.nodes.add()
spec_node.treeId = tree_id
spec_node.nodeId = node_id
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode')
if not isinstance(values, _collections.Iterable):
values = [values]
if relative_hit_rate is not None:
spec_node.relativeHitRate = relative_hit_rate
if type(values) == dict:
iter = values.items()
else:
iter = enumerate(values)
for index, value in iter:
ev_info = spec_node.evaluationInfo.add()
ev_info.evaluationIndex = index
ev_info.evaluationValue = float(value)
spec_node.nodeBehavior = \
_TreeEnsemble_pb2.TreeEnsembleParameters.TreeNode.TreeNodeBehavior.Value('LeafNode') |
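To show how the two node methods compose, here is a sketch of a single-split tree. Only the `add_branch_node`, `add_leaf_node`, and `set_default_prediction_value` calls mirror the documented API; the `TreeEnsembleRegressor` constructor and its arguments are assumptions:

```python
from coremltools.models.tree_ensemble import TreeEnsembleRegressor  # assumed class

builder = TreeEnsembleRegressor(features=["x"], target="y")  # assumed signature
builder.set_default_prediction_value(0.0)

# Tree 0, root node 0: traverse to node 1 if x <= 0.5, otherwise to node 2.
builder.add_branch_node(tree_id=0, node_id=0, feature_index=0, feature_value=0.5,
                        branch_mode="BranchOnValueLessThanEqual",
                        true_child_id=1, false_child_id=2)
builder.add_leaf_node(tree_id=0, node_id=1, values=1.0)   # prediction when x <= 0.5
builder.add_leaf_node(tree_id=0, node_id=2, values=-1.0)  # prediction when x > 0.5
```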
<SYSTEM_TASK:>
Creates a new 'PropertySet' instance for the given raw properties,
<END_TASK>
<USER_TASK:>
Description:
def create (raw_properties = []):
""" Creates a new 'PropertySet' instance for the given raw properties,
or returns an already existing one.
""" |
assert (is_iterable_typed(raw_properties, property.Property)
or is_iterable_typed(raw_properties, basestring))
# FIXME: propagate to callers.
if len(raw_properties) > 0 and isinstance(raw_properties[0], property.Property):
x = raw_properties
else:
x = [property.create_from_string(ps) for ps in raw_properties]
# These two lines of code are optimized to the current state
# of the Property class. Since this function acts as the caching
# frontend to the PropertySet class modifying these two lines
# could have a severe performance penalty. Be careful.
# It would be faster to sort by p.id, but some projects may rely
# on the fact that the properties are ordered alphabetically. So,
# we maintain alphabetical sorting so as to maintain backward compatibility.
x = sorted(set(x), key=lambda p: (p.feature.name, p.value, p.condition))
key = tuple(p.id for p in x)
if key not in __cache:
__cache [key] = PropertySet(x)
return __cache [key] |
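The caching frontend above follows a common pattern: canonicalize the input, derive a hashable key, and memoize the constructed object. A standalone sketch of the same idea, not the Boost.Build code itself:

```python
_cache = {}

def create_cached(raw_properties):
    # A canonical order makes logically equal inputs map to the same key.
    key = tuple(sorted(set(raw_properties)))
    if key not in _cache:
        _cache[key] = frozenset(key)  # stand-in for PropertySet(x)
    return _cache[key]

a = create_cached(['<variant>release', '<toolset>gcc'])
b = create_cached(['<toolset>gcc', '<variant>release'])
assert a is b  # same cached instance regardless of input order
```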
<SYSTEM_TASK:>
Creates new 'PropertySet' instances after checking
<END_TASK>
<USER_TASK:>
Description:
def create_with_validation (raw_properties):
""" Creates new 'PropertySet' instances after checking
that all properties are valid and converting implicit
properties into gristed form.
""" |
assert is_iterable_typed(raw_properties, basestring)
properties = [property.create_from_string(s) for s in raw_properties]
property.validate(properties)
return create(properties) |
<SYSTEM_TASK:>
Creates a property-set from the input given by the user, in the
<END_TASK>
<USER_TASK:>
Description:
def create_from_user_input(raw_properties, jamfile_module, location):
"""Creates a property-set from the input given by the user, in the
context of 'jamfile-module' at 'location'""" |
assert is_iterable_typed(raw_properties, basestring)
assert isinstance(jamfile_module, basestring)
assert isinstance(location, basestring)
properties = property.create_from_strings(raw_properties, True)
properties = property.translate_paths(properties, location)
properties = property.translate_indirect(properties, jamfile_module)
project_id = get_manager().projects().attributeDefault(jamfile_module, 'id', None)
if not project_id:
project_id = os.path.abspath(location)
properties = property.translate_dependencies(properties, project_id, location)
properties = property.expand_subfeatures_in_conditions(properties)
return create(properties) |
<SYSTEM_TASK:>
Returns properties that are neither incidental nor free.
<END_TASK>
<USER_TASK:>
Description:
def base (self):
""" Returns properties that are neither incidental nor free.
""" |
result = [p for p in self.lazy_properties
if not(p.feature.incidental or p.feature.free)]
result.extend(self.base_)
return result |
<SYSTEM_TASK:>
Returns free properties which are not dependency properties.
<END_TASK>
<USER_TASK:>
Description:
def free (self):
""" Returns free properties which are not dependency properties.
""" |
result = [p for p in self.lazy_properties
if not p.feature.incidental and p.feature.free]
result.extend(self.free_)
return result |
<SYSTEM_TASK:>
Returns dependency properties.
<END_TASK>
<USER_TASK:>
Description:
def dependency (self):
""" Returns dependency properties.
""" |
result = [p for p in self.lazy_properties if p.feature.dependency]
result.extend(self.dependency_)
return result
<SYSTEM_TASK:>
Returns properties that are not dependencies.
<END_TASK>
<USER_TASK:>
Description:
def non_dependency (self):
""" Returns properties that are not dependencies.
""" |
result = [p for p in self.lazy_properties if not p.feature.dependency]
result.extend(self.non_dependency_)
return result |
<SYSTEM_TASK:>
Refines this set's properties using the requirements passed as an argument.
<END_TASK>
<USER_TASK:>
Description:
def refine (self, requirements):
""" Refines this set's properties using the requirements passed as an argument.
""" |
assert isinstance(requirements, PropertySet)
if requirements not in self.refined_:
r = property.refine(self.all_, requirements.all_)
self.refined_[requirements] = create(r)
return self.refined_[requirements] |
<SYSTEM_TASK:>
Computes the target path that should be used for
<END_TASK>
<USER_TASK:>
Description:
def target_path (self):
""" Computes the target path that should be used for
target with these properties.
Returns a tuple of
- the computed path
- if the path is relative to build directory, a value of
'true'.
""" |
if not self.target_path_:
# The <location> feature can be used to explicitly
# change the location of generated targets
l = self.get ('<location>')
if l:
computed = l[0]
is_relative = False
else:
p = self.as_path()
if hash_maybe:
p = hash_maybe(p)
# Really, an ugly hack. Boost regression test system requires
# specific target paths, and it seems that changing it to handle
# other directory layout is really hard. For that reason,
# we teach V2 to do the things the regression system requires.
# The value of '<location-prefix>' is prepended to the path.
prefix = self.get ('<location-prefix>')
if prefix:
if len (prefix) > 1:
raise AlreadyDefined ("Two <location-prefix> properties specified: '%s'" % prefix)
computed = os.path.join(prefix[0], p)
else:
computed = p
if not computed:
computed = "."
is_relative = True
self.target_path_ = (computed, is_relative)
return self.target_path_ |
<SYSTEM_TASK:>
Creates a new property set containing the properties in this one,
<END_TASK>
<USER_TASK:>
Description:
def add (self, ps):
""" Creates a new property set containing the properties in this one,
plus the ones of the property set passed as argument.
""" |
assert isinstance(ps, PropertySet)
if ps not in self.added_:
self.added_[ps] = create(self.all_ + ps.all())
return self.added_[ps] |
<SYSTEM_TASK:>
Returns all contained properties associated with 'feature
<END_TASK>
<USER_TASK:>
Description:
def get_properties(self, feature):
"""Returns all contained properties associated with 'feature'""" |
if not isinstance(feature, b2.build.feature.Feature):
feature = b2.build.feature.get(feature)
assert isinstance(feature, b2.build.feature.Feature)
result = []
for p in self.all_:
if p.feature == feature:
result.append(p)
return result |
<SYSTEM_TASK:>
A unified interface for training recommender models. Based on simple
<END_TASK>
<USER_TASK:>
Description:
def _create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
ranking=True,
verbose=True):
"""
A unified interface for training recommender models. Based on simple
characteristics of the data, a type of model is selected and trained. The
trained model can be used to predict ratings and make recommendations.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
Name of the column in `observation_data` containing ratings given by
users to items, if applicable.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with the
same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with the
same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
ranking : bool, optional
Determine whether or not the goal is to rank items for each user.
verbose : bool, optional
Enables verbose output.
Returns
-------
out : A trained model.
- If a target column is given, then
:class:`turicreate.recommender.factorization_recommender.FactorizationRecommender`.
- If no target column is given, then
:class:`turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender`.
Examples
--------
**Basic usage**
Given basic user-item observation data, an
:class:`~turicreate.recommender.item_similarity_recommender.ItemSimilarityRecommender` is created:
>>> sf = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd']})
>>> m = turicreate.recommender.create(sf)
>>> recs = m.recommend()
**Creating a model for ratings data**
This trains a :class:`~turicreate.recommender.factorization_recommender.FactorizationRecommender` that
can predict target ratings:
>>> sf2 = turicreate.SFrame({'user_id': ['0', '0', '0', '1', '1', '2', '2', '2'],
... 'item_id': ['a', 'b', 'c', 'a', 'b', 'b', 'c', 'd'],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m2 = turicreate.recommender.create(sf2, target="rating", ranking = False)
**Creating specific models**
Specific models allow for a number of additional options during create.
The available recommenders are all in the turicreate.recommender namespace.
For the complete list of acceptable options, please refer to the documentation
for individual models. Such options can be passed to the underlying model
just like any other parameter. For example, the following code creates
an :class:`~turicreate.recommender.ItemSimilarityRecommender` with a space-saving
option called `only_top_k`. The returned model stores only the 2 most
similar items for each item:
>>> from turicreate.recommender import item_similarity_recommender
>>> item_similarity_recommender.create(sf, only_top_k=2)
""" |
if not (isinstance(observation_data, _SFrame)):
raise TypeError('observation_data input must be a SFrame')
side_data = (user_data is not None) or (item_data is not None)
if user_data is not None:
if not isinstance(user_data, _SFrame):
raise TypeError('Provided user_data must be an SFrame.')
if item_data is not None:
if not isinstance(item_data, _SFrame):
raise TypeError('Provided item_data must be an SFrame.')
if target is None:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'item_similarity'
else:
if ranking:
if side_data:
method = 'ranking_factorization_recommender'
else:
method = 'ranking_factorization_recommender'
else:
if side_data:
method = 'factorization_recommender'
else:
method = 'factorization_recommender'
opts = {'observation_data': observation_data,
'user_id': user_id,
'item_id': item_id,
'target': target,
'user_data': user_data,
'item_data': item_data}
if method == "item_similarity":
return _turicreate.recommender.item_similarity_recommender.create(**opts)
elif method == "factorization_recommender":
return _turicreate.recommender.factorization_recommender.create(**opts)
elif method == "ranking_factorization_recommender":
return _turicreate.recommender.ranking_factorization_recommender.create(**opts)
else:
raise RuntimeError("Provided method not recognized.") |
<SYSTEM_TASK:>
Compare the prediction or recommendation performance of recommender models
<END_TASK>
<USER_TASK:>
Description:
def compare_models(dataset, models, model_names=None, user_sample=1.0,
metric='auto',
target=None,
exclude_known_for_precision_recall=True,
make_plot=False,
verbose=True,
**kwargs):
"""
Compare the prediction or recommendation performance of recommender models
on a common test dataset.
Models that are trained to predict ratings are compared separately from
models that are trained without target ratings. The ratings prediction
models are compared on root-mean-squared error, and the rest are compared on
precision-recall.
Parameters
----------
dataset : SFrame
The dataset to use for model evaluation.
models : list[recommender models]
List of trained recommender models.
model_names : list[str], optional
List of model name strings for display.
user_sample : float, optional
Sampling proportion of unique users to use in estimating model
performance. Defaults to 1.0, i.e. use all users in the dataset.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric for the evaluation. The default automatically splits
models into two groups with their default evaluation metric respectively:
'rmse' for models trained with a target, and 'precision_recall'
otherwise.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to use the same column.
If the model is trained without a target column and `metric='rmse'`,
then this option must be provided by the user.
exclude_known_for_precision_recall : bool, optional
A useful option when `metric='precision_recall'`. Recommender models
automatically exclude items seen in the training data from the
final recommendation list. If the input evaluation `dataset` is the
same as the data used for training the models, set this option to False.
verbose : bool, optional
If true, print the progress.
Returns
-------
out : list[SFrame]
A list of results where each one is an sframe of evaluation results of
the respective model on the given dataset
Examples
--------
If you have created two ItemSimilarityRecommenders ``m1`` and ``m2`` and have
an :class:`~turicreate.SFrame` ``test_data``, then you may compare the
performance of the two models on test data using:
>>> import turicreate
>>> train_data = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"]})
>>> test_data = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"]})
>>> m1 = turicreate.item_similarity_recommender.create(train_data)
>>> m2 = turicreate.item_similarity_recommender.create(train_data, only_top_k=1)
>>> turicreate.recommender.util.compare_models(test_data, [m1, m2], model_names=["m1", "m2"])
The evaluation metric is automatically set to 'precision_recall', and the
evaluation will be based on recommendations that exclude items seen in the
training data.
If you want to evaluate on the original training set:
>>> turicreate.recommender.util.compare_models(train_data, [m1, m2],
... exclude_known_for_precision_recall=False)
Suppose you have four models, two trained with a target rating column, and
the other two trained without a target. By default, the models are put into
two different groups with "rmse", and "precision-recall" as the evaluation
metric respectively.
>>> train_data2 = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "c", "e", "b", "f", "b", "c", "d"],
... 'rating': [1, 3, 4, 5, 3, 4, 2, 5]})
>>> test_data2 = turicreate.SFrame({'user_id': ["0", "0", "1", "1", "1", "2", "2"],
... 'item_id': ["b", "d", "a", "c", "e", "a", "e"],
... 'rating': [3, 5, 4, 4, 3, 5, 2]})
>>> m3 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> m4 = turicreate.factorization_recommender.create(train_data2, target='rating')
>>> turicreate.recommender.util.compare_models(test_data2, [m3, m4])
To compare all four models using the same 'precision_recall' metric, you can
do:
>>> turicreate.recommender.util.compare_models(test_data2, [m1, m2, m3, m4],
... metric='precision_recall')
""" |
num_models = len(models)
if model_names is None:
model_names = ['M' + str(i) for i in range(len(models))]
if num_models < 1:
raise ValueError("Must pass in at least one recommender model to \
evaluate")
if model_names is not None and len(model_names) != num_models:
raise ValueError("Must pass in the same number of model names as \
models")
# if we are asked to sample the users, come up with a list of unique users
if user_sample < 1.0:
user_id_name = models[0].user_id
if user_id_name is None:
raise ValueError("user_id not set in model(s)")
user_sa = dataset[user_id_name]
unique_users = list(user_sa.unique())
nusers = len(unique_users)
ntake = int(round(user_sample * nusers))
_random.shuffle(unique_users)
users = unique_users[:ntake]
print("compare_models: using", ntake, "users to estimate model performance")
users = frozenset(users)
ix = [u in users for u in dataset[user_id_name]]
dataset_subset = dataset[_SArray(ix) == True]
else:
dataset_subset = dataset
results = []
for (m, mname) in zip(models, model_names):
if verbose:
print('PROGRESS: Evaluate model %s' % mname)
r = m.evaluate(dataset_subset,
metric,
exclude_known_for_precision_recall,
target,
verbose=verbose,
cutoffs=list(range(1,11,1))+list(range(11,50,5)),
**kwargs)
results.append(r)
return results |
<SYSTEM_TASK:>
Compute precision and recall at a given cutoff for each user. In information
<END_TASK>
<USER_TASK:>
Description:
def precision_recall_by_user(observed_user_items,
recommendations,
cutoffs=[10]):
"""
Compute precision and recall at a given cutoff for each user. In information
retrieval terms, precision represents the ratio of relevant, retrieved items
to the number of retrieved items. Recall represents the ratio of relevant,
retrieved items to the number of relevant items.
Let :math:`p_k` be a vector of the first :math:`k` elements in the
recommendations for a particular user, and let :math:`a` be the set of items
in ``observed_user_items`` for that user. The "precision at cutoff k" for
this user is defined as
.. math::
P(k) = \\frac{ | a \cap p_k | }{k},
while "recall at cutoff k" is defined as
.. math::
R(k) = \\frac{ | a \cap p_k | }{|a|}
The order of the elements in the recommendations affects the returned
precision and recall scores.
Parameters
----------
observed_user_items : SFrame
An SFrame containing observed user item pairs, where the first
column contains user ids and the second column contains item ids.
recommendations : SFrame
An SFrame containing columns pertaining to the user id, the item id,
the score given to that pair, and the rank of that item among the
recommendations made for user id. For example, see the output of
recommend() produced by any turicreate.recommender model.
cutoffs : list[int], optional
The cutoffs to use when computing precision and recall.
Returns
-------
out : SFrame
An SFrame containing columns user id, cutoff, precision, recall, and
count where the precision and recall are reported for each user at
each requested cutoff, and count is the number of observations for
that user id.
Notes
-----
The corner cases that involve empty lists were chosen to be consistent
with the feasible set of precision-recall curves, which start at
(precision, recall) = (1,0) and end at (0,1). However, we do not believe
there is a well-known consensus on this choice.
Examples
--------
Given SFrames ``train_data`` and ``test_data`` with columns user_id
and item_id:
>>> from turicreate.toolkits.recommender.util import precision_recall_by_user
>>> m = turicreate.recommender.create(train_data)
>>> recs = m.recommend()
>>> precision_recall_by_user(test_data, recs, cutoffs=[5, 10])
""" |
assert type(observed_user_items) == _SFrame
assert type(recommendations) == _SFrame
assert type(cutoffs) == list
assert min(cutoffs) > 0, "All cutoffs must be positive integers."
assert recommendations.num_columns() >= 2
user_id = recommendations.column_names()[0]
item_id = recommendations.column_names()[1]
assert observed_user_items.num_rows() > 0, \
"Evaluating precision and recall requires a non-empty " + \
"observed_user_items."
assert user_id in observed_user_items.column_names(), \
"User column required in observed_user_items."
assert item_id in observed_user_items.column_names(), \
"Item column required in observed_user_items."
assert observed_user_items[user_id].dtype == \
recommendations[user_id].dtype, \
"The user column in the two provided SFrames must have the same type."
assert observed_user_items[item_id].dtype == \
recommendations[item_id].dtype, \
"The user column in the two provided SFrames must have the same type."
cutoffs = _array.array('f', cutoffs)
opts = {'data': observed_user_items,
'recommendations': recommendations,
'cutoffs': cutoffs}
response = _turicreate.toolkits._main.run('evaluation_precision_recall_by_user', opts)
sf = _SFrame(None, _proxy=response['pr'])
return sf.sort([user_id, 'cutoff']) |
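The per-user quantities defined above can be sketched in plain Python for one user; the real computation happens inside the toolkit's backend:

```python
def precision_recall_at_k(observed_items, recommended_items, k):
    # observed_items: set of items the user actually interacted with
    # recommended_items: ranked list of recommendations, best first
    hits = len(set(recommended_items[:k]) & set(observed_items))
    precision = hits / k
    recall = hits / len(observed_items) if observed_items else 0.0
    return precision, recall

print(precision_recall_at_k({"a", "c"}, ["a", "b", "c", "d"], k=2))  # (0.5, 0.5)
```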
<SYSTEM_TASK:>
Create a recommender-friendly train-test split of the provided data set.
<END_TASK>
<USER_TASK:>
Description:
def random_split_by_user(dataset,
user_id='user_id',
item_id='item_id',
max_num_users=1000,
item_test_proportion=.2,
random_seed=0):
"""Create a recommender-friendly train-test split of the provided data set.
The test dataset is generated by first choosing `max_num_users` out of the
total number of users in `dataset`. Then, for each of the chosen test users,
a portion of the user's items (determined by `item_test_proportion`) is
randomly chosen to be included in the test set. This split allows the
training data to retain enough information about the users in the testset,
so that adequate recommendations can be made. The total number of users
in the test set may be fewer than `max_num_users` if a user was chosen for
the test set but none of their items are selected.
Parameters
----------
dataset : SFrame
An SFrame containing (user, item) pairs.
user_id : str, optional
The name of the column in ``dataset`` that contains user ids.
item_id : str, optional
The name of the column in ``dataset`` that contains item ids.
max_num_users : int, optional
The maximum number of users to use to construct the test set. If
set to 'None', then use all available users.
item_test_proportion : float, optional
The desired probability that a test user's item will be chosen
for the test set.
random_seed : int, optional
The random seed to use for randomization. If None, then the random
seed is different every time; if numeric, then subsequent calls with
the same dataset and random seed will have the same split.
Returns
-------
train, test : SFrame
A tuple with two datasets to be used for training and testing.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf, max_num_users=100)
""" |
assert user_id in dataset.column_names(), \
'Provided user column "{0}" not found in data set.'.format(user_id)
assert item_id in dataset.column_names(), \
'Provided item column "{0}" not found in data set.'.format(item_id)
if max_num_users == 'all':
max_num_users = None
if random_seed is None:
import time
random_seed = int(hash("%20f" % time.time()) % 2**63)
opts = {'dataset': dataset,
'user_id': user_id,
'item_id': item_id,
'max_num_users': max_num_users,
'item_test_proportion': item_test_proportion,
'random_seed': random_seed}
response = _turicreate.extensions._recsys.train_test_split(dataset, user_id, item_id,
max_num_users, item_test_proportion, random_seed)
train = response['train']
test = response['test']
return train, test |
<SYSTEM_TASK:>
Get the current settings of the model. The keys depend on the type of
<END_TASK>
<USER_TASK:>
Description:
def _list_fields(self):
"""
Get the current settings of the model. The keys depend on the type of
model.
Returns
-------
out : list
A list of fields that can be queried using the ``get`` method.
""" |
response = self.__proxy__.list_fields()
return [s for s in response['value'] if not s.startswith("_")] |
<SYSTEM_TASK:>
Set current options for a model.
<END_TASK>
<USER_TASK:>
Description:
def _set_current_options(self, options):
"""
Set current options for a model.
Parameters
----------
options : dict
A dictionary of the desired option settings. The key should be the name
of the option and each value is the desired value of the option.
""" |
opts = self._get_current_options()
opts.update(options)
response = self.__proxy__.set_current_options(opts)
return response |
<SYSTEM_TASK:>
Processes the dataset parameter for type correctness.
<END_TASK>
<USER_TASK:>
Description:
def __prepare_dataset_parameter(self, dataset):
"""
Processes the dataset parameter for type correctness.
Returns it as an SFrame.
""" |
# Translate the dataset argument into the proper type
if not isinstance(dataset, _SFrame):
def raise_dataset_type_exception():
raise TypeError("The dataset parameter must be either an SFrame, "
"or a dictionary of (str : list) or (str : value).")
if type(dataset) is dict:
if not all(type(k) is str for k in _six.iterkeys(dataset)):
raise_dataset_type_exception()
if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)):
dataset = _SFrame(dataset)
else:
dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)})
else:
raise_dataset_type_exception()
return dataset |
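A standalone sketch of the dict handling above, showing how single-value and list-valued dictionaries are normalized into column-oriented data; turicreate itself is not required to run this:

```python
def normalize(dataset):
    # Mirrors __prepare_dataset_parameter's dict branch, as a plain-Python sketch.
    if isinstance(dataset, dict):
        if all(isinstance(v, (list, tuple)) for v in dataset.values()):
            return dataset                           # already column-oriented
        return {k: [v] for k, v in dataset.items()}  # single row -> one-element columns
    return dataset

print(normalize({'user_id': '0', 'item_id': 'a'}))
print(normalize({'user_id': ['0', '1'], 'item_id': ['a', 'b']}))
```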
<SYSTEM_TASK:>
Return a score prediction for the user ids and item ids in the provided
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset,
new_observation_data=None, new_user_data=None, new_item_data=None):
"""
Return a score prediction for the user ids and item ids in the provided
data set.
Parameters
----------
dataset : SFrame
Dataset in the same form used for training.
new_observation_data : SFrame, optional
``new_observation_data`` gives additional observation data
to the model, which may be used by the models to improve
score accuracy. Must be in the same format as the
observation data passed to ``create``. How this data is
used varies by model.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
Returns
-------
out : SArray
An SArray with predicted scores for each given observation
predicted by the model.
See Also
--------
recommend, evaluate
""" |
if new_observation_data is None:
new_observation_data = _SFrame()
if new_user_data is None:
new_user_data = _SFrame()
if new_item_data is None:
new_item_data = _SFrame()
dataset = self.__prepare_dataset_parameter(dataset)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types))
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])
response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
return response['prediction'] |
<SYSTEM_TASK:>
Get the k most similar items for each item in items.
<END_TASK>
<USER_TASK:>
Description:
def get_similar_items(self, items=None, k=10, verbose=False):
"""
Get the k most similar items for each item in items.
Each type of recommender has its own model for the similarity
between items. For example, the item_similarity_recommender will
return the most similar items according to the user-chosen
similarity; the factorization_recommender will return the
nearest items based on the cosine similarity between latent item
factors.
Parameters
----------
items : SArray or list; optional
An :class:`~turicreate.SArray` or list of item ids for which to get
similar items. If 'None', then return the `k` most similar items for
all items in the training set.
k : int, optional
The number of similar items for each item.
verbose : bool, optional
If True, progress is printed.
Returns
-------
out : SFrame
An SFrame with the top ranked similar items for each item. The
columns are `item`, 'similar', 'score' and 'rank', where
`item` matches the item column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that item. The value of the score depends on the method
used for computing item similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.item_similarity_recommender.create(sf)
>>> nn = m.get_similar_items()
""" |
if items is None:
get_all_items = True
items = _SArray()
else:
get_all_items = False
if isinstance(items, list):
items = _SArray(items)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(items, "items", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
return self.__proxy__.get_similar_items(items, k, verbose, get_all_items) |
<SYSTEM_TASK:>
Get the k most similar users for each entry in `users`.
<END_TASK>
<USER_TASK:>
Description:
def get_similar_users(self, users=None, k=10):
"""Get the k most similar users for each entry in `users`.
Each type of recommender has its own model for the similarity
between users. For example, the factorization_recommender will
return the nearest users based on the cosine similarity
between latent user factors. (This method is not currently
available for item_similarity models.)
Parameters
----------
users : SArray or list; optional
An :class:`~turicreate.SArray` or list of user ids for which to get
similar users. If 'None', then return the `k` most similar users for
all users in the training set.
k : int, optional
The number of neighbors to return for each user.
Returns
-------
out : SFrame
An SFrame with the top ranked similar users for each user. The
columns are `user`, 'similar', 'score' and 'rank', where
`user` matches the user column name specified at training time.
The 'rank' is between 1 and `k` and 'score' gives the similarity
score of that user. The value of the score depends on the method
used for computing user similarities.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"]})
>>> m = turicreate.factorization_recommender.create(sf)
>>> nn = m.get_similar_users()
""" |
if users is None:
get_all_users = True
users = _SArray()
else:
get_all_users = False
if isinstance(users, list):
users = _SArray(users)
def check_type(arg, arg_name, required_type, allowed_types):
if not isinstance(arg, required_type):
raise TypeError("Parameter " + arg_name + " must be of type(s) "
+ (", ".join(allowed_types) )
+ "; Type '" + str(type(arg)) + "' not recognized.")
check_type(users, "users", _SArray, ["SArray", "list"])
check_type(k, "k", int, ["int"])
opt = {'model': self.__proxy__,
'users': users,
'get_all_users' : get_all_users,
'k': k}
response = self.__proxy__.get_similar_users(users, k, get_all_users)
return response |
<SYSTEM_TASK:>
Recommend the ``k`` highest scored items based on the
<END_TASK>
<USER_TASK:>
Description:
def recommend_from_interactions(
self, observed_items, k=10, exclude=None, items=None,
new_user_data=None, new_item_data=None,
exclude_known=True, diversity=0, random_seed=None,
verbose=True):
"""
Recommend the ``k`` highest scored items based on the
interactions given in `observed_items.`
Parameters
----------
observed_items : SArray, SFrame, or list
A list/SArray of items to use to make recommendations, or
an SFrame of items and optionally ratings and/or other
interaction data. The model will then recommend the most
similar items to those given. If ``observed_items`` has a user
column, then it must be only one user, and the additional
interaction data stored in the model is also used to make
recommendations.
k : int, optional
The number of recommendations to generate.
items : SArray, SFrame, or list, optional
Restricts the items from which recommendations can be
made. ``items`` must be an SArray, list, or SFrame with a
single column containing items, and all recommendations
will be made from this pool of items. This can be used,
for example, to restrict the recommendations to items
within a particular category or genre. By default,
recommendations are made from all items present when the
model was trained.
new_user_data : SFrame, optional
``new_user_data`` may give additional user data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the user
data passed to ``create``.
new_item_data : SFrame, optional
``new_item_data`` may give additional item data to the
model. If present, scoring is done with reference to this
new information. If there is any overlap with the side
information present at training time, then this new side
data is preferred. Must be in the same format as the item
data passed to ``create``.
exclude : SFrame, optional
An :class:`~turicreate.SFrame` of items or user / item
pairs. The column names must be equal to the user and
item columns of the main data, and it provides the model
with user/item pairs to exclude from the recommendations.
These user-item-pairs are always excluded from the
predictions, even if exclude_known is False.
exclude_known : bool, optional
By default, all user-item interactions previously seen in
the training data, or in any new data provided using
``new_observation_data``, are excluded from the
recommendations. Passing in ``exclude_known = False``
overrides this behavior.
diversity : non-negative float, optional
If given, then the recommend function attempts to choose a set
of `k` items that are both highly scored and different from
other items in that set. It does this by first retrieving
``k*(1+diversity)`` recommended items, then randomly
choosing a diverse set from these items. Suggested values
for diversity are between 1 and 3.
random_seed : int, optional
If diversity is larger than 0, then some randomness is used;
this controls the random seed to use for randomization. If
None, then it will be different each time.
verbose : bool, optional
If True, print the progress of generating recommendation.
Returns
-------
out : SFrame
An SFrame with the top ranked items. The columns are
``item_id``, *score*, and *rank*, where ``item_id``
matches the item column name specified at training
time. The rank column is
between 1 and ``k`` and gives the relative score of that
item. The value of score depends on the method used for
recommendations.
""" |
column_types = self._get_data_schema()
user_id = self.user_id
item_id = self.item_id
user_type = column_types[user_id]
item_type = column_types[item_id]
if not hasattr(self, "_implicit_user_name"):
import hashlib
import time
self._implicit_user_name = None #("implicit-user-%s"
# % hashlib.md5("%0.20f" % time.time()).hexdigest()[:12])
if isinstance(observed_items, list):
observed_items = _SArray(observed_items, dtype = item_type)
if isinstance(observed_items, _SArray):
observed_items = _SFrame({self.item_id : observed_items})
if not isinstance(observed_items, _SFrame):
raise TypeError("observed_items must be a list or SArray of items, or an SFrame of items "
"and optionally ratings or other interaction information.")
# Don't modify the user's argument (if it's an SFrame).
observed_items = observed_items.copy()
# If a user id is present, then use that as the query user id
# (making sure there is only one present). If not, then use
# the local fake user id.
if user_id in observed_items.column_names():
main_user_value = observed_items[user_id][0]
if (observed_items[user_id] != main_user_value).any():
raise ValueError("To recommend items for more than one user, use `recommend()` and "
"supply new interactions using new_observation_data.")
users = _SArray([main_user_value], dtype = user_type)
else:
users = _SArray([self._implicit_user_name], dtype = user_type)
observed_items[user_id] = self._implicit_user_name
if observed_items[user_id].dtype != user_type:
observed_items[user_id] = observed_items[user_id].astype(user_type)
# Check the rest of the arguments.
if exclude is not None:
if isinstance(exclude, list):
exclude = _SArray(exclude, dtype = item_type)
if isinstance(exclude, _SArray):
exclude = _SFrame({item_id : exclude})
if user_id not in exclude.column_names():
exclude[user_id] = self._implicit_user_name
exclude[user_id] = exclude[user_id].astype(user_type)
recommendations = self.recommend(
users = users,
new_observation_data = observed_items,
k = k,
items = items,
new_user_data = new_user_data,
new_item_data = new_item_data,
exclude_known = exclude_known,
diversity = diversity,
random_seed = random_seed,
verbose = verbose)
del recommendations[user_id]
return recommendations |
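A minimal usage sketch of the method above (assuming it is exposed on a trained recommender as ``recommend_from_interactions``; the data below is illustrative):

import turicreate as tc

sf = tc.SFrame({'user_id': ['u1', 'u1', 'u2'],
                'item_id': ['a', 'b', 'b']})
m = tc.recommender.item_similarity_recommender.create(sf)

# Recommend for an anonymous session described only by the items it touched.
recs = m.recommend_from_interactions(['a'], k=2)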
<SYSTEM_TASK:>
Compute a model's precision and recall scores for a particular dataset.
<END_TASK>
<USER_TASK:>
Description:
def evaluate_precision_recall(self, dataset, cutoffs=list(range(1,11,1))+list(range(11,50,5)),
skip_set=None, exclude_known=True,
verbose=True, **kwargs):
"""
Compute a model's precision and recall scores for a particular dataset.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
This will be compared to the model's recommendations, which exclude
the (user, item) pairs seen at training time.
cutoffs : list, optional
A list of cutoff values for which one wants to evaluate precision
and recall, i.e. the value of k in "precision at k".
skip_set : SFrame, optional
Passed to :meth:`recommend` as ``exclude``.
exclude_known : bool, optional
Passed to :meth:`recommend` as ``exclude_known``. If True, items
seen at training time are excluded from the recommendations.
verbose : bool, optional
Enables verbose output. Defaults to True.
**kwargs
Additional keyword arguments are passed to the recommend
function, whose returned recommendations are used for evaluating
precision and recall of the model.
Returns
-------
out : dict
Contains the precision and recall at each cutoff value and each
user in ``dataset``.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train)
>>> m.evaluate_precision_recall(test)
See Also
--------
turicreate.recommender.util.precision_recall_by_user
""" |
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
dataset = self.__prepare_dataset_parameter(dataset)
users = dataset[self.user_id].unique()
dataset = dataset[[self.user_id, self.item_id]]
recs = self.recommend(users=users, k=max(cutoffs), exclude=skip_set,
exclude_known=exclude_known,
verbose=verbose,
**kwargs)
precision_recall_by_user = self.__proxy__.precision_recall_by_user(dataset, recs, cutoffs)
ret = {'precision_recall_by_user': precision_recall_by_user}
pr_agg = precision_recall_by_user.groupby(
'cutoff',
operations={'precision' : _Aggregate.MEAN('precision'),
'recall' : _Aggregate.MEAN('recall')})
pr_agg = pr_agg[['cutoff', 'precision', 'recall']]
ret["precision_recall_overall"] = pr_agg.sort("cutoff")
return ret |
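For reference, a short sketch of consuming the returned dictionary (reusing ``m`` and ``test`` from the docstring example above):

results = m.evaluate_precision_recall(test, cutoffs=[5, 10])
# Mean precision/recall per cutoff, aggregated over users.
print(results['precision_recall_overall'])
# Per-user, per-cutoff breakdown.
print(results['precision_recall_by_user'].head(5))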
<SYSTEM_TASK:>
Evaluate the prediction error for each user-item pair in the given data
<END_TASK>
<USER_TASK:>
Description:
def evaluate_rmse(self, dataset, target):
"""
Evaluate the prediction error for each user-item pair in the given data
set.
Parameters
----------
dataset : SFrame
An SFrame in the same format as the one used during training.
target : str
The name of the target rating column in `dataset`.
Returns
-------
out : dict
A dictionary with three items: 'rmse_by_user' and 'rmse_by_item',
which are SFrames containing the average rmse for each user and
item, respectively; and 'rmse_overall', which is a float.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> m.evaluate_rmse(test, target='target')
See Also
--------
turicreate.evaluation.rmse
""" |
assert target in dataset.column_names(), \
'Provided dataset must contain a target column with the same \
name as the target used during training.'
y = dataset[target]
yhat = self.predict(dataset)
user_column = self.user_id
item_column = self.item_id
assert user_column in dataset.column_names() and \
item_column in dataset.column_names(), \
'Provided data set must have a column pertaining to user ids and \
item ids, similar to what we had during training.'
result = dataset[[user_column, item_column]]
result['sq_error'] = (y - yhat) * (y - yhat)
rmse_by_user = result.groupby(user_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_user['rmse'] = rmse_by_user['rmse'].apply(lambda x: x**.5)
rmse_by_item = result.groupby(item_column,
{'rmse':_turicreate.aggregate.AVG('sq_error'),
'count':_turicreate.aggregate.COUNT})
rmse_by_item['rmse'] = rmse_by_item['rmse'].apply(lambda x: x**.5)
overall_rmse = result['sq_error'].mean() ** .5
return {'rmse_by_user': rmse_by_user,
'rmse_by_item': rmse_by_item,
'rmse_overall': overall_rmse} |
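A short sketch of inspecting the returned dictionary (reusing ``m`` and ``test`` from the docstring example above):

results = m.evaluate_rmse(test, target='target')
print(results['rmse_overall'])                  # a single float
print(results['rmse_by_user'].topk('rmse', 3))  # the three worst users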
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, dataset, metric='auto',
exclude_known_for_precision_recall=True,
target=None,
verbose=True, **kwargs):
r"""
Evaluate the model's ability to make rating predictions or
recommendations.
If the model is trained to predict a particular target, the
default metric used for model comparison is root-mean-squared error
(RMSE). Suppose :math:`y` and :math:`\widehat{y}` are vectors of length
:math:`N`, where :math:`y` contains the actual ratings and
:math:`\widehat{y}` the predicted ratings. Then the RMSE is defined as
.. math::
RMSE = \sqrt{\frac{1}{N} \sum_{i=1}^N (\widehat{y}_i - y_i)^2} .
If the model was not trained on a target column, the default metrics for
model comparison are precision and recall. Let
:math:`p_k` be a vector of the :math:`k` highest ranked recommendations
for a particular user, and let :math:`a` be the set of items for that
user in the groundtruth `dataset`. The "precision at cutoff k" is
defined as
.. math:: P(k) = \frac{ | a \cap p_k | }{k}
while "recall at cutoff k" is defined as
.. math:: R(k) = \frac{ | a \cap p_k | }{|a|}
Parameters
----------
dataset : SFrame
An SFrame that is in the same format as provided for training.
metric : str, {'auto', 'rmse', 'precision_recall'}, optional
Metric to use for evaluation. The default automatically chooses
'rmse' for models trained with a `target`, and 'precision_recall'
otherwise.
exclude_known_for_precision_recall : bool, optional
A useful option for evaluating precision-recall. Recommender models
have the option to exclude items seen in the training data from the
final recommendation list. Set this option to True when evaluating
on test data, and False when evaluating precision-recall on training
data.
target : str, optional
The name of the target column for evaluating rmse. If the model is
trained with a target column, the default is to use the same
column. If the model is trained without a target column and `metric`
is set to 'rmse', this option must be provided by the user.
verbose : bool, optional
Enables verbose output. Defaults to True.
**kwargs
When `metric` is set to 'precision_recall', these parameters
are passed on to :meth:`evaluate_precision_recall`.
Returns
-------
out : SFrame or dict
Results from the model evaluation procedure. If the model is trained
on a target (i.e. RMSE is the evaluation criterion), a dictionary
with three items is returned: items *rmse_by_user* and
*rmse_by_item* are SFrames with per-user and per-item RMSE, while
*rmse_overall* is the overall RMSE (a float). If the model is
trained without a target (i.e. precision and recall are the
evaluation criteria) an :py:class:`~turicreate.SFrame` is returned
with both of these metrics for each user at several cutoff values.
Examples
--------
>>> import turicreate as tc
>>> sf = tc.SFrame('https://static.turi.com/datasets/audioscrobbler')
>>> train, test = tc.recommender.util.random_split_by_user(sf)
>>> m = tc.recommender.create(train, target='target')
>>> eval = m.evaluate(test)
See Also
--------
evaluate_precision_recall, evaluate_rmse, precision_recall_by_user
""" |
ret = {}
dataset = self.__prepare_dataset_parameter(dataset)
# If the model does not have a target column, compute prec-recall.
if metric in ['precision_recall', 'auto']:
results = self.evaluate_precision_recall(dataset,
exclude_known=exclude_known_for_precision_recall,
verbose=verbose,
**kwargs)
ret.update(results)
if verbose:
print("\nPrecision and recall summary statistics by cutoff")
print(results['precision_recall_by_user'].groupby('cutoff', \
{'mean_precision': _turicreate.aggregate.AVG('precision'),
'mean_recall': _turicreate.aggregate.AVG('recall')}).topk('cutoff', reverse=True))
if metric in ['rmse', 'auto']:
if target is None:
target = self.target
if target is None or target == "":
_logging.warning("Model trained without a target. Skipping RMSE computation.")
else:
results = self.evaluate_rmse(dataset, target)
ret.update(results)
if verbose:
print("\nOverall RMSE:", results['rmse_overall'])
print("\nPer User RMSE (best)")
print(results['rmse_by_user'].topk('rmse', 1, reverse=True))
print("\nPer User RMSE (worst)")
print(results['rmse_by_user'].topk('rmse', 1))
print("\nPer Item RMSE (best)")
print(results['rmse_by_item'].topk('rmse', 1, reverse=True))
print("\nPer Item RMSE (worst)")
print(results['rmse_by_item'].topk('rmse', 1))
if metric not in ['rmse', 'precision_recall', 'auto']:
raise ValueError('Unknown evaluation metric %s, supported metrics are [\"rmse\", \"precision_recall\"]' % metric)
return ret |
<SYSTEM_TASK:>
Returns a new popularity model matching the data set this model was
<END_TASK>
<USER_TASK:>
Description:
def _get_popularity_baseline(self):
"""
Returns a new popularity model matching the data set this model was
trained with. Can be used for comparison purposes.
""" |
response = self.__proxy__.get_popularity_baseline()
from .popularity_recommender import PopularityRecommender
return PopularityRecommender(response) |
<SYSTEM_TASK:>
For a collection of item -> item pairs, returns information about the
<END_TASK>
<USER_TASK:>
Description:
def _get_item_intersection_info(self, item_pairs):
"""
For a collection of item -> item pairs, returns information about the
users in that intersection.
Parameters
----------
item_pairs : 2-column SFrame of two item columns, or a list of
(item_1, item_2) tuples.
Returns
-------
out : SFrame
An SFrame with the two item columns given above, the number of
users that rated each, and a dictionary mapping the user to a
pair of the ratings, with the first rating being the rating of
the first item and the second being the rating of the second item.
If no ratings are provided, these values are always 1.0.
""" |
if type(item_pairs) is list:
if not all(type(t) in [list, tuple] and len(t) == 2 for t in item_pairs):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
item_name = self.item_id
item_pairs = _turicreate.SFrame({item_name + "_1" : [v1 for v1, v2 in item_pairs],
item_name + "_2" : [v2 for v1, v2 in item_pairs]})
if not isinstance(item_pairs, _turicreate.SFrame):
raise TypeError("item_pairs must be 2-column SFrame of two item "
"columns, or a list of (item_1, item_2) tuples. ")
response = self.__proxy__.get_item_intersection_info(item_pairs)
return response |
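A brief usage sketch (the item ids are hypothetical; ``m`` is a trained recommender as in the earlier examples):

pairs = [('item_a', 'item_b'), ('item_a', 'item_c')]
intersection = m._get_item_intersection_info(pairs)
print(intersection)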
<SYSTEM_TASK:>
Load a keras model from disk
<END_TASK>
<USER_TASK:>
Description:
def _load_keras_model(model_network_path, model_weight_path, custom_objects=None):
"""Load a keras model from disk
Parameters
----------
model_network_path: str
Path where the model network path is (json file)
model_weight_path: str
Path where the model network weights are (hd5 file)
custom_objects:
A dictionary of layers or other custom classes
or functions used by the model
Returns
-------
model: A keras model
""" |
from keras.models import model_from_json
import json
# Load the model network
json_file = open(model_network_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
if not custom_objects:
custom_objects = {}
# Reconstruct the model from its JSON spec, then load the weights
loaded_model = model_from_json(loaded_model_json, custom_objects=custom_objects)
loaded_model.load_weights(model_weight_path)
return loaded_model |
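A minimal usage sketch (the file names are hypothetical and correspond to a model saved with Keras' ``model.to_json()`` and ``model.save_weights()``):

keras_model = _load_keras_model('my_model.json', 'my_model_weights.h5')
keras_model.summary()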
<SYSTEM_TASK:>
A method for displaying the Plot object
<END_TASK>
<USER_TASK:>
Description:
def show(self):
"""
A method for displaying the Plot object
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Examples
--------
Suppose 'plt' is an Plot Object
We can view it using:
>>> plt.show()
""" |
global _target
display = False
try:
if _target == 'auto' and \
get_ipython().__class__.__name__ == "ZMQInteractiveShell":
self._repr_javascript_()
display = True
except NameError:
pass
finally:
if not display:
if _sys.platform != 'darwin' and _sys.platform != 'linux2' and _sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
# TODO: allow autodetection of light/dark mode.
# Disabled for now, since the GUI side needs some work (ie. background color).
plot_variation = 0x10 # force light mode
self.__proxy__.call_function('show', {'path_to_client': path_to_client, 'variation': plot_variation}) |
<SYSTEM_TASK:>
A method for saving the Plot object in a vega representation
<END_TASK>
<USER_TASK:>
Description:
def save(self, filepath):
"""
A method for saving the Plot object in a vega representation
Parameters
----------
filepath: string
The destination filepath where the plot object must be saved as.
The extension of this filepath determines what format the plot will
be saved as. Currently supported formats are JSON, PNG, and SVG.
Examples
--------
Suppose 'plt' is an Plot Object
We can save it using:
>>> plt.save('vega_spec.json')
We can save the plot as a PNG/SVG using:
>>> plt.save('test.png')
>>> plt.save('test.svg')
""" |
if type(filepath) != str:
raise ValueError("filepath provided is not a string")
if filepath.endswith(".json"):
# save as vega json
spec = self.get_vega(include_data = True)
with open(filepath, 'w') as fp:
_json.dump(spec, fp)
elif filepath.endswith(".png") or filepath.endswith(".svg"):
# save as png/svg, but json first
spec = self.get_vega(include_data = True)
EXTENSION_START_INDEX = -3
extension = filepath[EXTENSION_START_INDEX:]
temp_file_tuple = _mkstemp()
temp_file_path = temp_file_tuple[1]
with open(temp_file_path, 'w') as fp:
_json.dump(spec, fp)
dirname = _os.path.dirname(__file__)
relative_path_to_vg2png_vg2svg = "../vg2" + extension
absolute_path_to_vg2png_vg2svg = _os.path.join(dirname,
relative_path_to_vg2png_vg2svg)
# try node vg2[png|svg] json_filepath out_filepath
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode == _NODE_NOT_FOUND_ERROR_CODE:
# user doesn't have node installed
raise RuntimeError("Node.js not found. Saving as PNG and SVG" +
" requires Node.js, please download and install Node.js " +
"from here and try again: https://nodejs.org/en/download/")
elif exitcode == _CANVAS_PREBUILT_NOT_FOUND_ERROR:
# try to see if canvas-prebuilt is globally installed
# if it is, then link it
# if not, tell the user to install it
(is_installed_exitcode,
is_installed_stdout,
is_installed_stderr) = _run_cmdline(
"npm ls -g -json | grep canvas-prebuilt")
if is_installed_exitcode == _SUCCESS:
# npm link canvas-prebuilt
link_exitcode, link_stdout, link_stderr = _run_cmdline(
"npm link canvas-prebuilt")
if link_exitcode == _PERMISSION_DENIED_ERROR_CODE:
# They don't have permission, tell them.
raise RuntimeError(link_stderr + '\n\n' +
"`npm link canvas-prebuilt` failed, " +
"Permission Denied.")
elif link_exitcode == _SUCCESS:
# canvas-prebuilt link is now successful, so run the
# node vg2[png|svg] json_filepath out_filepath
# command again.
(exitcode, stdout, stderr) = _run_cmdline("node " +
absolute_path_to_vg2png_vg2svg + " "
+ temp_file_path + " " + filepath)
if exitcode != _SUCCESS:
# something else that we have not identified yet
# happened.
raise RuntimeError(stderr)
else:
raise RuntimeError(link_stderr)
else:
raise RuntimeError("canvas-prebuilt not found. " +
"Saving as PNG and SVG requires canvas-prebuilt, " +
"please download and install canvas-prebuilt by " +
"running this command, and try again: " +
"`npm install -g canvas-prebuilt`")
elif exitcode == _SUCCESS:
pass
else:
raise RuntimeError(stderr)
# delete temp file that user didn't ask for
_run_cmdline("rm " + temp_file_path)
else:
raise NotImplementedError("filename must end in" +
" .json, .svg, or .png") |
<SYSTEM_TASK:>
Get the right value from the scikit-tree
<END_TASK>
<USER_TASK:>
Description:
def _get_value(scikit_value, mode = 'regressor', scaling = 1.0, n_classes = 2, tree_index = 0):
""" Get the right value from the scikit-tree
""" |
# Regression
if mode == 'regressor':
return scikit_value[0] * scaling
# Binary classification
if n_classes == 2:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0][1] * scaling / scikit_value[0].sum()
# boosted tree
else:
value = scikit_value[0][0] * scaling
if value == 0.5:
value = value - 1e-7
# Multiclass classification
else:
# Decision tree
if len(scikit_value[0]) != 1:
value = scikit_value[0] / scikit_value[0].sum()
# boosted tree
else:
value = {tree_index: scikit_value[0] * scaling}
return value |
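A small worked example of the binary-classification branch above (the leaf counts are illustrative):

import numpy as np

# Leaf with class counts [30, 70]: the normalized probability of class 1 is 0.7.
leaf_value = np.array([[30.0, 70.0]])
print(_get_value(leaf_value, mode='classifier', n_classes=2))   # -> 0.7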
<SYSTEM_TASK:>
Convert a generic tree regressor model to the protobuf spec.
<END_TASK>
<USER_TASK:>
Description:
def convert_tree_ensemble(model, input_features,
output_features = ('predicted_class', float),
mode = 'regressor',
base_prediction = None,
class_labels = None,
post_evaluation_transform = None):
"""
Convert a generic tree regressor model to the protobuf spec.
This currently supports:
* Decision tree regression
* Gradient boosted tree regression
* Random forest regression
* Decision tree classifier.
* Gradient boosted tree classifier.
* Random forest classifier.
Parameters
----------
model: [DecisionTreeRegressor | GradientBoostingRegression | RandomForestRegressor]
A scikit learn tree model.
input_features: str | list of str | list of (str, datatype) tuples
Names (and optionally types) of the input features.
output_features: tuple, optional
Name and type of the output column. Defaults to ('predicted_class', float).
base_prediction: double
Base prediction value.
mode: str in ['regressor', 'classifier']
Mode of the tree model.
class_labels: list[int]
List of classes
post_evaluation_transform: list[int]
Post evaluation transform
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
""" |
num_dimensions = get_input_dimension(model)
features = process_or_validate_features(input_features, num_dimensions)
n_classes = None
if mode == 'classifier':
n_classes = model.n_classes_
if class_labels is None:
class_labels = range(n_classes)
else:
if len(class_labels) != n_classes:
raise ValueError("Number of classes in model (%d) does not match "
"length of supplied class list (%d)."
% (n_classes, len(class_labels)))
coreml_tree = TreeEnsembleClassifier(input_features, class_labels, output_features)
if post_evaluation_transform is not None:
coreml_tree.set_post_evaluation_transform(post_evaluation_transform)
# Base prediction not provided
if base_prediction is None:
if n_classes == 2:
base_prediction = [0.0]
else:
base_prediction = [0.0 for c in range(n_classes)]
coreml_tree.set_default_prediction_value(base_prediction)
else:
if base_prediction is None:
base_prediction = 0.0
coreml_tree = TreeEnsembleRegressor(input_features, output_features)
coreml_tree.set_default_prediction_value(base_prediction)
# Single tree
if hasattr(model, 'tree_'):
_recurse(coreml_tree, model.tree_, tree_id = 0, node_id = 0,
mode = mode, n_classes = n_classes)
# Multiple trees
elif hasattr(model, 'estimators_'):
is_ensembling_in_separate_trees = False
if type(model.estimators_) != list:
is_ensembling_in_separate_trees = len(model.estimators_.shape) > 1 and model.estimators_.shape[1] > 1
estimators = model.estimators_.flatten()
else:
estimators = model.estimators_
scaling = model.learning_rate if hasattr(model, 'learning_rate') else 1.0 / len(estimators)
for tree_id, base_model in enumerate(estimators):
if is_ensembling_in_separate_trees:
tree_index = tree_id % n_classes
else:
tree_index = 0
_recurse(coreml_tree, base_model.tree_, tree_id, node_id = 0,
scaling = scaling, mode = mode, n_classes = n_classes, tree_index = tree_index)
else:
raise TypeError('Unknown scikit-learn tree model type.')
return coreml_tree.spec |
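For orientation, the public scikit-learn converter that ultimately relies on this routine can be exercised roughly as follows (a sketch; it assumes scikit-learn and coremltools are installed, and the feature/output names are hypothetical):

import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
import coremltools

X = np.random.rand(50, 3)
y = X[:, 0] + 2.0 * X[:, 1]
skl_model = GradientBoostingRegressor(n_estimators=10).fit(X, y)

mlmodel = coremltools.converters.sklearn.convert(skl_model, ['f0', 'f1', 'f2'], 'target')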
<SYSTEM_TASK:>
Returns SFrame of style images used for training the model
<END_TASK>
<USER_TASK:>
Description:
def get_styles(self, style=None):
"""
Returns SFrame of style images used for training the model
Parameters
----------
style: int or list, optional
The selected style or list of styles to return. If `None`, all
styles will be returned
See Also
--------
stylize
Examples
--------
>>> model.get_styles()
Columns:
style int
image Image
Rows: 4
Data:
+-------+--------------------------+
| style | image |
+-------+--------------------------+
| 0 | Height: 642 Width: 642 |
| 1 | Height: 642 Width: 642 |
| 2 | Height: 642 Width: 642 |
| 3 | Height: 642 Width: 642 |
+-------+--------------------------+
""" |
style, _ = self._style_input_check(style)
return self.styles.filter_by(style, self._index_column) |
<SYSTEM_TASK:>
Load a libsvm model from a path on disk.
<END_TASK>
<USER_TASK:>
Description:
def load_model(model_path):
"""Load a libsvm model from a path on disk.
This currently supports:
* C-SVC
* NU-SVC
* Epsilon-SVR
* NU-SVR
Parameters
----------
model_path: str
Path on disk where the libsvm model representation is.
Returns
-------
model: libsvm_model
A model of the libsvm format.
""" |
if not(HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
from svmutil import svm_load_model # From libsvm
import os
if (not os.path.exists(model_path)):
raise IOError("Expected a valid file path. %s does not exist" % model_path)
return svm_load_model(model_path) |
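A minimal usage sketch (the path is hypothetical; the file must have been written by libsvm's svm-train or svm_save_model):

svm_model = load_model('/tmp/heart_scale.model')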
<SYSTEM_TASK:>
Annotate an input or output multiArray feature in a Neural Network spec to
<END_TASK>
<USER_TASK:>
Description:
def add_enumerated_multiarray_shapes(spec, feature_name, shapes):
"""
Annotate an input or output multiArray feature in a Neural Network spec
to accommodate a list of enumerated array shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the multiArray feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param shapes: [] | NeuralNetworkMultiArrayShape
A single NeuralNetworkMultiArrayShape object or a list of them, each
encoding valid shape information for a multiArray feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> array_shapes = [flexible_shape_utils.NeuralNetworkMultiArrayShape(3)]
>>> second_shape = flexible_shape_utils.NeuralNetworkMultiArrayShape()
>>> second_shape.set_channel_shape(3)
>>> second_shape.set_height_shape(10)
>>> second_shape.set_width_shape(15)
>>> array_shapes.append(second_shape)
>>> flexible_shape_utils.add_enumerated_multiarray_shapes(spec, feature_name='my_multiarray_featurename', shapes=array_shapes)
:return:
None. The spec object is updated
""" |
if not isinstance(shapes, list):
shapes = [shapes]
for shape in shapes:
if not isinstance(shape, NeuralNetworkMultiArrayShape):
raise Exception(
'Shape ranges should be of type NeuralNetworkMultiArrayShape')
shape._validate_multiarray_shape()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to add enumerated shapes to '
'a non-multiArray feature type')
if feature.type.multiArrayType.WhichOneof(
'ShapeFlexibility') != 'enumeratedShapes':
feature.type.multiArrayType.ClearField('ShapeFlexibility')
eshape_len = len(feature.type.multiArrayType.enumeratedShapes.shapes)
# Add default array shape to list of enumerated shapes if enumerated shapes
# field is currently empty
if eshape_len == 0:
fixed_shape = feature.type.multiArrayType.shape
if len(fixed_shape) == 1:
fs = NeuralNetworkMultiArrayShape(fixed_shape[0])
shapes.append(fs)
elif len(fixed_shape) == 3:
fs = NeuralNetworkMultiArrayShape()
fs.set_channel_shape(fixed_shape[0])
fs.set_height_shape(fixed_shape[1])
fs.set_width_shape(fixed_shape[2])
shapes.append(fs)
else:
raise Exception('Original fixed multiArray shape for {} is invalid'
.format(feature_name))
for shape in shapes:
s = feature.type.multiArrayType.enumeratedShapes.shapes.add()
s.shape.extend(shape.multiarray_shape)
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) |
<SYSTEM_TASK:>
Annotate an input or output image feature in a Neural Network spec to
<END_TASK>
<USER_TASK:>
Description:
def add_enumerated_image_sizes(spec, feature_name, sizes):
"""
Annotate an input or output image feature in a Neural Network spec
to accommodate a list of enumerated image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the image feature for which to add size information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param sizes: [] | NeuralNetworkImageSize
A single NeuralNetworkImageSize object or a list of them, each encoding
valid size information for an image feature
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> image_sizes = [flexible_shape_utils.NeuralNetworkImageSize(128, 128)]
>>> image_sizes.append(flexible_shape_utils.NeuralNetworkImageSize(256, 256))
>>> flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='my_multiarray_featurename', sizes=image_sizes)
:return:
None. The spec object is updated
""" |
if not isinstance(sizes, list):
sizes = [sizes]
for size in sizes:
if not isinstance(size, NeuralNetworkImageSize):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSize')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add enumerated sizes to '
'a non-image feature type')
if feature.type.imageType.WhichOneof(
'SizeFlexibility') != 'enumeratedSizes':
feature.type.imageType.ClearField('SizeFlexibility')
esizes_len = len(feature.type.imageType.enumeratedSizes.sizes)
# Add default image size to list of enumerated sizes if enumerated sizes
# field is currently empty
if esizes_len == 0:
fixed_height = feature.type.imageType.height
fixed_width = feature.type.imageType.width
sizes.append(NeuralNetworkImageSize(fixed_height, fixed_width))
for size in sizes:
s = feature.type.imageType.enumeratedSizes.sizes.add()
s.height = size.height
s.width = size.width
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) |
<SYSTEM_TASK:>
Annotate an input or output Image feature in a Neural Network spec to
<END_TASK>
<USER_TASK:>
Description:
def update_image_size_range(spec, feature_name, size_range):
"""
Annotate an input or output Image feature in a Neural Network spec
to accommodate a range of image sizes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the Image feature for which to add shape information.
If the feature is not found in the input or output descriptions then
an exception is thrown
:param size_range: NeuralNetworkImageSizeRange
A NeuralNetworkImageSizeRange object with the populated image size
range information.
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
>>> img_size_ranges.add_height_range(64, 128)
>>> img_size_ranges.add_width_range(128, -1)
>>> flexible_shape_utils.update_image_size_range(spec, feature_name='my_multiarray_featurename', size_range=img_size_ranges)
:return:
None. The spec object is updated
""" |
if not isinstance(size_range, NeuralNetworkImageSizeRange):
raise Exception(
'Shape ranges should be of type NeuralNetworkImageSizeRange')
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'imageType':
raise Exception('Trying to add size ranges for '
'a non-image feature type')
feature.type.imageType.ClearField('SizeFlexibility')
feature.type.imageType.imageSizeRange.heightRange.lowerBound = size_range.get_height_range().lowerBound
feature.type.imageType.imageSizeRange.heightRange.upperBound = size_range.get_height_range().upperBound
feature.type.imageType.imageSizeRange.widthRange.lowerBound = size_range.get_width_range().lowerBound
feature.type.imageType.imageSizeRange.widthRange.upperBound = size_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) |
<SYSTEM_TASK:>
Annotate an input or output MLMultiArray feature in a Neural Network spec
<END_TASK>
<USER_TASK:>
Description:
def update_multiarray_shape_range(spec, feature_name, shape_range):
"""
Annotate an input or output MLMultiArray feature in a Neural Network spec
to accommodate a range of shapes
:param spec: MLModel
The MLModel spec containing the feature
:param feature_name: str
The name of the feature for which to add shape range
information. If the feature is not found in the input or output
descriptions then an exception is thrown
:param shape_range: NeuralNetworkMultiArrayShapeRange
A NeuralNetworkMultiArrayShapeRange object with the populated shape
range information. The shape_range object must either contain only
shape information for channel or channel, height and width. If
the object is invalid then an exception is thrown
Examples
--------
.. sourcecode:: python
>>> import coremltools
>>> from coremltools.models.neural_network import flexible_shape_utils
>>> spec = coremltools.utils.load_spec('mymodel.mlmodel')
>>> shape_range = flexible_shape_utils.NeuralNetworkMultiArrayShapeRange()
>>> shape_range.add_channel_range((1, 3))
>>> shape_range.add_width_range((128, 256))
>>> shape_range.add_height_range((128, 256))
>>> flexible_shape_utils.update_multiarray_shape_range(spec, feature_name='my_multiarray_featurename', shape_range=shape_range)
:return:
None. The spec is updated
""" |
if not isinstance(shape_range, NeuralNetworkMultiArrayShapeRange):
raise Exception('Shape range should be of type MultiArrayShapeRange')
shape_range.validate_array_shape_range()
feature = _get_feature(spec, feature_name)
if feature.type.WhichOneof('Type') != 'multiArrayType':
raise Exception('Trying to update shape range for '
'a non-multiArray feature type')
# Add channel range
feature.type.multiArrayType.ClearField('ShapeFlexibility')
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_channel_range().lowerBound
s.upperBound = shape_range.get_channel_range().upperBound
if shape_range.get_shape_range_dims() > 1:
# Add height range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_height_range().lowerBound
s.upperBound = shape_range.get_height_range().upperBound
# Add width range
s = feature.type.multiArrayType.shapeRange.sizeRanges.add()
s.lowerBound = shape_range.get_width_range().lowerBound
s.upperBound = shape_range.get_width_range().upperBound
# Bump up specification version
spec.specificationVersion = max(_MINIMUM_FLEXIBLE_SHAPES_SPEC_VERSION,
spec.specificationVersion) |
<SYSTEM_TASK:>
For a given model specification, returns a dictionary with a shape range object for each input feature name.
<END_TASK>
<USER_TASK:>
Description:
def get_allowed_shape_ranges(spec):
"""
For a given model specification, returns a dictionary with a shape range object for each input feature name.
""" |
shaper = NeuralNetworkShaper(spec, False)
inputs = _get_input_names(spec)
output = {}
for input in inputs:
output[input] = shaper.shape(input)
return output |
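A short usage sketch (the model path is hypothetical):

import coremltools

spec = coremltools.utils.load_spec('mymodel.mlmodel')
for name, shape_range in get_allowed_shape_ranges(spec).items():
    print(name, shape_range)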
<SYSTEM_TASK:>
Examines a model specification and determines if it can compute results for more than one output shape.
<END_TASK>
<USER_TASK:>
Description:
def can_allow_multiple_input_shapes(spec):
"""
Examines a model specification and determines if it can compute results for more than one output shape.
:param spec: MLModel
The protobuf specification of the model.
:return: Bool
Returns True if the model can allow multiple input shapes, False otherwise.
""" |
# First, check that the model actually has a neural network in it
try:
layers = _get_nn_layers(spec)
except:
raise Exception('Unable to verify that this model contains a neural network.')
try:
shaper = NeuralNetworkShaper(spec, False)
except:
raise Exception('Unable to compute shapes for this neural network.')
inputs = _get_input_names(spec)
for name in inputs:
shape_dict = shaper.shape(name)
shape = NeuralNetworkMultiArrayShapeRange(shape_dict)
if (shape.isFlexible()):
return True
return False |
<SYSTEM_TASK:>
Returns true if any one of the channel, height, or width ranges of this shape allow more than one input value.
<END_TASK>
<USER_TASK:>
Description:
def isFlexible(self):
"""
Returns true if any one of the channel, height, or width ranges of this shape allow more than one input value.
""" |
for key, value in self.arrayShapeRange.items():
if key in _CONSTRAINED_KEYS:
if value.isFlexible:
return True
return False |
<SYSTEM_TASK:>
Generate a macro definition or undefinition
<END_TASK>
<USER_TASK:>
Description:
def define_macro(out_f, (name, args, body), undefine=False, check=True):
"""Generate a macro definition or undefinition""" |
if undefine:
out_f.write(
'#undef {0}\n'
.format(macro_name(name))
)
else:
if args:
arg_list = '({0})'.format(', '.join(args))
else:
arg_list = ''
if check:
out_f.write(
'#ifdef {0}\n'
'# error {0} already defined.\n'
'#endif\n'
.format(macro_name(name))
)
out_f.write(
'#define {0}{1} {2}\n'.format(macro_name(name), arg_list, body)
) |
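A short sketch of the output this helper writes (Python 2, matching the tuple-unpacking signature above; ``macro_name`` is assumed to apply the library's usual prefix):

import sys

define_macro(sys.stdout, ('LIMIT', [], '32'))                 # guarded #define
define_macro(sys.stdout, ('LIMIT', [], '32'), undefine=True)  # matching #undef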
<SYSTEM_TASK:>
Generates the length limits
<END_TASK>
<USER_TASK:>
Description:
def length_limits(max_length_limit, length_limit_step):
"""Generates the length limits""" |
string_len = len(str(max_length_limit))
return [
str(i).zfill(string_len) for i in
xrange(
length_limit_step,
max_length_limit + length_limit_step - 1,
length_limit_step
)
] |
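A small worked example:

# Limits up to 100 in steps of 25, zero-padded to the width of the maximum limit.
print(length_limits(max_length_limit=100, length_limit_step=25))
# -> ['025', '050', '075', '100']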
<SYSTEM_TASK:>
Throws when the path does not exist
<END_TASK>
<USER_TASK:>
Description:
def existing_path(value):
"""Throws when the path does not exist""" |
if os.path.exists(value):
return value
else:
raise argparse.ArgumentTypeError("Path {0} not found".format(value)) |
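Typical use is as an argparse type validator, for example:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--src', type=existing_path, help='must already exist on disk')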
<SYSTEM_TASK:>
Generate the closing part
<END_TASK>
<USER_TASK:>
Description:
def end(self):
"""Generate the closing part""" |
for depth in xrange(len(self.names) - 1, -1, -1):
self.out_f.write('{0}}}\n'.format(self.prefix(depth))) |
<SYSTEM_TASK:>
A function to load a previously saved SoundClassifier instance.
<END_TASK>
<USER_TASK:>
Description:
def _load_version(cls, state, version):
"""
A function to load a previously saved SoundClassifier instance.
""" |
from ._audio_feature_extractor import _get_feature_extractor
from .._mxnet import _mxnet_utils
state['_feature_extractor'] = _get_feature_extractor(state['feature_extractor_name'])
# Load the custom neural network
num_classes = state['num_classes']
num_inputs = state['_feature_extractor'].output_length
if 'custom_layer_sizes' in state:
# These are deserialized as floats
custom_layer_sizes = list(map(int, state['custom_layer_sizes']))
else:
# Default value; this key was not part of the saved state in Turi Create 5.4
custom_layer_sizes = [100, 100]
state['custom_layer_sizes'] = custom_layer_sizes
net = SoundClassifier._build_custom_neural_network(num_inputs, num_classes, custom_layer_sizes)
net_params = net.collect_params()
ctx = _mxnet_utils.get_mxnet_context()
_mxnet_utils.load_net_params_from_state(net_params, state['_custom_classifier'], ctx=ctx)
state['_custom_classifier'] = net
return SoundClassifier(state) |
<SYSTEM_TASK:>
Return the classification for each examples in the ``dataset``.
<END_TASK>
<USER_TASK:>
Description:
def classify(self, dataset, verbose=True, batch_size=64):
"""
Return the classification for each examples in the ``dataset``.
The output SFrame contains predicted class labels and its probability.
Parameters
----------
dataset : SFrame | SArray | dict
The audio data to be classified.
If dataset is an SFrame, it must have a column with the same name as
the feature used for model training, but does not require a target
column. Additional columns are ignored.
verbose : bool, optional
If True, prints progress updates and model details.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SFrame
An SFrame with model predictions, both class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data)
""" |
prob_vector = self.predict(dataset, output_type='probability_vector',
verbose=verbose, batch_size=batch_size)
id_to_label = self._id_to_class_label
return _tc.SFrame({
'class': prob_vector.apply(lambda v: id_to_label[_np.argmax(v)]),
'probability': prob_vector.apply(_np.max)
}) |
<SYSTEM_TASK:>
Convert data into canonical form.
<END_TASK>
<USER_TASK:>
Description:
def _init_data(data, allow_empty, default_name):
"""Convert data into canonical form.""" |
assert (data is not None) or allow_empty
if data is None:
data = []
if isinstance(data, (np.ndarray, NDArray)):
data = [data]
if isinstance(data, list):
if not allow_empty:
assert(len(data) > 0)
if len(data) == 1:
data = OrderedDict([(default_name, data[0])])
else:
data = OrderedDict([('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])
if not isinstance(data, dict):
raise TypeError("Input must be NDArray, numpy.ndarray, " + \
"a list of them or dict with them as values")
for k, v in data.items():
if isinstance(v, NDArray):
data[k] = v.asnumpy()
for k, v in data.items():
if not isinstance(v, np.ndarray):
raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \
"should be NDArray or numpy.ndarray")
return list(data.items()) |
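A quick illustration of the canonical form this produces (a sketch mirroring how MXNet's IO helpers call it; the names are the defaults):

import numpy as np

single = _init_data(np.zeros((2, 3)), allow_empty=False, default_name='data')
print([k for k, _ in single])    # ['data']

several = _init_data([np.zeros(2), np.ones(2)], allow_empty=False, default_name='data')
print([k for k, _ in several])   # ['_0_data', '_1_data']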
<SYSTEM_TASK:>
The name and shape of data provided by this iterator
<END_TASK>
<USER_TASK:>
Description:
def provide_data(self):
"""The name and shape of data provided by this iterator""" |
return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.data] |
<SYSTEM_TASK:>
The name and shape of label provided by this iterator
<END_TASK>
<USER_TASK:>
Description:
def provide_label(self):
"""The name and shape of label provided by this iterator""" |
return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.label] |
<SYSTEM_TASK:>
Make generator 'overrider-id' be preferred to
<END_TASK>
<USER_TASK:>
Description:
def override (overrider_id, overridee_id):
"""Make generator 'overrider-id' be preferred to
'overridee-id'. If, when searching for generators
that could produce a target of certain type,
both those generators are amoung viable generators,
the overridden generator is immediately discarded.
The overridden generators are discarded immediately
after computing the list of viable generators, before
running any of them.""" |
assert isinstance(overrider_id, basestring)
assert isinstance(overridee_id, basestring)
__overrides.setdefault(overrider_id, []).append(overridee_id) |
<SYSTEM_TASK:>
Returns a list of source type which can possibly be converted
<END_TASK>
<USER_TASK:>
Description:
def __viable_source_types_real (target_type):
""" Returns a list of source type which can possibly be converted
to 'target_type' by some chain of generator invocation.
More formally, takes all generators for 'target_type' and
returns union of source types for those generators and result
of calling itself recursively on those source types.
""" |
assert isinstance(target_type, basestring)
generators = []
# 't0' is the initial list of target types we need to process to get a list
# of their viable source target types. New target types will not be added to
# this list.
t0 = type.all_bases (target_type)
# 't' is the list of target types which have not yet been processed to get a
# list of their viable source target types. This list will get expanded as
# we locate more target types to process.
t = t0
result = []
while t:
# Find all generators for current type.
# Unlike 'find_viable_generators' we don't care about prop_set.
generators = __type_to_generators.get (t [0], [])
t = t[1:]
for g in generators:
if not g.source_types():
# Empty source types -- everything can be accepted
result = "*"
# This will terminate outer loop.
t = None
break
for source_type in g.source_types ():
if not source_type in result:
# If generator accepts 'source_type' it
# will happily accept any type derived from it
all = type.all_derived (source_type)
for n in all:
if not n in result:
# Here there is no point in adding target types to
# the list of types to process in case they are or
# have already been on that list. We optimize this
# check by realizing that we only need to avoid the
# original target type's base types. Other target
# types that are or have been on the list of target
# types to process have been added to the 'result'
# list as well and have thus already been eliminated
# by the previous if.
if not n in t0:
t.append (n)
result.append (n)
return result |
<SYSTEM_TASK:>
Returns usage requirements + list of created targets.
<END_TASK>
<USER_TASK:>
Description:
def try_one_generator_really (project, name, generator, target_type, properties, sources):
""" Returns usage requirements + list of created targets.
""" |
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(generator, Generator)
assert isinstance(target_type, basestring)
assert isinstance(properties, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
targets = generator.run (project, name, properties, sources)
usage_requirements = []
success = False
dout("returned " + str(targets))
if targets:
success = True
if isinstance (targets[0], property_set.PropertySet):
usage_requirements = targets [0]
targets = targets [1]
else:
usage_requirements = property_set.empty ()
dout( " generator" + generator.id() + " spawned ")
# generators.dout [ indent ] " " $(targets) ;
# if $(usage-requirements)
# {
# generators.dout [ indent ] " with usage requirements:" $(x) ;
# }
if success:
return (usage_requirements, targets)
else:
return None |
<SYSTEM_TASK:>
Checks if generator invocation can be pruned, because it's guaranteed
<END_TASK>
<USER_TASK:>
Description:
def try_one_generator (project, name, generator, target_type, properties, sources):
""" Checks if generator invocation can be pruned, because it's guaranteed
to fail. If so, quickly returns empty list. Otherwise, calls
try_one_generator_really.
""" |
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(generator, Generator)
assert isinstance(target_type, basestring)
assert isinstance(properties, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
source_types = []
for s in sources:
source_types.append (s.type ())
viable_source_types = viable_source_types_for_generator (generator)
if source_types and viable_source_types != ['*'] and\
not set_.intersection (source_types, viable_source_types):
if project.manager ().logger ().on ():
id = generator.id ()
project.manager ().logger ().log (__name__, "generator '%s' pruned" % id)
project.manager ().logger ().log (__name__, "source_types" '%s' % source_types)
project.manager ().logger ().log (__name__, "viable_source_types '%s'" % viable_source_types)
return []
else:
return try_one_generator_really (project, name, generator, target_type, properties, sources) |
<SYSTEM_TASK:>
Ensures all 'targets' have types. If this is not so, exists with
<END_TASK>
<USER_TASK:>
Description:
def __ensure_type (targets):
""" Ensures all 'targets' have types. If this is not so, exists with
error.
""" |
assert is_iterable_typed(targets, virtual_target.VirtualTarget)
for t in targets:
if not t.type ():
get_manager().errors()("target '%s' has no type" % str (t)) |
<SYSTEM_TASK:>
Attempts to construct target by finding viable generators, running them
<END_TASK>
<USER_TASK:>
Description:
def __construct_really (project, name, target_type, prop_set, sources):
""" Attempts to construct target by finding viable generators, running them
and selecting the dependency graph.
""" |
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert isinstance(name, basestring) or name is None
assert isinstance(target_type, basestring)
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
viable_generators = find_viable_generators (target_type, prop_set)
result = []
dout(" *** %d viable generators" % len (viable_generators))
generators_that_succeeded = []
for g in viable_generators:
__active_generators.append(g)
r = try_one_generator (project, name, g, target_type, prop_set, sources)
del __active_generators[-1]
if r:
generators_that_succeeded.append(g)
if result:
output = cStringIO.StringIO()
print >>output, "ambiguity found when searching for best transformation"
print >>output, "Trying to produce type '%s' from: " % (target_type)
for s in sources:
print >>output, " - " + s.str()
print >>output, "Generators that succeeded:"
for g in generators_that_succeeded:
print >>output, " - " + g.id()
print >>output, "First generator produced: "
for t in result[1:]:
print >>output, " - " + str(t)
print >>output, "Second generator produced:"
for t in r[1:]:
print >>output, " - " + str(t)
get_manager().errors()(output.getvalue())
else:
result = r
return result
<SYSTEM_TASK:>
Returns true if the generator can be run with the specified
<END_TASK>
<USER_TASK:>
Description:
def match_rank (self, ps):
""" Returns true if the generator can be run with the specified
properties.
""" |
# See if generator's requirements are satisfied by
# 'properties'. Treat a feature name in requirements
# (i.e. grist-only element), as matching any value of the
# feature.
assert isinstance(ps, property_set.PropertySet)
all_requirements = self.requirements ()
property_requirements = []
feature_requirements = []
# This uses strings because genenator requirements allow
# the '<feature>' syntax without value and regular validation
# is not happy about that.
for r in all_requirements:
if get_value (r):
property_requirements.append (r)
else:
feature_requirements.append (r)
return all(ps.get(get_grist(s)) == [get_value(s)] for s in property_requirements) \
and all(ps.get(get_grist(s)) for s in feature_requirements) |
<SYSTEM_TASK:>
Determine the name of the produced target from the
<END_TASK>
<USER_TASK:>
Description:
def determine_output_name(self, sources):
"""Determine the name of the produced target from the
names of the sources.""" |
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
# The simple case if when a name
# of source has single dot. Then, we take the part before
# dot. Several dots can be caused by:
# - Using source file like a.host.cpp
# - A type which suffix has a dot. Say, we can
# type 'host_cpp' with extension 'host.cpp'.
# In the first case, we want to take the part till the last
# dot. In the second case -- not sure, but for now take
# the part till the last dot too.
name = os.path.splitext(sources[0].name())[0]
for s in sources[1:]:
n2 = os.path.splitext(s.name())[0]
if n2 != name:
get_manager().errors()(
"%s: source targets have different names: cannot determine target name"
% (self.id_))
# Names of sources might include directory. We should strip it.
return self.determine_target_name(sources[0].name()) |
<SYSTEM_TASK:>
Constructs targets that are created after consuming 'sources'.
<END_TASK>
<USER_TASK:>
Description:
def generated_targets (self, sources, prop_set, project, name):
""" Constructs targets that are created after consuming 'sources'.
The result will be a list of virtual targets, with the same length
as the 'target_types' attribute and with corresponding types.
When 'name' is empty, all source targets must have the same value of
the 'name' attribute, which will be used instead of the 'name' argument.
The value of 'name' attribute for each generated target will be equal to
the 'name' parameter if there's no name pattern for this type. Otherwise,
the '%' symbol in the name pattern will be replaced with the 'name' parameter
to obtain the 'name' attribute.
For example, if the target types are T1 and T2 (with name pattern "%_x"), the suffixes
for T1 and T2 are .t1 and .t2, and the source is foo.z, then the created files would
be "foo.t1" and "foo_x.t2". The 'name' attribute actually determines the
basename of a file.
Note that this pattern mechanism has nothing to do with implicit patterns
in make. It's a way to produce a target whose name is different from the name of
its source.
""" |
if __debug__:
from .targets import ProjectTarget
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert isinstance(project, ProjectTarget)
assert isinstance(name, basestring) or name is None
if not name:
name = self.determine_output_name(sources)
# Assign an action for each target
action = self.action_class()
a = action(project.manager(), sources, self.id_, prop_set)
# Create generated target for each target type.
targets = []
pre = self.name_prefix_
post = self.name_postfix_
for t in self.target_types_:
basename = os.path.basename(name)
generated_name = pre[0] + basename + post[0]
generated_name = os.path.join(os.path.dirname(name), generated_name)
pre = pre[1:]
post = post[1:]
targets.append(virtual_target.FileTarget(generated_name, t, project, a))
return [ project.manager().virtual_targets().register(t) for t in targets ] |
<SYSTEM_TASK:>
Converts several files to consumable types.
<END_TASK>
<USER_TASK:>
Description:
def convert_multiple_sources_to_consumable_types (self, project, prop_set, sources):
""" Converts several files to consumable types.
""" |
if __debug__:
from .targets import ProjectTarget
assert isinstance(project, ProjectTarget)
assert isinstance(prop_set, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
if not self.source_types_:
return list(sources)
acceptable_types = set()
for t in self.source_types_:
acceptable_types.update(type.all_derived(t))
result = []
for source in sources:
if source.type() not in acceptable_types:
transformed = construct_types(
project, None,self.source_types_, prop_set, [source])
# construct_types returns [prop_set, [targets]]
for t in transformed[1]:
if t.type() in self.source_types_:
result.append(t)
if not transformed:
project.manager().logger().log(__name__, " failed to convert ", source)
else:
result.append(source)
result = sequence.unique(result, stable=True)
return result |
<SYSTEM_TASK:>
Returns the sketch summary for the given set of keys. This is only
<END_TASK>
<USER_TASK:>
Description:
def element_sub_sketch(self, keys = None):
"""
Returns the sketch summary for the given set of keys. This is only
applicable for a sketch summary created from an SArray of array or dict type.
For a dict SArray, the keys are the keys in the dict values.
For an array SArray, the keys are indexes into the array values.
The keys must be passed into the original summary() call in order to
be retrievable later.
Parameters
-----------
keys : list of str | str | list of int | int
The list of dictionary keys or array index to get sub sketch from.
if not given, then retrieve all sub sketches that are available
Returns
-------
A dictionary that maps from the key(index) to the actual sketch summary
for that key(index)
Examples
--------
>>> sa = turicreate.SArray([{'a':1, 'b':2}, {'a':4, 'd':1}])
>>> s = sa.summary(sub_sketch_keys=['a','b'])
>>> s.element_sub_sketch(['a'])
{'a':
+--------------------+-------+----------+
| item | value | is exact |
+--------------------+-------+----------+
| Length | 2 | Yes |
| Min | 1.0 | Yes |
| Max | 4.0 | Yes |
| Mean | 2.5 | Yes |
| Sum | 5.0 | Yes |
| Variance | 2.25 | Yes |
| Standard Deviation | 1.5 | Yes |
| # Missing Values | 0 | Yes |
| # unique values | 2 | No |
+--------------------+-------+----------+
Most frequent items:
+-------+-----+-----+
| value | 1.0 | 4.0 |
+-------+-----+-----+
| count | 1 | 1 |
+-------+-----+-----+
Quantiles:
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 0% | 1% | 5% | 25% | 50% | 75% | 95% | 99% | 100% |
+-----+-----+-----+-----+-----+-----+-----+-----+------+
| 1.0 | 1.0 | 1.0 | 1.0 | 4.0 | 4.0 | 4.0 | 4.0 | 4.0 |
+-----+-----+-----+-----+-----+-----+-----+-----+------+}
""" |
single_val = False
if keys is None:
keys = []
else:
if not isinstance(keys, list):
single_val = True
keys = [keys]
value_types = set([type(i) for i in keys])
if (len(value_types) > 1):
raise ValueError("All keys should have the same type.")
with cython_context():
ret_sketches = self.__proxy__.element_sub_sketch(keys)
ret = {}
# check return key matches input key
for key in keys:
if key not in ret_sketches:
raise KeyError("Cannot retrieve element sub sketch for key '" + str(key) + "'. Element sub sketch can only be retrieved when the summary object was created using the 'sub_sketch_keys' option.")
for key in ret_sketches:
ret[key] = Sketch(_proxy = ret_sketches[key])
if single_val:
return ret[keys[0]]
else:
return ret |
<SYSTEM_TASK:>
Fetches all needed information from the top-level DBAPI module,
<END_TASK>
<USER_TASK:>
Description:
def _get_global_dbapi_info(dbapi_module, conn):
"""
Fetches all needed information from the top-level DBAPI module,
guessing at the module if it wasn't passed as a parameter. Returns a
dictionary of all the needed variables. This is put in one place to
make sure the error message is clear if the module "guess" is wrong.
""" |
module_given_msg = "The DBAPI2 module given ({0}) is missing the global\n"+\
"variable '{1}'. Please make sure you are supplying a module that\n"+\
"conforms to the DBAPI 2.0 standard (PEP 0249)."
module_not_given_msg = "Hello! I gave my best effort to find the\n"+\
"top-level module that the connection object you gave me came from.\n"+\
"I found '{0}' which doesn't have the global variable '{1}'.\n"+\
"To avoid this confusion, you can pass the module as a parameter using\n"+\
"the 'dbapi_module' argument to either from_sql or to_sql."
if dbapi_module is None:
dbapi_module = _get_module_from_object(conn)
module_given = False
else:
module_given = True
module_name = dbapi_module.__name__ if hasattr(dbapi_module, '__name__') else None
needed_vars = ['apilevel','paramstyle','Error','DATETIME','NUMBER','ROWID']
ret_dict = {}
ret_dict['module_name'] = module_name
for i in needed_vars:
tmp = None
try:
tmp = eval("dbapi_module."+i)
except AttributeError as e:
# Some DBs don't actually care about types, so they won't define the
# type constants. Only 'apilevel', 'paramstyle', and 'Error' are strictly required.
if i not in ['apilevel','paramstyle','Error']:
pass
elif module_given:
raise AttributeError(module_given_msg.format(module_name, i))
else:
raise AttributeError(module_not_given_msg.format(module_name, i))
ret_dict[i] = tmp
try:
if ret_dict['apilevel'][0:3] != "2.0":
raise NotImplementedError("Unsupported API version " +\
str(ret_dict['apilevel']) + ". Only DBAPI 2.0 is supported.")
except TypeError as e:
e.message = "Module's 'apilevel' value is invalid."
raise e
acceptable_paramstyles = ['qmark','numeric','named','format','pyformat']
try:
if ret_dict['paramstyle'] not in acceptable_paramstyles:
raise TypeError("Module's 'paramstyle' value is invalid.")
except TypeError as e:
raise TypeError("Module's 'paramstyle' value is invalid.")
return ret_dict |
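A sketch using the standard-library sqlite3 driver, which satisfies DBAPI 2.0:

import sqlite3

conn = sqlite3.connect(':memory:')
info = _get_global_dbapi_info(sqlite3, conn)
print(info['module_name'], info['paramstyle'])   # sqlite3 qmark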
<SYSTEM_TASK:>
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
<END_TASK>
<USER_TASK:>
Description:
def read_csv_with_errors(cls,
url,
delimiter=',',
header=True,
comment_char='',
escape_char='\\',
double_quote=True,
quote_char='\"',
skip_initial_space=True,
column_type_hints=None,
na_values=["NA"],
line_terminator='\n',
usecols = [],
nrows=None,
skiprows=0,
verbose=True,
nrows_to_infer=100,
true_values=[],
false_values=[],
_only_raw_string_substitutions=False,
**kwargs):
"""
Constructs an SFrame from a CSV file or a path to multiple CSVs, and
returns a pair containing the SFrame and a dict of filenames to SArrays
indicating for each file, what are the incorrectly parsed lines
encountered.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
delimiter : string, optional
This describes the delimiter used for parsing csv files.
header : bool, optional
If true, uses the first row as the column names. Otherwise use the
default column names: 'X1, X2, ...'.
comment_char : string, optional
The character which denotes that the
remainder of the line is a comment.
escape_char : string, optional
Character which begins a C escape sequence. Defaults to backslash(\\)
Set to None to disable.
double_quote : bool, optional
If True, two consecutive quotes in a string are parsed to a single
quote.
quote_char : string, optional
Character sequence that indicates a quote.
skip_initial_space : bool, optional
Ignore extra spaces at the start of a field
column_type_hints : None, type, list[type], dict[string, type], optional
This provides type hints for each column. By default, this method
attempts to detect the type of each column automatically.
Supported types are int, float, str, list, dict, and array.array.
* If a single type is provided, the type will be
applied to all columns. For instance, column_type_hints=float
will force all columns to be parsed as float.
* If a list of types is provided, the types applies
to each column in order, e.g.[int, float, str]
will parse the first column as int, second as float and third as
string.
* If a dictionary of column name to type is provided,
each type value in the dictionary is applied to the key it
belongs to.
For instance {'user':int} will hint that the column called "user"
should be parsed as an integer, and the rest will be type inferred.
na_values : str | list of str, optional
A string or list of strings to be interpreted as missing values.
true_values : str | list of str, optional
A string or list of strings to be interpreted as 1
false_values : str | list of str, optional
A string or list of strings to be interpreted as 0
line_terminator : str, optional
A string to be interpreted as the line terminator. Defaults to "\\n"
which will also correctly match Mac, Linux and Windows line endings
("\\r", "\\n" and "\\r\\n" respectively)
usecols : list of str, optional
A subset of column names to output. If unspecified (default),
all columns will be read. This can provide performance gains if the
number of columns are large. If the input file has no headers,
usecols=['X1','X3'] will read columns 1 and 3.
nrows : int, optional
If set, only this many rows will be read from the file.
skiprows : int, optional
If set, this number of rows at the start of the file are skipped.
verbose : bool, optional
If True, print the progress.
Returns
-------
out : tuple
The first element is the SFrame with good data. The second element
is a dictionary of filenames to SArrays indicating for each file,
what are the incorrectly parsed lines encountered.
See Also
--------
read_csv, SFrame
Examples
--------
>>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv'
>>> (sf, bad_lines) = turicreate.SFrame.read_csv_with_errors(bad_url)
>>> sf
+---------+----------+--------+
| user_id | movie_id | rating |
+---------+----------+--------+
| 25904 | 1663 | 3 |
| 25907 | 1663 | 3 |
| 25923 | 1663 | 3 |
| 25924 | 1663 | 3 |
| 25928 | 1663 | 2 |
| ... | ... | ... |
+---------+----------+--------+
[98 rows x 3 columns]
>>> bad_lines
{'https://static.turi.com/datasets/bad_csv_example.csv': dtype: str
Rows: 1
['x,y,z,a,b,c']}
""" |
return cls._read_csv_impl(url,
delimiter=delimiter,
header=header,
error_bad_lines=False, # we are storing errors,
# thus we must not fail
# on bad lines
comment_char=comment_char,
escape_char=escape_char,
double_quote=double_quote,
quote_char=quote_char,
skip_initial_space=skip_initial_space,
column_type_hints=column_type_hints,
na_values=na_values,
line_terminator=line_terminator,
usecols=usecols,
nrows=nrows,
verbose=verbose,
skiprows=skiprows,
store_errors=True,
nrows_to_infer=nrows_to_infer,
true_values=true_values,
false_values=false_values,
_only_raw_string_substitutions=_only_raw_string_substitutions,
**kwargs) |
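A short usage sketch of the error-reporting path; the glob pattern and the column hint below are placeholders.

import turicreate as tc

sf, bad_lines = tc.SFrame.read_csv_with_errors('data/*.csv',
                                               column_type_hints={'rating': int})
for filename, lines in bad_lines.items():
    # 'lines' is an SArray of the raw lines that could not be parsed in that file
    print('%s: %d unparsable line(s)' % (filename, len(lines)))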
<SYSTEM_TASK:>
Reads a JSON file representing a table into an SFrame.
<END_TASK>
<USER_TASK:>
Description:
def read_json(cls,
url,
orient='records'):
"""
Reads a JSON file representing a table into an SFrame.
Parameters
----------
url : string
Location of the CSV file or directory to load. If URL is a directory
or a "glob" pattern, all matching files will be loaded.
orient : string, optional. Either "records" or "lines"
If orient="records" the file is expected to contain a single JSON
array, where each array element is a dictionary. If orient="lines",
the file is expected to contain a JSON element per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the JSON file is expected to contain a single
JSON Array where each array element is a dictionary describing the row.
For instance:
>>> !cat input.json
[{'a':1,'b':1}, {'a':2,'b':2}, {'a':3,'b':3}]
>>> SFrame.read_json('input.json', orient='records')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If orient="lines", the JSON file is expected to contain a JSON element
per line. If each line contains a dictionary, it is automatically
unpacked.
>>> !cat input.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
If the lines are not dictionaries, the original format is maintained.
>>> !cat input.json
['a','b','c']
['d','e','f']
['g','h','i']
[1,2,3]
>>> g = SFrame.read_json('input.json', orient='lines')
Columns:
X1 list
Rows: 3
Data:
+-----------+
| X1 |
+-----------+
| [a, b, c] |
| [d, e, f] |
| [g, h, i] |
+-----------+
[3 rows x 1 columns]
""" |
if orient == "records":
g = SArray.read_json(url)
if len(g) == 0:
return SFrame()
g = SFrame({'X1':g})
return g.unpack('X1','')
elif orient == "lines":
g = cls.read_csv(url, header=False,na_values=['null'],true_values=['true'],false_values=['false'],
_only_raw_string_substitutions=True)
if g.num_rows() == 0:
return SFrame()
if g.num_columns() != 1:
raise RuntimeError("Input JSON not of expected format")
if g['X1'].dtype == dict:
return g.unpack('X1','')
else:
return g
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")") |
<SYSTEM_TASK:>
Convert an SFrame to a single table in a SQL database.
<END_TASK>
<USER_TASK:>
Description:
def to_sql(self, conn, table_name, dbapi_module=None,
use_python_type_specifiers=False, use_exact_column_names=True):
"""
Convert an SFrame to a single table in a SQL database.
This function does not attempt to create the table or check if a table
named `table_name` exists in the database. It simply assumes that
`table_name` exists in the database and appends to it.
`to_sql` can be thought of as a convenience wrapper around
parameterized SQL insert statements.
Parameters
----------
conn : dbapi2.Connection
A DBAPI2 connection object. Any connection object originating from
the 'connect' method of a DBAPI2-compliant package can be used.
table_name : str
The name of the table to append the data in this SFrame.
dbapi_module : module | package, optional
The top-level DBAPI2 module/package that constructed the given
connection object. By default, a best guess of which module the
connection came from is made. In the event that this guess is wrong,
this will need to be specified.
use_python_type_specifiers : bool, optional
If the DBAPI2 module's parameter marker style is 'format' or
'pyformat', attempt to use accurate type specifiers for each value
('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply
use 's' for all types if they use these parameter markers, so this is
False by default.
use_exact_column_names : bool, optional
Specify the column names of the SFrame when inserting its contents
into the DB. If the specified table does not have the exact same
column names as the SFrame, inserting the data will fail. If False,
the columns in the SFrame are inserted in order without care of the
schema of the DB table. True by default.
""" |
mod_info = _get_global_dbapi_info(dbapi_module, conn)
c = conn.cursor()
col_info = list(zip(self.column_names(), self.column_types()))
if not use_python_type_specifiers:
_pytype_to_printf = lambda x: 's'
# DBAPI2 standard allows for five different ways to specify parameters
sql_param = {
'qmark' : lambda name,col_num,col_type: '?',
'numeric' : lambda name,col_num,col_type:':'+str(col_num+1),
'named' : lambda name,col_num,col_type:':'+str(name),
'format' : lambda name,col_num,col_type:'%'+_pytype_to_printf(col_type),
'pyformat': lambda name,col_num,col_type:'%('+str(name)+')'+_pytype_to_printf(col_type),
}
get_sql_param = sql_param[mod_info['paramstyle']]
# form insert string
ins_str = "INSERT INTO " + str(table_name)
value_str = " VALUES ("
col_str = " ("
count = 0
for i in col_info:
col_str += i[0]
value_str += get_sql_param(i[0],count,i[1])
if count < len(col_info)-1:
col_str += ","
value_str += ","
count += 1
col_str += ")"
value_str += ")"
if use_exact_column_names:
ins_str += col_str
ins_str += value_str
# Some formats require values in an iterable, some a dictionary
if (mod_info['paramstyle'] == 'named' or\
mod_info['paramstyle'] == 'pyformat'):
prepare_sf_row = lambda x:x
else:
col_names = self.column_names()
prepare_sf_row = lambda x: [x[i] for i in col_names]
for i in self:
try:
c.execute(ins_str, prepare_sf_row(i))
except mod_info['Error'] as e:
if hasattr(conn, 'rollback'):
conn.rollback()
raise e
conn.commit()
c.close() |
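A minimal end-to-end sketch with the standard-library sqlite3 driver. The table name and schema are illustrative; note that to_sql assumes the target table already exists.

import sqlite3
import turicreate as tc

conn = sqlite3.connect(':memory:')
# to_sql does not create the table, so it must exist with matching columns.
conn.execute('CREATE TABLE ratings (user_id INTEGER, movie_id INTEGER, rating INTEGER)')

sf = tc.SFrame({'user_id': [1, 2], 'movie_id': [10, 20], 'rating': [4, 5]})
# sqlite3 uses the 'qmark' paramstyle, so the generated INSERT uses '?' markers.
sf.to_sql(conn, 'ratings', dbapi_module=sqlite3)
print(conn.execute('SELECT COUNT(*) FROM ratings').fetchone()[0])   # 2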
<SYSTEM_TASK:>
Print the first M rows and N columns of the SFrame in human readable
<END_TASK>
<USER_TASK:>
Description:
def print_rows(self, num_rows=10, num_columns=40, max_column_width=30,
max_row_width=80, output_file=None):
"""
Print the first M rows and N columns of the SFrame in human readable
format.
Parameters
----------
num_rows : int, optional
Number of rows to print.
num_columns : int, optional
Number of columns to print.
max_column_width : int, optional
Maximum width of a column. Columns use fewer characters if possible.
max_row_width : int, optional
Maximum width of a printed row. Columns beyond this width wrap to a
new line. `max_row_width` is automatically reset to be the
larger of itself and `max_column_width`.
output_file: file, optional
The stream or file that receives the output. By default the output
goes to sys.stdout, but it can also be redirected to a file or a
string (using an object of type StringIO).
See Also
--------
head, tail
""" |
if output_file is None:
output_file = sys.stdout
max_row_width = max(max_row_width, max_column_width + 1)
printed_sf = self._imagecols_to_stringcols(num_rows)
row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False,
max_rows_to_display=num_rows,
max_columns=num_columns,
max_column_width=max_column_width,
max_row_width=max_row_width)
footer = "[%d rows x %d columns]\n" % self.shape
print('\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer, file=output_file) |
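A small sketch of redirecting the rendered table into an in-memory buffer, as described above.

import io
import turicreate as tc

sf = tc.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
buf = io.StringIO()
sf.print_rows(num_rows=2, output_file=buf)         # capture instead of printing
print('[3 rows x 2 columns]' in buf.getvalue())    # True -- the footer reports the full shape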
<SYSTEM_TASK:>
Where other is an SArray of identical length as the current Frame,
<END_TASK>
<USER_TASK:>
Description:
def _row_selector(self, other):
"""
Where other is an SArray of identical length as the current Frame,
this returns a selection of a subset of rows in the current SFrame
where the corresponding row in the selector is non-zero.
""" |
if type(other) is SArray:
if self.__has_size__() and other.__has_size__() and len(other) != len(self):
raise IndexError("Cannot perform logical indexing on arrays of different length.")
with cython_context():
return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__)) |
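This private helper backs logical indexing with an SArray; a brief sketch of the public behaviour it implements.

import turicreate as tc

sf = tc.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
mask = sf['id'] > 1     # an SArray of 0/1 flags with the same length as sf
print(sf[mask])         # keeps only the rows where the selector is non-zero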
<SYSTEM_TASK:>
Convert this SFrame to pandas.DataFrame.
<END_TASK>
<USER_TASK:>
Description:
def to_dataframe(self):
"""
Convert this SFrame to pandas.DataFrame.
This operation will construct a pandas.DataFrame in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : pandas.DataFrame
The dataframe which contains all rows of SFrame
""" |
assert HAS_PANDAS, 'pandas is not installed.'
df = pandas.DataFrame()
for i in range(self.num_columns()):
column_name = self.column_names()[i]
df[column_name] = list(self[column_name])
if len(df[column_name]) == 0:
df[column_name] = df[column_name].astype(self.column_types()[i])
return df |
<SYSTEM_TASK:>
Converts this SFrame to a numpy array
<END_TASK>
<USER_TASK:>
Description:
def to_numpy(self):
"""
Converts this SFrame to a numpy array
This operation will construct a numpy array in memory. Care must
be taken when size of the returned object is big.
Returns
-------
out : numpy.ndarray
A Numpy Array containing all the values of the SFrame
""" |
assert HAS_NUMPY, 'numpy is not installed.'
import numpy
return numpy.transpose(numpy.asarray([self[x] for x in self.column_names()])) |
<SYSTEM_TASK:>
Map each row of the SFrame to multiple rows in a new SFrame via a
<END_TASK>
<USER_TASK:>
Description:
def flat_map(self, column_names, fn, column_types='auto', seed=None):
"""
Map each row of the SFrame to multiple rows in a new SFrame via a
function.
The output of `fn` must have type List[List[...]]. Each inner list
will be a single row in the new output, and the collection of these
rows within the outer list make up the data for the output SFrame.
All rows must have the same length and the same order of types to
make sure the result columns are homogeneously typed. For example, if
the first element emitted into the outer list by `fn` is
[43, 2.3, 'string'], then all other elements emitted into the outer
list must be a list with three elements, where the first is an int,
second is a float, and third is a string. If column_types is not
specified, the first 10 rows of the SFrame are used to determine the
column types of the returned sframe.
Parameters
----------
column_names : list[str]
The column names for the returned SFrame.
fn : function
The function that maps each of the sframe row into multiple rows,
returning List[List[...]]. All outputted rows must have the same
length and order of types.
column_types : list[type], optional
The column types of the output SFrame. Default value will be
automatically inferred by running `fn` on the first 10 rows of the
input. If the types cannot be inferred from the first 10 rows, an
error is raised.
seed : int, optional
Used as the seed if a random number generator is included in `fn`.
Returns
-------
out : SFrame
A new SFrame containing the results of the flat_map of the
original SFrame.
Examples
---------
Repeat each row according to the value in the 'number' column.
>>> sf = turicreate.SFrame({'letter': ['a', 'b', 'c'],
... 'number': [1, 2, 3]})
>>> sf.flat_map(['number', 'letter'],
... lambda x: [list(x.values()) for i in range(0, x['number'])])
+--------+--------+
| number | letter |
+--------+--------+
| 1 | a |
| 2 | b |
| 2 | b |
| 3 | c |
| 3 | c |
| 3 | c |
+--------+--------+
[6 rows x 2 columns]
""" |
assert callable(fn), "Input must be callable"
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# determine the column_types
if column_types == 'auto':
types = set()
sample = self[0:10]
results = [fn(row) for row in sample]
for rows in results:
if type(rows) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
# note: this skips empty lists
for row in rows:
if type(row) is not list:
raise TypeError("Output type of the lambda function must be a list of lists")
types.add(tuple([type(v) for v in row]))
if len(types) == 0:
raise TypeError(
"Could not infer output column types from the first ten rows " +\
"of the SFrame. Please use the 'column_types' parameter to " +\
"set the types.")
if len(types) > 1:
raise TypeError("Mapped rows must have the same length and types")
column_types = list(types.pop())
assert type(column_types) is list, "'column_types' must be a list."
assert len(column_types) == len(column_names), "Number of output columns must match the size of column names"
with cython_context():
return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed)) |
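A hedged variant of the docstring example that passes column_types explicitly, sidestepping inference from the first ten rows; the output column names are illustrative.

import turicreate as tc

sf = tc.SFrame({'letter': ['a', 'b'], 'number': [2, 3]})
out = sf.flat_map(['letter', 'copy'],
                  lambda row: [[row['letter'], i] for i in range(row['number'])],
                  column_types=[str, int])
print(out.num_rows())   # 5 -- two rows emitted for 'a', three for 'b'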
<SYSTEM_TASK:>
Sample a fraction of the current SFrame's rows.
<END_TASK>
<USER_TASK:>
Description:
def sample(self, fraction, seed=None, exact=False):
"""
Sample a fraction of the current SFrame's rows.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
If exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
Seed for the random number generator used to sample.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SFrame
A new SFrame containing sampled rows of the current SFrame.
Examples
--------
Suppose we have an SFrame with 6,145 rows.
>>> import random
>>> sf = SFrame({'id': range(0, 6145)})
Retrieve about 30% of the SFrame rows with repeatable results by
setting the random seed.
>>> len(sf.sample(.3, seed=5))
1783
""" |
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (self.num_rows() == 0 or self.num_columns() == 0):
return self
else:
with cython_context():
return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact)) |
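A brief sketch of the exact-sampling path; with exact=True the row count is deterministic rather than an expectation.

import turicreate as tc

sf = tc.SFrame({'id': range(100)})
print(len(sf.sample(0.3, seed=1, exact=True)))   # 30 -- an exact 30% sample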
<SYSTEM_TASK:>
Save the SFrame to a file system for later use.
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename, format=None):
"""
Save the SFrame to a file system for later use.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL. If the format is 'binary', a directory will be created
at the location which will contain the sframe.
format : {'binary', 'csv', 'json'}, optional
Format in which to save the SFrame. Binary saved SFrames can be
loaded much faster and without any format conversion losses. If not
given, will try to infer the format from filename given. If file
name ends with 'csv' or '.csv.gz', then save as 'csv' format,
otherwise save as 'binary' format.
See export_csv for more csv saving options.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save('data/training_data_sframe')
>>> # Save the sframe into csv format
>>> sf.save('data/training_data.csv', format='csv')
""" |
if format is None:
if filename.endswith(('.csv', '.csv.gz')):
format = 'csv'
elif filename.endswith(('.json')):
format = 'json'
else:
format = 'binary'
else:
if format == 'csv':
if not filename.endswith(('.csv', '.csv.gz')):
filename = filename + '.csv'
elif format != 'binary' and format != 'json':
raise ValueError("Invalid format: {}. Supported formats are 'csv' and 'binary' and 'json'".format(format))
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
if format == 'binary':
self.__proxy__.save(url)
elif format == 'csv':
assert filename.endswith(('.csv', '.csv.gz'))
self.__proxy__.save_as_csv(url, {})
elif format == 'json':
self.export_json(url)
else:
raise ValueError("Unsupported format: {}".format(format)) |
<SYSTEM_TASK:>
Writes an SFrame to a CSV file.
<END_TASK>
<USER_TASK:>
Description:
def export_csv(self, filename, delimiter=',', line_terminator='\n',
header=True, quote_level=csv.QUOTE_NONNUMERIC, double_quote=True,
escape_char='\\', quote_char='\"', na_rep='',
file_header='', file_footer='', line_prefix='',
_no_prefix_on_first_value=False, **kwargs):
"""
Writes an SFrame to a CSV file.
Parameters
----------
filename : string
The location to save the CSV.
delimiter : string, optional
This describes the delimiter used for writing csv files.
line_terminator: string, optional
The newline character
header : bool, optional
If true, the column names are emitted as a header.
quote_level: csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional
The quoting level. If csv.QUOTE_ALL, every field is quoted.
if csv.QUOTE_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only
non-numeric fields are quoted. csv.QUOTE_MINIMAL is interpreted as
csv.QUOTE_NONNUMERIC.
double_quote : bool, optional
If True, quotes are escaped as two consecutive quotes
escape_char : string, optional
Character which begins a C escape sequence
quote_char: string, optional
Character used to quote fields
na_rep: string, optional
The value used to denote a missing value.
file_header: string, optional
A string printed to the start of the file
file_footer: string, optional
A string printed to the end of the file
line_prefix: string, optional
A string printed at the start of each value line
""" |
# Pandas argument compatibility
if "sep" in kwargs:
delimiter = kwargs['sep']
del kwargs['sep']
if "quotechar" in kwargs:
quote_char = kwargs['quotechar']
del kwargs['quotechar']
if "doublequote" in kwargs:
double_quote = kwargs['doublequote']
del kwargs['doublequote']
if "lineterminator" in kwargs:
line_terminator = kwargs['lineterminator']
del kwargs['lineterminator']
if len(kwargs) > 0:
raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys())))
write_csv_options = {}
write_csv_options['delimiter'] = delimiter
write_csv_options['escape_char'] = escape_char
write_csv_options['double_quote'] = double_quote
write_csv_options['quote_char'] = quote_char
if quote_level == csv.QUOTE_MINIMAL:
write_csv_options['quote_level'] = 0
elif quote_level == csv.QUOTE_ALL:
write_csv_options['quote_level'] = 1
elif quote_level == csv.QUOTE_NONNUMERIC:
write_csv_options['quote_level'] = 2
elif quote_level == csv.QUOTE_NONE:
write_csv_options['quote_level'] = 3
write_csv_options['header'] = header
write_csv_options['line_terminator'] = line_terminator
write_csv_options['na_value'] = na_rep
write_csv_options['file_header'] = file_header
write_csv_options['file_footer'] = file_footer
write_csv_options['line_prefix'] = line_prefix
# undocumented option. Disables line prefix on the first value line
write_csv_options['_no_prefix_on_first_value'] = _no_prefix_on_first_value
url = _make_internal_url(filename)
self.__proxy__.save_as_csv(url, write_csv_options) |
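A short sketch exercising the pandas-style keyword aliases handled above; the output path is a placeholder.

import turicreate as tc

sf = tc.SFrame({'a': [1, None], 'b': ['x', 'y']})
# 'sep' and 'quotechar' are remapped to delimiter and quote_char before writing.
sf.export_csv('/tmp/out.tsv', sep='\t', quotechar="'", na_rep='NULL')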
<SYSTEM_TASK:>
Writes an SFrame to a JSON file.
<END_TASK>
<USER_TASK:>
Description:
def export_json(self,
filename,
orient='records'):
"""
Writes an SFrame to a JSON file.
Parameters
----------
filename : string
The location to save the JSON file.
orient : string, optional. Either "records" or "lines"
If orient="records" the file is saved as a single JSON array.
If orient="lines", the file is saves as a JSON value per line.
Examples
--------
The orient parameter describes the expected input format of the JSON
file.
If orient="records", the output will be a single JSON Array where
each array element is a dictionary describing the row.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='records')
>>> !cat output.json
[
{'a':1,'b':1},
{'a':2,'b':2},
{'a':3,'b':3},
]
If orient="rows", each row will be emitted as a JSON dictionary to
each file line.
>>> g
Columns:
a int
b int
Rows: 3
Data:
+---+---+
| a | b |
+---+---+
| 1 | 1 |
| 2 | 2 |
| 3 | 3 |
+---+---+
>>> g.export_json('output.json', orient='lines')
>>> !cat output.json
{'a':1,'b':1}
{'a':2,'b':2}
{'a':3,'b':3}
""" |
if orient == "records":
self.pack_columns(dtype=dict).export_csv(
filename, file_header='[', file_footer=']',
header=False, double_quote=False,
quote_level=csv.QUOTE_NONE,
line_prefix=',',
_no_prefix_on_first_value=True)
elif orient == "lines":
self.pack_columns(dtype=dict).export_csv(
filename, header=False, double_quote=False, quote_level=csv.QUOTE_NONE)
else:
raise ValueError("Invalid value for orient parameter (" + str(orient) + ")") |
<SYSTEM_TASK:>
Performs an incomplete save of an existing SFrame into a directory.
<END_TASK>
<USER_TASK:>
Description:
def _save_reference(self, filename):
"""
Performs an incomplete save of an existing SFrame into a directory.
This saved SFrame may reference SFrames in other locations in the same
filesystem for certain resources.
Parameters
----------
filename : string
The location to save the SFrame. Either a local directory or a
remote URL.
See Also
--------
load_sframe, SFrame
Examples
--------
>>> # Save the sframe into binary format
>>> sf.save_reference('data/training_data_sframe')
""" |
## Save the SFrame
url = _make_internal_url(filename)
with cython_context():
self.__proxy__.save_reference(url) |
<SYSTEM_TASK:>
Returns an SFrame with a new column. The number of elements in the data
<END_TASK>
<USER_TASK:>
Description:
def add_column(self, data, column_name="", inplace=False):
"""
Returns an SFrame with a new column. The number of elements in the data
given must match the length of every other column of the SFrame.
If no name is given, a default name is chosen.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data to add.
column_name : string, optional
The name of the column. If no name is given, a default name is
chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_columns
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sa = turicreate.SArray(['cat', 'dog', 'fossa'])
>>> # This line is equivalent to `sf['species'] = sa`
>>> res = sf.add_column(sa, 'species')
>>> res
+----+-----+---------+
| id | val | species |
+----+-----+---------+
| 1 | A | cat |
| 2 | B | dog |
| 3 | C | fossa |
+----+-----+---------+
[3 rows x 3 columns]
""" |
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
if isinstance(data, _Iterable):
data = SArray(data)
else:
if self.num_columns() == 0:
data = SArray([data])
else:
data = SArray.from_const(data, self.num_rows())
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_column(data.__proxy__, column_name)
ret._cache = None
return ret |
<SYSTEM_TASK:>
Returns an SFrame with multiple columns added. The number of
<END_TASK>
<USER_TASK:>
Description:
def add_columns(self, data, column_names=None, inplace=False):
"""
Returns an SFrame with multiple columns added. The number of
elements in all columns must match the length of every other column of
the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
add_column
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf2 = turicreate.SFrame({'species': ['cat', 'dog', 'fossa'],
... 'age': [3, 5, 9]})
>>> res = sf.add_columns(sf2)
>>> res
+----+-----+-----+---------+
| id | val | age | species |
+----+-----+-----+---------+
| 1 | A | 3 | cat |
| 2 | B | 5 | dog |
| 3 | C | 9 | fossa |
+----+-----+-----+---------+
[3 rows x 4 columns]
""" |
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.add_columns([x.__proxy__ for x in datalist], column_names)
ret._cache = None
return ret |
<SYSTEM_TASK:>
Returns an SFrame with a column removed.
<END_TASK>
<USER_TASK:>
Description:
def remove_column(self, column_name, inplace=False):
"""
Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
""" |
column_name = str(column_name)
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
colid = self.column_names().index(column_name)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret |
<SYSTEM_TASK:>
Returns an SFrame with one or more columns removed.
<END_TASK>
<USER_TASK:>
Description:
def remove_columns(self, column_names, inplace=False):
"""
Returns an SFrame with one or more columns removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_names : list or iterable
A list or iterable of column names.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given columns removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]})
>>> res = sf.remove_columns(['val1', 'val2'])
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
""" |
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError('Cannot find column %s' % name)
# Delete it going backwards so we don't invalidate indices
deletion_indices = sorted(existing_columns[name] for name in column_names)
if inplace:
ret = self
else:
ret = self.copy()
for colid in reversed(deletion_indices):
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret |
<SYSTEM_TASK:>
Returns an SFrame with two column positions swapped.
<END_TASK>
<USER_TASK:>
Description:
def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Returns an SFrame with two column positions swapped.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with swapped columns.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> res = sf.swap_columns('id', 'val')
>>> res
+-----+-----+
| val | id |
+-----+-----+
| A | 1 |
| B | 2 |
| C | 3 |
+-----+-----+
[3 rows x 2 columns]
""" |
colnames = self.column_names()
colid_1 = colnames.index(column_name_1)
colid_2 = colnames.index(column_name_2)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.swap_columns(colid_1, colid_2)
ret._cache = None
return ret |
<SYSTEM_TASK:>
Returns an SFrame with columns renamed. ``names`` is expected to be a
<END_TASK>
<USER_TASK:>
Description:
def rename(self, names, inplace=False):
"""
Returns an SFrame with columns renamed. ``names`` is expected to be a
dict specifying the old and new names. This changes the names of the
columns given as the keys and replaces them with the names given as the
values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict [string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The current SFrame.
See Also
--------
column_names
Examples
--------
>>> sf = SFrame({'X1': ['Alice','Bob'],
... 'X2': ['123 Fake Street','456 Fake Street']})
>>> res = sf.rename({'X1': 'name', 'X2':'address'})
>>> res
+-------+-----------------+
| name | address |
+-------+-----------------+
| Alice | 123 Fake Street |
| Bob | 456 Fake Street |
+-------+-----------------+
[2 rows x 2 columns]
""" |
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
all_columns = set(self.column_names())
for k in names:
if not k in all_columns:
raise ValueError('Cannot find column %s in the SFrame' % k)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
for k in names:
colid = ret.column_names().index(k)
ret.__proxy__.set_column_name(colid, names[k])
ret._cache = None
return ret |
<SYSTEM_TASK:>
Add the rows of an SFrame to the end of this SFrame.
<END_TASK>
<USER_TASK:>
Description:
def append(self, other):
"""
Add the rows of an SFrame to the end of this SFrame.
Both SFrames must have the same set of columns with the same column
names and column types.
Parameters
----------
other : SFrame
Another SFrame whose rows are appended to the current SFrame.
Returns
-------
out : SFrame
The result SFrame from the append operation.
Examples
--------
>>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']})
>>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> sf = sf.append(sf2)
>>> sf
+----+-----+
| id | val |
+----+-----+
| 4 | D |
| 6 | F |
| 8 | H |
| 1 | A |
| 2 | B |
| 3 | C |
+----+-----+
[6 rows x 2 columns]
""" |
if type(other) is not SFrame:
raise RuntimeError("SFrame append can only work with SFrame")
left_empty = len(self.column_names()) == 0
right_empty = len(other.column_names()) == 0
if (left_empty and right_empty):
return SFrame()
if (left_empty or right_empty):
non_empty_sframe = self if right_empty else other
return non_empty_sframe.__copy__()
my_column_names = self.column_names()
my_column_types = self.column_types()
other_column_names = other.column_names()
if (len(my_column_names) != len(other_column_names)):
raise RuntimeError("Two SFrames have to have the same number of columns")
# check if the order of column name is the same
column_name_order_match = True
for i in range(len(my_column_names)):
if other_column_names[i] != my_column_names[i]:
column_name_order_match = False
break
processed_other_frame = other
if not column_name_order_match:
# we allow name order of two sframes to be different, so we create a new sframe from
# "other" sframe to make it has exactly the same shape
processed_other_frame = SFrame()
for i in range(len(my_column_names)):
col_name = my_column_names[i]
if(col_name not in other_column_names):
raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame")
other_column = other.select_column(col_name)
processed_other_frame.add_column(other_column, col_name, inplace=True)
# check column type
if my_column_types[i] != other_column.dtype:
raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype))
with cython_context():
return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__)) |
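A small sketch of the column-reordering path above: when both frames share the same columns in a different order, the second frame is realigned by name before appending.

import turicreate as tc

sf1 = tc.SFrame({'id': [1], 'val': ['A']})
sf2 = tc.SFrame({'val': ['B'], 'id': [2]})   # same columns, different order
print(sf1.append(sf2).num_rows())            # 2 -- columns are matched by name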
<SYSTEM_TASK:>
Explore the SFrame in an interactive GUI. Opens a new app window.
<END_TASK>
<USER_TASK:>
Description:
def explore(self, title=None):
"""
Explore the SFrame in an interactive GUI. Opens a new app window.
Parameters
----------
title : str
The plot title to show for the resulting visualization. Defaults to None.
If the title is None, a default title will be provided.
Returns
-------
None
Examples
--------
Suppose 'sf' is an SFrame, we can view it using:
>>> sf.explore()
To override the default plot title and axis labels:
>>> sf.explore(title="My Plot Title")
""" |
import sys
import os
if sys.platform != 'darwin' and sys.platform != 'linux2' and sys.platform != 'linux':
raise NotImplementedError('Visualization is currently supported only on macOS and Linux.')
path_to_client = _get_client_app_path()
if title is None:
title = ""
self.__proxy__.explore(path_to_client, title) |
<SYSTEM_TASK:>
Splits a datetime column of SFrame to multiple columns, with each value in a
<END_TASK>
<USER_TASK:>
Description:
def split_datetime(self, column_name, column_name_prefix=None, limit=None, timezone=False):
"""
Splits a datetime column of SFrame to multiple columns, with each value in a
separate column. Returns a new SFrame with the expanded column replaced with
a list of new columns. The expanded column must be of datetime type.
For more details regarding name generation and
other, refer to :py:func:`turicreate.SArray.split_datetime()`
Parameters
----------
column_name : str
Name of the unpacked column.
column_name_prefix : str, optional
If provided, expanded column names would start with the given prefix.
If not provided, the default value is the name of the expanded column.
limit: list[str], optional
Limits the set of datetime elements to expand.
Possible values are 'year','month','day','hour','minute','second',
'weekday', 'isoweekday', 'tmweekday', and 'us'.
If not provided, only ['year','month','day','hour','minute','second']
are expanded.
timezone : bool, optional
A boolean parameter that determines whether to show the timezone
column or not. Defaults to False.
Returns
-------
out : SFrame
A new SFrame that contains rest of columns from original SFrame with
the given column replaced with a collection of expanded columns.
Examples
---------
>>> sf
Columns:
id int
submission datetime
Rows: 2
Data:
+----+-------------------------------------------------+
| id | submission |
+----+-------------------------------------------------+
| 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))|
| 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))|
+----+-------------------------------------------------+
>>> sf.split_datetime('submission',limit=['hour','minute'])
Columns:
id int
submission.hour int
submission.minute int
Rows: 2
Data:
+----+-----------------+-------------------+
| id | submission.hour | submission.minute |
+----+-----------------+-------------------+
| 1 | 7 | 17 |
| 2 | 5 | 43 |
+----+-----------------+-------------------+
""" |
if column_name not in self.column_names():
raise KeyError("column '" + column_name + "' does not exist in current SFrame")
if column_name_prefix is None:
column_name_prefix = column_name
new_sf = self[column_name].split_datetime(column_name_prefix, limit, timezone)
# construct return SFrame, check if there is conflict
rest_columns = [name for name in self.column_names() if name != column_name]
new_names = new_sf.column_names()
while set(new_names).intersection(rest_columns):
new_names = [name + ".1" for name in new_names]
new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))), inplace=True)
ret_sf = self.select_columns(rest_columns)
ret_sf.add_columns(new_sf, inplace=True)
return ret_sf |
<SYSTEM_TASK:>
Convert a "wide" column of an SFrame to one or two "tall" columns by
<END_TASK>
<USER_TASK:>
Description:
def stack(self, column_name, new_column_name=None, drop_na=False, new_column_type=None):
"""
Convert a "wide" column of an SFrame to one or two "tall" columns by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is array or list type, one new column is created as a
result of stacking. Each row holds one element of the array or list
value, and the rest of the columns from the same original row are repeated.
The returned SFrame includes the newly created column(s) and all
columns other than the one that is stacked.
Parameters
--------------
column_name : str
The column to stack. This column must be of dict/list/array type
new_column_name : str | list of str, optional
The new column name(s). If original column is list/array type,
new_column_name must a string. If original column is dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
new_column_type : type | list of types, optional
The new column types. If original column is a list/array type
new_column_type must be a single type, or a list of one type. If
original column is of dict type, new_column_type must be a list of
two types. If not provided, the types are automatically inferred
from the first 100 values of the SFrame.
Returns
-------
out : SFrame
A new SFrame that contains newly stacked column(s) plus columns in
original SFrame other than the stacked column.
See Also
--------
unstack
Examples
---------
Suppose 'sf' is an SFrame that contains a column of dict type:
>>> sf = turicreate.SFrame({'topic':[1,2,3,4],
... 'words': [{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}]
... })
+-------+----------------------+
| topic | words |
+-------+----------------------+
| 1 | {'a': 3, 'cat': 2} |
| 2 | {'a': 1, 'the': 2} |
| 3 | {'the': 1, 'dog': 3} |
| 4 | {} |
+-------+----------------------+
[4 rows x 2 columns]
Stack would stack all keys in one column and all values in another
column:
>>> sf.stack('words', new_column_name=['word', 'count'])
+-------+------+-------+
| topic | word | count |
+-------+------+-------+
| 1 | a | 3 |
| 1 | cat | 2 |
| 2 | a | 1 |
| 2 | the | 2 |
| 3 | the | 1 |
| 3 | dog | 3 |
| 4 | None | None |
+-------+------+-------+
[7 rows x 3 columns]
Observe that since topic 4 had no words, an empty row is inserted.
To drop that row, set drop_na=True in the parameters to stack.
Suppose 'sf' is an SFrame that contains a user and his/her friends,
where 'friends' columns is an array type. Stack on 'friends' column
would create a user/friend list for each user/friend pair:
>>> sf = turicreate.SFrame({'topic':[1,2,3],
... 'friends':[[2,3,4], [5,6],
... [4,5,10,None]]
... })
>>> sf
+-------+------------------+
| topic | friends |
+-------+------------------+
| 1 | [2, 3, 4] |
| 2 | [5, 6] |
| 3 | [4, 5, 10, None] |
+-------+------------------+
[3 rows x 2 columns]
>>> sf.stack('friends', new_column_name='friend')
+-------+--------+
| topic | friend |
+-------+--------+
| 1 | 2 |
| 1 | 3 |
| 1 | 4 |
| 2 | 5 |
| 2 | 6 |
| 3 | 4 |
| 3 | 5 |
| 3 | 10 |
| 3 | None |
+-------+--------+
[9 rows x 2 columns]
""" |
# validate column_name
column_name = str(column_name)
if column_name not in self.column_names():
raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.")
stack_column_type = self[column_name].dtype
if (stack_column_type not in [dict, array.array, list]):
raise TypeError("Stack is only supported for column of dict/list/array type.")
# user defined types. do some checking
if new_column_type is not None:
# if new_column_type is a single type, just make it a list of one type
if type(new_column_type) is type:
new_column_type = [new_column_type]
if (stack_column_type in [list, array.array]) and len(new_column_type) != 1:
raise ValueError("Expecting a single column type to unpack list or array columns")
if (stack_column_type in [dict]) and len(new_column_type) != 2:
raise ValueError("Expecting two column types to unpack a dict column")
if (new_column_name is not None):
if stack_column_type == dict:
if (type(new_column_name) is not list):
raise TypeError("new_column_name has to be a list to stack dict type")
elif (len(new_column_name) != 2):
raise TypeError("new_column_name must have length of two")
else:
if (type(new_column_name) != str):
raise TypeError("new_column_name has to be a str")
new_column_name = [new_column_name]
# check if the new column name conflicts with existing ones
for name in new_column_name:
if (name in self.column_names()) and (name != column_name):
raise ValueError("Column with name '" + name + "' already exists, pick a new column name")
else:
if stack_column_type == dict:
new_column_name = ["",""]
else:
new_column_name = [""]
# infer column types
head_row = SArray(self[column_name].head(100)).dropna()
if (len(head_row) == 0):
raise ValueError("Cannot infer column type because there is not enough rows to infer value")
if new_column_type is None:
# we have to perform type inference
if stack_column_type == dict:
# infer key/value type
keys = []; values = []
for row in head_row:
for val in row:
keys.append(val)
if val is not None: values.append(row[val])
new_column_type = [
infer_type_of_list(keys),
infer_type_of_list(values)
]
else:
values = [v for v in itertools.chain.from_iterable(head_row)]
new_column_type = [infer_type_of_list(values)]
with cython_context():
return SFrame(_proxy=self.__proxy__.stack(column_name,
new_column_name,
new_column_type, drop_na)) |
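A hedged variant of the docstring example that supplies new_column_type, bypassing type inference over the sampled head rows.

import turicreate as tc

sf = tc.SFrame({'topic': [1, 2], 'words': [{'a': 3}, {}]})
out = sf.stack('words', new_column_name=['word', 'count'],
               new_column_type=[str, int])
print(out.column_names())   # ['topic', 'word', 'count']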
<SYSTEM_TASK:>
Concatenate values from one or two columns into one column, grouping by
<END_TASK>
<USER_TASK:>
Description:
def unstack(self, column_names, new_column_name=None):
"""
Concatenate values from one or two columns into one column, grouping by
all other columns. The resulting column could be of type list, array or
dictionary. If ``column_names`` is a numeric column, the result will be of
array.array type. If ``column_names`` is a non-numeric column, the new column
will be of list type. If ``column_names`` is a list of two columns, the new
column will be of dict type where the keys are taken from the first
column in the list.
Parameters
----------
column_names : str | [str, str]
The column(s) that is(are) to be concatenated.
If str, then collapsed column type is either array or list.
If [str, str], then collapsed column type is dict
new_column_name : str, optional
New column name. If not given, a name is generated automatically.
Returns
-------
out : SFrame
A new SFrame containing the grouped columns as well as the new
column.
See Also
--------
stack : The inverse of unstack.
groupby : ``unstack`` is a special version of ``groupby`` that uses the
:mod:`~turicreate.aggregate.CONCAT` aggregator
Notes
-----
- There is no guarantee the resulting SFrame maintains the same order as
the original SFrame.
- Missing values are maintained during unstack.
- When unstacking into a dictionary, if there is more than one instance
of a given key for a particular group, an arbitrary value is selected.
Examples
--------
>>> sf = turicreate.SFrame({'count':[4, 2, 1, 1, 2, None],
... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'],
... 'word':['a', 'c', 'c', 'a', 'b', None]})
>>> sf.unstack(column_names=['word', 'count'], new_column_name='words')
+----------+------------------+
| topic | words |
+----------+------------------+
| elephant | {'a': 1, 'b': 2} |
| dog | {'c': 1} |
| cat | {'a': 4, 'c': 2} |
| fish | None |
+----------+------------------+
[4 rows x 2 columns]
>>> sf = turicreate.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3],
... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]})
>>> sf.unstack('friend', new_column_name='new name')
+------+-----------+
| user | new name |
+------+-----------+
| 3 | [5] |
| 1 | [2, 3, 4] |
| 2 | [6, 4, 5] |
| 4 | [2, 3] |
+------+-----------+
[4 rows x 2 columns]
""" |
if (type(column_names) != str and len(column_names) != 2):
raise TypeError("'column_names' parameter has to be either a string or a list of two strings.")
with cython_context():
if type(column_names) == str:
key_columns = [i for i in self.column_names() if i != column_names]
if new_column_name is not None:
return self.groupby(key_columns, {new_column_name : aggregate.CONCAT(column_names)})
else:
return self.groupby(key_columns, aggregate.CONCAT(column_names))
elif len(column_names) == 2:
key_columns = [i for i in self.column_names() if i not in column_names]
if new_column_name is not None:
return self.groupby(key_columns, {new_column_name: aggregate.CONCAT(column_names[0], column_names[1])})
else:
return self.groupby(key_columns, aggregate.CONCAT(column_names[0], column_names[1])) |
<SYSTEM_TASK:>
Sort current SFrame by the given columns, using the given sort order.
<END_TASK>
<USER_TASK:>
Description:
def sort(self, key_column_names, ascending=True):
"""
Sort current SFrame by the given columns, using the given sort order.
Only columns of type str, int, float, or datetime.datetime can be sorted.
Parameters
----------
key_column_names : str | list of str | list of (str, bool) pairs
Names of columns to be sorted. The result will be sorted first by
first column, followed by second column, and so on. All columns will
be sorted in the same order as governed by the `ascending`
parameter. To control the sort ordering for each column
individually, `key_column_names` must be a list of (str, bool) pairs.
Given this case, the first value is the column name and the second
value is a boolean indicating whether the sort order is ascending.
ascending : bool, optional
Sort all columns in the given order.
Returns
-------
out : SFrame
A new SFrame that is sorted according to given sort criteria
See Also
--------
topk
Examples
--------
Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'.
To sort by column 'a', ascending
>>> sf = turicreate.SFrame({'a':[1,3,2,1],
... 'b':['a','c','b','b'],
... 'c':['x','y','z','y']})
>>> sf
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 3 | c | y |
| 2 | b | z |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
>>> sf.sort('a')
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a', descending
>>> sf.sort('a', ascending = False)
+---+---+---+
| a | b | c |
+---+---+---+
| 3 | c | y |
| 2 | b | z |
| 1 | a | x |
| 1 | b | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' and 'b', all ascending
>>> sf.sort(['a', 'b'])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | a | x |
| 1 | b | y |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
To sort by column 'a' ascending, and then by column 'c' descending
>>> sf.sort([('a', True), ('c', False)])
+---+---+---+
| a | b | c |
+---+---+---+
| 1 | b | y |
| 1 | a | x |
| 2 | b | z |
| 3 | c | y |
+---+---+---+
[4 rows x 3 columns]
""" |
sort_column_names = []
sort_column_orders = []
# validate key_column_names
if (type(key_column_names) == str):
sort_column_names = [key_column_names]
elif (type(key_column_names) == list):
if (len(key_column_names) == 0):
raise ValueError("Please provide at least one column to sort")
first_param_types = set([type(i) for i in key_column_names])
if (len(first_param_types) != 1):
raise ValueError("key_column_names element are not of the same type")
first_param_type = first_param_types.pop()
if (first_param_type == tuple):
sort_column_names = [i[0] for i in key_column_names]
sort_column_orders = [i[1] for i in key_column_names]
elif(first_param_type == str):
sort_column_names = key_column_names
else:
raise TypeError("key_column_names type is not supported")
else:
raise TypeError("key_column_names type is not correct. Supported types are str, list of str or list of (str,bool) pair.")
# use the second parameter if the sort order is not given
if (len(sort_column_orders) == 0):
sort_column_orders = [ascending for i in sort_column_names]
# make sure all column exists
my_column_names = set(self.column_names())
for column in sort_column_names:
if (type(column) != str):
raise TypeError("Only string parameter can be passed in as column names")
if (column not in my_column_names):
raise ValueError("SFrame has no column named: '" + str(column) + "'")
if (self[column].dtype not in (str, int, float, datetime.datetime)):
raise TypeError("Only columns of type (str, int, float, datetime.datetime) can be sorted")
with cython_context():
return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders)) |
<SYSTEM_TASK:>
Remove missing values from an SFrame. A missing value is either ``None``
<END_TASK>
<USER_TASK:>
Description:
def dropna(self, columns=None, how='any'):
"""
Remove missing values from an SFrame. A missing value is either ``None``
or ``NaN``. If ``how`` is 'any', a row will be removed if any of the
columns in the ``columns`` parameter contains at least one missing
value. If ``how`` is 'all', a row will be removed if all of the columns
in the ``columns`` parameter are missing values.
If the ``columns`` parameter is not specified, the default is to
consider all columns when searching for missing values.
Parameters
----------
columns : list or str, optional
The columns to use when looking for missing values. By default, all
columns are used.
how : {'any', 'all'}, optional
Specifies whether a row should be dropped if at least one column
has missing values, or if all columns have missing values. 'any' is
default.
Returns
-------
out : SFrame
SFrame with missing values removed (according to the given rules).
See Also
--------
dropna_split : Drops missing rows from the SFrame and returns them.
Examples
--------
Drop all missing values.
>>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.dropna()
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
Drop rows where every value is missing.
>>> sf.dropna(how="all")
+------+---+
| a | b |
+------+---+
| 1 | a |
| None | b |
+------+---+
[2 rows x 2 columns]
Drop rows where column 'a' has a missing value.
>>> sf.dropna('a', how="any")
+---+---+
| a | b |
+---+---+
| 1 | a |
+---+---+
[1 rows x 2 columns]
""" |
# If the user gives me an empty list (the indicator to use all columns)
# NA values being dropped would not be the expected behavior. This
# is a NOOP, so let's not bother the server
if type(columns) is list and len(columns) == 0:
return SFrame(_proxy=self.__proxy__)
(columns, all_behavior) = self.__dropna_errchk(columns, how)
with cython_context():
return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False)) |
<SYSTEM_TASK:>
Fill all missing values with a given value in a given column. If the
<END_TASK>
<USER_TASK:>
Description:
def fillna(self, column_name, value):
"""
Fill all missing values with a given value in a given column. If the
``value`` is not the same type as the values in ``column_name``, this method
attempts to convert the value to the original column's type. If this
fails, an error is raised.
Parameters
----------
column_name : str
The name of the column to modify.
value : type convertible to SArray's type
The value used to replace all missing values.
Returns
-------
out : SFrame
A new SFrame with the specified value in place of missing values.
See Also
--------
dropna
Examples
--------
>>> sf = turicreate.SFrame({'a':[1, None, None],
... 'b':['13.1', '17.2', None]})
>>> sf = sf.fillna('a', 0)
>>> sf
+---+------+
| a | b |
+---+------+
| 1 | 13.1 |
| 0 | 17.2 |
| 0 | None |
+---+------+
[3 rows x 2 columns]
""" |
# Normal error checking
if type(column_name) is not str:
raise TypeError("column_name must be a str")
ret = self[self.column_names()]
ret[column_name] = ret[column_name].fillna(value)
return ret |
<SYSTEM_TASK:>
Returns an SFrame with a new column that numbers each row
<END_TASK>
<USER_TASK:>
Description:
def add_row_number(self, column_name='id', start=0, inplace=False):
"""
Returns an SFrame with a new column that numbers each row
sequentially. By default the count starts at 0, but this can be changed
to a positive or negative number. The new column will be named with
the given column name. An error will be raised if the given column
name already exists in the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : str, optional
The name of the new column that will hold the row numbers.
start : int, optional
The number used to start the row number count.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The new SFrame with the added row-number column.
Notes
-----
The range of numbers is constrained by a signed 64-bit integer, so
beware of overflow if you think the results in the row number column
will be greater than 9 quintillion.
Examples
--------
>>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]})
>>> sf.add_row_number()
+----+------+------+
| id | a | b |
+----+------+------+
| 0 | 1 | a |
| 1 | None | b |
| 2 | None | None |
+----+------+------+
[3 rows x 3 columns]
""" |
if type(column_name) is not str:
raise TypeError("Must give column_name as strs")
if type(start) is not int:
raise TypeError("Must give start as int")
if column_name in self.column_names():
raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame")
the_col = _create_sequential_sarray(self.num_rows(), start)
# Make sure the row number column is the first column
new_sf = SFrame()
new_sf.add_column(the_col, column_name, inplace=True)
new_sf.add_columns(self, inplace=True)
if inplace:
self.__proxy__ = new_sf.__proxy__
return self
else:
return new_sf |
<SYSTEM_TASK:>
Adds the FileDescriptorProto and its types to this pool.
<END_TASK>
<USER_TASK:>
Description:
def AddSerializedFile(self, serialized_file_desc_proto):
"""Adds the FileDescriptorProto and its types to this pool.
Args:
serialized_file_desc_proto: A bytes string, serialization of the
FileDescriptorProto to add.
""" |
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
serialized_file_desc_proto)
self.Add(file_desc_proto) |
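A minimal sketch of feeding a serialized FileDescriptorProto into a descriptor pool; the file name and package are placeholders.

from google.protobuf import descriptor_pb2, descriptor_pool

pool = descriptor_pool.DescriptorPool()
proto = descriptor_pb2.FileDescriptorProto(name='example.proto', package='example')
pool.AddSerializedFile(proto.SerializeToString())
print(pool.FindFileByName('example.proto').package)   # 'example'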
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.