text_prompt | code_prompt
---|---
<SYSTEM_TASK:>
Returns the directory for this target.
<END_TASK>
<USER_TASK:>
Description:
def path (self):
""" Returns the directory for this target.
"""
|
if not self.path_:
if self.action_:
p = self.action_.properties ()
(target_path, relative_to_build_dir) = p.target_path ()
if relative_to_build_dir:
# Indicates that the path is relative to
# build dir.
target_path = os.path.join (self.project_.build_dir (), target_path)
# Store the computed path, so that it is not recomputed again.
self.path_ = target_path
return os.path.normpath(self.path_)
|
<SYSTEM_TASK:>
Generates actual build instructions.
<END_TASK>
<USER_TASK:>
Description:
def actualize (self):
""" Generates actual build instructions.
"""
|
if self.actualized_:
return
self.actualized_ = True
ps = self.properties ()
properties = self.adjust_properties (ps)
actual_targets = []
for i in self.targets ():
actual_targets.append (i.actualize ())
self.actualize_sources (self.sources (), properties)
self.engine_.add_dependency (actual_targets, self.actual_sources_ + self.dependency_only_sources_)
# FIXME: check the comment below. Was self.action_name_ [1]
# Action name can include additional rule arguments, which should not
# be passed to 'set-target-variables'.
# FIXME: breaking circular dependency
import toolset
toolset.set_target_variables (self.manager_, self.action_name_, actual_targets, properties)
engine = self.manager_.engine ()
# FIXME: this is supposed to help --out-xml option, but we don't
# implement that now, and anyway, we should handle it in Python,
# not by putting variables on bjam-level targets.
bjam.call("set-target-variable", actual_targets, ".action", repr(self))
self.manager_.engine ().set_update_action (self.action_name_, actual_targets, self.actual_sources_,
properties)
# Since we set up creating action here, we also set up
# action for cleaning up
self.manager_.engine ().set_update_action ('common.Clean', 'clean-all',
actual_targets)
return actual_targets
|
<SYSTEM_TASK:>
Helper for 'actualize_sources'.
<END_TASK>
<USER_TASK:>
Description:
def actualize_source_type (self, sources, prop_set):
""" Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets.
"""
|
assert is_iterable_typed(sources, VirtualTarget)
assert isinstance(prop_set, property_set.PropertySet)
result = []
for i in sources:
scanner = None
# FIXME: what's this?
# if isinstance (i, str):
# i = self.manager_.get_object (i)
if i.type ():
scanner = b2.build.type.get_scanner (i.type (), prop_set)
r = i.actualize (scanner)
result.append (r)
return result
|
<SYSTEM_TASK:>
Returns all targets referenced by this subvariant,
<END_TASK>
<USER_TASK:>
Description:
def all_referenced_targets(self, result):
"""Returns all targets referenced by this subvariant,
either directly or indirectly, and either as sources,
or as dependency properties. Targets referred to via a
dependency property are returned as properties, not targets."""
|
if __debug__:
from .property import Property
assert is_iterable_typed(result, (VirtualTarget, Property))
# Find directly referenced targets.
deps = self.build_properties().dependency()
all_targets = self.sources_ + deps
# Find other subvariants.
r = []
for e in all_targets:
if not e in result:
result.add(e)
if isinstance(e, property.Property):
t = e.value
else:
t = e
# FIXME: how can this be?
cs = t.creating_subvariant()
if cs:
r.append(cs)
r = unique(r)
for s in r:
if s != self:
s.all_referenced_targets(result)
|
<SYSTEM_TASK:>
Creates additional files for the individual MPL-containers.
<END_TASK>
<USER_TASK:>
Description:
def create_more_container_files(sourceDir, suffix, maxElements, containers, containers2):
"""Creates additional files for the individual MPL-containers."""
|
# Create files for each MPL-container with 20 to 'maxElements' elements
# which will be used during generation.
for container in containers:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line)
for container in containers2:
for i in range(20, maxElements, 10):
# Create copy of "template"-file.
newFile = os.path.join( sourceDir, container, container + str(i+10) + "_c" + suffix )
shutil.copyfile( os.path.join( sourceDir, container, container + "20_c" + suffix ), newFile )
# Adjust copy of "template"-file accordingly.
for line in fileinput.input( newFile, inplace=1, mode="rU" ):
line = re.sub(r'20', '%TWENTY%', line.rstrip())
line = re.sub(r'11', '%ELEVEN%', line.rstrip())
line = re.sub(r'10(?![0-9])', '%TEN%', line.rstrip())
line = re.sub(r'%TWENTY%', re.escape(str(i+10)), line.rstrip())
line = re.sub(r'%ELEVEN%', re.escape(str(i + 1)), line.rstrip())
line = re.sub(r'%TEN%', re.escape(str(i)), line.rstrip())
print(line)
|
<SYSTEM_TASK:>
Creates additional source- and header-files for the numbered sequence MPL-containers.
<END_TASK>
<USER_TASK:>
Description:
def create_input_for_numbered_sequences(headerDir, sourceDir, containers, maxElements):
"""Creates additional source- and header-files for the numbered sequence MPL-containers."""
|
# Create additional container-list without "map".
containersWithoutMap = containers[:]
try:
containersWithoutMap.remove('map')
except ValueError:
# We can safely ignore if "map" is not contained in 'containers'!
pass
# Create header/source-files.
create_more_container_files(headerDir, ".hpp", maxElements, containers, containersWithoutMap)
create_more_container_files(sourceDir, ".cpp", maxElements, containers, containersWithoutMap)
|
<SYSTEM_TASK:>
Adjusts the limits of variadic sequence MPL-containers.
<END_TASK>
<USER_TASK:>
Description:
def adjust_container_limits_for_variadic_sequences(headerDir, containers, maxElements):
"""Adjusts the limits of variadic sequence MPL-containers."""
|
for container in containers:
headerFile = os.path.join( headerDir, "limits", container + ".hpp" )
regexMatch = r'(define\s+BOOST_MPL_LIMIT_' + container.upper() + r'_SIZE\s+)[0-9]+'
regexReplace = r'\g<1>' + re.escape( str(maxElements) )
for line in fileinput.input( headerFile, inplace=1, mode="rU" ):
line = re.sub(regexMatch, regexReplace, line.rstrip())
print(line)
|
<SYSTEM_TASK:>
Add an inner product layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_inner_product(self, name, W, b, input_channels, output_channels, has_bias,
input_name, output_name, **kwargs):
"""
Add an inner product layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array or bytes()
Weight matrix of shape (output_channels, input_channels)
If W is of type bytes(), i.e. quantized, other quantization related arguments must be provided as well (see below).
b: numpy.array
Bias vector of shape (output_channels, ).
input_channels: int
Number of input channels.
output_channels: int
Number of output channels.
has_bias: boolean
Whether this layer uses a bias vector.
- If True, the bias vector b is included in the spec.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
Quantization arguments expected in kwargs, when W is of type bytes():
quantization_type : str
When weights are quantized (i.e. W is of type bytes()), this should be either "linear" or "lut".
nbits: int
Should be between 1 and 8 (inclusive). Number of bits per weight value. Only applicable when
weights are quantized.
quant_scale: numpy.array(dtype=numpy.float32)
scale vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_bias: numpy.array(dtype=numpy.float32)
bias vector to be used with linear quantization. Must be of length either 1 or output_channels.
quant_lut: numpy.array(dtype=numpy.float32)
the LUT (look up table) to be used with LUT quantization. Must be of length 2^nbits.
See Also
--------
add_embedding, add_convolution
"""
|
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.innerProduct
# Fill in the parameters
spec_layer_params.inputChannels = input_channels
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
if len(kwargs) == 0:
weights.floatValue.extend(map(float, W.flatten()))
else:
_verify_quantization_arguments(weight=W, output_channels=output_channels, **kwargs)
_fill_quantized_weights(weights_message=weights, W=W, **kwargs)
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten()))
|
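A minimal usage sketch for the add_inner_product snippet above, assuming the standard coremltools NeuralNetworkBuilder; the feature names, shapes, and layer name below are illustrative, not part of the original snippet.

import numpy as np
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# A hypothetical 3 -> 2 fully connected layer.
input_features = [('data', datatypes.Array(3))]
output_features = [('out', datatypes.Array(2))]
builder = NeuralNetworkBuilder(input_features, output_features)
W = np.random.rand(2, 3)   # (output_channels, input_channels)
b = np.random.rand(2)      # (output_channels,)
builder.add_inner_product(name='fc1', W=W, b=b,
                          input_channels=3, output_channels=2, has_bias=True,
                          input_name='data', output_name='out')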
<SYSTEM_TASK:>
Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
<END_TASK>
<USER_TASK:>
Description:
def add_resize_bilinear(self, name, input_name, output_name, target_height=1, target_width=1,
mode='ALIGN_ENDPOINTS_MODE'):
"""
Add resize bilinear layer to the model. A layer that resizes the input to a given spatial size using bilinear interpolation.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
target_height: int
Output height dimension.
target_width: int
Output width dimension.
mode: str
Following values are supported: 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.
This parameter determines the sampling grid used for bilinear interpolation. Kindly refer to NeuralNetwork.proto for details.
See Also
--------
add_upsample
"""
|
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.resizeBilinear
spec_layer_params.targetSize.append(target_height)
spec_layer_params.targetSize.append(target_width)
if mode == 'ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ALIGN_ENDPOINTS_MODE')
elif mode == 'STRICT_ALIGN_ENDPOINTS_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('STRICT_ALIGN_ENDPOINTS_MODE')
elif mode == 'UPSAMPLE_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('UPSAMPLE_MODE')
elif mode == 'ROI_ALIGN_MODE':
spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value('ROI_ALIGN_MODE')
else:
raise ValueError("Unspported resize bilinear mode %s" % mode)
|
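A hedged usage sketch for add_resize_bilinear, reusing a builder constructed as in the inner-product example above; the layer name, blob names, and target size are illustrative.

builder.add_resize_bilinear(name='resize1', input_name='out', output_name='resized',
                            target_height=64, target_width=64,
                            mode='STRICT_ALIGN_ENDPOINTS_MODE')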
<SYSTEM_TASK:>
Serialize model summary into a dict with ordered lists of sections and section titles
<END_TASK>
<USER_TASK:>
Description:
def _toolkit_serialize_summary_struct(model, sections, section_titles):
"""
Serialize model summary into a dict with ordered lists of sections and section titles
Parameters
----------
model : Model object
sections : Ordered list of lists (sections) of tuples (field,value)
[
[(field1, value1), (field2, value2)],
[(field3, value3), (field4, value4)],
]
section_titles : Ordered list of section titles
Returns
-------
output_dict : A dict with two entries:
'sections' : ordered list with tuples of the form ('label',value)
'section_titles' : ordered list of section labels
"""
|
output_dict = dict()
output_dict['sections'] = [ [ ( field[0], __extract_model_summary_value(model, field[1]) ) \
for field in section ]
for section in sections ]
output_dict['section_titles'] = section_titles
return output_dict
|
<SYSTEM_TASK:>
Finds the only column in `SFrame` with a type specified by `target_type`.
<END_TASK>
<USER_TASK:>
Description:
def _find_only_column_of_type(sframe, target_type, type_name, col_name):
"""
Finds the only column in `SFrame` with a type specified by `target_type`.
If there are zero or more than one such columns, an exception will be
raised. The name and type of the target column should be provided as
strings for the purpose of error feedback.
"""
|
image_column_name = None
if type(target_type) != list:
target_type = [target_type]
for name, ctype in zip(sframe.column_names(), sframe.column_types()):
if ctype in target_type:
if image_column_name is not None:
raise ToolkitError('No "{col_name}" column specified and more than one {type_name} column in "dataset". Can not infer correct {col_name} column.'.format(col_name=col_name, type_name=type_name))
image_column_name = name
if image_column_name is None:
raise ToolkitError('No %s column in "dataset".' % type_name)
return image_column_name
|
<SYSTEM_TASK:>
Finds the only column in `sframe` with a type of turicreate.Image.
<END_TASK>
<USER_TASK:>
Description:
def _find_only_image_column(sframe):
"""
Finds the only column in `sframe` with a type of turicreate.Image.
If there are zero or more than one image columns, an exception will
be raised.
"""
|
from turicreate import Image
return _find_only_column_of_type(sframe, target_type=Image,
type_name='image', col_name='feature')
|
<SYSTEM_TASK:>
Return a tuple of sections and section titles.
<END_TASK>
<USER_TASK:>
Description:
def _summarize_coefficients(top_coefs, bottom_coefs):
"""
Return a tuple of sections and section titles.
Sections are pretty print of model coefficients
Parameters
----------
top_coefs : SFrame of top k coefficients
bottom_coefs : SFrame of bottom k coefficients
Returns
-------
(sections, section_titles) : tuple
sections : list
summary sections for top/bottom k coefficients
section_titles : list
summary section titles
"""
|
def get_row_name(row):
if row['index'] is None:
return row['name']
else:
return "%s[%s]" % (row['name'], row['index'])
if len(top_coefs) == 0:
top_coefs_list = [('No Positive Coefficients', _precomputed_field('') )]
else:
top_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in top_coefs ]
if len(bottom_coefs) == 0:
bottom_coefs_list = [('No Negative Coefficients', _precomputed_field(''))]
else:
bottom_coefs_list = [ (get_row_name(row),
_precomputed_field(row['value'])) \
for row in bottom_coefs ]
return ([top_coefs_list, bottom_coefs_list], \
[ 'Highest Positive Coefficients', 'Lowest Negative Coefficients'] )
|
<SYSTEM_TASK:>
Returns a tuple of the top k values from the positive and
<END_TASK>
<USER_TASK:>
Description:
def _toolkit_get_topk_bottomk(values, k=5):
"""
Returns a tuple of the top k values from the positive and
negative values in a SArray
Parameters
----------
values : SFrame of model coefficients
k: Maximum number of largest positive and smallest negative values to return
Returns
-------
(topk_positive, bottomk_negative) : tuple
topk_positive : list
floats that represent the top 'k' (or fewer) positive values
bottomk_negative : list
floats that represent the 'k' (or fewer) most negative values
"""
|
top_values = values.topk('value', k=k)
top_values = top_values[top_values['value'] > 0]
bottom_values = values.topk('value', k=k, reverse=True)
bottom_values = bottom_values[bottom_values['value'] < 0]
return (top_values, bottom_values)
|
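An illustrative call of the helper above, assuming turicreate is importable; the coefficient names and values are made up.

import turicreate as tc
coefs = tc.SFrame({'name': ['age', 'height', 'weight'],
                   'value': [2.0, -1.5, 0.3]})
top, bottom = _toolkit_get_topk_bottomk(coefs, k=2)
# top holds the rows with values 2.0 and 0.3; bottom holds the row with -1.5.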
<SYSTEM_TASK:>
Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table.
<END_TASK>
<USER_TASK:>
Description:
def _make_repr_table_from_sframe(X):
"""
Serializes an SFrame to a list of strings, that, when printed, creates a well-formatted table.
"""
|
assert isinstance(X, _SFrame)
column_names = X.column_names()
out_data = [ [None]*len(column_names) for i in range(X.num_rows())]
column_sizes = [len(s) for s in column_names]
for i, c in enumerate(column_names):
for j, e in enumerate(X[c]):
out_data[j][i] = str(e)
column_sizes[i] = max(column_sizes[i], len(str(e)))
# now, go through and pad everything.
out_data = ([ [cn.ljust(k, ' ') for cn, k in zip(column_names, column_sizes)],
["-"*k for k in column_sizes] ]
+ [ [e.ljust(k, ' ') for e, k in zip(row, column_sizes)] for row in out_data] )
return [' '.join(row) for row in out_data]
|
<SYSTEM_TASK:>
Display a toolkit repr according to some simple rules.
<END_TASK>
<USER_TASK:>
Description:
def _toolkit_repr_print(model, fields, section_titles, width = None):
"""
Display a toolkit repr according to some simple rules.
Parameters
----------
model : Turi Create model
fields: List of lists of tuples
Each tuple should be (display_name, field_name), where field_name can
be a string or a _precomputed_field object.
section_titles: List of section titles, one per list in the fields arg.
Example
-------
model_fields = [
("L1 penalty", 'l1_penalty'),
("L2 penalty", 'l2_penalty'),
("Examples", 'num_examples'),
("Features", 'num_features'),
("Coefficients", 'num_coefficients')]
solver_fields = [
("Solver", 'solver'),
("Solver iterations", 'training_iterations'),
("Solver status", 'training_solver_status'),
("Training time (sec)", 'training_time')]
training_fields = [
("Log-likelihood", 'training_loss')]
fields = [model_fields, solver_fields, training_fields]
section_titles = ['Model description',
'Solver description',
'Training information']
_toolkit_repr_print(model, fields, section_titles)
"""
|
assert len(section_titles) == len(fields), \
"The number of section titles ({0}) ".format(len(section_titles)) +\
"doesn't match the number of groups of fields, {0}.".format(len(fields))
out_fields = [ ("Class", model.__class__.__name__), ""]
# Record the max_width so that if width is not provided, we calculate it.
max_width = len("Class")
for index, (section_title, field_list) in enumerate(zip(section_titles, fields)):
# Add in the section header.
out_fields += [section_title, "-"*len(section_title)]
# Add in all the key-value pairs
for f in field_list:
if isinstance(f, tuple):
f = (str(f[0]), f[1])
out_fields.append( (f[0], __extract_model_summary_value(model, f[1])) )
max_width = max(max_width, len(f[0]))
elif isinstance(f, _SFrame):
out_fields.append("")
out_fields += _make_repr_table_from_sframe(f)
out_fields.append("")
else:
raise TypeError("Type of field %s not recognized." % str(f))
# Add in the empty footer.
out_fields.append("")
if width is None:
width = max_width
# Now, go through and format the key_value pairs nicely.
def format_key_pair(key, value):
if type(key) is list:
key = ','.join(str(k) for k in key)
return key.ljust(width, ' ') + ' : ' + str(value)
out_fields = [s if type(s) is str else format_key_pair(*s) for s in out_fields]
return '\n'.join(out_fields)
|
<SYSTEM_TASK:>
Map returning value, if it is unity SFrame, SArray, map it
<END_TASK>
<USER_TASK:>
Description:
def _map_unity_proxy_to_object(value):
"""
Map a returned value: if it is a unity SFrame or SArray proxy, convert it to the corresponding Python object (recursing into lists and dicts).
"""
|
vtype = type(value)
if vtype in _proxy_map:
return _proxy_map[vtype](value)
elif vtype == list:
return [_map_unity_proxy_to_object(v) for v in value]
elif vtype == dict:
return {k:_map_unity_proxy_to_object(v) for k,v in value.items()}
else:
return value
|
<SYSTEM_TASK:>
Same as select columns but redirect runtime error to ToolkitError.
<END_TASK>
<USER_TASK:>
Description:
def _toolkits_select_columns(dataset, columns):
"""
Same as select_columns, but redirects RuntimeError to ToolkitError.
"""
|
try:
return dataset.select_columns(columns)
except RuntimeError:
missing_features = list(set(columns).difference(set(dataset.column_names())))
raise ToolkitError("Input data does not contain the following columns: " +
"{}".format(missing_features))
|
<SYSTEM_TASK:>
Check if a column exists in an SFrame with error message.
<END_TASK>
<USER_TASK:>
Description:
def _raise_error_if_column_exists(dataset, column_name = 'dataset',
dataset_variable_name = 'dataset',
column_name_error_message_name = 'column_name'):
"""
Check that a column exists in an SFrame, raising a ToolkitError with an informative message if it does not.
"""
|
err_msg = 'The SFrame {0} must contain the column {1}.'.format(
dataset_variable_name,
column_name_error_message_name)
if column_name not in dataset.column_names():
raise ToolkitError(str(err_msg))
|
<SYSTEM_TASK:>
Check whether or not the requested option is one of the allowed values.
<END_TASK>
<USER_TASK:>
Description:
def _check_categorical_option_type(option_name, option_value, possible_values):
"""
Check whether or not the requested option is one of the allowed values.
"""
|
err_msg = '{0} is not a valid option for {1}. '.format(option_value, option_name)
err_msg += 'Expected one of: '
err_msg += ', '.join(map(str, possible_values))
if option_value not in possible_values:
raise ToolkitError(err_msg)
|
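For example (hypothetical option name and values, assuming the helper and ToolkitError above are in scope):

_check_categorical_option_type('distance', 'euclidean',
                               ['euclidean', 'manhattan'])  # passes silently
_check_categorical_option_type('distance', 'cosine',
                               ['euclidean', 'manhattan'])  # raises ToolkitError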
<SYSTEM_TASK:>
Check if the input is an SArray. Provide a proper error
<END_TASK>
<USER_TASK:>
Description:
def _raise_error_if_not_sarray(dataset, variable_name="SArray"):
"""
Check if the input is an SArray. Provide a proper error
message otherwise.
"""
|
err_msg = "Input %s is not an SArray."
if not isinstance(dataset, _SArray):
raise ToolkitError(err_msg % variable_name)
|
<SYSTEM_TASK:>
Check if the input is empty.
<END_TASK>
<USER_TASK:>
Description:
def _raise_error_if_sframe_empty(dataset, variable_name="SFrame"):
"""
Check if the input is empty.
"""
|
err_msg = "Input %s either has no rows or no columns. A non-empty SFrame "
err_msg += "is required."
if dataset.num_rows() == 0 or dataset.num_columns() == 0:
raise ToolkitError(err_msg % variable_name)
|
<SYSTEM_TASK:>
Checks if numeric parameter is within given range
<END_TASK>
<USER_TASK:>
Description:
def _numeric_param_check_range(variable_name, variable_value, range_bottom, range_top):
"""
Checks if numeric parameter is within given range
"""
|
err_msg = "%s must be between %i and %i"
if variable_value < range_bottom or variable_value > range_top:
raise ToolkitError(err_msg % (variable_name, range_bottom, range_top))
|
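For example (the parameter name and bounds are illustrative, assuming the helper above is in scope):

_numeric_param_check_range('max_depth', 6, 1, 100)   # within range, no error
_numeric_param_check_range('max_depth', 0, 1, 100)   # raises ToolkitError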
<SYSTEM_TASK:>
Validate and canonicalize training and validation data.
<END_TASK>
<USER_TASK:>
Description:
def _validate_data(dataset, target, features=None, validation_set='auto'):
"""
Validate and canonicalize training and validation data.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable.
features : list[string], optional
List of feature names used.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance, with
the same schema as the training dataset. Can also be None or 'auto'.
Returns
-------
dataset : SFrame
The input dataset, minus any columns not referenced by target or
features
validation_set : SFrame or str
A canonicalized version of the input validation_set. For SFrame
arguments, the returned SFrame only includes those columns referenced by
target or features. SFrame arguments that do not match the schema of
dataset, or string arguments that are not 'auto', trigger an exception.
"""
|
_raise_error_if_not_sframe(dataset, "training dataset")
# Determine columns to keep
if features is None:
features = [feat for feat in dataset.column_names() if feat != target]
if not hasattr(features, '__iter__'):
raise TypeError("Input 'features' must be a list.")
for x in features:
    if not isinstance(x, str):
        raise TypeError(
            "Invalid feature %s: Feature names must be of type str" % x)
# Check validation_set argument
if isinstance(validation_set, str):
# Only string value allowed is 'auto'
if validation_set != 'auto':
raise TypeError('Unrecognized value for validation_set.')
elif isinstance(validation_set, _SFrame):
# Attempt to append the two datasets together to check schema
validation_set.head().append(dataset.head())
# Reduce validation set to requested columns
validation_set = _toolkits_select_columns(
validation_set, features + [target])
elif not validation_set is None:
raise TypeError("validation_set must be either 'auto', None, or an "
"SFrame matching the training data.")
# Reduce training set to requested columns
dataset = _toolkits_select_columns(dataset, features + [target])
return dataset, validation_set
|
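A small usage sketch, assuming turicreate is importable and the helpers above are in scope; the column names and data are illustrative.

import turicreate as tc
sf = tc.SFrame({'x1': [1.0, 2.0, 3.0], 'x2': [0.5, 0.1, 0.9], 'y': [0, 1, 0]})
train, valid = _validate_data(sf, target='y', features=['x1', 'x2'],
                              validation_set='auto')
# train keeps only the columns x1, x2 and y; valid remains the string 'auto'.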
<SYSTEM_TASK:>
Validate a row label column. If the row label is not specified, a column is
<END_TASK>
<USER_TASK:>
Description:
def _validate_row_label(dataset, label=None, default_label='__id'):
"""
Validate a row label column. If the row label is not specified, a column is
created with row numbers, named with the string in the `default_label`
parameter.
Parameters
----------
dataset : SFrame
Input dataset.
label : str, optional
Name of the column containing row labels.
default_label : str, optional
The default column name if `label` is not specified. A column with row
numbers is added to the output SFrame in this case.
Returns
-------
dataset : SFrame
The input dataset, but with an additional row label column, *if* there
was no input label.
label : str
The final label column name.
"""
|
## If no label is provided, set it to be a default and add a row number to
# dataset. Check that this new name does not conflict with an existing
# name.
if not label:
## Try a bunch of variations of the default label to find one that's not
# already a column name.
label_name_base = default_label
label = default_label
i = 1
while label in dataset.column_names():
label = label_name_base + '.{}'.format(i)
i += 1
dataset = dataset.add_row_number(column_name=label)
## Validate the label name and types.
if not isinstance(label, str):
raise TypeError("The row label column name '{}' must be a string.".format(label))
if not label in dataset.column_names():
raise ToolkitError("Row label column '{}' not found in the dataset.".format(label))
if not dataset[label].dtype in (str, int):
raise TypeError("Row labels must be integers or strings.")
## Return the modified dataset and label
return dataset, label
|
<SYSTEM_TASK:>
Returns Mac version as a tuple of integers, making it easy to do proper
<END_TASK>
<USER_TASK:>
Description:
def _mac_ver():
"""
Returns Mac version as a tuple of integers, making it easy to do proper
version comparisons. On non-Macs, it returns an empty tuple.
"""
|
import platform
import sys
if sys.platform == 'darwin':
ver_str = platform.mac_ver()[0]
return tuple([int(v) for v in ver_str.split('.')])
else:
return ()
|
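Returning a tuple makes version checks simple comparisons, for example:

if _mac_ver() >= (10, 14):
    print('Running on macOS 10.14 or newer')
else:
    print('Older macOS, or not a Mac at all (empty tuple)')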
<SYSTEM_TASK:>
Print a message making it clear to the user what compute resource is used in
<END_TASK>
<USER_TASK:>
Description:
def _print_neural_compute_device(cuda_gpus, use_mps, cuda_mem_req=None, has_mps_impl=True):
"""
Print a message making it clear to the user what compute resource is used in
neural network training.
"""
|
num_cuda_gpus = len(cuda_gpus)
if num_cuda_gpus >= 1:
gpu_names = ', '.join(gpu['name'] for gpu in cuda_gpus)
if use_mps:
from ._mps_utils import mps_device_name
print('Using GPU to create model ({})'.format(mps_device_name()))
elif num_cuda_gpus >= 1:
from . import _mxnet_utils
plural = 's' if num_cuda_gpus >= 2 else ''
print('Using GPU{} to create model ({})'.format(plural, gpu_names))
if cuda_mem_req is not None:
_mxnet_utils._warn_if_less_than_cuda_free_memory(cuda_mem_req, max_devices=num_cuda_gpus)
else:
import sys
print('Using CPU to create model')
if sys.platform == 'darwin' and _mac_ver() < (10, 14) and has_mps_impl:
print('NOTE: If available, an AMD GPU can be leveraged on macOS 10.14+ for faster model creation')
|
<SYSTEM_TASK:>
Get a proto class from the MessageFactory by name.
<END_TASK>
<USER_TASK:>
Description:
def _GetMessageFromFactory(factory, full_name):
"""Get a proto class from the MessageFactory by name.
Args:
factory: a MessageFactory instance.
full_name: str, the fully qualified name of the proto type.
Returns:
A class, for the type identified by full_name.
Raises:
KeyError, if the proto is not found in the factory's descriptor pool.
"""
|
proto_descriptor = factory.pool.FindMessageTypeByName(full_name)
proto_cls = factory.GetPrototype(proto_descriptor)
return proto_cls
|
<SYSTEM_TASK:>
Create a Protobuf class whose fields are basic types.
<END_TASK>
<USER_TASK:>
Description:
def MakeSimpleProtoClass(fields, full_name=None, pool=None):
"""Create a Protobuf class whose fields are basic types.
Note: this doesn't validate field names!
Args:
fields: dict of {name: field_type} mappings for each field in the proto. If
this is an OrderedDict the order will be maintained, otherwise the
fields will be sorted by name.
full_name: optional str, the fully-qualified name of the proto type.
pool: optional DescriptorPool instance.
Returns:
a class, the new protobuf class with a FileDescriptor.
"""
|
factory = message_factory.MessageFactory(pool=pool)
if full_name is not None:
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# Get a list of (name, field_type) tuples from the fields dict. If fields was
# an OrderedDict we keep the order, but otherwise we sort the field to ensure
# consistent ordering.
field_items = fields.items()
if not isinstance(fields, OrderedDict):
field_items = sorted(field_items)
# Use a consistent file name that is unlikely to conflict with any imported
# proto files.
fields_hash = hashlib.sha1()
for f_name, f_type in field_items:
fields_hash.update(f_name.encode('utf-8'))
fields_hash.update(str(f_type).encode('utf-8'))
proto_file_name = fields_hash.hexdigest() + '.proto'
# If the proto is anonymous, use the same hash to name it.
if full_name is None:
full_name = ('net.proto2.python.public.proto_builder.AnonymousProto_' +
fields_hash.hexdigest())
try:
proto_cls = _GetMessageFromFactory(factory, full_name)
return proto_cls
except KeyError:
# The factory's DescriptorPool doesn't know about this class yet.
pass
# This is the first time we see this proto: add a new descriptor to the pool.
factory.pool.Add(
_MakeFileDescriptorProto(proto_file_name, full_name, field_items))
return _GetMessageFromFactory(factory, full_name)
|
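A hedged usage sketch; the field names and full name are purely illustrative, and it assumes the protobuf runtime and the function above are available.

from collections import OrderedDict
from google.protobuf import descriptor_pb2

ProtoCls = MakeSimpleProtoClass(
    OrderedDict([
        ('id', descriptor_pb2.FieldDescriptorProto.TYPE_INT64),
        ('label', descriptor_pb2.FieldDescriptorProto.TYPE_STRING),
    ]),
    full_name='example.AdHocRecord')
msg = ProtoCls(id=7, label='sample')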
<SYSTEM_TASK:>
Returns user-defined metadata, making sure information all models should
<END_TASK>
<USER_TASK:>
Description:
def _get_model_metadata(model_class, metadata, version=None):
"""
Returns user-defined metadata, making sure information all models should
have is also available, as a dictionary
"""
|
from turicreate import __version__
info = {
'turicreate_version': __version__,
'type': model_class,
}
if version is not None:
info['version'] = str(version)
info.update(metadata)
return info
|
<SYSTEM_TASK:>
Sets user-defined metadata, making sure information all models should have
<END_TASK>
<USER_TASK:>
Description:
def _set_model_metadata(mlmodel, model_class, metadata, version=None):
"""
Sets user-defined metadata, making sure information all models should have
is also available
"""
|
info = _get_model_metadata(model_class, metadata, version)
mlmodel.user_defined_metadata.update(info)
|
<SYSTEM_TASK:>
Converts name to Json name and returns it.
<END_TASK>
<USER_TASK:>
Description:
def _ToJsonName(name):
"""Converts name to Json name and returns it."""
|
capitalize_next = False
result = []
for c in name:
if c == '_':
capitalize_next = True
elif capitalize_next:
result.append(c.upper())
capitalize_next = False
else:
result += c
return ''.join(result)
|
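The conversion is the usual snake_case to lowerCamelCase mapping, for example (assuming the function above is in scope):

_ToJsonName('field_name_1')   # -> 'fieldName1'
_ToJsonName('foo_bar_baz')    # -> 'fooBarBaz'
_ToJsonName('already')        # -> 'already'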
<SYSTEM_TASK:>
Sets the descriptor's options
<END_TASK>
<USER_TASK:>
Description:
def _SetOptions(self, options, options_class_name):
"""Sets the descriptor's options
This function is used in generated proto2 files to update descriptor
options. It must not be used outside proto2.
"""
|
self._options = options
self._options_class_name = options_class_name
# Does this descriptor have non-default options?
self.has_options = options is not None
|
<SYSTEM_TASK:>
Copies this to the matching proto in descriptor_pb2.
<END_TASK>
<USER_TASK:>
Description:
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldn't be serialized, due to too few constructor arguments.
"""
|
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
|
<SYSTEM_TASK:>
Returns the string name of an enum value.
<END_TASK>
<USER_TASK:>
Description:
def EnumValueName(self, enum, value):
"""Returns the string name of an enum value.
This is just a small helper method to simplify a common operation.
Args:
enum: string name of the Enum.
value: int, value of the enum.
Returns:
string name of the enum value.
Raises:
KeyError if either the Enum doesn't exist or the value is not a valid
value for the enum.
"""
|
return self.enum_types_by_name[enum].values_by_number[value].name
|
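For instance, given a message descriptor desc whose enum 'Color' defines RED = 0, the helper maps the number back to the name (the descriptor and enum are hypothetical):

name = desc.EnumValueName('Color', 0)   # -> 'RED'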
<SYSTEM_TASK:>
Given a target_reference, made in context of 'project',
<END_TASK>
<USER_TASK:>
Description:
def resolve_reference(target_reference, project):
""" Given a target_reference, made in context of 'project',
returns the AbstractTarget instance that is referred to, as well
as properties explicitly specified for this reference.
"""
|
# Separate target name from properties override
assert isinstance(target_reference, basestring)
assert isinstance(project, ProjectTarget)
split = _re_separate_target_from_properties.match (target_reference)
if not split:
raise BaseException ("Invalid reference: '%s'" % target_reference)
id = split.group (1)
sproperties = []
if split.group (3):
sproperties = property.create_from_strings(feature.split(split.group(3)))
sproperties = feature.expand_composites(sproperties)
# Find the target
target = project.find (id)
return (target, property_set.create(sproperties))
|
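A target reference combines a target id with optional property overrides; a hypothetical call (the project instance and reference string are illustrative):

target, extra_ps = resolve_reference('../util//bar/<link>static', project)
# target is the AbstractTarget named 'bar' in ../util, and extra_ps is a
# property_set containing <link>static.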
<SYSTEM_TASK:>
Registers the specified target as a main target alternatives.
<END_TASK>
<USER_TASK:>
Description:
def main_target_alternative (self, target):
""" Registers the specified target as a main target alternatives.
Returns 'target'.
"""
|
assert isinstance(target, AbstractTarget)
target.project ().add_alternative (target)
return target
|
<SYSTEM_TASK:>
Returns the requirement to use when declaring a main target,
<END_TASK>
<USER_TASK:>
Description:
def main_target_requirements(self, specification, project):
"""Returns the requirement to use when declaring a main target,
which are obtained by
- translating all specified property paths, and
- refining project requirements with the one specified for the target
'specification' are the properties xplicitly specified for a
main target
'project' is the project where the main taret is to be declared."""
|
assert is_iterable_typed(specification, basestring)
assert isinstance(project, ProjectTarget)
# create a copy since the list is being modified
specification = list(specification)
specification.extend(toolset.requirements())
requirements = property_set.refine_from_user_input(
project.get("requirements"), specification,
project.project_module(), project.get("location"))
return requirements
|
<SYSTEM_TASK:>
Helper rules to detect cycles in main target references.
<END_TASK>
<USER_TASK:>
Description:
def start_building (self, main_target_instance):
""" Helper rules to detect cycles in main target references.
"""
|
assert isinstance(main_target_instance, MainTarget)
if id(main_target_instance) in self.targets_being_built_:
names = []
for t in self.targets_being_built_.values() + [main_target_instance]:
names.append (t.full_name())
get_manager().errors()("Recursion in main target references\n")
self.targets_being_built_[id(main_target_instance)] = main_target_instance
|
<SYSTEM_TASK:>
Creates a TypedTarget with the specified properties.
<END_TASK>
<USER_TASK:>
Description:
def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements):
""" Creates a TypedTarget with the specified properties.
The 'name', 'sources', 'requirements', 'default_build' and
'usage_requirements' are assumed to be in the form specified
by the user in Jamfile corresponding to 'project'.
"""
|
assert isinstance(type, basestring)
assert isinstance(project, ProjectTarget)
assert is_iterable_typed(sources, basestring)
assert is_iterable_typed(requirements, basestring)
assert is_iterable_typed(default_build, basestring)
return self.main_target_alternative (TypedTarget (name, project, type,
self.main_target_sources (sources, name),
self.main_target_requirements (requirements, project),
self.main_target_default_build (default_build, project),
self.main_target_usage_requirements (usage_requirements, project)))
|
<SYSTEM_TASK:>
Generates all possible targets contained in this project.
<END_TASK>
<USER_TASK:>
Description:
def generate (self, ps):
""" Generates all possible targets contained in this project.
"""
|
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets().log(
"Building project '%s' with '%s'" % (self.name (), str(ps)))
self.manager_.targets().increase_indent ()
result = GenerateResult ()
for t in self.targets_to_build ():
g = t.generate (ps)
result.extend (g)
self.manager_.targets().decrease_indent ()
return result
|
<SYSTEM_TASK:>
Computes and returns a list of AbstractTarget instances which
<END_TASK>
<USER_TASK:>
Description:
def targets_to_build (self):
""" Computes and returns a list of AbstractTarget instances which
must be built when this project is built.
"""
|
result = []
if not self.built_main_targets_:
self.build_main_targets ()
# Collect all main targets here, except for "explicit" ones.
for n, t in self.main_target_.iteritems ():
if not t.name () in self.explicit_targets_:
result.append (t)
# Collect all projects referenced via "projects-to-build" attribute.
self_location = self.get ('location')
for pn in self.get ('projects-to-build'):
result.append (self.find(pn + "/"))
return result
|
<SYSTEM_TASK:>
Add 'target' to the list of targets in this project
<END_TASK>
<USER_TASK:>
Description:
def mark_targets_as_explicit (self, target_names):
"""Add 'target' to the list of targets in this project
that should be built only by explicit request."""
|
# Record the name of the target, not the instance, since this
# rule is called before main target instances are created.
assert is_iterable_typed(target_names, basestring)
self.explicit_targets_.update(target_names)
|
<SYSTEM_TASK:>
Tells if a main target with the specified name exists.
<END_TASK>
<USER_TASK:>
Description:
def has_main_target (self, name):
"""Tells if a main target with the specified name exists."""
|
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets()
return name in self.main_target_
|
<SYSTEM_TASK:>
Returns a 'MainTarget' class instance corresponding to the 'name'.
<END_TASK>
<USER_TASK:>
Description:
def create_main_target (self, name):
""" Returns a 'MainTarget' class instance corresponding to the 'name'.
"""
|
assert isinstance(name, basestring)
if not self.built_main_targets_:
self.build_main_targets ()
return self.main_targets_.get (name, None)
|
<SYSTEM_TASK:>
Find and return the target with the specified id, treated
<END_TASK>
<USER_TASK:>
Description:
def find_really(self, id):
""" Find and return the target with the specified id, treated
relative to self.
"""
|
assert isinstance(id, basestring)
result = None
current_location = self.get ('location')
__re_split_project_target = re.compile (r'(.*)//(.*)')
split = __re_split_project_target.match (id)
project_part = None
target_part = None
if split:
project_part = split.group(1)
target_part = split.group(2)
if not target_part:
get_manager().errors()(
'Project ID, "{}", is not a valid target reference. There should '
'be either a target name after the "//" or the "//" should be removed '
'from the target reference.'
.format(id)
)
project_registry = self.project_.manager ().projects ()
extra_error_message = ''
if project_part:
# There's explicit project part in id. Looks up the
# project and pass the request to it.
pm = project_registry.find (project_part, current_location)
if pm:
project_target = project_registry.target (pm)
result = project_target.find (target_part, no_error=1)
else:
extra_error_message = "error: could not find project '%s'" % project_part
else:
# Interpret target-name as name of main target
# Need to do this before checking for file. Consider this:
#
# exe test : test.cpp ;
# install s : test : <location>. ;
#
# After first build we'll have target 'test' in Jamfile and file
# 'test' on the disk. We need target to override the file.
result = None
if self.has_main_target(id):
result = self.main_target(id)
if not result:
result = FileReference (self.manager_, id, self.project_)
if not result.exists ():
# File actually does not exist.
# Reset 'target' so that an error is issued.
result = None
if not result:
# Interpret id as project-id
project_module = project_registry.find (id, current_location)
if project_module:
result = project_registry.target (project_module)
return result
|
<SYSTEM_TASK:>
Adds a new constant for this project.
<END_TASK>
<USER_TASK:>
Description:
def add_constant(self, name, value, path=0):
"""Adds a new constant for this project.
The constant will be available for use in Jamfile
module for this project. If 'path' is true,
the constant will be interpreted relatively
to the location of project.
"""
|
assert isinstance(name, basestring)
assert is_iterable_typed(value, basestring)
assert isinstance(path, int) # will also match bools
if path:
l = self.location_
if not l:
# Project corresponding to config files do not have
# 'location' attribute, but do have source location.
# It might be more reasonable to make every project have
# a location and use some other approach to prevent buildable
# targets in config files, but that's for later.
l = self.get('source-location')
value = os.path.join(l, value[0])
# Now make the value absolute path. Constants should be in
# platform-native form.
value = [os.path.normpath(os.path.join(os.getcwd(), value))]
self.constants_[name] = value
bjam.call("set-variable", self.project_module(), name, value)
|
<SYSTEM_TASK:>
Add a new alternative for this target.
<END_TASK>
<USER_TASK:>
Description:
def add_alternative (self, target):
""" Add a new alternative for this target.
"""
|
assert isinstance(target, BasicTarget)
d = target.default_build ()
if self.alternatives_ and self.default_build_ != d:
get_manager().errors()("default build must be identical in all alternatives\n"
"main target is '%s'\n"
"with '%s'\n"
"differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ()))
else:
self.default_build_ = d
self.alternatives_.append (target)
|
<SYSTEM_TASK:>
Select an alternative for this main target, by finding all alternatives
<END_TASK>
<USER_TASK:>
Description:
def generate (self, ps):
""" Select an alternative for this main target, by finding all alternatives
whose requirements are satisfied by 'properties' and picking the one with
the longest requirements set.
Returns the result of calling 'generate' on that alternative.
"""
|
assert isinstance(ps, property_set.PropertySet)
self.manager_.targets ().start_building (self)
# We want composite properties in the build request to act as if
# all the properties they expand to are explicitly specified.
ps = ps.expand ()
all_property_sets = self.apply_default_build (ps)
result = GenerateResult ()
for p in all_property_sets:
result.extend (self.__generate_really (p))
self.manager_.targets ().end_building (self)
return result
|
<SYSTEM_TASK:>
Generates the main target with the given property set
<END_TASK>
<USER_TASK:>
Description:
def __generate_really (self, prop_set):
""" Generates the main target with the given property set
and returns a list whose first element is a property_set object
containing the usage requirements of the generated target, with the
generated virtual targets in the remaining elements. It's possible
that no targets are generated.
"""
|
assert isinstance(prop_set, property_set.PropertySet)
best_alternative = self.__select_alternatives (prop_set, debug=0)
self.best_alternative = best_alternative
if not best_alternative:
# FIXME: revive.
# self.__select_alternatives(prop_set, debug=1)
self.manager_.errors()(
"No best alternative for '%s'.\n"
% (self.full_name(),))
result = best_alternative.generate (prop_set)
# Now return virtual targets for the only alternative
return result
|
<SYSTEM_TASK:>
Given build request and requirements, return properties
<END_TASK>
<USER_TASK:>
Description:
def common_properties (self, build_request, requirements):
""" Given build request and requirements, return properties
common to dependency build request and target build
properties.
"""
|
# For optimization, we add free unconditional requirements directly,
# without using the complex algorithm.
# This gives the complex algorithm a better chance of caching results.
# The exact effect of this "optimization" is no longer clear
assert isinstance(build_request, property_set.PropertySet)
assert isinstance(requirements, property_set.PropertySet)
free_unconditional = []
other = []
for p in requirements.all():
if p.feature.free and not p.condition and p.feature.name != 'conditional':
free_unconditional.append(p)
else:
other.append(p)
other = property_set.create(other)
key = (build_request, other)
if key not in self.request_cache:
self.request_cache[key] = self.__common_properties2 (build_request, other)
return self.request_cache[key].add_raw(free_unconditional)
|
<SYSTEM_TASK:>
Returns the alternative condition for this alternative, if
<END_TASK>
<USER_TASK:>
Description:
def match (self, property_set_, debug):
""" Returns the alternative condition for this alternative, if
the condition is satisfied by 'property_set'.
"""
|
# The condition is composed of all base non-conditional properties.
# It's not clear if we should expand 'self.requirements_' or not.
# For one thing, it would be nice to be able to put
# <toolset>msvc-6.0
# in requirements.
# On the other hand, if we have <variant>release in condition it
# does not make sense to require <optimization>full to be in
# build request just to select this variant.
assert isinstance(property_set_, property_set.PropertySet)
bcondition = self.requirements_.base ()
ccondition = self.requirements_.conditional ()
condition = b2.util.set.difference (bcondition, ccondition)
if debug:
print " next alternative: required properties:", [str(p) for p in condition]
if b2.util.set.contains (condition, property_set_.all()):
if debug:
print " matched"
return condition
else:
return None
|
<SYSTEM_TASK:>
Takes a target reference, which might be either target id
<END_TASK>
<USER_TASK:>
Description:
def generate_dependency_properties(self, properties, ps):
""" Takes a target reference, which might be either target id
or a dependency property, and generates that target using
'property_set' as build request.
Returns a tuple (result, usage_requirements).
"""
|
assert is_iterable_typed(properties, property.Property)
assert isinstance(ps, property_set.PropertySet)
result_properties = []
usage_requirements = []
for p in properties:
result = generate_from_reference(p.value, self.project_, ps)
for t in result.targets():
result_properties.append(property.Property(p.feature, t))
usage_requirements += result.usage_requirements().all()
return (result_properties, usage_requirements)
|
<SYSTEM_TASK:>
Given the set of generated targets, and refined build
<END_TASK>
<USER_TASK:>
Description:
def compute_usage_requirements (self, subvariant):
""" Given the set of generated targets, and refined build
properties, determines and sets appropriate usage requirements
on those targets.
"""
|
assert isinstance(subvariant, virtual_target.Subvariant)
rproperties = subvariant.build_properties ()
xusage_requirements =self.evaluate_requirements(
self.usage_requirements_, rproperties, "added")
# We generate all dependency properties and add them,
# as well as their usage requirements, to result.
(r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties)
extra = r1 + r2
result = property_set.create (xusage_requirements.non_dependency () + extra)
# Propagate usage requirements we've got from sources, except
# for the <pch-header> and <pch-file> features.
#
# That feature specifies which pch file to use, and should apply
# only to direct dependents. Consider:
#
# pch pch1 : ...
# lib lib1 : ..... pch1 ;
# pch pch2 :
# lib lib2 : pch2 lib1 ;
#
# Here, lib2 should not get <pch-header> property from pch1.
#
# Essentially, when those two features are in usage requirements,
# they are propagated only to direct dependents. We might need
# a more general mechanism, but for now, only those two
# features are special.
properties = []
for p in subvariant.sources_usage_requirements().all():
if p.feature.name not in ('pch-header', 'pch-file'):
properties.append(p)
if 'shared' in rproperties.get('link'):
new_properties = []
for p in properties:
if p.feature.name != 'library':
new_properties.append(p)
properties = new_properties
result = result.add_raw(properties)
return result
|
<SYSTEM_TASK:>
Creates a new subvariant-dg instances for 'targets'
<END_TASK>
<USER_TASK:>
Description:
def create_subvariant (self, root_targets, all_targets,
build_request, sources,
rproperties, usage_requirements):
"""Creates a new subvariant-dg instances for 'targets'
- 'root-targets' the virtual targets will be returned to dependents
- 'all-targets' all virtual
targets created while building this main target
- 'build-request' is property-set instance with
requested build properties"""
|
assert is_iterable_typed(root_targets, virtual_target.VirtualTarget)
assert is_iterable_typed(all_targets, virtual_target.VirtualTarget)
assert isinstance(build_request, property_set.PropertySet)
assert is_iterable_typed(sources, virtual_target.VirtualTarget)
assert isinstance(rproperties, property_set.PropertySet)
assert isinstance(usage_requirements, property_set.PropertySet)
for e in root_targets:
e.root (True)
s = Subvariant (self, build_request, sources,
rproperties, usage_requirements, all_targets)
for v in all_targets:
if not v.creating_subvariant():
v.creating_subvariant(s)
return s
|
<SYSTEM_TASK:>
Declares a new variant.
<END_TASK>
<USER_TASK:>
Description:
def variant (name, parents_or_properties, explicit_properties = []):
""" Declares a new variant.
First determines explicit properties for this variant, by
refining parents' explicit properties with the passed explicit
properties. The result is remembered and will be used if
this variant is used as parent.
Second, determines the full property set for this variant by
adding to the explicit properties default values for all properties
which are neither present nor symmetric.
Lastly, makes the appropriate value of the 'variant' property expand
to the full property set.
name: Name of the variant
parents_or_properties: Specifies parent variants, if
'explicit_properties' are given,
and explicit_properties otherwise.
explicit_properties: Explicit properties.
"""
|
parents = []
if not explicit_properties:
explicit_properties = parents_or_properties
else:
parents = parents_or_properties
inherited = property_set.empty()
if parents:
# If we allowed multiple parents, we'd have to check for conflicts
# between base variants, and there has been no demand for that, so we don't bother.
if len (parents) > 1:
raise BaseException ("Multiple base variants are not yet supported")
p = parents[0]
# TODO: the check may be stricter
if not feature.is_implicit_value (p):
raise BaseException ("Invalid base variant '%s'" % p)
inherited = __variant_explicit_properties[p]
explicit_properties = property_set.create_with_validation(explicit_properties)
explicit_properties = inherited.refine(explicit_properties)
# Record explicitly specified properties for this variant
# We do this after inheriting parents' properties, so that
# they affect other variants, derived from this one.
__variant_explicit_properties[name] = explicit_properties
feature.extend('variant', [name])
feature.compose ("<variant>" + name, explicit_properties.all())
|
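Typical calls mirror Boost.Build's built-in variants; a sketch (the exact property lists of the real 'debug' and 'profile' variants may differ):

variant('debug', ['<optimization>off', '<debug-symbols>on', '<inlining>off'])
variant('profile', ['release'], ['<profiling>on', '<debug-symbols>on'])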
<SYSTEM_TASK:>
For all virtual targets for the same dependency graph as self,
<END_TASK>
<USER_TASK:>
Description:
def adjust_properties (self, prop_set):
""" For all virtual targets for the same dependency graph as self,
i.e. which belong to the same main target, add their directories
to include path.
"""
|
assert isinstance(prop_set, property_set.PropertySet)
s = self.targets () [0].creating_subvariant ()
return prop_set.add_raw (s.implicit_includes ('include', 'H'))
|
<SYSTEM_TASK:>
Create a model that makes recommendations using item popularity. When no
<END_TASK>
<USER_TASK:>
Description:
def create(observation_data,
user_id='user_id', item_id='item_id', target=None,
user_data=None, item_data=None,
random_seed=0,
verbose=True):
"""
Create a model that makes recommendations using item popularity. When no
target column is provided, the popularity is determined by the number of
observations involving each item. When a target is provided, popularity
is computed using the item's mean target value. When the target column
contains ratings, for example, the model computes the mean rating for
each item and uses this to rank items for recommendations.
Parameters
----------
observation_data : SFrame
The dataset to use for training the model. It must contain a column of
user ids and a column of item ids. Each row represents an observed
interaction between the user and the item. The (user, item) pairs
are stored with the model so that they can later be excluded from
recommendations if desired. It can optionally contain a target ratings
column. All other columns are interpreted by the underlying model as
side features for the observations.
The user id and item id columns must be of type 'int' or 'str'. The
target column must be of type 'int' or 'float'.
user_id : string, optional
The name of the column in `observation_data` that corresponds to the
user id.
item_id : string, optional
The name of the column in `observation_data` that corresponds to the
item id.
target : string, optional
The `observation_data` can optionally contain a column of scores
representing ratings given by the users. If present, the name of this
column may be specified via the `target` argument.
user_data : SFrame, optional
Side information for the users. This SFrame must have a column with
the same name as what is specified by the `user_id` input parameter.
`user_data` can provide any amount of additional user-specific
information.
item_data : SFrame, optional
Side information for the items. This SFrame must have a column with
the same name as what is specified by the `item_id` input parameter.
`item_data` can provide any amount of additional item-specific
information.
verbose : bool, optional
Enables verbose output.
Examples
--------
>>> sf = turicreate.SFrame({'user_id': ["0", "0", "0", "1", "1", "2", "2", "2"],
... 'item_id': ["a", "b", "c", "a", "b", "b", "c", "d"],
... 'rating': [1, 3, 2, 5, 4, 1, 4, 3]})
>>> m = turicreate.popularity_recommender.create(sf, target='rating')
See Also
--------
PopularityRecommender
"""
|
from turicreate._cython.cy_server import QuietProgress
opts = {}
model_proxy = _turicreate.extensions.popularity()
model_proxy.init_options(opts)
if user_data is None:
user_data = _turicreate.SFrame()
if item_data is None:
item_data = _turicreate.SFrame()
nearest_items = _turicreate.SFrame()
opts = {'user_id': user_id,
'item_id': item_id,
'target': target,
'random_seed': 1}
extra_data = {"nearest_items" : _turicreate.SFrame()}
with QuietProgress(verbose):
model_proxy.train(observation_data, user_data, item_data, opts, extra_data)
return PopularityRecommender(model_proxy)
|
<SYSTEM_TASK:>
Replaces the grist of a string by a new one.
<END_TASK>
<USER_TASK:>
Description:
def replace_grist (features, new_grist):
""" Replaces the grist of a string by a new one.
Returns the string with the new grist.
"""
|
assert is_iterable_typed(features, basestring) or isinstance(features, basestring)
assert isinstance(new_grist, basestring)
# this function is used a lot in the build phase and the original implementation
# was extremely slow; thus some of the weird-looking optimizations for this function.
single_item = False
if isinstance(features, str):
features = [features]
single_item = True
result = []
for feature in features:
# '<feature>value' -> ('<feature', '>', 'value')
# 'something' -> ('something', '', '')
# '<toolset>msvc/<feature>value' -> ('<toolset', '>', 'msvc/<feature>value')
grist, split, value = feature.partition('>')
# if a partition didn't occur, then grist is just 'something'
# set the value to be the grist
if not value and not split:
value = grist
result.append(new_grist + value)
if single_item:
return result[0]
return result
|
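A brief hedged sketch of the expected behavior, assuming the '<feature>value' grist convention described in the docstring:
# Hedged sketch; outputs follow from the partition logic above.
replace_grist('<toolset>msvc', '<compiler>')   # -> '<compiler>msvc'
replace_grist('plain-value', '<feature>')      # -> '<feature>plain-value'
replace_grist(['<a>x', '<b>y'], '<c>')         # -> ['<c>x', '<c>y']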
<SYSTEM_TASK:>
Gets the value of a property, that is, the part following the grist, if any.
<END_TASK>
<USER_TASK:>
Description:
def get_value (property):
""" Gets the value of a property, that is, the part following the grist, if any.
"""
|
assert is_iterable_typed(property, basestring) or isinstance(property, basestring)
return replace_grist (property, '')
|
<SYSTEM_TASK:>
Returns the grist of a string.
<END_TASK>
<USER_TASK:>
Description:
def get_grist (value):
""" Returns the grist of a string.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
|
assert is_iterable_typed(value, basestring) or isinstance(value, basestring)
def get_grist_one (name):
split = __re_grist_and_value.match (name)
if not split:
return ''
else:
return split.group (1)
if isinstance (value, str):
return get_grist_one (value)
else:
return [ get_grist_one (v) for v in value ]
|
<SYSTEM_TASK:>
Returns the value without grist.
<END_TASK>
<USER_TASK:>
Description:
def ungrist (value):
""" Returns the value without grist.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
|
assert is_iterable_typed(value, basestring) or isinstance(value, basestring)
def ungrist_one (value):
stripped = __re_grist_content.match (value)
if not stripped:
raise BaseException ("in ungrist: '%s' is not of the form <.*>" % value)
return stripped.group (1)
if isinstance (value, str):
return ungrist_one (value)
else:
return [ ungrist_one (v) for v in value ]
|
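For orientation, a hedged sketch of how get_grist and ungrist complement each other; results assume the module regexes capture the '<...>' portion as the docstrings describe:
# Hedged sketch; exact results depend on __re_grist_and_value / __re_grist_content.
get_grist('<toolset>msvc')    # -> '<toolset>'
get_grist('plain')            # -> ''
ungrist('<toolset>')          # -> 'toolset'
ungrist(['<a>', '<b>'])       # -> ['a', 'b']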
<SYSTEM_TASK:>
Replaces the suffix of name by new_suffix.
<END_TASK>
<USER_TASK:>
Description:
def replace_suffix (name, new_suffix):
""" Replaces the suffix of name by new_suffix.
If no suffix exists, the new one is added.
"""
|
assert isinstance(name, basestring)
assert isinstance(new_suffix, basestring)
split = os.path.splitext (name)
return split [0] + new_suffix
|
<SYSTEM_TASK:>
Returns true if running on windows, whether in cygwin or not.
<END_TASK>
<USER_TASK:>
Description:
def on_windows ():
""" Returns true if running on windows, whether in cygwin or not.
"""
|
if bjam.variable("NT"):
return True
elif bjam.variable("UNIX"):
uname = bjam.variable("JAMUNAME")
if uname and uname[0].startswith("CYGWIN"):
return True
return False
|
<SYSTEM_TASK:>
Validate the main Kmeans dataset.
<END_TASK>
<USER_TASK:>
Description:
def _validate_dataset(dataset):
"""
Validate the main Kmeans dataset.
Parameters
----------
dataset: SFrame
Input dataset.
"""
|
if not (isinstance(dataset, _SFrame)):
raise TypeError("Input 'dataset' must be an SFrame.")
if dataset.num_rows() == 0 or dataset.num_columns() == 0:
raise ValueError("Input 'dataset' has no data.")
|
<SYSTEM_TASK:>
Validate the initial centers.
<END_TASK>
<USER_TASK:>
Description:
def _validate_initial_centers(initial_centers):
"""
Validate the initial centers.
Parameters
----------
initial_centers : SFrame
Initial cluster center locations, in SFrame form.
"""
|
if not (isinstance(initial_centers, _SFrame)):
raise TypeError("Input 'initial_centers' must be an SFrame.")
if initial_centers.num_rows() == 0 or initial_centers.num_columns() == 0:
raise ValueError("An 'initial_centers' argument is provided " +
"but has no data.")
|
<SYSTEM_TASK:>
Validate the combination of the `num_clusters` and `initial_centers`
<END_TASK>
<USER_TASK:>
Description:
def _validate_num_clusters(num_clusters, initial_centers, num_rows):
"""
Validate the combination of the `num_clusters` and `initial_centers`
parameters in the Kmeans model create function. If the combination is
valid, determine and return the correct number of clusters.
Parameters
----------
num_clusters : int
Specified number of clusters.
initial_centers : SFrame
Specified initial cluster center locations, in SFrame form. If the
number of rows in this SFrame does not match `num_clusters`, there is a
problem.
num_rows : int
Number of rows in the input dataset.
Returns
-------
_num_clusters : int
The correct number of clusters to use going forward
"""
|
## Basic validation
if num_clusters is not None and not isinstance(num_clusters, int):
raise _ToolkitError("Parameter 'num_clusters' must be an integer.")
## Determine the correct number of clusters.
if initial_centers is None:
if num_clusters is None:
raise ValueError("Number of clusters cannot be determined from " +
"'num_clusters' or 'initial_centers'. You must " +
"specify one of these arguments.")
else:
_num_clusters = num_clusters
else:
num_centers = initial_centers.num_rows()
if num_clusters is None:
_num_clusters = num_centers
else:
if num_clusters != num_centers:
raise ValueError("The value of 'num_clusters' does not match " +
"the number of provided initial centers. " +
"Please provide only one of these arguments " +
"or ensure the values match.")
else:
_num_clusters = num_clusters
if _num_clusters > num_rows:
raise ValueError("The desired number of clusters exceeds the number " +
"of data points. Please set 'num_clusters' to be " +
"smaller than the number of data points.")
return _num_clusters
|
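A hedged sketch of the three branches of the validation logic; `centers` is a hypothetical SFrame with 3 rows:
# Hedged sketch; `centers` is assumed to be a 3-row SFrame.
_validate_num_clusters(3, None, num_rows=100)         # -> 3 (num_clusters alone)
_validate_num_clusters(None, centers, num_rows=100)   # -> 3 (inferred from initial centers)
_validate_num_clusters(5, centers, num_rows=100)      # raises ValueError (count mismatch)
_validate_num_clusters(None, None, num_rows=100)      # raises ValueError (neither given)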
<SYSTEM_TASK:>
Identify the subset of desired `features` that are valid for the Kmeans
<END_TASK>
<USER_TASK:>
Description:
def _validate_features(features, column_type_map, valid_types, label):
"""
Identify the subset of desired `features` that are valid for the Kmeans
model. A warning is emitted for each feature that is excluded.
Parameters
----------
features : list[str]
Desired feature names.
column_type_map : dict[str, type]
Dictionary mapping each column name to the type of values in the
column.
valid_types : list[type]
Exclude features whose type is not in this list.
label : str
Name of the row label column.
Returns
-------
valid_features : list[str]
Names of features to include in the model.
"""
|
if not isinstance(features, list):
raise TypeError("Input 'features' must be a list, if specified.")
if len(features) == 0:
raise ValueError("If specified, input 'features' must contain " +
"at least one column name.")
## Remove duplicates
num_original_features = len(features)
features = set(features)
if len(features) < num_original_features:
_logging.warning("Duplicates have been removed from the list of features")
## Remove the row label
if label in features:
features.remove(label)
_logging.warning("The row label has been removed from the list of features.")
## Check the type of each feature against the list of valid types
valid_features = []
for ftr in features:
if not isinstance(ftr, str):
_logging.warning("Feature '{}' excluded. ".format(ftr) +
"Features must be specified as strings " +
"corresponding to column names in the input dataset.")
elif ftr not in column_type_map.keys():
_logging.warning("Feature '{}' excluded because ".format(ftr) +
"it is not in the input dataset.")
elif column_type_map[ftr] not in valid_types:
_logging.warning("Feature '{}' excluded because of its type. ".format(ftr) +
"Kmeans features must be int, float, dict, or array.array type.")
else:
valid_features.append(ftr)
if len(valid_features) == 0:
raise _ToolkitError("All specified features have been excluded. " +
"Please specify valid features.")
return valid_features
|
<SYSTEM_TASK:>
Create a k-means clustering model. The KmeansModel object contains the
<END_TASK>
<USER_TASK:>
Description:
def create(dataset, num_clusters=None, features=None, label=None,
initial_centers=None, max_iterations=10, batch_size=None,
verbose=True):
"""
Create a k-means clustering model. The KmeansModel object contains the
computed cluster centers and the cluster assignment for each instance in
the input 'dataset'.
Given a number of clusters, k-means iteratively chooses the best cluster
centers and assigns nearby points to the best cluster. If no points change
cluster membership between iterations, the algorithm terminates.
Parameters
----------
dataset : SFrame
Each row in the SFrame is an observation.
num_clusters : int
Number of clusters. This is the 'k' in k-means.
features : list[str], optional
Names of feature columns to use in computing distances between
observations and cluster centers. 'None' (the default) indicates that
all columns should be used as features. Columns may be of the following
types:
- *Numeric*: values of numeric type integer or float.
- *Array*: list of numeric (int or float) values. Each list element
is treated as a distinct feature in the model.
- *Dict*: dictionary of keys mapped to numeric values. Each unique key
is treated as a distinct feature in the model.
Note that columns of type *list* are not supported. Convert them to
array columns if all entries in the list are of numeric types.
label : str, optional
Name of the column to use as row labels in the Kmeans output. The
values in this column must be integers or strings. If not specified,
row numbers are used by default.
initial_centers : SFrame, optional
Initial centers to use when starting the K-means algorithm. If
specified, this parameter overrides the *num_clusters* parameter. The
'initial_centers' SFrame must contain the same features used in the
input 'dataset'.
If not specified (the default), initial centers are chosen
intelligently with the K-means++ algorithm.
max_iterations : int, optional
The maximum number of iterations to run. Prints a warning if the
algorithm does not converge after max_iterations iterations. If set to
0, the model returns clusters defined by the initial centers and
assignments to those centers.
batch_size : int, optional
Number of randomly-chosen data points to use in each iteration. If
'None' (the default) or greater than the number of rows in 'dataset',
then this parameter is ignored: all rows of `dataset` are used in each
iteration and model training terminates once point assignments stop
changing or `max_iterations` is reached.
verbose : bool, optional
If True, print model training progress to the screen.
Returns
-------
out : KmeansModel
A Model object containing a cluster id for each vertex, and the centers
of the clusters.
See Also
--------
KmeansModel
Notes
-----
- Integer features in the 'dataset' or 'initial_centers' inputs are
converted internally to float type, and the corresponding features in the
output centers are float-typed.
- It can be important for the K-means model to standardize the features so
they have the same scale. This function does *not* standardize
automatically.
References
----------
- `Wikipedia - k-means clustering
<http://en.wikipedia.org/wiki/K-means_clustering>`_
- Arthur, D. and Vassilvitskii, S. (2007) `k-means++: The Advantages of
Careful Seeding <http://ilpubs.stanford.edu:8090/778/1/2006-13.pdf>`_. In
Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete
Algorithms. pp. 1027-1035.
- Elkan, C. (2003) `Using the triangle inequality to accelerate k-means
<http://www.aaai.org/Papers/ICML/2003/ICML03-022.pdf>`_. In Proceedings
of the Twentieth International Conference on Machine Learning, Volume 3,
pp. 147-153.
- Sculley, D. (2010) `Web Scale K-Means Clustering
<http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf>`_. In
Proceedings of the 19th International Conference on World Wide Web. pp.
1177-1178
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
"""
|
opts = {'model_name': 'kmeans',
'max_iterations': max_iterations,
}
## Validate the input dataset and initial centers.
_validate_dataset(dataset)
if initial_centers is not None:
_validate_initial_centers(initial_centers)
## Validate and determine the correct number of clusters.
opts['num_clusters'] = _validate_num_clusters(num_clusters,
initial_centers,
dataset.num_rows())
## Validate the row label
col_type_map = {c: dataset[c].dtype for c in dataset.column_names()}
if label is not None:
_validate_row_label(label, col_type_map)
if label in ['cluster_id', 'distance']:
raise ValueError("Row label column name cannot be 'cluster_id' " +
"or 'distance'; these are reserved for other " +
"columns in the Kmeans model's output.")
opts['row_labels'] = dataset[label]
opts['row_label_name'] = label
else:
opts['row_labels'] = _tc.SArray.from_sequence(dataset.num_rows())
opts['row_label_name'] = 'row_id'
## Validate the features relative to the input dataset.
if features is None:
features = dataset.column_names()
valid_features = _validate_features(features, col_type_map,
valid_types=[_array, dict, int, float],
label=label)
sf_features = dataset.select_columns(valid_features)
opts['features'] = sf_features
## Validate the features in the initial centers (if provided)
if initial_centers is not None:
try:
initial_centers = initial_centers.select_columns(valid_features)
except:
raise ValueError("Specified features cannot be extracted from " +
"the provided initial centers.")
if initial_centers.column_types() != sf_features.column_types():
raise TypeError("Feature types are different in the dataset and " +
"initial centers.")
else:
initial_centers = _tc.SFrame()
opts['initial_centers'] = initial_centers
## Validate the batch size and determine the training method.
if batch_size is None:
opts['method'] = 'elkan'
opts['batch_size'] = dataset.num_rows()
else:
opts['method'] = 'minibatch'
opts['batch_size'] = batch_size
## Create and return the model
with _QuietProgress(verbose):
params = _tc.extensions._kmeans.train(opts)
return KmeansModel(params['model'])
|
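Beyond the docstring example, a hedged sketch of the two less common paths, explicit initial centers and minibatch training; parameter values are illustrative and `sf` is the SFrame from the example above:
# Hedged sketch; parameter values are illustrative only.
centers = sf[0:3]   # reuse the first three rows as starting centers
model_a = turicreate.kmeans.create(sf, initial_centers=centers, max_iterations=20)
model_b = turicreate.kmeans.create(sf, num_clusters=3, batch_size=8, verbose=False)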
<SYSTEM_TASK:>
Return predicted cluster label for instances in the new 'dataset'.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset, output_type='cluster_id', verbose=True):
"""
Return predicted cluster label for instances in the new 'dataset'.
K-means predictions are made by assigning each new instance to the
closest cluster center.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include the features used for
model training; additional columns are ignored.
output_type : {'cluster_id', 'distance'}, optional
Form of the prediction. 'cluster_id' (the default) returns the
cluster label assigned to each input instance, while 'distance'
returns the Euclidean distance between the instance and its
assigned cluster's center.
verbose : bool, optional
If True, print progress updates to the screen.
Returns
-------
out : SArray
Model predictions. Depending on the specified `output_type`, either
the assigned cluster label or the distance of each point to its
closest cluster center. The order of the predictions is the same as
order of the input data rows.
See Also
--------
create
Examples
--------
>>> sf = turicreate.SFrame({
... 'x1': [0.6777, -9.391, 7.0385, 2.2657, 7.7864, -10.16, -8.162,
... 8.8817, -9.525, -9.153, 2.0860, 7.6619, 6.5511, 2.7020],
... 'x2': [5.6110, 8.5139, 5.3913, 5.4743, 8.3606, 7.8843, 2.7305,
... 5.1679, 6.7231, 3.7051, 1.7682, 7.4608, 3.1270, 6.5624]})
...
>>> model = turicreate.kmeans.create(sf, num_clusters=3)
...
>>> sf_new = turicreate.SFrame({'x1': [-5.6584, -1.0167, -9.6181],
... 'x2': [-6.3803, -3.7937, -1.1022]})
>>> clusters = model.predict(sf_new, output_type='cluster_id')
>>> print clusters
[1, 0, 1]
"""
|
## Validate the input dataset.
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Validate the output type.
if not isinstance(output_type, str):
raise TypeError("The 'output_type' parameter must be a string.")
if not output_type in ('cluster_id', 'distance'):
raise ValueError("The 'output_type' parameter must be either " +
"'cluster_label' or 'distance'.")
## Get model features.
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Compute predictions.
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'dataset': sf_features}
with _QuietProgress(verbose):
result = _tc.extensions._kmeans.predict(opts)
sf_result = result['predictions']
if output_type == 'distance':
return sf_result['distance']
else:
return sf_result['cluster_id']
|
<SYSTEM_TASK:>
Return the value of a given field.
<END_TASK>
<USER_TASK:>
Description:
def _get(self, field):
"""
Return the value of a given field.
+-----------------------+----------------------------------------------+
| Field | Description |
+=======================+==============================================+
| batch_size | Number of randomly chosen examples to use in |
| | each training iteration. |
+-----------------------+----------------------------------------------+
| cluster_id | Cluster assignment for each data point and |
| | Euclidean distance to the cluster center |
+-----------------------+----------------------------------------------+
| cluster_info | Cluster centers, sum of squared Euclidean |
| | distances from each cluster member to the |
| | assigned center, and the number of data |
| | points belonging to the cluster |
+-----------------------+----------------------------------------------+
| features | Names of feature columns |
+-----------------------+----------------------------------------------+
| max_iterations | Maximum number of iterations to perform |
+-----------------------+----------------------------------------------+
| method | Algorithm used to train the model. |
+-----------------------+----------------------------------------------+
| num_clusters | Number of clusters |
+-----------------------+----------------------------------------------+
| num_examples | Number of examples in the dataset |
+-----------------------+----------------------------------------------+
| num_features | Number of feature columns used |
+-----------------------+----------------------------------------------+
| num_unpacked_features | Number of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
| training_iterations | Total number of iterations performed |
+-----------------------+----------------------------------------------+
| training_time | Total time taken to cluster the data |
+-----------------------+----------------------------------------------+
| unpacked_features | Names of features unpacked from the |
| | feature columns |
+-----------------------+----------------------------------------------+
Parameters
----------
field : str
The name of the field to query.
Returns
-------
out
Value of the requested field
"""
|
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'field': field}
response = _tc.extensions._kmeans.get_value(opts)
return response['value']
|
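A hedged usage sketch; `model` is assumed to be a trained KmeansModel and the field names come from the table above:
# Hedged sketch; `model` is a hypothetical trained KmeansModel.
model._get('num_clusters')    # e.g. 3
model._get('cluster_info')    # centers, cluster sizes, within-cluster squared distances
model._get('training_time')   # seconds spent clustering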
<SYSTEM_TASK:>
If `text` is an SArray of strings or an SArray of lists of strings, the
<END_TASK>
<USER_TASK:>
Description:
def count_words(text, to_lower=True, delimiters=DEFAULT_DELIMITERS):
"""
If `text` is an SArray of strings or an SArray of lists of strings, the
occurrences of each word are counted for each row in the SArray.
If `text` is an SArray of dictionaries, the keys are tokenized and the
values are the counts. Counts for the same word, in the same row, are
added together.
This output is commonly known as the "bag-of-words" representation of text
data.
Parameters
----------
text : SArray[str | dict | list]
SArray of type: string, dict or list.
to_lower : bool, optional
If True, all strings are converted to lower case before counting.
delimiters : list[str], None, optional
Input strings are tokenized using `delimiters` characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
smart handling of punctuations.
Returns
-------
out : SArray[dict]
An SArray with the same length as the `text` input. For each row, the keys
of the dictionary are the words and the values are the corresponding counts.
See Also
--------
count_ngrams, tf_idf, tokenize,
References
----------
- `Bag of words model <http://en.wikipedia.org/wiki/Bag-of-words_model>`_
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps.",
"Word word WORD, word!!!word"])
# Run count_words
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'quick': 1, 'brown': 1, 'the': 1, 'fox': 1, 'jumps.': 1},
{'word': 2, 'word,': 1, 'word!!!word': 1}]
# Run count_words with Penn treebank style tokenization to handle
# punctuations
>>> turicreate.text_analytics.count_words(sa, delimiters=None)
dtype: dict
Rows: 2
[{'brown': 1, 'jumps': 1, 'fox': 1, '.': 1, 'quick': 1, 'the': 1},
{'word': 3, 'word!!!word': 1, ',': 1}]
# Run count_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 0.5},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bob': 1.5, 'alice': 1.5}, {'a': 5, 'dog': 5, 'cat': 5}]
# Run count_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah'], ['a dog', 'a dog cat']])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bar': 1, 'bah': 1, 'one': 1}, {'a': 2, 'dog': 2, 'cat': 1}]
"""
|
_raise_error_if_not_sarray(text, "text")
## Compute word counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.WordCounter(features='docs',
to_lower=to_lower,
delimiters=delimiters,
output_column_prefix=None)
output_sf = fe.fit_transform(sf)
return output_sf['docs']
|
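One further hedged sketch with custom delimiters, which the docstring examples do not cover; the output shown is what whitespace-plus-period splitting would produce:
# Hedged sketch; '.' added as an extra single-character delimiter.
sa = turicreate.SArray(["The quick brown fox jumps."])
turicreate.text_analytics.count_words(sa, delimiters=[" ", "."])
# -> [{'the': 1, 'quick': 1, 'brown': 1, 'fox': 1, 'jumps': 1}]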
<SYSTEM_TASK:>
Return an SArray of ``dict`` type where each element contains the count
<END_TASK>
<USER_TASK:>
Description:
def count_ngrams(text, n=2, method="word", to_lower=True,
delimiters=DEFAULT_DELIMITERS,
ignore_punct=True, ignore_space=True):
"""
Return an SArray of ``dict`` type where each element contains the count
for each of the n-grams that appear in the corresponding input element.
The n-grams can be specified to be either character n-grams or word
n-grams. The input SArray could contain strings, dicts with string keys
and numeric values, or lists of strings.
Parameters
----------
text : SArray[str | dict | list]
Input text data.
n : int, optional
The number of words in each n-gram. An ``n`` value of 1 returns word
counts.
method : {'word', 'character'}, optional
If "word", the function performs a count of word n-grams. If
"character", does a character n-gram count.
to_lower : bool, optional
If True, all words are converted to lower case before counting.
delimiters : list[str], None, optional
If method is "word", input strings are tokenized using `delimiters`
characters in this list. Each entry in this list must contain a single
character. If set to `None`, then a Penn treebank-style tokenization is
used, which contains smart handling of punctuations. If method is
"character," this option is ignored.
ignore_punct : bool, optional
If method is "character", indicates if *punctuations* between words are
counted as part of the n-gram. For instance, with the input SArray
element of "fun.games", if this parameter is set to False one
tri-gram would be 'n.g'. If ``ignore_punct`` is set to True, there
would be no such tri-gram (there would still be 'nga'). This
parameter has no effect if the method is set to "word".
ignore_space : bool, optional
If method is "character", indicates if *spaces* between words are
counted as part of the n-gram. For instance, with the input SArray
element of "fun games", if this parameter is set to False one
tri-gram would be 'n g'. If ``ignore_space`` is set to True, there
would be no such tri-gram (there would still be 'nga'). This
parameter has no effect if the method is set to "word".
Returns
-------
out : SArray[dict]
An SArray of dictionary type, where each key is the n-gram string
and each value is its count.
See Also
--------
count_words, tokenize,
Notes
-----
- Ignoring case (with ``to_lower``) involves a full string copy of the
SArray data. To increase speed for large documents, set ``to_lower`` to
False.
- Punctuation and spaces are both delimiters by default when counting
word n-grams. When counting character n-grams, one may choose to ignore
punctuations, spaces, neither, or both.
References
----------
- `N-gram wikipedia article <http://en.wikipedia.org/wiki/N-gram>`_
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Counting word n-grams:
>>> sa = turicreate.SArray(['I like big dogs. I LIKE BIG DOGS.'])
>>> turicreate.text_analytics.count_ngrams(sa, 3)
dtype: dict
Rows: 1
[{'big dogs i': 1, 'like big dogs': 2, 'dogs i like': 1, 'i like big': 2}]
# Counting character n-grams:
>>> sa = turicreate.SArray(['Fun. Is. Fun'])
>>> turicreate.text_analytics.count_ngrams(sa, 3, "character")
dtype: dict
Rows: 1
[{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}]
# Run count_ngrams with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 0.5},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.count_ngrams(sa)
dtype: dict
Rows: 2
[{'bob alice': 0.5, 'alice bob': 1}, {'dog cat': 5, 'a dog': 5}]
# Run count_ngrams with list input
>>> sa = turicreate.SArray([['one', 'bar bah'], ['a dog', 'a dog cat']])
>>> turicreate.text_analytics.count_ngrams(sa)
dtype: dict
Rows: 2
[{'bar bah': 1}, {'dog cat': 1, 'a dog': 2}]
"""
|
_raise_error_if_not_sarray(text, "text")
# Compute ngrams counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.NGramCounter(features='docs',
n=n,
method=method,
to_lower=to_lower,
delimiters=delimiters,
ignore_punct=ignore_punct,
ignore_space=ignore_space,
output_column_prefix=None)
output_sf = fe.fit_transform(sf)
return output_sf['docs']
|
<SYSTEM_TASK:>
Compute the TF-IDF scores for each word in each document. The collection
<END_TASK>
<USER_TASK:>
Description:
def tf_idf(text):
"""
Compute the TF-IDF scores for each word in each document. The collection
of documents must be in bag-of-words format.
.. math::
\mbox{TF-IDF}(w, d) = tf(w, d) * log(N / f(w))
where :math:`tf(w, d)` is the number of times word :math:`w` appeared in
document :math:`d`, :math:`f(w)` is the number of documents word :math:`w`
appeared in, :math:`N` is the number of documents, and we use the
natural logarithm.
Parameters
----------
text : SArray[str | dict | list]
Input text data.
Returns
-------
out : SArray[dict]
The same document corpus where each score has been replaced by the
TF-IDF transformation.
See Also
--------
count_words, count_ngrams, tokenize,
References
----------
- `Wikipedia - TF-IDF <https://en.wikipedia.org/wiki/TFIDF>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> docs_tfidf = turicreate.text_analytics.tf_idf(docs)
"""
|
_raise_error_if_not_sarray(text, "text")
if len(text) == 0:
return _turicreate.SArray()
dataset = _turicreate.SFrame({'docs': text})
scores = _feature_engineering.TFIDF('docs').fit_transform(dataset)
return scores['docs']
|
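A tiny worked instance of the formula may help: with N = 2 documents, a word appearing 3 times in one document and in no other scores 3 * log(2/1) there, while a word present in both documents scores log(2/2) = 0 regardless of its count.
# Hedged arithmetic check of the formula (natural logarithm, as stated above).
import math
tf, N, f = 3, 2, 1
score = tf * math.log(float(N) / f)   # 3 * ln(2) ≈ 2.079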
<SYSTEM_TASK:>
Tokenize the input SArray of text strings and return the list of tokens.
<END_TASK>
<USER_TASK:>
Description:
def tokenize(text, to_lower=False, delimiters=DEFAULT_DELIMITERS):
"""
Tokenize the input SArray of text strings and return the list of tokens.
Parameters
----------
text : SArray[str]
Input data of strings representing English text. This tokenizer is not
intended to process XML, HTML, or other structured text formats.
to_lower : bool, optional
If True, all strings are converted to lower case before tokenization.
delimiters : list[str], None, optional
Input strings are tokenized using delimiter characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
smart handling of punctuations.
Returns
-------
out : SArray[list]
Each text string in the input is mapped to a list of tokens.
See Also
--------
count_words, count_ngrams, tf_idf
References
----------
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
>>> docs = turicreate.SArray(['This is the first sentence.',
"This one, it's the second sentence."])
# Default tokenization by space characters
>>> turicreate.text_analytics.tokenize(docs)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence.'],
['This', 'one,', "it's", 'the', 'second', 'sentence.']]
# Penn treebank-style tokenization
>>> turicreate.text_analytics.tokenize(docs, delimiters=None)
dtype: list
Rows: 2
[['This', 'is', 'the', 'first', 'sentence', '.'],
['This', 'one', ',', 'it', "'s", 'the', 'second', 'sentence', '.']]
"""
|
_raise_error_if_not_sarray(text, "text")
## Compute word counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.Tokenizer(features='docs',
to_lower=to_lower,
delimiters=delimiters,
output_column_prefix=None)
tokens = fe.fit_transform(sf)
return tokens['docs']
|
<SYSTEM_TASK:>
Cross-validation with given parameters.
<END_TASK>
<USER_TASK:>
Description:
def cv(params, dtrain, num_boost_round=10, nfold=3, metrics=(),
obj=None, feval=None, fpreproc=None, as_pandas=True,
show_progress=None, show_stdv=True, seed=0):
# pylint: disable = invalid-name
"""Cross-validation with given paramaters.
Parameters
----------
params : dict
Booster params.
dtrain : DMatrix
Data to be trained.
num_boost_round : int
Number of boosting iterations.
nfold : int
Number of folds in CV.
metrics : list of strings
Evaluation metrics to be watched in CV.
obj : function
Custom objective function.
feval : function
Custom evaluation function.
fpreproc : function
Preprocessing function that takes (dtrain, dtest, param) and returns
transformed versions of those.
as_pandas : bool, default True
Return pd.DataFrame when pandas is installed.
If False or pandas is not installed, return np.ndarray
show_progress : bool or None, default None
Whether to display the progress. If None, progress will be displayed
when np.ndarray is returned.
show_stdv : bool, default True
Whether to display the standard deviation in progress.
Results are not affected, and always contains std.
seed : int
Seed used to generate the folds (passed to numpy.random.seed).
Returns
-------
evaluation history : list(string)
"""
|
results = []
cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc)
for i in range(num_boost_round):
for fold in cvfolds:
fold.update(i, obj)
res = aggcv([f.eval(i, feval) for f in cvfolds],
show_stdv=show_stdv, show_progress=show_progress,
as_pandas=as_pandas)
results.append(res)
if as_pandas:
try:
import pandas as pd
results = pd.DataFrame(results)
except ImportError:
results = np.array(results)
else:
results = np.array(results)
return results
|
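A hedged usage sketch; parameter values are illustrative and `dtrain` is assumed to be an existing DMatrix:
# Hedged sketch: 5-fold CV over 50 boosting rounds, watching AUC.
params = {'max_depth': 3, 'eta': 0.1, 'objective': 'binary:logistic'}
history = cv(params, dtrain, num_boost_round=50, nfold=5,
             metrics=('auc',), seed=42, show_stdv=True)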
<SYSTEM_TASK:>
Remove the layer, and reconnect each of its predecessor to each of
<END_TASK>
<USER_TASK:>
Description:
def _remove_layer_and_reconnect(self, layer):
""" Remove the layer, and reconnect each of its predecessor to each of
its successor
"""
|
successors = self.get_successors(layer)
predecessors = self.get_predecessors(layer)
# remove layer's edges
for succ in successors:
self._remove_edge(layer, succ)
for pred in predecessors:
self._remove_edge(pred, layer)
# connect predecessors and successors
for pred in predecessors:
for succ in successors:
self._add_edge(pred, succ)
# remove layer in the data structures
self.layer_list.remove(layer)
self.keras_layer_map.pop(layer)
# re-assign input and output layers if layer happens to be an
# input / output layer
if layer in self.input_layers:
idx = self.input_layers.index(layer)
self.input_layers.pop(idx)
for pred in predecessors:
self.input_layers.insert(idx, pred)
idx += 1
if layer in self.output_layers:
idx = self.output_layers.index(layer)
self.output_layers.pop(idx)
for succ in successors:
self.output_layers.insert(idx, succ)
idx += 1
|
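Concretely, removing B from A -> B -> C deletes the A->B and B->C edges and adds A->C; with several predecessors and successors the rewiring produces every cross edge. A hedged sketch with hypothetical layer names:
# Hedged sketch; `graph` and the layer names are hypothetical.
# before: A -> B -> C  and  D -> B -> E
# graph._remove_layer_and_reconnect('B')
# after:  A -> C, A -> E, D -> C, D -> E  (B dropped from layer_list and keras_layer_map)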
<SYSTEM_TASK:>
Constructs an SArray of size with a const value.
<END_TASK>
<USER_TASK:>
Description:
def from_const(cls, value, size, dtype=type(None)):
"""
Constructs an SArray of size with a const value.
Parameters
----------
value : [int | float | str | array.array | list | dict | datetime]
The value to fill the SArray
size : int
The size of the SArray
dtype : type
The type of the SArray. If not specified, is automatically detected
from the value. This should be specified if value=None since the
actual type of the SArray can be anything.
Examples
--------
Construct an SArray consisting of 10 zeroes:
>>> turicreate.SArray.from_const(0, 10)
Construct an SArray consisting of 10 missing string values:
>>> turicreate.SArray.from_const(None, 10, str)
"""
|
assert isinstance(size, (int, long)) and size >= 0, "size must be a non-negative int"
if not isinstance(value, (type(None), int, float, str, array.array, list, dict, datetime.datetime)):
raise TypeError('Cannot create sarray of value type %s' % str(type(value)))
proxy = UnitySArrayProxy()
proxy.load_from_const(value, size, dtype)
return cls(_proxy=proxy)
|
<SYSTEM_TASK:>
Construct an SArray from a json file or glob of json files.
<END_TASK>
<USER_TASK:>
Description:
def read_json(cls, filename):
"""
Construct an SArray from a json file or glob of json files.
The json file must contain a list of dictionaries. The returned
SArray type will be of dict type
Parameters
----------
filename : str
The filename or glob to load into an SArray.
Examples
--------
Construct an SArray from a local JSON file named 'data.json':
>>> turicreate.SArray.read_json('/data/data.json')
Construct an SArray from all JSON files /data/data*.json
>>> turicreate.SArray.read_json('/data/data*.json')
"""
|
proxy = UnitySArrayProxy()
proxy.load_from_json_record_files(_make_internal_url(filename))
return cls(_proxy = proxy)
|
<SYSTEM_TASK:>
Selects elements from either istrue or isfalse depending on the value
<END_TASK>
<USER_TASK:>
Description:
def where(cls, condition, istrue, isfalse, dtype=None):
"""
Selects elements from either istrue or isfalse depending on the value
of the condition SArray.
Parameters
----------
condition : SArray
An SArray of values such that for each value, if non-zero, yields a
value from istrue, otherwise from isfalse.
istrue : SArray or constant
The elements selected if condition is true. If istrue is an SArray,
this must be of the same length as condition.
isfalse : SArray or constant
The elements selected if condition is false. If istrue is an SArray,
this must be of the same length as condition.
dtype : type
The type of result SArray. This is required if both istrue and isfalse
are constants of ambiguous types.
Examples
--------
Returns an SArray with the same values as g with values above 10
clipped to 10
>>> g = SArray([6,7,8,9,10,11,12,13])
>>> SArray.where(g > 10, 10, g)
dtype: int
Rows: 8
[6, 7, 8, 9, 10, 10, 10, 10]
Returns an SArray with the same values as g with values below 10
clipped to 10
>>> SArray.where(g > 10, g, 10)
dtype: int
Rows: 8
[10, 10, 10, 10, 10, 11, 12, 13]
Returns an SArray with the same values of g with all values == 1
replaced by None
>>> g = SArray([1,2,3,4,1,2,3,4])
>>> SArray.where(g == 1, None, g)
dtype: int
Rows: 8
[None, 2, 3, 4, None, 2, 3, 4]
Returns an SArray with the same values of g, but with each missing value
replaced by its corresponding element in replace_none
>>> g = SArray([1,2,None,None])
>>> replace_none = SArray([3,3,2,2])
>>> SArray.where(g != None, g, replace_none)
dtype: int
Rows: 4
[1, 2, 2, 2]
"""
|
true_is_sarray = isinstance(istrue, SArray)
false_is_sarray = isinstance(isfalse, SArray)
if not true_is_sarray and false_is_sarray:
istrue = cls(_proxy=condition.__proxy__.to_const(istrue, isfalse.dtype))
if true_is_sarray and not false_is_sarray:
isfalse = cls(_proxy=condition.__proxy__.to_const(isfalse, istrue.dtype))
if not true_is_sarray and not false_is_sarray:
if dtype is None:
if istrue is None:
dtype = type(isfalse)
elif isfalse is None:
dtype = type(istrue)
elif type(istrue) != type(isfalse):
raise TypeError("true and false inputs are of different types")
elif type(istrue) == type(isfalse):
dtype = type(istrue)
if dtype is None:
raise TypeError("Both true and false are None. Resultant type cannot be inferred.")
istrue = cls(_proxy=condition.__proxy__.to_const(istrue, dtype))
isfalse = cls(_proxy=condition.__proxy__.to_const(isfalse, dtype))
return cls(_proxy=condition.__proxy__.ternary_operator(istrue.__proxy__, isfalse.__proxy__))
|
<SYSTEM_TASK:>
Saves the SArray to file.
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename, format=None):
"""
Saves the SArray to file.
The saved SArray will be in a directory named with the `filename`
parameter.
Parameters
----------
filename : string
A local path or a remote URL. If format is 'text', it will be
saved as a text file. If format is 'binary', a directory will be
created at the location which will contain the SArray.
format : {'binary', 'text', 'csv'}, optional
Format in which to save the SFrame. Binary saved SArrays can be
loaded much faster and without any format conversion losses.
'text' and 'csv' are synonymous: Each SArray row will be written
as a single line in an output text file. If not
given, the format is inferred from the filename: if the file
name ends with 'csv', 'txt' or '.csv.gz', it is saved in 'csv' format,
otherwise in 'binary' format.
"""
|
from .sframe import SFrame as _SFrame
if format is None:
if filename.endswith(('.csv', '.csv.gz', 'txt')):
format = 'text'
else:
format = 'binary'
if format == 'binary':
with cython_context():
self.__proxy__.save(_make_internal_url(filename))
elif format == 'text' or format == 'csv':
sf = _SFrame({'X1':self})
with cython_context():
sf.__proxy__.save_as_csv(_make_internal_url(filename), {'header':False})
else:
raise ValueError("Unsupported format: {}".format(format))
|
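A hedged sketch of the format inference and the explicit override; paths are illustrative:
# Hedged sketch; file paths are illustrative only.
sa = turicreate.SArray([1, 2, 3])
sa.save('my_array')                       # no recognized suffix -> binary directory
sa.save('my_array.csv')                   # '.csv' suffix -> one value per line as text
sa.save('my_array.dat', format='text')    # explicit format overrides the inference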
<SYSTEM_TASK:>
This returns an SArray with, for each input string, a dict from the unique,
<END_TASK>
<USER_TASK:>
Description:
def _count_words(self, to_lower=True, delimiters=["\r", "\v", "\n", "\f", "\t", " "]):
"""
This returns an SArray with, for each input string, a dict from the unique,
delimited substrings to their number of occurrences within the original
string.
The SArray must be of type string.
..WARNING:: This function is deprecated, and will be removed in future
versions of Turi Create. Please use the `text_analytics.count_words`
function instead.
Parameters
----------
to_lower : bool, optional
"to_lower" indicates whether to map the input strings to lower case
before counts
delimiters: list[string], optional
"delimiters" is a list of which characters to delimit on to find tokens
Returns
-------
out : SArray
for each input string, a dict from the unique, delimited substrings
to their number of occurrences within the original string.
Examples
--------
>>> sa = turicreate.SArray(["The quick brown fox jumps.",
"Word word WORD, word!!!word"])
>>> sa._count_words()
dtype: dict
Rows: 2
[{'quick': 1, 'brown': 1, 'jumps.': 1, 'fox': 1, 'the': 1},
{'word': 2, 'word,': 1, 'word!!!word': 1}]
"""
|
if (self.dtype != str):
raise TypeError("Only SArray of string type is supported for counting bag of words")
if (not all([len(delim) == 1 for delim in delimiters])):
raise ValueError("Delimiters must be single-character strings")
# construct options, will extend over time
options = dict()
options["to_lower"] = to_lower == True
# defaults to std::isspace whitespace delimiters if no others passed in
options["delimiters"] = delimiters
with cython_context():
return SArray(_proxy=self.__proxy__.count_bag_of_words(options))
|
<SYSTEM_TASK:>
Create a boolean SArray by checking the keys of an SArray of
<END_TASK>
<USER_TASK:>
Description:
def dict_has_any_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has any of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains any key in the input list.
See Also
--------
dict_has_all_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"animal":1},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_any_keys(["is", "this", "are"])
dtype: int
Rows: 3
[1, 0, 1]
"""
|
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))
|
<SYSTEM_TASK:>
Create a boolean SArray by checking the keys of an SArray of
<END_TASK>
<USER_TASK:>
Description:
def dict_has_all_keys(self, keys):
"""
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.
Parameters
----------
keys : list
A list of key values to check each dictionary against.
Returns
-------
out : SArray
A SArray of int type, where each element indicates whether the
input SArray element contains all keys in the input list.
See Also
--------
dict_has_any_keys
Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
{"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
"""
|
if not _is_non_string_iterable(keys):
keys = [keys]
with cython_context():
return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))
|
<SYSTEM_TASK:>
Filter this SArray by a function.
<END_TASK>
<USER_TASK:>
Description:
def filter(self, fn, skip_na=True, seed=None):
"""
Filter this SArray by a function.
Returns a new SArray filtered by `fn`. If `fn` evaluates an
element to true, this element is copied to the new SArray. If not, it
isn't. Throws an exception if the return type of `fn` is not castable
to a boolean value.
Parameters
----------
fn : function
Function that filters the SArray. Must evaluate to bool or int.
skip_na : bool, optional
If True, will not apply fn to any undefined values.
seed : int, optional
Used as the seed if a random number generator is included in fn.
Returns
-------
out : SArray
The SArray filtered by fn. Each element of the SArray is of
type int.
Examples
--------
>>> sa = turicreate.SArray([1,2,3])
>>> sa.filter(lambda x: x < 3)
dtype: int
Rows: 2
[1, 2]
"""
|
assert callable(fn), "Input must be callable"
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
with cython_context():
return SArray(_proxy=self.__proxy__.filter(fn, skip_na, seed))
|
<SYSTEM_TASK:>
Create an SArray which contains a subsample of the current SArray.
<END_TASK>
<USER_TASK:>
Description:
def sample(self, fraction, seed=None, exact=False):
"""
Create an SArray which contains a subsample of the current SArray.
Parameters
----------
fraction : float
Fraction of the rows to fetch. Must be between 0 and 1.
if exact is False (default), the number of rows returned is
approximately the fraction times the number of rows.
seed : int, optional
The random seed for the random number generator.
exact: bool, optional
Defaults to False. If exact=True, an exact fraction is returned,
but at a performance penalty.
Returns
-------
out : SArray
The new SArray which contains the subsampled rows.
Examples
--------
>>> sa = turicreate.SArray(range(10))
>>> sa.sample(.3)
dtype: int
Rows: 3
[2, 6, 9]
"""
|
if (fraction > 1 or fraction < 0):
raise ValueError('Invalid sampling rate: ' + str(fraction))
if (len(self) == 0):
return SArray()
if seed is None:
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
with cython_context():
return SArray(_proxy=self.__proxy__.sample(fraction, seed, exact))
|
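A hedged sketch of the `exact` option; only the exact case guarantees the returned length:
# Hedged sketch; with exact=True the result has round(fraction * len(sa)) rows.
sa = turicreate.SArray(range(100))
approx = sa.sample(0.3, seed=5)               # roughly 30 rows
exact = sa.sample(0.3, seed=5, exact=True)    # exactly 30 rows, at a performance cost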
<SYSTEM_TASK:>
Returns an SArray with a hash of each element. seed can be used
<END_TASK>
<USER_TASK:>
Description:
def hash(self, seed=0):
"""
Returns an SArray with a hash of each element. seed can be used
to change the hash function to allow this method to be used for
random number generation.
Parameters
----------
seed : int
Defaults to 0. Can be changed to different values to get
different hash results.
Returns
-------
out : SArray
An integer SArray with a hash value for each element. Identical
elements are hashed to the same value
"""
|
with cython_context():
return SArray(_proxy=self.__proxy__.hash(seed))
|
<SYSTEM_TASK:>
Get the index of the minimum numeric value in SArray.
<END_TASK>
<USER_TASK:>
Description:
def argmin(self):
"""
Get the index of the minimum numeric value in SArray.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type.
Returns
-------
out : int
index of the minimum value of SArray
See Also
--------
argmax
Examples
--------
>>> turicreate.SArray([14, 62, 83, 72, 77, 96, 5, 25, 69, 66]).argmin()
"""
|
from .sframe import SFrame as _SFrame
if len(self) == 0:
return None
if not any([isinstance(self[0], i) for i in [int,float,long]]):
raise TypeError("SArray must be of type 'int', 'long', or 'float'.")
sf = _SFrame(self).add_row_number()
sf_out = sf.groupby(key_column_names=[],operations={'minimum_x1': _aggregate.ARGMIN('X1','id')})
return sf_out['minimum_x1'][0]
|
<SYSTEM_TASK:>
Mean of all the values in the SArray, or mean image.
<END_TASK>
<USER_TASK:>
Description:
def mean(self):
"""
Mean of all the values in the SArray, or mean image.
Returns None on an empty SArray. Raises an exception if called on an
SArray with non-numeric type or non-Image type.
Returns
-------
out : float | turicreate.Image
Mean of all values in SArray, or image holding per-pixel mean
across the input SArray.
"""
|
with cython_context():
if self.dtype == _Image:
from .. import extensions
return extensions.generate_mean(self)
else:
return self.__proxy__.mean()
|
<SYSTEM_TASK:>
Create a new SArray with all the values cast to str. The string format is
<END_TASK>
<USER_TASK:>
Description:
def datetime_to_str(self,format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to str. The string format is
specified by the 'format' parameter.
Parameters
----------
format : str
The format to output the string. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
Returns
-------
out : SArray[str]
The SArray converted to the type 'str'.
Examples
--------
>>> dt = datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5))
>>> sa = turicreate.SArray([dt])
>>> sa.datetime_to_str("%e %b %Y %T %ZP")
dtype: str
Rows: 1
[20 Oct 2011 09:30:10 GMT-05:00]
See Also
----------
str_to_datetime
References
----------
[1] Boost date time from string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
|
if(self.dtype != datetime.datetime):
raise TypeError("datetime_to_str expects SArray of datetime as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.datetime_to_str(format))
|
<SYSTEM_TASK:>
Create a new SArray with all the values cast to datetime. The string format is
<END_TASK>
<USER_TASK:>
Description:
def str_to_datetime(self,format="%Y-%m-%dT%H:%M:%S%ZP"):
"""
Create a new SArray with all the values cast to datetime. The string format is
specified by the 'format' parameter.
Parameters
----------
format : str
The string format of the input SArray. Default format is "%Y-%m-%dT%H:%M:%S%ZP".
If format is "ISO", the the format is "%Y%m%dT%H%M%S%F%q"
Returns
-------
out : SArray[datetime.datetime]
The SArray converted to the type 'datetime'.
Examples
--------
>>> sa = turicreate.SArray(["20-Oct-2011 09:30:10 GMT-05:30"])
>>> sa.str_to_datetime("%d-%b-%Y %H:%M:%S %ZP")
dtype: datetime
Rows: 1
datetime.datetime(2011, 10, 20, 9, 30, 10, tzinfo=GMT(-5.5))
See Also
----------
datetime_to_str
References
----------
[1] boost date time to string conversion guide (http://www.boost.org/doc/libs/1_48_0/doc/html/date_time/date_time_io.html)
"""
|
if(self.dtype != str):
raise TypeError("str_to_datetime expects SArray of str as input SArray")
with cython_context():
return SArray(_proxy=self.__proxy__.str_to_datetime(format))
|
<SYSTEM_TASK:>
Create a new SArray with all values cast to the given type. Throws an
<END_TASK>
<USER_TASK:>
Description:
def astype(self, dtype, undefined_on_failure=False):
"""
Create a new SArray with all values cast to the given type. Throws an
exception if the types are not castable to the given type.
Parameters
----------
dtype : {int, float, str, list, array.array, dict, datetime.datetime}
The type to cast the elements to in SArray
undefined_on_failure: bool, optional
If set to True, runtime cast failures will be emitted as missing
values rather than failing.
Returns
-------
out : SArray [dtype]
The SArray converted to the type ``dtype``.
Notes
-----
- The string parsing techniques used to handle conversion to dictionary
and list types are quite generic and permit a variety of interesting
formats to be interpreted. For instance, a JSON string can usually be
interpreted as a list or a dictionary type. See the examples below.
- For datetime-to-string and string-to-datetime conversions,
use sa.datetime_to_str() and sa.str_to_datetime() functions.
- For array.array to turicreate.Image conversions, use sa.pixel_array_to_image()
Examples
--------
>>> sa = turicreate.SArray(['1','2','3','4'])
>>> sa.astype(int)
dtype: int
Rows: 4
[1, 2, 3, 4]
Given an SArray of strings that look like dicts, convert to a dictionary
type:
>>> sa = turicreate.SArray(['{1:2 3:4}', '{a:b c:d}'])
>>> sa.astype(dict)
dtype: dict
Rows: 2
[{1: 2, 3: 4}, {'a': 'b', 'c': 'd'}]
"""
|
if (dtype == _Image) and (self.dtype == array.array):
raise TypeError("Cannot cast from image type to array with sarray.astype(). Please use sarray.pixel_array_to_image() instead.")
with cython_context():
return SArray(_proxy=self.__proxy__.astype(dtype, undefined_on_failure))
|
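One more hedged sketch for `undefined_on_failure`, which the docstring mentions but does not demonstrate:
# Hedged sketch; failed casts become missing values instead of raising.
sa = turicreate.SArray(['1', 'two', '3'])
sa.astype(int, undefined_on_failure=True)   # -> [1, None, 3]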
<SYSTEM_TASK:>
Create a new SArray with each value clipped to be within the given
<END_TASK>
<USER_TASK:>
Description:
def clip(self, lower=float('nan'), upper=float('nan')):
"""
Create a new SArray with each value clipped to be within the given
bounds.
In this case, "clipped" means that values below the lower bound will be
set to the lower bound value. Values above the upper bound will be set
to the upper bound value. This function can operate on SArrays of
numeric type as well as array type, in which case each individual
element in each array is clipped. By default ``lower`` and ``upper`` are
set to ``float('nan')`` which indicates the respective bound should be
ignored. The method fails if invoked on an SArray of non-numeric type.
Parameters
----------
lower : int, optional
The lower bound used to clip. Ignored if equal to ``float('nan')``
(the default).
upper : int, optional
The upper bound used to clip. Ignored if equal to ``float('nan')``
(the default).
Returns
-------
out : SArray
See Also
--------
clip_lower, clip_upper
Examples
--------
>>> sa = turicreate.SArray([1,2,3])
>>> sa.clip(2,2)
dtype: int
Rows: 3
[2, 2, 2]
"""
|
with cython_context():
return SArray(_proxy=self.__proxy__.clip(lower, upper))
|
<SYSTEM_TASK:>
Create new SArray with all values clipped to the given lower bound. This
<END_TASK>
<USER_TASK:>
Description:
def clip_lower(self, threshold):
"""
Create new SArray with all values clipped to the given lower bound. This
function can operate on numeric arrays, as well as vector arrays, in
which case each individual element in each vector is clipped. Throws an
exception if the SArray is empty or the types are non-numeric.
Parameters
----------
threshold : float
The lower bound used to clip values.
Returns
-------
out : SArray
See Also
--------
clip, clip_upper
Examples
--------
>>> sa = turicreate.SArray([1,2,3])
>>> sa.clip_lower(2)
dtype: int
Rows: 3
[2, 2, 3]
"""
|
with cython_context():
return SArray(_proxy=self.__proxy__.clip(threshold, float('nan')))
|
<SYSTEM_TASK:>
Get an SArray that contains the last n elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def tail(self, n=10):
"""
Get an SArray that contains the last n elements in the SArray.
Parameters
----------
n : int
The number of elements to fetch
Returns
-------
out : SArray
A new SArray which contains the last n rows of the current SArray.
"""
|
with cython_context():
return SArray(_proxy=self.__proxy__.tail(n))
|
<SYSTEM_TASK:>
Create an SArray indicating which elements are in the top k.
<END_TASK>
<USER_TASK:>
Description:
def is_topk(self, topk=10, reverse=False):
"""
Create an SArray indicating which elements are in the top k.
Entries are '1' if the corresponding element in the current SArray is a
part of the top k elements, and '0' if that corresponding element is
not. Order is descending by default.
Parameters
----------
topk : int
The number of elements to determine if 'top'
reverse : bool
If True, return the topk elements in ascending order
Returns
-------
out : SArray (of type int)
Notes
-----
This is used internally by SFrame's topk function.
"""
|
with cython_context():
return SArray(_proxy = self.__proxy__.topk_index(topk, reverse))
|
<SYSTEM_TASK:>
Summary statistics that can be calculated with one pass over the SArray.
<END_TASK>
<USER_TASK:>
Description:
def summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a turicreate.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~turicreate.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~turicreate.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
"""
|
from ..data_structures.sketch import Sketch
if (self.dtype == _Image):
raise TypeError("summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys is not None):
if (self.dtype != dict and self.dtype != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not _is_non_string_iterable(sub_sketch_keys):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop()
if (self.dtype == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys)
|
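A hedged sketch of the `sub_sketch_keys` option for dict-typed data; the key names are illustrative:
# Hedged sketch; key names are illustrative.
sa = turicreate.SArray([{'a': 1, 'b': 2}, {'a': 3}])
sk = sa.summary(sub_sketch_keys=['a', 'b'])
sub = sk.element_sub_sketch('a')   # sketch restricted to values stored under key 'a'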
<SYSTEM_TASK:>
Return an SFrame containing counts of unique values. The resulting
<END_TASK>
<USER_TASK:>
Description:
def value_counts(self):
"""
Return an SFrame containing counts of unique values. The resulting
SFrame will be sorted in descending frequency.
Returns
-------
out : SFrame
An SFrame containing 2 columns : 'value', and 'count'. The SFrame will
be sorted in descending order by the column 'count'.
See Also
--------
SFrame.summary
Examples
--------
>>> sa = turicreate.SArray([1,1,2,2,2,2,3,3,3,3,3,3,3])
>>> sa.value_counts()
Columns:
value int
count int
Rows: 3
Data:
+-------+-------+
| value | count |
+-------+-------+
| 3 | 7 |
| 2 | 4 |
| 1 | 2 |
+-------+-------+
[3 rows x 2 columns]
"""
|
from .sframe import SFrame as _SFrame
return _SFrame({'value':self}).groupby('value', {'count':_aggregate.COUNT}).sort('count', ascending=False)
|
<SYSTEM_TASK:>
Append an SArray to the current SArray. Creates a new SArray with the
<END_TASK>
<USER_TASK:>
Description:
def append(self, other):
"""
Append an SArray to the current SArray. Creates a new SArray with the
rows from both SArrays. Both SArrays must be of the same type.
Parameters
----------
other : SArray
Another SArray whose rows are appended to current SArray.
Returns
-------
out : SArray
A new SArray that contains rows from both SArrays, with rows from
the ``other`` SArray coming after all rows from the current SArray.
See Also
--------
SFrame.append
Examples
--------
>>> sa = turicreate.SArray([1, 2, 3])
>>> sa2 = turicreate.SArray([4, 5, 6])
>>> sa.append(sa2)
dtype: int
Rows: 6
[1, 2, 3, 4, 5, 6]
"""
|
if type(other) is not SArray:
raise RuntimeError("SArray append can only work with SArray")
if self.dtype != other.dtype:
raise RuntimeError("Data types in both SArrays have to be the same")
with cython_context():
return SArray(_proxy = self.__proxy__.append(other.__proxy__))
|
<SYSTEM_TASK:>
Get all unique values in the current SArray.
<END_TASK>
<USER_TASK:>
Description:
def unique(self):
"""
Get all unique values in the current SArray.
Raises a TypeError if the SArray is of dictionary type. Will not
necessarily preserve the order of the given SArray in the new SArray.
Returns
-------
out : SArray
A new SArray that contains the unique values of the current SArray.
See Also
--------
SFrame.unique
"""
|
from .sframe import SFrame as _SFrame
tmp_sf = _SFrame()
tmp_sf.add_column(self, 'X1', inplace=True)
res = tmp_sf.groupby('X1',{})
return SArray(_proxy=res['X1'].__proxy__)
|
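A hedged sketch; since the result order is unspecified, sort before comparing:
# Hedged sketch; unique() does not preserve order.
sa = turicreate.SArray([1, 2, 2, 3, 3, 3])
sorted(sa.unique())   # -> [1, 2, 3]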