<SYSTEM_TASK:>
Visualize the model.
<END_TASK>
<USER_TASK:>
Description:
def visualize_spec(self, port=None, input_shape_dict=None):
"""
Visualize the model.
Parameters
----------
port : int
if server is to be hosted on specific localhost port
input_shape_dict : dict
The shapes are calculated assuming the batch and sequence
are 1 i.e. (1, 1, C, H, W). If either is not 1, then provide
full input shape
Returns
-------
None
Examples
--------
>>> model = coremltools.models.MLModel('HousePricer.mlmodel')
>>> model.visualize_spec()
""" |
spec = self._spec
model_type = spec.WhichOneof('Type')
model_description = spec.description
input_spec = model_description.input
output_spec = model_description.output
spec_inputs = []
for model_input in input_spec:
spec_inputs.append((model_input.name, str(model_input.type)))
spec_outputs = []
for model_output in output_spec:
spec_outputs.append((model_output.name, str(model_output.type)))
cy_nodes = []
cy_edges = []
cy_nodes.append({
'data': {
'id': 'input_node',
'name': '',
'info': {
'type': 'input node'
},
'classes': 'input',
}
})
for model_input, input_type in spec_inputs:
cy_nodes.append({
'data': {
'id': str(model_input),
'name': str(model_input),
'info': {
'type': "\n".join(str(input_type).split("\n")),
'inputs': str([]),
'outputs': str([model_input])
},
'parent': 'input_node'
},
'classes': 'input'
})
if model_type == 'pipeline':
pipeline_spec = spec.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineRegressor':
pipeline_spec = spec.pipelineRegressor.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'pipelineClassifier':
pipeline_spec = spec.pipelineClassifier.pipeline
cy_data = _pipeline_nodes_and_edges(cy_nodes,
cy_edges,
pipeline_spec,
spec_outputs
)
elif model_type == 'neuralNetwork':
nn_spec = spec.neuralNetwork
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkClassifier':
nn_spec = spec.neuralNetworkClassifier
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
elif model_type == 'neuralNetworkRegressor':
nn_spec = spec.neuralNetworkRegressor
cy_data = _neural_network_nodes_and_edges(nn_spec,
cy_nodes,
cy_edges,
spec_outputs,
input_spec,
input_shape_dict=input_shape_dict
)
else:
print("Model is not of type Pipeline or Neural Network "
"and cannot be visualized")
return
import coremltools
web_dir = _os.path.join(_os.path.dirname(coremltools.__file__),
'graph_visualization')
with open('{}/model.json'.format(web_dir), 'w') as file:
_json.dump(cy_data, file)
_start_server(port, web_dir) |
<SYSTEM_TASK:>
Construct composite distance parameters based on selected features and their
<END_TASK>
<USER_TASK:>
Description:
def _construct_auto_distance(feature_names, column_names, column_types, sample):
"""
Construct composite distance parameters based on selected features and their
types.
""" |
## Make a dictionary from the column_names and column_types
col_type_dict = {k: v for k, v in zip(column_names, column_types)}
## Loop through feature names, appending a distance component if the
# feature's type is *not* numeric. If the type *is* numeric, append it to
# the numeric_cols list, then at the end make a numeric columns distance
# component.
composite_distance_params = []
numeric_cols = []
for c in feature_names:
if col_type_dict[c] == str:
composite_distance_params.append([[c], _turicreate.distances.levenshtein, 1])
elif col_type_dict[c] == dict:
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] == array.array:
composite_distance_params.append([[c], _turicreate.distances.euclidean, 1])
elif col_type_dict[c] == list:
only_str_lists = _validate_lists(sample[c], allowed_types=[str])
if not only_str_lists:
raise TypeError("Only lists of all str objects are currently supported")
composite_distance_params.append([[c], _turicreate.distances.jaccard, 1])
elif col_type_dict[c] in [int, float, array.array, list]:
numeric_cols.append(c)
else:
raise TypeError("Unable to automatically determine a distance "+\
"for column {}".format(c))
# Make the standalone numeric column distance component
if len(numeric_cols) > 0:
composite_distance_params.append([numeric_cols, _turicreate.distances.euclidean, 1])
return composite_distance_params |
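The list returned above is the same composite distance structure that turicreate's public nearest neighbors API accepts directly. A hedged sketch (the column names and toy data are illustrative, not taken from this module):
import turicreate as tc

sf = tc.SFrame({'name': ['alice', 'bob', 'carol'],
                'tags': [{'x': 1}, {'y': 2}, {'x': 1, 'y': 1}],
                'height': [1.6, 1.8, 1.7],
                'age': [31, 25, 40]})

# Mirrors what _construct_auto_distance would build for this schema:
# one component per non-numeric column, plus one grouped numeric component.
composite_params = [
    [['name'], tc.distances.levenshtein, 1],
    [['tags'], tc.distances.jaccard, 1],
    [['height', 'age'], tc.distances.euclidean, 1],
]
model = tc.nearest_neighbors.create(sf, distance=composite_params)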
<SYSTEM_TASK:>
List the fields stored in the model, including data, model, and
<END_TASK>
<USER_TASK:>
Description:
def _list_fields(self):
"""
List the fields stored in the model, including data, model, and
training options. Each field can be queried with the ``get`` method.
Returns
-------
out : list
List of fields queryable with the ``get`` method.
""" |
opts = {'model': self.__proxy__, 'model_name': self.__name__}
response = _turicreate.extensions._nearest_neighbors.list_fields(opts)
return sorted(response.keys()) |
<SYSTEM_TASK:>
For each row of the input 'dataset', retrieve the nearest neighbors
<END_TASK>
<USER_TASK:>
Description:
def query(self, dataset, label=None, k=5, radius=None, verbose=True):
"""
For each row of the input 'dataset', retrieve the nearest neighbors
from the model's stored data. In general, the query dataset does not
need to be the same as the reference data stored in the model, but if
it is, the 'include_self_edges' parameter can be set to False to
exclude results that match query points to themselves.
Parameters
----------
dataset : SFrame
Query data. Must contain columns with the same names and types as
the features used to train the model. Additional columns are
allowed, but ignored. Please see the nearest neighbors
:func:`~turicreate.nearest_neighbors.create` documentation for more
detail on allowable data types.
label : str, optional
Name of the query SFrame column with row labels. If 'label' is not
specified, row numbers are used to identify query dataset rows in
the output SFrame.
k : int, optional
Number of nearest neighbors to return from the reference set for
each query observation. The default is 5 neighbors, but setting it
to ``None`` will return all neighbors within ``radius`` of the
query point.
radius : float, optional
Only neighbors whose distance to a query point is smaller than this
value are returned. The default is ``None``, in which case the
``k`` nearest neighbors are returned for each query point,
regardless of distance.
verbose: bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame
An SFrame with the k-nearest neighbors of each query observation.
The result contains four columns: the first is the label of the
query observation, the second is the label of the nearby reference
observation, the third is the distance between the query and
reference observations, and the fourth is the rank of the reference
observation among the query's k-nearest neighbors.
See Also
--------
similarity_graph
Notes
-----
- The `dataset` input to this method *can* have missing values (in
contrast to the reference dataset used to create the nearest
neighbors model). Missing numeric values are imputed to be the mean
of the corresponding feature in the reference dataset, and missing
strings are imputed to be empty strings.
- If both ``k`` and ``radius`` are set to ``None``, each query point
returns all of the reference set. If the reference dataset has
:math:`n` rows and the query dataset has :math:`m` rows, the output
is an SFrame with :math:`nm` rows.
- For models created with the 'lsh' method, the query results may have
fewer query labels than input query points. Because LSH is an
approximate method, a query point may have fewer than 'k' neighbors.
If LSH returns no neighbors at all for a query, the query point is
omitted from the results.
Examples
--------
First construct a toy SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'label': range(3),
... 'feature1': [0.98, 0.62, 0.11],
... 'feature2': [0.69, 0.58, 0.36]})
>>> model = turicreate.nearest_neighbors.create(sf, 'label')
A new SFrame contains query observations with the same schema as the
reference SFrame. This SFrame is passed to the ``query`` method.
>>> queries = turicreate.SFrame({'label': range(3),
... 'feature1': [0.05, 0.61, 0.99],
... 'feature2': [0.06, 0.97, 0.86]})
>>> model.query(queries, 'label', k=2)
+-------------+-----------------+----------------+------+
| query_label | reference_label | distance | rank |
+-------------+-----------------+----------------+------+
| 0 | 2 | 0.305941170816 | 1 |
| 0 | 1 | 0.771556867638 | 2 |
| 1 | 1 | 0.390128184063 | 1 |
| 1 | 0 | 0.464004310325 | 2 |
| 2 | 0 | 0.170293863659 | 1 |
| 2 | 1 | 0.464004310325 | 2 |
+-------------+-----------------+----------------+------+
""" |
## Validate the 'dataset' input
_tkutl._raise_error_if_not_sframe(dataset, "dataset")
_tkutl._raise_error_if_sframe_empty(dataset, "dataset")
## Get model features
ref_features = self.features
sf_features = _tkutl._toolkits_select_columns(dataset, ref_features)
## Validate and preprocess the 'label' input
if label is None:
query_labels = _turicreate.SArray.from_sequence(len(dataset))
else:
if label not in dataset.column_names():
raise ValueError(
"Input 'label' must be a string matching the name of a " +\
"column in the reference SFrame 'dataset'.")
if not dataset[label].dtype == str and not dataset[label].dtype == int:
raise TypeError("The label column must contain integers or strings.")
if label in ref_features:
raise ValueError("The label column cannot be one of the features.")
query_labels = dataset[label]
## Validate neighborhood parameters 'k' and 'radius'
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'features': sf_features,
'query_labels': query_labels,
'k': k,
'radius': radius}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.query(opts)
return result['neighbors'] |
<SYSTEM_TASK:>
Construct the similarity graph on the reference dataset, which is
<END_TASK>
<USER_TASK:>
Description:
def similarity_graph(self, k=5, radius=None, include_self_edges=False,
output_type='SGraph', verbose=True):
"""
Construct the similarity graph on the reference dataset, which is
already stored in the model. This is conceptually very similar to
running `query` with the reference set, but this method is optimized
for the purpose, syntactically simpler, and automatically removes
self-edges.
Parameters
----------
k : int, optional
Maximum number of neighbors to return for each point in the
dataset. Setting this to ``None`` deactivates the constraint, so
that all neighbors are returned within ``radius`` of a given point.
radius : float, optional
For a given point, only neighbors within this distance are
returned. The default is ``None``, in which case the ``k`` nearest
neighbors are returned for each query point, regardless of
distance.
include_self_edges : bool, optional
For most distance functions, each point in the model's reference
dataset is its own nearest neighbor. If this parameter is set to
False, this result is ignored, and the nearest neighbors are
returned *excluding* the point itself.
output_type : {'SGraph', 'SFrame'}, optional
By default, the results are returned in the form of an SGraph,
where each point in the reference dataset is a vertex and an edge A
-> B indicates that vertex B is a nearest neighbor of vertex A. If
'output_type' is set to 'SFrame', the output is in the same form as
the results of the 'query' method: an SFrame with columns
indicating the query label (in this case the query data is the same
as the reference data), reference label, distance between the two
points, and the rank of the neighbor.
verbose : bool, optional
If True, print progress updates and model details.
Returns
-------
out : SFrame or SGraph
The type of the output object depends on the 'output_type'
parameter. See the parameter description for more detail.
Notes
-----
- If both ``k`` and ``radius`` are set to ``None``, each data point is
matched to the entire dataset. If the reference dataset has
:math:`n` rows, the output is an SFrame with :math:`n^2` rows (or an
SGraph with :math:`n^2` edges).
- For models created with the 'lsh' method, the output similarity graph
may have fewer vertices than there are data points in the original
reference set. Because LSH is an approximate method, a query point
may have fewer than 'k' neighbors. If LSH returns no neighbors at all
for a query and self-edges are excluded, the query point is omitted
from the results.
Examples
--------
First construct an SFrame and create a nearest neighbors model:
>>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
... 'x2': [0.69, 0.58, 0.36]})
...
>>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')
Unlike the ``query`` method, there is no need for a second dataset with
``similarity_graph``.
>>> g = model.similarity_graph(k=1) # an SGraph
>>> g.edges
+----------+----------+----------------+------+
| __src_id | __dst_id | distance | rank |
+----------+----------+----------------+------+
| 0 | 1 | 0.376430604494 | 1 |
| 2 | 1 | 0.55542776308 | 1 |
| 1 | 0 | 0.376430604494 | 1 |
+----------+----------+----------------+------+
""" |
## Validate inputs.
if k is not None:
if not isinstance(k, int):
raise ValueError("Input 'k' must be an integer.")
if k <= 0:
raise ValueError("Input 'k' must be larger than 0.")
if radius is not None:
if not isinstance(radius, (int, float)):
raise ValueError("Input 'radius' must be an integer or float.")
if radius < 0:
raise ValueError("Input 'radius' must be non-negative.")
## Set k and radius to special values to indicate 'None'
if k is None:
k = -1
if radius is None:
radius = -1.0
opts = {'model': self.__proxy__,
'model_name': self.__name__,
'k': k,
'radius': radius,
'include_self_edges': include_self_edges}
with QuietProgress(verbose):
result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)
knn = result['neighbors']
if output_type == "SFrame":
return knn
else:
sg = _SGraph(edges=knn, src_field='query_label',
dst_field='reference_label')
return sg |
<SYSTEM_TASK:>
Randomly split an SFrame into two SFrames based on the `session_id` such
<END_TASK>
<USER_TASK:>
Description:
def random_split_by_session(dataset, session_id, fraction=0.9, seed=None):
"""
Randomly split an SFrame into two SFrames based on the `session_id` such
that one split contains data for a `fraction` of the sessions while the
second split contains all data for the rest of the sessions.
Parameters
----------
dataset : SFrame
Dataset to split. It must contain a column of session ids.
session_id : string
The name of the column in `dataset` that corresponds to a unique
identifier for each session.
fraction : float, optional
Fraction of the sessions to fetch for the first returned SFrame. Must
be between 0 and 1. Once the sessions are split, all data from a single
session is in the same SFrame.
seed : int, optional
Seed for the random number generator used to split.
Examples
--------
.. sourcecode:: python
# Split the data so that train has 90% of the users.
>>> train, valid = tc.activity_classifier.util.random_split_by_session(
... dataset, session_id='session_id', fraction=0.9)
# For example: If dataset has 2055 sessions
>>> len(dataset['session_id'].unique())
2055
# The training set now has 90% of the sessions
>>> len(train['session_id'].unique())
1850
# The validation set has the remaining 10% of the sessions
>>> len(valid['session_id'].unique())
205
""" |
from random import Random
_raise_error_if_not_of_type(dataset, _SFrame, 'dataset')
_raise_error_if_not_of_type(session_id, str, 'session_id')
_raise_error_if_not_of_type(fraction, float, 'fraction')
_raise_error_if_not_of_type(seed, [int, type(None)], 'seed')
_numeric_param_check_range('fraction', fraction, 0, 1)
if session_id not in dataset.column_names():
raise _ToolkitError(
'Input "dataset" must contain a column called %s.' % session_id)
if seed is None:
# Include the nanosecond component as well.
import time
seed = abs(hash("%0.20f" % time.time())) % (2 ** 31)
# The cython bindings require this to be an int, so cast if we can.
try:
seed = int(seed)
except ValueError:
raise ValueError('The \'seed\' parameter must be of type int.')
random = Random()
# Create a random binary filter (boolean SArray), using the same probability across all lines
# that belong to the same session. In expectation, the desired fraction of the sessions will
# go to the training set.
# Since boolean filters preserve order - there is no need to re-sort the lines within each session.
# The boolean filter is a pseudorandom function of the session_id and the
# global seed above, allowing the train-test split to vary across runs using
# the same dataset.
def random_session_pick(session_id_hash):
random.seed(session_id_hash)
return random.uniform(0, 1) < fraction
chosen_filter = dataset[session_id].hash(seed).apply(random_session_pick)
train = dataset[chosen_filter]
valid = dataset[1 - chosen_filter]
return train, valid |
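The core idea, that every row of a session gets the same pseudorandom draw so whole sessions land on one side of the split, can be illustrated without SFrames. A self-contained sketch with toy data:
from random import Random
from zlib import crc32

rows = [{'session_id': s, 'value': i}
        for i, s in enumerate(['a', 'a', 'b', 'c', 'c', 'c'])]
seed, fraction = 42, 0.5
rng = Random()

def pick(session_id):
    # Seed the RNG from a hash of (session_id, seed) so the draw is
    # deterministic per session, mirroring dataset[session_id].hash(seed).
    rng.seed(crc32(('%s-%d' % (session_id, seed)).encode()))
    return rng.uniform(0, 1) < fraction

train = [r for r in rows if pick(r['session_id'])]
valid = [r for r in rows if not pick(r['session_id'])]
print(len(train), len(valid))  # rows from a given session are never split apart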
<SYSTEM_TASK:>
Reads the MS Build XML file at the path and returns its contents.
<END_TASK>
<USER_TASK:>
Description:
def read_msbuild_xml(path, values={}):
"""Reads the MS Build XML file at the path and returns its contents.
Keyword arguments:
values -- The map to append the contents to (default {})
""" |
# Attempt to read the file contents
try:
document = parse(path)
except Exception as e:
logging.exception('Could not read MS Build XML file at %s', path)
return values
# Convert the XML to JSON format
logging.info('Processing MS Build XML file at %s', path)
# Get the rule node
rule = document.getElementsByTagName('Rule')[0]
rule_name = rule.attributes['Name'].value
logging.info('Found rules for %s', rule_name)
# Preprocess Argument values
__preprocess_arguments(rule)
# Get all the values
converted_values = []
__convert(rule, 'EnumProperty', converted_values, __convert_enum)
__convert(rule, 'BoolProperty', converted_values, __convert_bool)
__convert(rule, 'StringListProperty', converted_values,
__convert_string_list)
__convert(rule, 'StringProperty', converted_values, __convert_string)
__convert(rule, 'IntProperty', converted_values, __convert_string)
values[rule_name] = converted_values
return values |
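A hedged usage sketch; the path below is hypothetical and stands in for an MSBuild rule file such as the ones shipped with Visual Studio:
rules = read_msbuild_xml(r'C:\hypothetical\path\to\cl.xml')
for rule_name, props in rules.items():
    print('%s: %d properties converted' % (rule_name, len(props)))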
<SYSTEM_TASK:>
Reads the MS Build JSON file at the path and returns its contents.
<END_TASK>
<USER_TASK:>
Description:
def read_msbuild_json(path, values=[]):
"""Reads the MS Build JSON file at the path and returns its contents.
Keyword arguments:
values -- The list to append the contents to (default [])
""" |
if not os.path.exists(path):
logging.info('Could not find MS Build JSON file at %s', path)
return values
try:
values.extend(__read_json_file(path))
except Exception as e:
logging.exception('Could not read MS Build JSON file at %s', path)
return values
logging.info('Processing MS Build JSON file at %s', path)
return values |
<SYSTEM_TASK:>
Merges the values between the current and previous run of the script.
<END_TASK>
<USER_TASK:>
Description:
def __merge_json_values(current, previous):
"""Merges the values between the current and previous run of the script.""" |
for value in current:
name = value['name']
# Find the previous value
previous_value = __find_and_remove_value(previous, value)
if previous_value is not None:
flags = value['flags']
previous_flags = previous_value['flags']
if flags != previous_flags:
logging.warning(
'Flags for %s are different. Using previous value.', name)
value['flags'] = previous_flags
else:
logging.warning('Value %s is a new value', name)
for value in previous:
name = value['name']
logging.warning(
'Value %s not present in current run. Appending value.', name)
current.append(value) |
<SYSTEM_TASK:>
Finds the value in the list that corresponds with the value of compare.
<END_TASK>
<USER_TASK:>
Description:
def __find_and_remove_value(list, compare):
"""Finds the value in the list that corresponds with the value of compare.""" |
# next throws if there are no matches
try:
found = next(value for value in list
if value['name'] == compare['name'] and value['switch'] ==
compare['switch'])
except:
return None
list.remove(found)
return found |
<SYSTEM_TASK:>
Finds elements with the given tag under the root, converts them using func,
<END_TASK>
<USER_TASK:>
Description:
def __convert(root, tag, values, func):
"""Converts the tag type found in the root and converts them using the func
and appends them to the values.
""" |
elements = root.getElementsByTagName(tag)
for element in elements:
converted = func(element)
# Append to the list
__append_list(values, converted) |
<SYSTEM_TASK:>
Modifies the flags in value if the node contains an Argument.
<END_TASK>
<USER_TASK:>
Description:
def __with_argument(node, value):
"""Modifies the flags in value if the node contains an Argument.""" |
arguments = node.getElementsByTagName('Argument')
if arguments:
logging.debug('Found argument within %s', value['name'])
value['flags'] = vsflags(VSFlags.UserValueIgnored, VSFlags.Continue) |
<SYSTEM_TASK:>
Preprocesses occurrences of Argument within the root.
<END_TASK>
<USER_TASK:>
Description:
def __preprocess_arguments(root):
"""Preprocesses occurrences of Argument within the root.
Argument XML values reference other values within the document by name. The
referenced value does not contain a switch. This function will add the
switch associated with the argument.
""" |
# Set the flags to require a value
flags = ','.join(vsflags(VSFlags.UserValueRequired))
# Search through the arguments
arguments = root.getElementsByTagName('Argument')
for argument in arguments:
reference = __get_attribute(argument, 'Property')
found = None
# Look for the argument within the root's children
for child in root.childNodes:
# Ignore Text nodes
if isinstance(child, Element):
name = __get_attribute(child, 'Name')
if name == reference:
found = child
break
if found is not None:
logging.info('Found property named %s', reference)
# Get the associated switch
switch = __get_attribute(argument.parentNode, 'Switch')
# See if there is already a switch associated with the element.
if __get_attribute(found, 'Switch'):
logging.debug('Copying node %s', reference)
clone = found.cloneNode(True)
root.insertBefore(clone, found)
found = clone
found.setAttribute('Switch', switch)
found.setAttribute('Flags', flags)
else:
logging.warning('Could not find property named %s', reference) |
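A standalone sketch of the same idea (not this script's API): an Argument element references another property by name, and the referenced property inherits the switch of the property containing the Argument. The element and property names below are illustrative, following the rule format described in the docstring:
from xml.dom.minidom import parseString

doc = parseString(
    '<Rule Name="CL">'
    '  <StringProperty Name="ObjectFile" Switch="Fo">'
    '    <Argument Property="ObjectFileName"/>'
    '  </StringProperty>'
    '  <StringProperty Name="ObjectFileName"/>'
    '</Rule>')
rule = doc.getElementsByTagName('Rule')[0]
argument = rule.getElementsByTagName('Argument')[0]
referenced = argument.getAttribute('Property')

# Copy the containing property's switch onto the referenced property.
for child in rule.getElementsByTagName('StringProperty'):
    if child.getAttribute('Name') == referenced:
        child.setAttribute('Switch', argument.parentNode.getAttribute('Switch'))
print(doc.toxml())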
<SYSTEM_TASK:>
Retrieves the attribute of the given name from the node.
<END_TASK>
<USER_TASK:>
Description:
def __get_attribute(node, name, default_value=''):
"""Retrieves the attribute of the given name from the node.
If not present then the default_value is used.
""" |
if node.hasAttribute(name):
return node.attributes[name].value.strip()
else:
return default_value |
<SYSTEM_TASK:>
Gets the output path for a file given the toolchain, rule and output_dir
<END_TASK>
<USER_TASK:>
Description:
def __output_path(toolchain, rule, output_dir):
"""Gets the output path for a file given the toolchain, rule and output_dir""" |
filename = '%s_%s.json' % (toolchain, rule)
return os.path.join(output_dir, filename) |
<SYSTEM_TASK:>
Writes a JSON file at the path with the values provided.
<END_TASK>
<USER_TASK:>
Description:
def __write_json_file(path, values):
"""Writes a JSON file at the path with the values provided.""" |
# Sort the keys to ensure ordering
sort_order = ['name', 'switch', 'comment', 'value', 'flags']
sorted_values = [
OrderedDict(
sorted(
value.items(), key=lambda value: sort_order.index(value[0])))
for value in values
]
with open(path, 'w') as f:
json.dump(sorted_values, f, indent=2, separators=(',', ': ')) |
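A hedged usage sketch, assuming it is called from within the same script (the double-underscore name is module-private); the output path and the property record are illustrative, but the keys match the fixed sort order above:
__write_json_file('v141_CL.json', [
    {'name': 'WarningLevel', 'switch': 'W', 'comment': 'Warning Level',
     'value': '3', 'flags': ['UserValue']},
])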
<SYSTEM_TASK:>
Appends the value to the list.
<END_TASK>
<USER_TASK:>
Description:
def __append_list(append_to, value):
"""Appends the value to the list.""" |
if value is not None:
if isinstance(value, list):
append_to.extend(value)
else:
append_to.append(value) |
<SYSTEM_TASK:>
Consumes input extracting definitions.
<END_TASK>
<USER_TASK:>
Description:
def ParseInput(self, a_file):
"""Consumes input extracting definitions.
Args:
a_file: The file like stream to parse.
Raises:
PDDMError if there are any issues.
""" |
input_lines = a_file.read().splitlines()
self.ParseLines(input_lines) |
<SYSTEM_TASK:>
Parses list of lines.
<END_TASK>
<USER_TASK:>
Description:
def ParseLines(self, input_lines):
"""Parses list of lines.
Args:
input_lines: A list of strings of input to parse (no newlines on the
strings).
Raises:
PDDMError if there are any issues.
""" |
current_macro = None
for line in input_lines:
if line.startswith('PDDM-'):
directive = line.split(' ', 1)[0]
if directive == 'PDDM-DEFINE':
name, args = self._ParseDefineLine(line)
if self._macros.get(name):
raise PDDMError('Attempt to redefine macro: "%s"' % line)
current_macro = self.MacroDefinition(name, args)
self._macros[name] = current_macro
continue
if directive == 'PDDM-DEFINE-END':
if not current_macro:
raise PDDMError('Got DEFINE-END directive without an active macro:'
' "%s"' % line)
current_macro = None
continue
raise PDDMError('Hit a line with an unknown directive: "%s"' % line)
if current_macro:
current_macro.AppendLine(line)
continue
# Allow blank lines between macro definitions.
if line.strip() == '':
continue
raise PDDMError('Hit a line that wasn\'t a directive and no open macro'
' definition: "%s"' % line) |
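A minimal sketch of the directive format ParseLines consumes; the MacroCollection class name and the import path are assumptions about the surrounding PDDM tool:
from pddm import MacroCollection  # hypothetical import path

collection = MacroCollection()
collection.ParseLines([
    'PDDM-DEFINE mumble(name)',
    'NSLog(@"%@", name);',
    'PDDM-DEFINE-END',
])
print(collection.Expand('mumble(foo)'))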
<SYSTEM_TASK:>
Expands the macro reference.
<END_TASK>
<USER_TASK:>
Description:
def Expand(self, macro_ref_str):
"""Expands the macro reference.
Args:
macro_ref_str: String of a macro reference (i.e. foo(a, b)).
Returns:
The text from the expansion.
Raises:
PDDMError if there are any issues.
""" |
match = _MACRO_RE.match(macro_ref_str)
if match is None or match.group(0) != macro_ref_str:
raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
if match.group('name') not in self._macros:
raise PDDMError('No macro named "%s".' % match.group('name'))
return self._Expand(match, [], macro_ref_str) |
<SYSTEM_TASK:>
Returns the default property values for the given features.
<END_TASK>
<USER_TASK:>
Description:
def defaults(features):
""" Returns the default property values for the given features.
""" |
assert is_iterable_typed(features, Feature)
# FIXME: should merge feature and property modules.
from . import property
result = []
for f in features:
if not f.free and not f.optional and f.default:
result.append(property.Property(f, f.default))
return result |
<SYSTEM_TASK:>
Returns true iff all elements of names are valid features.
<END_TASK>
<USER_TASK:>
Description:
def valid (names):
""" Returns true iff all elements of names are valid features.
""" |
if isinstance(names, str):
names = [names]
assert is_iterable_typed(names, basestring)
return all(name in __all_features for name in names) |
<SYSTEM_TASK:>
Returns true iff 'value_string' is a value_string
<END_TASK>
<USER_TASK:>
Description:
def is_implicit_value (value_string):
""" Returns true iff 'value_string' is a value_string
of an implicit feature.
""" |
assert isinstance(value_string, basestring)
if value_string in __implicit_features:
return __implicit_features[value_string]
v = value_string.split('-')
if v[0] not in __implicit_features:
return False
feature = __implicit_features[v[0]]
for subvalue in (v[1:]):
if not __find_implied_subfeature(feature, subvalue, v[0]):
return False
return True |
<SYSTEM_TASK:>
Returns the implicit feature associated with the given implicit value.
<END_TASK>
<USER_TASK:>
Description:
def implied_feature (implicit_value):
""" Returns the implicit feature associated with the given implicit value.
""" |
assert isinstance(implicit_value, basestring)
components = implicit_value.split('-')
if components[0] not in __implicit_features:
raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)
return __implicit_features[components[0]] |
<SYSTEM_TASK:>
Checks if name is a valid feature. Otherwise, raises an exception.
<END_TASK>
<USER_TASK:>
Description:
def validate_feature (name):
""" Checks if all name is a valid feature. Otherwise, raises an exception.
""" |
assert isinstance(name, basestring)
if name not in __all_features:
raise InvalidFeature ("'%s' is not a valid feature name" % name)
else:
return __all_features[name] |
<SYSTEM_TASK:>
Make all elements of properties corresponding to implicit features
<END_TASK>
<USER_TASK:>
Description:
def expand_subfeatures(properties, dont_validate = False):
"""
Make all elements of properties corresponding to implicit features
explicit, and express all subfeature values as separate properties
in their own right. For example, the property
gcc-2.95.2-linux-x86
might expand to
<toolset>gcc <toolset-version>2.95.2 <toolset-os>linux <toolset-cpu>x86
properties: A sequence with elements of the form
<feature>value-string or just value-string in the
case of implicit features.
dont_validate: If True, no validation of value strings will be done.
""" |
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
assert isinstance(dont_validate, int) # matches bools
result = []
for p in properties:
# Don't expand subfeatures in subfeatures
if p.feature.subfeature:
result.append (p)
else:
result.extend(__expand_subfeatures_aux (p, dont_validate))
return result |
<SYSTEM_TASK:>
Adds the given values to the given feature.
<END_TASK>
<USER_TASK:>
Description:
def extend (name, values):
""" Adds the given values to the given feature.
""" |
assert isinstance(name, basestring)
assert is_iterable_typed(values, basestring)
name = add_grist (name)
__validate_feature (name)
feature = __all_features [name]
if feature.implicit:
for v in values:
if v in __implicit_features:
raise BaseException ("'%s' is already associated with the feature '%s'" % (v, __implicit_features [v]))
__implicit_features[v] = feature
if values and not feature.values and not(feature.free or feature.optional):
# This is the first value specified for this feature,
# take it as default value
feature.set_default(values[0])
feature.add_values(values) |
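A hedged sketch of how declaration, extension, and the implicit-value helpers fit together; the b2.build.feature import path and the feature() registration call are assumptions about how this module is packaged:
from b2.build import feature  # assumed packaging of this module

feature.feature('toolset', ['gcc'], ['implicit'])  # assumed registration entry point
feature.extend('toolset', ['clang'])               # add another implicit value

print(bool(feature.is_implicit_value('clang')))    # True: 'clang' alone names the feature
print(feature.implied_feature('gcc'))              # the 'toolset' Feature object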
<SYSTEM_TASK:>
Checks that value-string is a valid value-string for the given feature.
<END_TASK>
<USER_TASK:>
Description:
def validate_value_string (f, value_string):
""" Checks that value-string is a valid value-string for the given feature.
""" |
assert isinstance(f, Feature)
assert isinstance(value_string, basestring)
if f.free or value_string in f.values:
return
values = [value_string]
if f.subfeatures:
if not value_string in f.values and \
not value_string in f.subfeatures:
values = value_string.split('-')
# An empty value is allowed for optional features
if not values[0] in f.values and \
(values[0] or not f.optional):
raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
for v in values [1:]:
# this will validate any subfeature values in value-string
implied_subfeature(f, v, values[0]) |
<SYSTEM_TASK:>
Sets the components of the given composite property.
<END_TASK>
<USER_TASK:>
Description:
def compose (composite_property_s, component_properties_s):
""" Sets the components of the given composite property.
All parameters are <feature>value strings
""" |
from . import property
component_properties_s = to_seq (component_properties_s)
composite_property = property.create_from_string(composite_property_s)
f = composite_property.feature
if len(component_properties_s) > 0 and isinstance(component_properties_s[0], property.Property):
component_properties = component_properties_s
else:
component_properties = [property.create_from_string(p) for p in component_properties_s]
if not f.composite:
raise BaseException ("'%s' is not a composite feature" % f)
if composite_property in __composite_properties:
raise BaseException ('components of "%s" already set: %s' % (composite_property, str (__composite_properties[composite_property])))
if composite_property in component_properties:
raise BaseException ('composite property "%s" cannot have itself as a component' % composite_property)
__composite_properties[composite_property] = component_properties |
<SYSTEM_TASK:>
Returns all values of the given feature specified by the given property set.
<END_TASK>
<USER_TASK:>
Description:
def get_values (feature, properties):
""" Returns all values of the given feature specified by the given property set.
""" |
if feature[0] != '<':
feature = '<' + feature + '>'
result = []
for p in properties:
if get_grist (p) == feature:
result.append (replace_grist (p, ''))
return result |
<SYSTEM_TASK:>
Expand all composite properties in the set so that all components
<END_TASK>
<USER_TASK:>
Description:
def expand_composites (properties):
""" Expand all composite properties in the set so that all components
are explicitly expressed.
""" |
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
explicit_features = set(p.feature for p in properties)
result = []
# now expand composite features
for p in properties:
expanded = expand_composite(p)
for x in expanded:
if not x in result:
f = x.feature
if f.free:
result.append (x)
elif not x in properties: # x is the result of expansion
if not f in explicit_features: # not explicitly-specified
if any(r.feature == f for r in result):
raise FeatureConflict(
"expansions of composite features result in "
"conflicting values for '%s'\nvalues: '%s'\none contributing composite property was '%s'" %
(f.name, [r.value for r in result if r.feature == f] + [x.value], p))
else:
result.append (x)
elif any(r.feature == f for r in result):
raise FeatureConflict ("explicitly-specified values of non-free feature '%s' conflict\n"
"existing values: '%s'\nvalue from expanding '%s': '%s'" % (f,
[r.value for r in result if r.feature == f], p, x.value))
else:
result.append (x)
return result |
<SYSTEM_TASK:>
Return true iff f is an ordinary subfeature of the parent_property's
<END_TASK>
<USER_TASK:>
Description:
def is_subfeature_of (parent_property, f):
""" Return true iff f is an ordinary subfeature of the parent_property's
feature, or if f is a subfeature of the parent_property's feature
specific to the parent_property's value.
""" |
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(f, Feature)
if not f.subfeature:
return False
p = f.parent
if not p:
return False
parent_feature = p[0]
parent_value = p[1]
if parent_feature != parent_property.feature:
return False
if parent_value and parent_value != parent_property.value:
return False
return True |
<SYSTEM_TASK:>
As is_subfeature_of, for subproperties.
<END_TASK>
<USER_TASK:>
Description:
def __is_subproperty_of (parent_property, p):
""" As is_subfeature_of, for subproperties.
""" |
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert isinstance(p, Property)
return is_subfeature_of (parent_property, p.feature) |
<SYSTEM_TASK:>
Given a property set which may consist of composite and implicit
<END_TASK>
<USER_TASK:>
Description:
def expand (properties):
""" Given a property set which may consist of composite and implicit
properties and combined subfeature values, returns an expanded,
normalized property set with all implicit features expressed
explicitly, all subfeature values individually expressed, and all
components of composite properties expanded. Non-free features
directly expressed in the input properties cause any values of
those features due to composite feature expansion to be dropped. If
two values of a given non-free feature are directly expressed in the
input, an error is issued.
""" |
if __debug__:
from .property import Property
assert is_iterable_typed(properties, Property)
expanded = expand_subfeatures(properties)
return expand_composites (expanded) |
<SYSTEM_TASK:>
Combine all subproperties into their parent properties
<END_TASK>
<USER_TASK:>
Description:
def compress_subproperties (properties):
""" Combine all subproperties into their parent properties
Requires: for every subproperty, there is a parent property. All
features are explicitly expressed.
This rule probably shouldn't be needed, but
build-request.expand-no-defaults is being abused for unintended
purposes and it needs help
""" |
from .property import Property
assert is_iterable_typed(properties, Property)
result = []
matched_subs = set()
all_subs = set()
for p in properties:
f = p.feature
if not f.subfeature:
subs = [x for x in properties if is_subfeature_of(p, x.feature)]
if subs:
matched_subs.update(subs)
subvalues = '-'.join (sub.value for sub in subs)
result.append(Property(
p.feature, p.value + '-' + subvalues,
p.condition))
else:
result.append(p)
else:
all_subs.add(p)
# TODO: these variables are used just for debugging. What's the overhead?
assert all_subs == matched_subs
return result |
<SYSTEM_TASK:>
Given a property, return the subset of features consisting of all
<END_TASK>
<USER_TASK:>
Description:
def __select_subfeatures (parent_property, features):
""" Given a property, return the subset of features consisting of all
ordinary subfeatures of the property's feature, and all specific
subfeatures of the property's feature which are conditional on the
property's value.
""" |
if __debug__:
from .property import Property
assert isinstance(parent_property, Property)
assert is_iterable_typed(features, Feature)
return [f for f in features if is_subfeature_of (parent_property, f)] |
<SYSTEM_TASK:>
Returns the description and output type for a given interpretation.
<END_TASK>
<USER_TASK:>
Description:
def _get_interpretation_description_and_output_type(interpretation, dtype):
"""
Returns the description and output type for a given interpretation.
""" |
type_string = dtype.__name__
name = "%s__%s" % (interpretation, type_string)
if not hasattr(_interpretations_class, name):
raise ValueError("No transform available for type '%s' with interpretation '%s'."
% (type_string, interpretation))
# Need unbound method to get the attributes
func = getattr(_interpretations_class, name)
return func.description, func.output_type |
<SYSTEM_TASK:>
Returns a list of the available interpretations and what they do.
<END_TASK>
<USER_TASK:>
Description:
def _get_embeddable_interpretation_doc(indent = 0):
"""
Returns a list of the available interpretations and what they do.
If indent is specified, then the entire doc string is indented by that amount.
""" |
output_rows = []
# Pull out the doc string and put it in a table.
for name in sorted(dir(_interpretations)):
if name.startswith("_") or "__" not in name:
continue
interpretation, type_str = name.split("__")
func = getattr(_interpretations, name)
output_rows.append("%s (%s type):" % (interpretation, type_str))
output_rows += [(" " + line) for line in _textwrap.dedent(func.__doc__).strip().split("\n")]
output_rows.append("")
return "\n".join(" "*indent + line for line in output_rows) |
<SYSTEM_TASK:>
A function to load a previously saved SentenceSplitter instance.
<END_TASK>
<USER_TASK:>
Description:
def _load_version(cls, unpickler, version):
"""
A function to load a previously saved SentenceSplitter instance.
Parameters
----------
unpickler : GLUnpickler
A GLUnpickler file handler.
version : int
Version number maintained by the class writer.
""" |
state, _exclude, _features = unpickler.load()
features = state['features']
excluded_features = state['excluded_features']
model = cls.__new__(cls)
model._setup()
model.__proxy__.update(state)
model._exclude = _exclude
model._features = _features
return model |
<SYSTEM_TASK:>
Fits the transformer using the given data.
<END_TASK>
<USER_TASK:>
Description:
def fit(self, data):
"""
Fits the transformer using the given data.
""" |
_raise_error_if_not_sframe(data, "data")
fitted_state = {}
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state['features'] = feature_columns
fitted_state['fitted'] = True
self.__proxy__.update(fitted_state)
return self |
<SYSTEM_TASK:>
Transforms short text into a dictionary of TFIDF-weighted 3-gram
<END_TASK>
<USER_TASK:>
Description:
def short_text__str(self, column_name, output_column_prefix):
"""
Transforms short text into a dictionary of TFIDF-weighted 3-gram
character counts.
""" |
from ._ngram_counter import NGramCounter
from ._tfidf import TFIDF
return [NGramCounter(features=[column_name],
n = 3,
method = "character",
output_column_prefix = output_column_prefix),
TFIDF(features=[column_name],
min_document_frequency=0.01,
max_document_frequency=0.5,
output_column_prefix = output_column_prefix)] |
<SYSTEM_TASK:>
Interprets an integer column as a categorical variable.
<END_TASK>
<USER_TASK:>
Description:
def categorical__int(self, column_name, output_column_prefix):
"""
Interprets an integer column as a categorical variable.
""" |
return [_ColumnFunctionTransformation(
features = [column_name],
output_column_prefix = output_column_prefix,
transform_function = lambda col: col.astype(str),
transform_function_name = "astype(str)")] |
<SYSTEM_TASK:>
Sets up the content transforms.
<END_TASK>
<USER_TASK:>
Description:
def _setup_from_data(self, data):
"""
Sets up the content transforms.
""" |
fitted_state = {}
_raise_error_if_not_of_type(data, [_SFrame])
feature_columns = _internal_utils.get_column_names(data, self._exclude, self._features)
if not feature_columns:
raise RuntimeError("No valid feature columns specified in transformation.")
fitted_state["features"] = feature_columns
################################################################################
# Helper functions
def get_valid_interpretations():
return list(n.split("__")[0] for n in dir(_interpretations) if not n.startswith("_"))
################################################################################
# Check input data.
if not isinstance(data, _SFrame):
raise TypeError("`data` parameter must be an SFrame.")
all_col_names = set(feature_columns)
column_interpretations = self._get("column_interpretations").copy()
# Make sure all the interpretations are valid.
for k, v in column_interpretations.items():
if k not in all_col_names:
raise ValueError("Column '%s' in column_interpretations, but not found in `data`." % k)
# Get the automatic column interpretations.
for col_name in feature_columns:
if col_name not in column_interpretations:
n = column_interpretations[col_name] = infer_column_interpretation(data[col_name])
if n.startswith("unknown"):
raise ValueError("Interpretation inference failed on column '%s'; %s"
% (col_name, n[len("unknown"):].strip()))
# Now, build up the feature transforms.
transforms = {}
input_types = {}
output_column_prefix = self._get("output_column_prefix")
assert output_column_prefix is None or type(output_column_prefix) is str
tr_chain = []
for col_name in feature_columns:
in_type = input_types[col_name] = data[col_name].dtype
intr_func = _get_interpretation_function(column_interpretations[col_name], in_type)
tr_list = intr_func(col_name, output_column_prefix)
transforms[col_name] = tr_list
tr_chain += tr_list
fitted_state["transform_chain"] = _TransformerChain(tr_chain)
fitted_state["transforms"] = transforms
fitted_state["input_types"] = input_types
fitted_state["column_interpretations"] = column_interpretations
self.__proxy__.update(fitted_state) |
<SYSTEM_TASK:>
Fits and transforms the SFrame `data` using a fitted model.
<END_TASK>
<USER_TASK:>
Description:
def fit_transform(self, data):
"""
Fits and transforms the SFrame `data` using a fitted model.
Parameters
----------
data : SFrame
The data to be transformed.
Returns
-------
out: SFrame
A transformed SFrame.
See Also
--------
fit, transform
""" |
self._setup_from_data(data)
ret = self.transform_chain.fit_transform(data)
self.__proxy__.update({"fitted" : True})
return ret |
<SYSTEM_TASK:>
Create a new mock object.
<END_TASK>
<USER_TASK:>
Description:
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
""" |
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock |
<SYSTEM_TASK:>
Replace a method, attribute, etc. with a Mock.
<END_TASK>
<USER_TASK:>
Description:
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
""" |
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub) |
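A hedged usage sketch of the record/replay/verify workflow this method participates in (standard mox usage; the bundled module path may differ):
import os
import mox  # or the bundled copy of this module

m = mox.Mox()
m.StubOutWithMock(os.path, 'exists')
os.path.exists('/tmp/some_file').AndReturn(True)  # record the expectation
m.ReplayAll()

assert os.path.exists('/tmp/some_file')           # replayed call returns True
m.VerifyAll()
m.UnsetStubs()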
<SYSTEM_TASK:>
Verify that all of the expected calls have been made.
<END_TASK>
<USER_TASK:>
Description:
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
""" |
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue) |
<SYSTEM_TASK:>
Verify the called method is expected.
<END_TASK>
<USER_TASK:>
Description:
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
""" |
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected |
<SYSTEM_TASK:>
Returns a possible group from the end of the call queue or None if no
<END_TASK>
<USER_TASK:>
Description:
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
""" |
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group |
<SYSTEM_TASK:>
Check to see if the RHS is an instance of class_name.
<END_TASK>
<USER_TASK:>
Description:
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
""" |
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name) |
<SYSTEM_TASK:>
Check to see if RHS is almost equal to float_value
<END_TASK>
<USER_TASK:>
Description:
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
""" |
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False |
<SYSTEM_TASK:>
Check to see whether actual_seq has same elements as expected_seq.
<END_TASK>
<USER_TASK:>
Description:
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
""" |
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual |
<SYSTEM_TASK:>
Checks whether any Comparator is equal to rhs.
<END_TASK>
<USER_TASK:>
Description:
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
""" |
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False |
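A hedged sketch of how these comparators behave, calling equals() directly rather than recording them as expected call arguments (standard mox comparators):
import mox

assert mox.IsA(str).equals('hello')
assert mox.IsAlmost(3.14159, places=3).equals(3.1416)
assert mox.SameElementsAs([3, 1, 2]).equals([1, 2, 3])
assert mox.Or(mox.IsA(int), mox.IsA(float)).equals(2.5)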
<SYSTEM_TASK:>
Return True if all methods in this group are called at least once.
<END_TASK>
<USER_TASK:>
Description:
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once.""" |
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False |
<SYSTEM_TASK:>
Common utilities to set the classifier interface params.
<END_TASK>
<USER_TASK:>
Description:
def set_classifier_interface_params(spec, features, class_labels,
model_accessor_for_class_labels, output_features = None):
"""
Common utilities to set the classifier interface params.
""" |
# Normalize the features list.
features = _fm.process_or_validate_features(features)
if class_labels is None:
raise ValueError("List of class labels must be provided.")
n_classes = len(class_labels)
output_features = _fm.process_or_validate_classifier_output_features(output_features, class_labels)
if len(output_features) == 1:
predicted_class_output, pred_cl_type = output_features[0]
score_output = None
elif len(output_features) == 2:
predicted_class_output, pred_cl_type = output_features[0]
score_output, score_output_type = output_features[1]
else:
raise ValueError("Provided output classes for a classifier must be "
"a list of features, predicted class and (optionally) class_score.")
spec.description.predictedFeatureName = predicted_class_output
# Are they out of order?
if not (pred_cl_type == datatypes.Int64() or pred_cl_type == datatypes.String()):
raise ValueError("Provided predicted class output type not Int64 or String (%s)."
% repr(pred_cl_type))
if score_output is not None:
if not isinstance(score_output_type, datatypes.Dictionary):
raise ValueError("Provided class score output type not a Dictionary (%s)."
% repr(score_output_type))
if score_output_type.key_type != pred_cl_type:
raise ValueError(("Provided class score output (%s) key_type (%s) does not "
"match type of class prediction (%s).")
% (score_output, repr(score_output_type.key_type), repr(pred_cl_type)))
spec.description.predictedProbabilitiesName = score_output
# add input
for index, (cur_input_name, input_type) in enumerate(features):
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, input_type)
# add output
for index, (cur_output_name, output_type) in enumerate(output_features):
output_ = spec.description.output.add()
output_.name = cur_output_name
datatypes._set_datatype(output_.type, output_type)
# Worry about the class labels
if pred_cl_type == datatypes.String():
try:
for c in class_labels:
getattr(spec, model_accessor_for_class_labels).stringClassLabels.vector.append(str(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
pass
else:
for c in class_labels:
conv_error = False
try:
if not (int(c) == c):
conv_error = True
except:
conv_error = True
if conv_error:
raise TypeError(("Cannot cast '%s' class to an int type " % str(c))
+ "(class type determined by type of first class).")
try:
getattr(spec, model_accessor_for_class_labels).int64ClassLabels.vector.append(int(c))
# Not all the classifiers have class labels; in particular the pipeline
# classifier. Thus it's not an error if we can't actually set them.
except AttributeError:
break
# And we are done!
return spec |
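A hedged usage sketch; the module path for this helper and the choice of 'neuralNetworkClassifier' as the class-label accessor are assumptions based on how the coremltools spec protos are laid out:
from coremltools.proto import Model_pb2
from coremltools.models import datatypes
# Assumed location of this helper inside coremltools:
from coremltools.models._interface_management import set_classifier_interface_params

spec = Model_pb2.Model()
spec.specificationVersion = 1
set_classifier_interface_params(
    spec,
    features=[('input', datatypes.Array(4))],
    class_labels=['cat', 'dog'],
    model_accessor_for_class_labels='neuralNetworkClassifier')
print(spec.description)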
<SYSTEM_TASK:>
Common utilities to set the regressor interface params.
<END_TASK>
<USER_TASK:>
Description:
def set_regressor_interface_params(spec, features, output_features):
""" Common utilities to set the regressor interface params.
""" |
if output_features is None:
output_features = [("predicted_class", datatypes.Double())]
else:
output_features = _fm.process_or_validate_features(output_features, 1)
if len(output_features) != 1:
raise ValueError("Provided output features for a regressor must be "
"one Double feature.")
if output_features[0][1] != datatypes.Double():
raise ValueError("Output type of a regressor must be a Double.")
prediction_name = output_features[0][0]
spec.description.predictedFeatureName = prediction_name
# Normalize the features list.
features = _fm.process_or_validate_features(features)
# add input and output features
for cur_input_name, feature_type in features:
input_ = spec.description.input.add()
input_.name = cur_input_name
datatypes._set_datatype(input_.type, feature_type)
output_ = spec.description.output.add()
output_.name = prediction_name
datatypes._set_datatype(output_.type, 'Double')
return spec |
<SYSTEM_TASK:>
Common utilities to set transform interface params.
<END_TASK>
<USER_TASK:>
Description:
def set_transform_interface_params(spec, input_features, output_features, are_optional = False):
""" Common utilities to set transform interface params.
""" |
input_features = _fm.process_or_validate_features(input_features)
output_features = _fm.process_or_validate_features(output_features)
# Add input and output features
for (fname, ftype) in input_features:
input_ = spec.description.input.add()
input_.name = fname
datatypes._set_datatype(input_.type, ftype)
if are_optional:
input_.type.isOptional = are_optional
for (fname, ftype) in output_features:
output_ = spec.description.output.add()
output_.name = fname
datatypes._set_datatype(output_.type, ftype)
return spec |
<SYSTEM_TASK:>
Loads into numpy array from SFrame, assuming SFrame stores data flattened
<END_TASK>
<USER_TASK:>
Description:
def _load_into_numpy(sf, np_array, start, end, strides=None, shape=None):
"""Loads into numpy array from SFrame, assuming SFrame stores data flattened""" |
np_array[:] = 0.0
np_array_2d = np_array.reshape((np_array.shape[0], np_array.shape[1] * np_array.shape[2]))
_extensions.sframe_load_to_numpy(sf, np_array.ctypes.data,
np_array_2d.strides, np_array_2d.shape,
start, end) |
<SYSTEM_TASK:>
Set the inputs of the network spec.
<END_TASK>
<USER_TASK:>
Description:
def set_input(self, input_names, input_dims):
"""
Set the inputs of the network spec.
Parameters
----------
input_names: [str]
List of input names of the network.
input_dims: [tuple]
List of input dimensions of the network. The ordering of input_dims
is the same as input_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec inputs to be 3 dimensional vector data1 and
# 4 dimensional vector data2.
>>> builder.set_input(input_names=['data1', 'data2'], input_dims=[(3,), (4,)])
See Also
--------
set_output, set_class_labels
""" |
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(input_dims):
if len(dim) == 3:
input_shape = (dim[0], dim[1], dim[2])
elif len(dim) == 2:
input_shape = (dim[1], )
elif len(dim) == 1:
input_shape = tuple(dim)
else:
raise RuntimeError("Attempting to add a neural network " +
"input with rank " + str(len(dim)) +
". All networks should take inputs of rank 1 or 3.")
spec.description.input[idx].type.multiArrayType.ClearField("shape")
spec.description.input[idx].type.multiArrayType.shape.extend(input_shape)
# TODO: if it's an embedding, this should be integer
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE |
<SYSTEM_TASK:>
Set the outputs of the network spec.
<END_TASK>
<USER_TASK:>
Description:
def set_output(self, output_names, output_dims):
"""
Set the outputs of the network spec.
Parameters
----------
output_names: [str]
List of output names of the network.
output_dims: [tuple]
List of output dimensions of the network. The ordering of output_dims is the same
as output_names.
Examples
--------
.. sourcecode:: python
# Set the neural network spec outputs to be 3 dimensional vector feature1 and
# 4 dimensional vector feature2.
>>> builder.set_output(output_names=['feature1', 'feature2'], output_dims=[(3,), (4,)])
See Also
--------
set_input, set_class_labels
""" |
spec = self.spec
nn_spec = self.nn_spec
for idx, dim in enumerate(output_dims):
spec.description.output[idx].type.multiArrayType.ClearField("shape")
spec.description.output[idx].type.multiArrayType.shape.extend(dim)
spec.description.output[idx].type.multiArrayType.dataType = \
_Model_pb2.ArrayFeatureType.DOUBLE |
<SYSTEM_TASK:>
Set class labels to the model spec to make it a neural network classifier.
<END_TASK>
<USER_TASK:>
Description:
def set_class_labels(self, class_labels, predicted_feature_name = 'classLabel', prediction_blob = ''):
"""
Set class labels to the model spec to make it a neural network classifier.
Parameters
----------
class_labels: list[int or str]
A list of integers or strings that map the index of the output of a
neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the
Core ML neural network classifier. Defaults to 'classLabel'.
prediction_blob: str
If provided, then this is the name of the neural network blob which
generates the probabilities for each class label (typically the output
of a softmax layer). If not provided, then the last output layer is
assumed.
See Also
--------
set_input, set_output, set_pre_processing_parameters
""" |
spec = self.spec
nn_spec = self.nn_spec
if len(spec.description.output) == 0:
raise ValueError(
"Model should have at least one output (the probabilities) to automatically make it a classifier.")
probOutput = spec.description.output[0]
probOutput.type.dictionaryType.MergeFromString(b'')
if len(class_labels) == 0:
return
class_type = type(class_labels[0])
if class_type not in [int, str]:
raise TypeError("Class labels must be of type Integer or String. (not %s)" % class_type)
spec.description.predictedProbabilitiesName = probOutput.name
spec.description.predictedFeatureName = predicted_feature_name
classLabel = spec.description.output.add()
classLabel.name = predicted_feature_name
if class_type == int:
nn_spec.ClearField('int64ClassLabels')
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
classLabel.type.int64Type.MergeFromString(b'')
for c in class_labels:
nn_spec.int64ClassLabels.vector.append(c)
else:
nn_spec.ClearField('stringClassLabels')
probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
classLabel.type.stringType.MergeFromString(b'')
for c in class_labels:
nn_spec.stringClassLabels.vector.append(c)
if prediction_blob != '':
# correctness here will be checked in the validator -- i.e. to
# make sure this string corresponds to a real blob
nn_spec.labelProbabilityLayerName = prediction_blob
else: #not provided
# assume it's the last blob produced in the network
nn_spec.labelProbabilityLayerName = nn_spec.layers[-1].output[0] |
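A short usage sketch for set_class_labels, assuming these methods are reached through coremltools.models.neural_network.NeuralNetworkBuilder constructed with mode='classifier' (layer, feature and label names below are illustrative):
import numpy as np
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# One fully connected layer followed by a softmax producing the class probabilities.
builder = NeuralNetworkBuilder([('data', datatypes.Array(4))],
                               [('probs', datatypes.Array(3))],
                               mode='classifier')
W = np.random.rand(3, 4)          # (output_channels, input_channels)
b = np.zeros(3)
builder.add_inner_product(name='fc', W=W, b=b, input_channels=4,
                          output_channels=3, has_bias=True,
                          input_name='data', output_name='fc_out')
builder.add_softmax(name='softmax', input_name='fc_out', output_name='probs')

# Turn the network into a classifier; 'probs' is the blob holding the probabilities.
builder.set_class_labels(['cat', 'dog', 'mouse'],
                         predicted_feature_name='classLabel',
                         prediction_blob='probs')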
<SYSTEM_TASK:>
Add optional inputs and outputs to the model spec.
<END_TASK>
<USER_TASK:>
Description:
def add_optionals(self, optionals_in, optionals_out):
"""
Add optional inputs and outputs to the model spec.
Parameters
----------
optionals_in: [str]
List of inputs that are optionals.
optionals_out: [str]
List of outputs that are optionals.
See Also
--------
set_input, set_output
""" |
spec = self.spec
if (not optionals_in) and (not optionals_out):
return
# assuming single sizes here
input_types = [datatypes.Array(dim) for (name, dim) in optionals_in]
output_types = [datatypes.Array(dim) for (name, dim) in optionals_out]
input_names = [str(name) for (name, dim) in optionals_in]
output_names = [str(name) for (name, dim) in optionals_out]
input_features = list(zip(input_names, input_types))
output_features = list(zip(output_names, output_types))
len_before_in = len(spec.description.input)
len_before_out = len(spec.description.output)
# this appends to the existing model interface
set_transform_interface_params(spec, input_features, output_features, True)
# add types for any extra hidden inputs
for idx in range(len_before_in, len(spec.description.input)):
spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
for idx in range(len_before_out, len(spec.description.output)):
spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE |
<SYSTEM_TASK:>
Add an embedding layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_embedding(self, name, W, b, input_dim, output_channels, has_bias,
input_name, output_name):
"""
Add an embedding layer to the model.
Parameters
----------
name: str
The name of this layer
W: numpy.array
Weight matrix of shape (output_channels, input_dim).
b: numpy.array
Bias vector of shape (output_channels, ).
input_dim: int
Size of the vocabulary (1 + maximum integer index of the words).
output_channels: int
Number of output channels.
has_bias: boolean
Whether this layer uses a bias vector.
- If True, the bias vector b is included in the spec.
- If False, the bias vector is ignored.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_inner_product
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
# Fill in the parameters
spec_layer_params = spec_layer.embedding
spec_layer_params.inputDim = input_dim
spec_layer_params.outputChannels = output_channels
spec_layer_params.hasBias = has_bias
weights = spec_layer_params.weights
weights.floatValue.extend(map(float, W.flatten()))
if has_bias:
bias = spec_layer_params.bias
bias.floatValue.extend(map(float, b.flatten())) |
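A minimal sketch of add_embedding using the same NeuralNetworkBuilder interface (vocabulary size, dimensions and names are illustrative):
import numpy as np
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

vocab_size, embed_dim = 100, 8
builder = NeuralNetworkBuilder([('token', datatypes.Array(1))],
                               [('vector', datatypes.Array(embed_dim))])
# Weight matrix has shape (output_channels, input_dim); no bias is used here.
W = np.random.rand(embed_dim, vocab_size)
builder.add_embedding(name='embed', W=W, b=None, input_dim=vocab_size,
                      output_channels=embed_dim, has_bias=False,
                      input_name='token', output_name='vector')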
<SYSTEM_TASK:>
Add a softmax layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_softmax(self, name, input_name, output_name):
"""
Add a softmax layer to the model.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_activation, add_inner_product, add_convolution
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.softmax.MergeFromString(b'') |
<SYSTEM_TASK:>
Add an activation layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_activation(self, name, non_linearity, input_name, output_name,
params=None):
"""
Add an activation layer to the model.
Parameters
----------
name: str
The name of this layer
non_linearity: str
The non_linearity (activation) function of this layer.
It can be one of the following:
- 'RELU': Rectified Linear Unit (ReLU) function.
- 'SIGMOID': sigmoid function.
- 'TANH': tanh function.
- 'SCALED_TANH': scaled tanh function, defined as:
`f(x) = alpha * tanh(beta * x)`
where alpha and beta are constant scalars.
- 'SOFTPLUS': softplus function.
- 'SOFTSIGN': softsign function.
- 'SIGMOID_HARD': hard sigmoid function, defined as:
`f(x) = min(max(alpha * x + beta, -1), 1)`
where alpha and beta are constant scalars.
- 'LEAKYRELU': leaky relu function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a constant scalar.
- 'PRELU': Parametric ReLU function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * alpha * x`
where alpha is a multi-dimensional array of same size as x.
- 'ELU': Exponential linear unit function, defined as:
`f(x) = (x >= 0) * x + (x < 0) * (alpha * exp(x) - 1)`
where alpha is a constant scalar.
- 'PARAMETRICSOFTPLUS': Parametric softplus function, defined as:
`f(x) = alpha * log(1 + exp(beta * x))`
where alpha and beta are two multi-dimensional arrays of same size as x.
- 'THRESHOLDEDRELU': Thresholded ReLU function, defined as:
`f(x) = (x >= alpha) * x`
where alpha is a constant scalar.
- 'LINEAR': linear function.
`f(x) = alpha * x + beta`
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
params: [float] | [numpy.array]
Parameters for the activation, depending on non_linearity. Kindly refer to NeuralNetwork.proto for details.
- When non_linearity is one of ['RELU', 'SIGMOID', 'TANH', 'SOFTPLUS', 'SOFTSIGN'], params is ignored.
- When non_linearity is one of ['SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'], param is a list of 2 floats
[alpha, beta].
- When non_linearity is one of ['LEAKYRELU', 'ELU', 'THRESHOLDEDRELU'], param is a list of 1 float
[alpha].
- When non_linearity is 'PRELU', param is a list of 1 numpy array [alpha]. The shape of
alpha is (C,), where C is either the number of input channels or
1. When C = 1, same alpha is applied to all channels.
- When non_linearity is 'PARAMETRICSOFTPLUS', param is a list of 2 numpy arrays [alpha,
beta]. The shape of alpha and beta is (C, ), where C is either
the number of input channels or 1. When C = 1, same alpha and
beta are applied to all channels.
See Also
--------
add_convolution, add_softmax
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.activation
# Fill in the parameters
if non_linearity == 'RELU':
spec_layer_params.ReLU.MergeFromString(b'')
elif non_linearity == 'SIGMOID':
spec_layer_params.sigmoid.MergeFromString(b'')
elif non_linearity == 'TANH':
spec_layer_params.tanh.MergeFromString(b'')
elif non_linearity == 'SCALED_TANH':
spec_layer_params.scaledTanh.MergeFromString(b'')
if params is None:
alpha, beta = (0.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.scaledTanh.alpha = alpha
spec_layer_params.scaledTanh.beta = beta
elif non_linearity == 'SOFTPLUS':
spec_layer_params.softplus.MergeFromString(b'')
elif non_linearity == 'SOFTSIGN':
spec_layer_params.softsign.MergeFromString(b'')
elif non_linearity == 'SIGMOID_HARD':
if params is None:
alpha, beta = (0.2, 0.5)
else:
alpha, beta = params[0], params[1]
spec_layer_params.sigmoidHard.alpha = alpha
spec_layer_params.sigmoidHard.beta = beta
elif non_linearity == 'LEAKYRELU':
if params is None:
alpha = 0.3
else:
alpha = params[0]
spec_layer_params.leakyReLU.alpha = float(alpha)
elif non_linearity == 'PRELU':
# PReLU must provide an np array in params[0]
spec_layer_params.PReLU.alpha.floatValue.extend(map(float, params.flatten()))
elif non_linearity == 'ELU':
# ELU must provide an alpha in params[0]
spec_layer_params.ELU.alpha = float(params)
elif non_linearity == 'PARAMETRICSOFTPLUS':
# Parametric softplus must provide two np arrays for alpha and beta
alphas, betas = (params[0], params[1])
# Flatten both parameter arrays and store them as float vectors.
spec_layer_params.parametricSoftplus.alpha.floatValue.extend(map(float, alphas.flatten()))
spec_layer_params.parametricSoftplus.beta.floatValue.extend(map(float, betas.flatten()))
elif non_linearity == 'THRESHOLDEDRELU':
if params is None:
theta = 1.0
else:
theta = params
spec_layer_params.thresholdedReLU.alpha = float(theta)
elif non_linearity == 'LINEAR':
if params is None:
alpha, beta = (1.0, 0.0)
else:
alpha, beta = params[0], params[1]
spec_layer_params.linear.alpha = alpha
spec_layer_params.linear.beta = beta
else:
raise TypeError("Unknown activation type %s." %(non_linearity)) |
<SYSTEM_TASK:>
Add an element-wise operation layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_elementwise(self, name, input_names, output_name, mode, alpha = None):
"""
Add an element-wise operation layer to the model.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
A list of input blob names of this layer. The input blobs should have the same shape.
output_name: str
The output blob name of this layer.
mode: str
A string specifying the mode of the elementwise layer. It can be one of the following:
- 'CONCAT': concatenate input blobs along the channel axis.
- 'SEQUENCE_CONCAT': concatenate input blobs along the sequence axis.
- 'ADD': perform an element-wise summation over the input blobs.
- 'MULTIPLY': perform an element-wise multiplication over the input blobs.
- 'DOT': compute the dot product of the two input blobs. In this mode, the length of input_names should be 2.
- 'COS': compute the cosine similarity of the two input blobs. In this mode, the length of input_names should be 2.
- 'MAX': compute the element-wise maximum over the input blobs.
- 'MIN': compute the element-wise minimum over the input blobs.
- 'AVE': compute the element-wise average over the input blobs.
alpha: float
if mode == 'ADD' and there is only one input_name, alpha is added to the input
if mode == 'MULTIPLY' and there is only one input_name, the input is multiplied by alpha
See Also
--------
add_upsample, add_sequence_repeat
""" |
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
if isinstance(input_names, list):
for input_name in input_names:
spec_layer.input.append(input_name)
else:
spec_layer.input.append(input_names)
spec_layer.output.append(output_name)
## Add the following layers.
if mode == 'CONCAT':
spec_layer.concat.sequenceConcat = False
elif mode == 'SEQUENCE_CONCAT':
spec_layer.concat.sequenceConcat = True
elif mode == 'ADD':
spec_layer.add.MergeFromString(b'')
if alpha:
spec_layer.add.alpha = alpha
elif mode == 'MULTIPLY':
spec_layer.multiply.MergeFromString(b'')
if alpha:
spec_layer.multiply.alpha = alpha
elif mode == 'COS':
spec_layer.dot.cosineSimilarity = True
elif mode == 'DOT':
spec_layer.dot.cosineSimilarity = False
elif mode == 'MAX':
spec_layer.max.MergeFromString(b'')
elif mode == 'MIN':
spec_layer.min.MergeFromString(b'')
elif mode == 'AVE':
spec_layer.average.MergeFromString(b'')
else:
raise ValueError("Unsupported elementwise mode %s" % mode) |
<SYSTEM_TASK:>
Add upsample layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_upsample(self, name, scaling_factor_h, scaling_factor_w, input_name, output_name, mode = 'NN'):
"""
Add upsample layer to the model.
Parameters
----------
name: str
The name of this layer.
scaling_factor_h: int
Scaling factor on the vertical direction.
scaling_factor_w: int
Scaling factor on the horizontal direction.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
The following values are supported:
'NN': nearest neighbour
'BILINEAR' : bilinear interpolation
See Also
--------
add_sequence_repeat, add_elementwise
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new inner-product layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.upsample
spec_layer_params.scalingFactor.append(scaling_factor_h)
spec_layer_params.scalingFactor.append(scaling_factor_w)
if mode == 'NN':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('NN')
elif mode == 'BILINEAR':
spec_layer_params.mode = _NeuralNetwork_pb2.UpsampleLayerParams.InterpolationMode.Value('BILINEAR')
else:
raise ValueError("Unsupported upsampling mode %s" % mode) |
<SYSTEM_TASK:>
Add scale layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_scale(self, name, W, b, has_bias, input_name, output_name, shape_scale = [1], shape_bias = [1]):
"""
Add scale layer to the model.
Parameters
----------
name: str
The name of this layer.
W: int | numpy.array
Scale of the input.
b: int | numpy.array
Bias to add to the input.
has_bias: boolean
Whether the bias vector of this layer is ignored in the spec.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_scale: [int]
List of ints that specifies the shape of the scale parameter. Can be [1] or [C] or [1,H,W] or [C,H,W].
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_bias
""" |
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.scale
spec_layer_params.hasBias = has_bias
#add scale and its shape
scale = spec_layer_params.scale
spec_layer_params.shapeScale.extend(shape_scale)
if isinstance(W, int):
scale.floatValue.append(float(W))
else:
scale.floatValue.extend(map(float, W.flatten()))
if len(scale.floatValue) != np.prod(shape_scale):
raise ValueError("Dimensions of 'shape_scale' do not match the size of the provided 'scale' parameter")
#add bias and its shape
if has_bias:
bias = spec_layer_params.bias
spec_layer_params.shapeBias.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") |
<SYSTEM_TASK:>
Add bias layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_bias(self, name, b, input_name, output_name, shape_bias = [1]):
"""
Add bias layer to the model.
Parameters
----------
name: str
The name of this layer.
b: int | numpy.array
Bias to add to the input.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
shape_bias: [int]
List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W].
See Also
--------
add_scale
""" |
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.bias
#add bias and its shape
bias = spec_layer_params.bias
spec_layer_params.shape.extend(shape_bias)
if isinstance(b, int):
bias.floatValue.append(float(b))
else:
bias.floatValue.extend(map(float, b.flatten()))
if len(bias.floatValue) != np.prod(shape_bias):
raise ValueError("Dimensions of 'shape_bias' do not match the size of the provided 'b' parameter") |
<SYSTEM_TASK:>
Add sequence repeat layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_sequence_repeat(self, name, nrep, input_name, output_name):
"""
Add sequence repeat layer to the model.
Parameters
----------
name: str
The name of this layer.
nrep: int
Number of repetitions of the input blob along the sequence axis.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_upsample, add_elementwise
""" |
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.sequenceRepeat
spec_layer_params.nRepetitions = nrep |
<SYSTEM_TASK:>
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
<END_TASK>
<USER_TASK:>
Description:
def add_padding(self, name,
left = 0, right = 0, top = 0, bottom = 0,
value = 0,
input_name = 'data', output_name = 'out',
padding_type = 'constant'):
"""
Add a padding layer to the model. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
left: int
Number of elements to be padded on the left side of the input blob.
right: int
Number of elements to be padded on the right side of the input blob.
top: int
Number of elements to be padded on the top of the input blob.
bottom: int
Number of elements to be padded on the bottom of the input blob.
value: float
Value of the elements padded. Used only when padding_type = 'constant'
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
padding_type: str
Type of the padding. Can be one of 'constant', 'reflection' or 'replication'
See Also
--------
add_crop, add_convolution, add_pooling
""" |
# Padding type can be 'constant', 'reflection' or 'replication'.
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.padding
# Set the parameters
if padding_type == 'constant':
spec_layer_params.constant.value = value
elif padding_type == 'reflection':
spec_layer_params.reflection.MergeFromString(b'')
elif padding_type == 'replication':
spec_layer_params.replication.MergeFromString(b'')
else:
raise ValueError("Unknown padding_type %s" %(padding_type))
height_border = spec_layer_params.paddingAmounts.borderAmounts.add()
height_border.startEdgeSize = top
height_border.endEdgeSize = bottom
width_border = spec_layer_params.paddingAmounts.borderAmounts.add()
width_border.startEdgeSize = left
width_border.endEdgeSize = right |
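A short sketch of constant padding with the builder interface above (shapes and names are illustrative):
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# Zero-pad a 3x32x32 input by 2 pixels on every side, giving 3x36x36.
builder = NeuralNetworkBuilder([('image', datatypes.Array(3, 32, 32))],
                               [('padded', datatypes.Array(3, 36, 36))])
builder.add_padding(name='pad', left=2, right=2, top=2, bottom=2, value=0.0,
                    input_name='image', output_name='padded',
                    padding_type='constant')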
<SYSTEM_TASK:>
Add a simple recurrent layer to the model.
<END_TASK>
<USER_TASK:>
Description:
def add_simple_rnn(self, name, W_h, W_x, b, hidden_size, input_size, activation, input_names, output_names, output_all = False, reverse_input = False):
"""
Add a simple recurrent layer to the model.
Parameters
----------
name: str
The name of this layer.
W_h: numpy.array
Weights of the recurrent layer's hidden state. Must be of shape (hidden_size, hidden_size).
W_x: numpy.array
Weights of the recurrent layer's input. Must be of shape (hidden_size, input_size).
b: numpy.array | None
Bias of the recurrent layer's output. If None, bias is ignored. Otherwise it must be of shape (hidden_size, ).
hidden_size: int
Number of hidden units. This is equal to the number of channels of output shape.
input_size: int
Number of channels of the input shape.
activation: str
Activation function name. Can be one of the following options:
['RELU', 'TANH', 'SIGMOID', 'SCALED_TANH', 'SIGMOID_HARD', 'LINEAR'].
See add_activation for more detailed description.
input_names: [str]
The input blob name list of this layer, in the order of [x, h_input].
output_names: [str]
The output blob name list of this layer, in the order of [y, h_output].
output_all: boolean
Whether the recurrent layer should output at every time step.
- If False, the output is the result after the final state update.
- If True, the output is a sequence, containing outputs at all time steps.
reverse_input: boolean
Whether the recurrent layer should process the input sequence in the reverse order.
- If False, the input sequence order is not reversed.
- If True, the input sequence order is reversed.
See Also
--------
add_activation, add_gru, add_unilstm, add_bidirlstm
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new Layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for name in input_names:
spec_layer.input.append(name)
for name in output_names:
spec_layer.output.append(name)
spec_layer_params = spec_layer.simpleRecurrent
spec_layer_params.reverseInput = reverse_input
#set the parameters
spec_layer_params.inputVectorSize = input_size
spec_layer_params.outputVectorSize = hidden_size
if b is not None:
spec_layer_params.hasBiasVector = True
spec_layer_params.sequenceOutput = output_all
activation_f = spec_layer_params.activation
_set_recurrent_activation(activation_f, activation)
# Write the weights
spec_layer_params.weightMatrix.floatValue.extend(map(float, W_x.flatten()))
spec_layer_params.recursionMatrix.floatValue.extend(map(float, W_h.flatten()))
if b is not None:
spec_layer_params.biasVector.floatValue.extend(map(float, b.flatten())) |
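A hedged sketch wiring a simple RNN through the builder interface above (all sizes and names are illustrative):
import numpy as np
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

input_size, hidden_size = 4, 8
builder = NeuralNetworkBuilder(
    [('x', datatypes.Array(input_size)), ('h_in', datatypes.Array(hidden_size))],
    [('y', datatypes.Array(hidden_size)), ('h_out', datatypes.Array(hidden_size))])
W_x = np.random.rand(hidden_size, input_size)    # input weights
W_h = np.random.rand(hidden_size, hidden_size)   # recurrent weights
b = np.zeros(hidden_size)
builder.add_simple_rnn(name='rnn', W_h=W_h, W_x=W_x, b=b,
                       hidden_size=hidden_size, input_size=input_size,
                       activation='TANH',
                       input_names=['x', 'h_in'], output_names=['y', 'h_out'],
                       output_all=False, reverse_input=False)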
<SYSTEM_TASK:>
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
<END_TASK>
<USER_TASK:>
Description:
def add_flatten(self, name, mode, input_name, output_name):
"""
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is.
Parameters
----------
name: str
The name of this layer.
mode: int
- If mode == 0, the flatten layer is in CHANNEL_FIRST mode.
- If mode == 1, the flatten layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_permute, add_reshape
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.flatten
# Set the parameters
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST')
elif mode == 1:
spec_layer_params.mode = \
_NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST')
else:
raise NotImplementedError(
'Unknown flatten mode %d ' % mode) |
<SYSTEM_TASK:>
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
<END_TASK>
<USER_TASK:>
Description:
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2):
"""
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
mode: str
- If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension.
Input is spatially divided into non-overlapping blocks of size block_size X block_size
and data from each block is moved to the channel dimension.
Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size].
- If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension.
Reverse of the operation 'SPACE_TO_DEPTH'.
Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size].
block_size: int
Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size)
must divide C when mode is 'DEPTH_TO_SPACE'.
See Also
--------
add_flatten, add_reshape
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reorganizeData
# Set the parameters
if block_size < 2:
raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
spec_layer_params.blockSize = block_size
if mode == 'SPACE_TO_DEPTH':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('SPACE_TO_DEPTH')
elif mode == 'DEPTH_TO_SPACE':
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType.Value('DEPTH_TO_SPACE')
else:
raise NotImplementedError(
'Unknown reorganization mode %s ' % mode) |
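A small sketch of add_reorganize_data with the shape arithmetic spelled out (shapes and names are illustrative):
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# SPACE_TO_DEPTH with block_size=2 maps (C, H, W) = (1, 4, 4)
# to (1 * 2 * 2, 4 / 2, 4 / 2) = (4, 2, 2).
builder = NeuralNetworkBuilder([('x', datatypes.Array(1, 4, 4))],
                               [('y', datatypes.Array(4, 2, 2))])
builder.add_reorganize_data(name='s2d', input_name='x', output_name='y',
                            mode='SPACE_TO_DEPTH', block_size=2)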
<SYSTEM_TASK:>
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.
<END_TASK>
<USER_TASK:>
Description:
def add_reshape(self, name, input_name, output_name, target_shape, mode):
"""
Add a reshape layer. Kindly refer to NeuralNetwork.proto for details.
Parameters
----------
name: str
The name of this layer.
target_shape: tuple
Shape of the output blob. The product of target_shape must be equal
to the shape of the input blob.
Can be either length 3 (C,H,W) or length 4 (Seq,C,H,W).
mode: int
- If mode == 0, the reshape layer is in CHANNEL_FIRST mode.
- If mode == 1, the reshape layer is in CHANNEL_LAST mode.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
See Also
--------
add_flatten, add_permute
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.reshape
spec_layer_params.targetShape.extend(target_shape)
if mode == 0:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_FIRST')
else:
spec_layer_params.mode = \
_NeuralNetwork_pb2.ReshapeLayerParams.ReshapeOrder.Value('CHANNEL_LAST')
if len(target_shape) != 4 and len(target_shape) != 3:
raise ValueError("Length of the 'target-shape' parameter must be equal to 3 or 4") |
<SYSTEM_TASK:>
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
<END_TASK>
<USER_TASK:>
Description:
def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
square root of the sum of squares of all elements of the input along the C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon |
<SYSTEM_TASK:>
Add a Split layer that uniformly splits the input along the channel dimension
<END_TASK>
<USER_TASK:>
Description:
def add_split(self, name, input_name, output_names):
"""
Add a Split layer that uniformly splits the input along the channel dimension
to produce multiple outputs.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_names: [str]
List of output blob names of this layer.
See Also
--------
add_elementwise
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.extend(output_names)
spec_layer_params = spec_layer.split
spec_layer_params.nOutputs = len(output_names) |
<SYSTEM_TASK:>
Add a load constant layer.
<END_TASK>
<USER_TASK:>
Description:
def add_load_constant(self, name, output_name, constant_value, shape):
"""
Add a load constant layer.
Parameters
----------
name: str
The name of this layer.
output_name: str
The output blob name of this layer.
constant_value: numpy.array
value of the constant as a numpy array.
shape: [int]
List of ints representing the shape of the constant. Must be of length 3: [C,H,W]
See Also
--------
add_elementwise
""" |
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.loadConstant
data = spec_layer_params.data
data.floatValue.extend(map(float, constant_value.flatten()))
spec_layer_params.shape.extend(shape)
if len(data.floatValue) != np.prod(shape):
raise ValueError("Dimensions of 'shape' do not match the size of the provided constant")
if len(shape) != 3:
raise ValueError("'shape' must be of length 3") |
<SYSTEM_TASK:>
Add a custom layer.
<END_TASK>
<USER_TASK:>
Description:
def add_custom(self, name, input_names, output_names, custom_proto_spec = None):
"""
Add a custom layer.
Parameters
----------
name: str
The name of this layer.
input_names: [str]
The input blob names to this layer.
output_names: [str]
The output blob names from this layer.
custom_proto_spec: CustomLayerParams
A protobuf CustomLayerParams message. This can also be left blank and filled in later.
""" |
spec = self.spec
nn_spec = self.nn_spec
# custom layers require a newer specification version
from coremltools import _MINIMUM_CUSTOM_LAYER_SPEC_VERSION
spec.specificationVersion = max(spec.specificationVersion, _MINIMUM_CUSTOM_LAYER_SPEC_VERSION)
spec_layer = nn_spec.layers.add()
spec_layer.name = name
for inname in input_names:
spec_layer.input.append(inname)
for outname in output_names:
spec_layer.output.append(outname)
# Have to do it this way since I can't just assign custom in a layer
spec_layer.custom.MergeFromString(b'')
if custom_proto_spec:
spec_layer.custom.CopyFrom(custom_proto_spec) |
<SYSTEM_TASK:>
Add pre-processing parameters to the neural network object
<END_TASK>
<USER_TASK:>
Description:
def set_pre_processing_parameters(self, image_input_names = [], is_bgr = False,
red_bias = 0.0, green_bias = 0.0, blue_bias = 0.0, gray_bias = 0.0, image_scale = 1.0):
"""Add pre-processing parameters to the neural network object
Parameters
----------
image_input_names: [str]
Name of input blobs that are images
is_bgr: boolean | dict()
Channel order for input blobs that are images. BGR if True else RGB.
To specify a different value for each image input,
provide a dictionary with input names as keys.
red_bias: float | dict()
Image re-centering parameter (red channel)
blue_bias: float | dict()
Image re-centering parameter (blue channel)
green_bias: float | dict()
Image re-centering parameter (green channel)
gray_bias: float | dict()
Image re-centering parameter (for grayscale images)
image_scale: float | dict()
Value by which to scale the images.
See Also
--------
set_input, set_output, set_class_labels
""" |
spec = self.spec
if not image_input_names:
return # nothing to do here
if not isinstance(is_bgr, dict): is_bgr = dict.fromkeys(image_input_names, is_bgr)
if not isinstance(red_bias, dict): red_bias = dict.fromkeys(image_input_names, red_bias)
if not isinstance(blue_bias, dict): blue_bias = dict.fromkeys(image_input_names, blue_bias)
if not isinstance(green_bias, dict): green_bias = dict.fromkeys(image_input_names, green_bias)
if not isinstance(gray_bias, dict): gray_bias = dict.fromkeys(image_input_names, gray_bias)
if not isinstance(image_scale, dict): image_scale = dict.fromkeys(image_input_names, image_scale)
# Add image inputs
for input_ in spec.description.input:
if input_.name in image_input_names:
if input_.type.WhichOneof('Type') == 'multiArrayType':
array_shape = tuple(input_.type.multiArrayType.shape)
channels, height, width = array_shape
if channels == 1:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
elif channels == 3:
if input_.name in is_bgr:
if is_bgr[input_.name]:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
input_.type.imageType.colorSpace = _FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('RGB')
else:
raise ValueError("Channel Value %d not supported for image inputs" % channels)
input_.type.imageType.width = width
input_.type.imageType.height = height
preprocessing = self.nn_spec.preprocessing.add()
preprocessing.featureName = input_.name
scaler = preprocessing.scaler
if input_.name in image_scale:
scaler.channelScale = image_scale[input_.name]
else:
scaler.channelScale = 1.0
if input_.name in red_bias: scaler.redBias = red_bias[input_.name]
if input_.name in blue_bias: scaler.blueBias = blue_bias[input_.name]
if input_.name in green_bias: scaler.greenBias = green_bias[input_.name]
if input_.name in gray_bias: scaler.grayBias = gray_bias[input_.name] |
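A hedged sketch of set_pre_processing_parameters on a single image input (bias and scale values are illustrative, not recommendations):
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

# Declare a (C, H, W) multi-array input, pass it through a trivial linear layer,
# then mark it as a 224x224 RGB image scaled to [0, 1] and re-centered per channel.
builder = NeuralNetworkBuilder([('image', datatypes.Array(3, 224, 224))],
                               [('features', datatypes.Array(3, 224, 224))])
builder.add_activation(name='identity', non_linearity='LINEAR',
                       input_name='image', output_name='features',
                       params=[1.0, 0.0])
builder.set_pre_processing_parameters(image_input_names=['image'], is_bgr=False,
                                      red_bias=-0.485, green_bias=-0.456,
                                      blue_bias=-0.406, image_scale=1.0 / 255.0)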
<SYSTEM_TASK:>
Returns an instance of previously registered scanner
<END_TASK>
<USER_TASK:>
Description:
def get(scanner_class, properties):
""" Returns an instance of previously registered scanner
with the specified properties.
""" |
assert issubclass(scanner_class, Scanner)
assert is_iterable_typed(properties, basestring)
scanner_name = str(scanner_class)
if not registered(scanner_name):
raise BaseException("attempt to get unregistered scanner: %s" % scanner_name)
relevant_properties = __scanners[scanner_name]
r = property.select(relevant_properties, properties)
scanner_id = scanner_name + '.' + '-'.join(r)
if scanner_id not in __scanner_cache:
__scanner_cache[scanner_id] = scanner_class(r)
return __scanner_cache[scanner_id] |
<SYSTEM_TASK:>
Ensure de-pickler imports any package child-modules that
<END_TASK>
<USER_TASK:>
Description:
def _save_subimports(self, code, top_level_dependencies):
"""
Ensure de-pickler imports any package child-modules that
are needed by the function
""" |
# check if any known dependency is an imported package
for x in top_level_dependencies:
if isinstance(x, types.ModuleType) and hasattr(x, '__package__') and x.__package__:
# check if the package has any currently loaded sub-imports
prefix = x.__name__ + '.'
for name, module in sys.modules.items():
# Older versions of pytest will add a "None" module to sys.modules.
if name is not None and name.startswith(prefix):
# check whether the function can address the sub-module
tokens = set(name[len(prefix):].split('.'))
if not tokens - set(code.co_names):
# ensure unpickler executes this import
self.save(module)
# then discards the reference to it
self.write(pickle.POP) |
<SYSTEM_TASK:>
Find all globals names read or written to by codeblock co
<END_TASK>
<USER_TASK:>
Description:
def extract_code_globals(cls, co):
"""
Find all globals names read or written to by codeblock co
""" |
out_names = cls._extract_code_globals_cache.get(co)
if out_names is None:
try:
names = co.co_names
except AttributeError:
# PyPy "builtin-code" object
out_names = set()
else:
out_names = set(names[oparg]
for op, oparg in _walk_global_ops(co))
# see if nested function have any global refs
if co.co_consts:
for const in co.co_consts:
if type(const) is types.CodeType:
out_names |= cls.extract_code_globals(const)
cls._extract_code_globals_cache[co] = out_names
return out_names |
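The same bytecode walk can be illustrated with the standard dis module alone; this is only a sketch of the idea (top-level names, no recursion into nested code objects), not the cloudpickle internals above:
import dis

GLOBAL_OPS = {'LOAD_GLOBAL', 'STORE_GLOBAL', 'DELETE_GLOBAL'}

def global_names(func):
    """Names of globals referenced by a function's top-level bytecode."""
    return {ins.argval for ins in dis.get_instructions(func.__code__)
            if ins.opname in GLOBAL_OPS}

THRESHOLD = 3
def example(x):
    return max(x, THRESHOLD)

print(global_names(example))   # expected: {'max', 'THRESHOLD'}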
<SYSTEM_TASK:>
Pulls out all the symbols from a descriptor proto.
<END_TASK>
<USER_TASK:>
Description:
def _ExtractSymbols(desc_proto, package):
"""Pulls out all the symbols from a descriptor proto.
Args:
desc_proto: The proto to extract symbols from.
package: The package containing the descriptor type.
Yields:
The fully qualified name found in the descriptor.
""" |
message_name = '.'.join((package, desc_proto.name))
yield message_name
for nested_type in desc_proto.nested_type:
for symbol in _ExtractSymbols(nested_type, message_name):
yield symbol
for enum_type in desc_proto.enum_type:
yield '.'.join((message_name, enum_type.name)) |
<SYSTEM_TASK:>
Adds the FileDescriptorProto and its types to this database.
<END_TASK>
<USER_TASK:>
Description:
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this database.
Args:
file_desc_proto: The FileDescriptorProto to add.
Raises:
DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
add a proto with the same name but different definition than an
existing proto in the database.
""" |
proto_name = file_desc_proto.name
if proto_name not in self._file_desc_protos_by_file:
self._file_desc_protos_by_file[proto_name] = file_desc_proto
elif self._file_desc_protos_by_file[proto_name] != file_desc_proto:
raise DescriptorDatabaseConflictingDefinitionError(
'%s already added, but with different descriptor.' % proto_name)
# Add all the top-level descriptors to the index.
package = file_desc_proto.package
for message in file_desc_proto.message_type:
self._file_desc_protos_by_symbol.update(
(name, file_desc_proto) for name in _ExtractSymbols(message, package))
for enum in file_desc_proto.enum_type:
self._file_desc_protos_by_symbol[
'.'.join((package, enum.name))] = file_desc_proto
for extension in file_desc_proto.extension:
self._file_desc_protos_by_symbol[
'.'.join((package, extension.name))] = file_desc_proto
for service in file_desc_proto.service:
self._file_desc_protos_by_symbol[
'.'.join((package, service.name))] = file_desc_proto |
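A small usage sketch of the descriptor database with a hand-built FileDescriptorProto (proto and symbol names are illustrative):
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_database

db = descriptor_database.DescriptorDatabase()

file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = 'example.proto'
file_proto.package = 'demo'
message = file_proto.message_type.add()
message.name = 'Thing'
nested = message.nested_type.add()
nested.name = 'Part'

db.Add(file_proto)

# Both the top-level message and its nested type resolve back to the same file.
assert db.FindFileContainingSymbol('demo.Thing').name == 'example.proto'
assert db.FindFileContainingSymbol('demo.Thing.Part').name == 'example.proto'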
<SYSTEM_TASK:>
Convert a normalizer model to the protobuf spec.
<END_TASK>
<USER_TASK:>
Description:
def convert(model, input_features, output_features):
"""Convert a normalizer model to the protobuf spec.
Parameters
----------
model: Normalizer
A Normalizer.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
""" |
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Normalizer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'norm'))
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
spec = _set_transform_interface_params(spec, input_features, output_features)
# Set the normalizer parameters
_normalizer_spec = spec.normalizer
if model.norm == 'l1':
_normalizer_spec.normType = _proto__normalizer.L1
elif model.norm == 'l2':
_normalizer_spec.normType = _proto__normalizer.L2
elif model.norm == 'max':
_normalizer_spec.normType = _proto__normalizer.LMax
return _MLModel(spec) |
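In practice this converter is usually reached through the public scikit-learn entry point; a hedged sketch (column names and the saved filename are illustrative):
from sklearn.preprocessing import Normalizer
import coremltools

scaler = Normalizer(norm='l2').fit([[1.0, 2.0, 2.0]])
coreml_model = coremltools.converters.sklearn.convert(scaler, 'data', 'normalized')
coreml_model.save('normalizer.mlmodel')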
<SYSTEM_TASK:>
Find the path to the xgboost dynamic library files.
<END_TASK>
<USER_TASK:>
Description:
def find_lib_path():
"""Load find the path to xgboost dynamic library files.
Returns
-------
lib_path: list(string)
List of all found library paths to xgboost.
""" |
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
# make pythonpack hack: copy this directory one level upper for setup.py
dll_path = [curr_path, os.path.join(curr_path, '../../wrapper/'),
os.path.join(curr_path, './wrapper/')]
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))
else:
dll_path.append(os.path.join(curr_path, '../../windows/Release/'))
# hack for pip installation when copy all parent source directory here
dll_path.append(os.path.join(curr_path, './windows/Release/'))
if os.name == 'nt':
dll_path = [os.path.join(p, 'xgboost_wrapper.dll') for p in dll_path]
else:
dll_path = [os.path.join(p, 'libxgboostwrapper.so') for p in dll_path]
lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
#From github issues, most of installation errors come from machines w/o compilers
if len(lib_path) == 0 and not os.environ.get('XGBOOST_BUILD_DOC', False):
raise XGBoostLibraryNotFound(
'Cannot find XGBoost Library in the candidate path, ' +
'did you install compilers and run build.sh in root path?\n'
'List of candidates:\n' + ('\n'.join(dll_path)))
return lib_path |
<SYSTEM_TASK:>
Check if a model is of the right type. Raise error if not.
<END_TASK>
<USER_TASK:>
Description:
def check_expected_type(model, expected_type):
"""Check if a model is of the right type. Raise error if not.
Parameters
----------
model: model
Any scikit-learn model
expected_type: Type
Expected type of the scikit-learn model.
""" |
if (model.__class__.__name__ != expected_type.__name__):
raise TypeError("Expected model of type '%s' (got %s)" % \
(expected_type.__name__, model.__class__.__name__)) |
<SYSTEM_TASK:>
Convert a LIBSVM model to Core ML format.
<END_TASK>
<USER_TASK:>
Description:
def convert(model, input_names='input', target_name='target',
probability='classProbability', input_length='auto'):
"""
Convert a LIBSVM model to Core ML format.
Parameters
----------
model: a libsvm model (C-SVC, nu-SVC, epsilon-SVR, or nu-SVR)
or string path to a saved model.
input_names: str | [str]
Name of the input column(s).
If a single string is used (the default) the input will be an array. The
length of the array will be inferred from the model, this can be overridden
using the 'input_length' parameter.
target_name: str
Name of the output column.
probability: str
Name of the output class probability column.
Only used for C-SVC and nu-SVC that have been trained with probability
estimates enabled.
input_length: int
Set the length of the input array.
This parameter should only be used when the input is an array (i.e. when
'input_names' is a string).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Make a LIBSVM model
>>> import svmutil
>>> problem = svmutil.svm_problem([0,0,1,1], [[0,1], [1,1], [8,9], [7,7]])
>>> libsvm_model = svmutil.svm_train(problem, svmutil.svm_parameter())
# Convert using default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model)
# Save the CoreML model to a file.
>>> coreml_model.save('./my_model.mlmodel')
# Convert using user specified input names
>>> coreml_model = coremltools.converters.libsvm.convert(libsvm_model, input_names=['x', 'y'])
""" |
if not(_HAS_LIBSVM):
raise RuntimeError('libsvm not found. libsvm conversion API is disabled.')
if isinstance(model, _string_types):
libsvm_model = _libsvm_util.load_model(model)
else:
libsvm_model = model
if not isinstance(libsvm_model, _libsvm.svm_model):
raise TypeError("Expected 'model' of type '%s' (got %s)" % (_libsvm.svm_model, type(libsvm_model)))
if not isinstance(target_name, _string_types):
raise TypeError("Expected 'target_name' of type str (got %s)" % type(libsvm_model))
if input_length != 'auto' and not isinstance(input_length, int):
raise TypeError("Expected 'input_length' of type int, got %s" % type(input_length))
if input_length != 'auto' and not isinstance(input_names, _string_types):
raise ValueError("'input_length' should not be used unless the input will be only one array.")
if not isinstance(probability, _string_types):
raise TypeError("Expected 'probability' of type str (got %s)" % type(probability))
return _libsvm_converter.convert(libsvm_model, input_names, target_name, input_length, probability) |
<SYSTEM_TASK:>
Appends the contents of another repeated field of the same type to this
<END_TASK>
<USER_TASK:>
Description:
def MergeFrom(self, other):
"""Appends the contents of another repeated field of the same type to this
one. We do not check the types of the individual fields.
""" |
self._values.extend(other._values)
self._message_listener.Modified() |
<SYSTEM_TASK:>
Adds a new element at the end of the list and returns it. Keyword
<END_TASK>
<USER_TASK:>
Description:
def add(self, **kwargs):
"""Adds a new element at the end of the list and returns it. Keyword
arguments may be used to initialize the element.
""" |
new_element = self._message_descriptor._concrete_class(**kwargs)
new_element._SetListener(self._message_listener)
self._values.append(new_element)
if not self._message_listener.dirty:
self._message_listener.Modified()
return new_element |
<SYSTEM_TASK:>
Extends by appending the given sequence of elements of the same type
<END_TASK>
<USER_TASK:>
Description:
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
""" |
message_class = self._message_descriptor._concrete_class
listener = self._message_listener
values = self._values
for message in elem_seq:
new_element = message_class()
new_element._SetListener(listener)
new_element.MergeFrom(message)
values.append(new_element)
listener.Modified() |
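A small illustration of add() and extend() on a repeated composite field, using a well-known message type (names are illustrative):
from google.protobuf import descriptor_pb2

file_proto = descriptor_pb2.FileDescriptorProto()

# add() creates, appends and returns a new element; keyword args initialize it.
file_proto.message_type.add(name='First')

# extend() copies a sequence of existing messages of the same type.
extra = descriptor_pb2.DescriptorProto(name='Second')
file_proto.message_type.extend([extra])

print([m.name for m in file_proto.message_type])   # ['First', 'Second']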
<SYSTEM_TASK:>
Returns the elements of B that are not in A.
<END_TASK>
<USER_TASK:>
Description:
def difference (b, a):
""" Returns the elements of B that are not in A.
""" |
a = set(a)
result = []
for item in b:
if item not in a:
result.append(item)
return result |
<SYSTEM_TASK:>
Removes from set1 any items which don't appear in set2 and returns the result.
<END_TASK>
<USER_TASK:>
Description:
def intersection (set1, set2):
""" Removes from set1 any items which don't appear in set2 and returns the result.
""" |
assert is_iterable(set1)
assert is_iterable(set2)
result = []
for v in set1:
if v in set2:
result.append (v)
return result |
<SYSTEM_TASK:>
Returns true iff all elements of 'small' exist in 'large'.
<END_TASK>
<USER_TASK:>
Description:
def contains (small, large):
""" Returns true iff all elements of 'small' exist in 'large'.
""" |
small = to_seq (small)
large = to_seq (large)
for s in small:
if not s in large:
return False
return True |
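Expected behaviour of the three helpers, assuming they (and the module utilities they call, such as to_seq and is_iterable) are in scope:
print(difference([1, 2, 3, 4], [2, 4]))     # [1, 3]   (elements of b not in a)
print(intersection([1, 2, 3], [2, 3, 5]))   # [2, 3]
print(contains([2, 3], [1, 2, 3, 4]))       # True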
<SYSTEM_TASK:>
Annotate your images loaded in either an SFrame or SArray Format
<END_TASK>
<USER_TASK:>
Description:
def annotate(data, image_column=None, annotation_column='annotations'):
"""
Annotate your images loaded in either an SFrame or SArray Format
The annotate util is a GUI assisted application used to create labels in
SArray Image data. Specifying a column, with dtype Image, in an SFrame
works as well since SFrames are composed of multiple SArrays.
When the GUI is terminated an SFrame is returned with the representative,
images and annotations.
The returned SFrame includes the newly created annotations.
Parameters
--------------
data : SArray | SFrame
The data containing the images. If the data type is 'SArray'
the 'image_column', and 'annotation_column' variables are used to construct
a new 'SFrame' containing the 'SArray' data for annotation.
If the data type is 'SFrame' the 'image_column', and 'annotation_column'
variables are used to annotate the images.
image_column: string, optional
If the data type is SFrame and the 'image_column' parameter is specified
then that column is used as the image column in the annotation. If
the data type is 'SFrame' and the 'image_column' variable is left empty, a
default column value of 'image' is used in the annotation. If the data type is
'SArray', the 'image_column' is used to construct the 'SFrame' data for
the annotation
annotation_column : string, optional
If the data type is SFrame and the 'annotation_column' parameter is specified
then that column is used as the annotation column in the annotation. If
the data type is 'SFrame' and the 'annotation_column' variable is left empty, a
default column value of 'annotations' is used in the annotation. If the data type is
'SArray', the 'annotation_column' is used to construct the 'SFrame' data for
the annotation
Returns
-------
out : SFrame
A new SFrame that contains the newly annotated data.
Examples
--------
>> import turicreate as tc
>> images = tc.image_analysis.load_images("path/to/images")
>> print(images)
Columns:
path str
image Image
Rows: 4
Data:
+------------------------+--------------------------+
| path | image |
+------------------------+--------------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 |
| /Users/username/Doc... | Height: 1386 Width: 1000 |
| /Users/username/Doc... | Height: 536 Width: 858 |
| /Users/username/Doc... | Height: 1512 Width: 2680 |
+------------------------+--------------------------+
[4 rows x 2 columns]
>> images = tc.image_classifier.annotate(images)
>> print(images)
Columns:
path str
image Image
annotation str
Rows: 4
Data:
+------------------------+--------------------------+-------------------+
| path | image | annotation |
+------------------------+--------------------------+-------------------+
| /Users/username/Doc... | Height: 1712 Width: 1952 | dog |
| /Users/username/Doc... | Height: 1386 Width: 1000 | dog |
| /Users/username/Doc... | Height: 536 Width: 858 | cat |
| /Users/username/Doc... | Height: 1512 Width: 2680 | mouse |
+------------------------+--------------------------+-------------------+
[4 rows x 3 columns]
""" |
# Check Value of Column Variables
if image_column == None:
image_column = _tkutl._find_only_image_column(data)
if image_column == None:
raise ValueError("'image_column' cannot be 'None'")
if type(image_column) != str:
raise TypeError("'image_column' has to be of type 'str'")
if annotation_column == None:
annotation_column = ""
if type(annotation_column) != str:
raise TypeError("'annotation_column' has to be of type 'str'")
# Check Data Structure
if type(data) == __tc.data_structures.image.Image:
data = __tc.SFrame({image_column:__tc.SArray([data])})
elif type(data) == __tc.data_structures.sframe.SFrame:
if(data.shape[0] == 0):
return data
if not (data[image_column].dtype == __tc.data_structures.image.Image):
raise TypeError("'data[image_column]' must be an SFrame or SArray")
elif type(data) == __tc.data_structures.sarray.SArray:
if(data.shape[0] == 0):
return data
data = __tc.SFrame({image_column:data})
else:
raise TypeError("'data' must be an SFrame or SArray")
_warning_annotations()
annotation_window = __tc.extensions.create_image_classification_annotation(
data,
[image_column],
annotation_column
)
annotation_window.annotate(_get_client_app_path())
return annotation_window.returnAnnotations() |
<SYSTEM_TASK:>
Convert an Imputer model to the protobuf spec.
<END_TASK>
<USER_TASK:>
Description:
def convert(model, input_features, output_features):
"""Convert a DictVectorizer model to the protobuf spec.
Parameters
----------
model: Imputer
A fitted Imputer model.
input_features: str
Name of the input column.
output_features: str
Name of the output column.
Returns
-------
model_spec: An object of type Model_pb.
Protobuf representation of the model
""" |
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
# Set the interface params.
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
assert len(input_features) == 1
assert isinstance(input_features[0][1], datatypes.Array)
# feature name in and out are the same here
spec = set_transform_interface_params(spec, input_features, output_features)
# Test the scikit-learn model
_sklearn_util.check_expected_type(model, Imputer)
_sklearn_util.check_fitted(model, lambda m: hasattr(m, 'statistics_'))
if model.axis != 0:
raise ValueError("Imputation is only supported along axis = 0.")
# The imputer in our framework only works on single columns, so
# we need to translate that over. The easiest way to do that is to
# put it in a nested pipeline with a feature extractor and a
tr_spec = spec.imputer
for v in model.statistics_:
tr_spec.imputedDoubleArray.vector.append(v)
try:
tr_spec.replaceDoubleValue = float(model.missing_values)
except ValueError:
raise ValueError("Only scalar values or NAN as missing_values "
"in _imputer are supported.")
return _MLModel(spec) |
<SYSTEM_TASK:>
Internal function.
<END_TASK>
<USER_TASK:>
Description:
def print_callback(val):
"""
Internal function.
This function is called via a callback returning from IPC through Cython
to Python. It tries to perform incremental printing to IPython Notebook or
Jupyter Notebook and when all else fails, just prints locally.
""" |
success = False
try:
# for reasons I cannot fathom, regular printing, even directly
# to io.stdout does not work.
# I have to intrude rather deep into IPython to make it behave
if have_ipython:
if InteractiveShell.initialized():
IPython.display.publish_display_data({'text/plain':val,'text/html':'<pre>' + val + '</pre>'})
success = True
except:
pass
if not success:
print(val)
sys.stdout.flush() |
<SYSTEM_TASK:>
Internal function to execute toolkit on the turicreate server.
<END_TASK>
<USER_TASK:>
Description:
def run(toolkit_name, options, verbose=True, show_progress=False):
"""
Internal function to execute toolkit on the turicreate server.
Parameters
----------
toolkit_name : string
The name of the toolkit.
options : dict
A map containing the required input for the toolkit function,
for example: {'graph': g, 'reset_prob': 0.15}.
verbose : bool
If true, enable progress log from server.
show_progress : bool
If true, display progress plot.
Returns
-------
out : dict
The toolkit specific model parameters.
Raises
------
RuntimeError
Raises RuntimeError if the server fails to execute the toolkit.
""" |
unity = glconnect.get_unity()
if (not verbose):
glconnect.get_server().set_log_progress(False)
(success, message, params) = unity.run_toolkit(toolkit_name, options)
if (len(message) > 0):
logging.getLogger(__name__).error("Toolkit error: " + message)
# set the verbose level back to default
glconnect.get_server().set_log_progress(True)
if success:
return params
else:
raise ToolkitError(str(message)) |
<SYSTEM_TASK:>
Truncates the remainder part after division.
<END_TASK>
<USER_TASK:>
Description:
def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division.""" |
# For some languanges, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result |
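A quick check of the rounding semantics, assuming _RoundTowardZero above is in scope:
# Python floor division rounds toward negative infinity, so -5 // 2 == -3,
# whereas this helper truncates toward zero, matching C-style integer division.
assert -5 // 2 == -3
assert _RoundTowardZero(-5, 2) == -2
assert _RoundTowardZero(5, 2) == 2
assert _RoundTowardZero(-4, 2) == -2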