text_prompt | code_prompt
---|---|
stringlengths 100–17.7k ⌀ | stringlengths 7–9.86k ⌀
<SYSTEM_TASK:>
Visualize the SArray.
<END_TASK>
<USER_TASK:>
Description:
def show(self, title=LABEL_DEFAULT, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT):
"""
Visualize the SArray.
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Parameters
----------
title : str
The plot title to show for the resulting visualization.
If the title is None, the title will be omitted.
xlabel : str
The X axis label to show for the resulting visualization.
If the xlabel is None, the X axis label will be omitted.
ylabel : str
The Y axis label to show for the resulting visualization.
If the ylabel is None, the Y axis label will be omitted.
Returns
-------
None
Examples
--------
Suppose 'sa' is an SArray, we can view it using:
>>> sa.show()
To override the default plot title and axis labels:
>>> sa.show(title="My Plot Title", xlabel="My X Axis", ylabel="My Y Axis")
""" |
returned_plot = self.plot(title, xlabel, ylabel)
returned_plot.show() |
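A short usage sketch (assuming the turicreate package is importable as `tc`); the 'auto' target is the documented default mentioned in the Notes above:

import turicreate as tc

sa = tc.SArray([1, 2, 3, 4, 5])
# 'auto' renders inline in a Jupyter Notebook, or in a native GUI window otherwise.
tc.visualization.set_target('auto')
sa.show(title="Example", xlabel="Index", ylabel="Value")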
<SYSTEM_TASK:>
Create a Plot object representing the SArray.
<END_TASK>
<USER_TASK:>
Description:
def plot(self, title=LABEL_DEFAULT, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT):
"""
Create a Plot object representing the SArray.
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Parameters
----------
title : str
The plot title to show for the resulting visualization.
If the title is None, the title will be omitted.
xlabel : str
The X axis label to show for the resulting visualization.
If the xlabel is None, the X axis label will be omitted.
ylabel : str
The Y axis label to show for the resulting visualization.
If the ylabel is None, the Y axis label will be omitted.
Returns
-------
out : Plot
A :class: Plot object that is the visualization of the SArray.
Examples
--------
Suppose 'sa' is an SArray, we can create a plot of it using:
>>> plt = sa.plot()
To override the default plot title and axis labels:
>>> plt = sa.plot(title="My Plot Title", xlabel="My X Axis", ylabel="My Y Axis")
We can then visualize the plot using:
>>> plt.show()
""" |
if title == "":
title = " "
if xlabel == "":
xlabel = " "
if ylabel == "":
ylabel = " "
if title is None:
title = "" # C++ otherwise gets "None" as std::string
if xlabel is None:
xlabel = ""
if ylabel is None:
ylabel = ""
return Plot(self.__proxy__.plot(title, xlabel, ylabel)) |
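The label normalization above is subtle; this standalone sketch (a hypothetical helper, not part of the library) mirrors that logic so the intent is explicit:

def _normalize_label(label):
    # An empty string would read as "unset" downstream, so promote it to a
    # single space to keep an intentionally blank label.
    if label == "":
        return " "
    # None must become the empty string; otherwise the C++ layer would
    # receive the literal string "None" as the label.
    if label is None:
        return ""
    return label

# _normalize_label("") -> " ", _normalize_label(None) -> ""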
<SYSTEM_TASK:>
Length of each element in the current SArray.
<END_TASK>
<USER_TASK:>
Description:
def item_length(self):
"""
Length of each element in the current SArray.
Only works on SArrays of dict, array, or list type. If a given element
is a missing value, then the output element is also a missing value.
This function is equivalent to the following but more performant:
sa_item_len = sa.apply(lambda x: len(x) if x is not None else None)
Returns
-------
out_sf : SArray
A new SArray where each element is the length of the corresponding
item in the original SArray.
Examples
--------
>>> sa = SArray([
... {"is_restaurant": 1, "is_electronics": 0},
... {"is_restaurant": 1, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0, "is_retail": 1, "is_electronics": 0},
... {"is_restaurant": 0},
... {"is_restaurant": 1, "is_electronics": 1},
... None])
>>> sa.item_length()
dtype: int
Rows: 6
[2, 3, 3, 1, 2, None]
""" |
if (self.dtype not in [list, dict, array.array]):
raise TypeError("item_length() is only applicable for SArray of type list, dict and array.")
with cython_context():
return SArray(_proxy = self.__proxy__.item_length()) |
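A usage sketch (assuming turicreate is importable), alongside the slower `apply` equivalent quoted in the docstring:

import turicreate as tc

sa = tc.SArray([[1, 2], [3], None])
print(sa.item_length())   # [2, 1, None]
# Slower but equivalent fallback:
print(sa.apply(lambda x: len(x) if x is not None else None))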
<SYSTEM_TASK:>
Convert a "wide" SArray to one or two "tall" columns in an SFrame by
<END_TASK>
<USER_TASK:>
Description:
def stack(self, new_column_name=None, drop_na=False, new_column_type=None):
"""
Convert a "wide" SArray to one or two "tall" columns in an SFrame by
stacking all values.
The stack works only for columns of dict, list, or array type. If the
column is dict type, two new columns are created as a result of
stacking: one column holds the key and another column holds the value.
The rest of the columns are repeated for each key/value pair.
If the column is of array or list type, one new column is created as a
result of stacking. Each row holds one element of the array or list
value, and the rest of the columns from the same original row are repeated.
The returned SFrame includes the newly created column(s).
Parameters
--------------
new_column_name : str | list of str, optional
The new column name(s). If the original column is of list/array type,
new_column_name must be a string. If the original column is of dict type,
new_column_name must be a list of two strings. If not given, column
names are generated automatically.
drop_na : boolean, optional
If True, missing values and empty list/array/dict are all dropped
from the resulting column(s). If False, missing values are
maintained in stacked column(s).
new_column_type : type | list of types, optional
The new column types. If original column is a list/array type
new_column_type must be a single type, or a list of one type. If
original column is of dict type, new_column_type must be a list of
two types. If not provided, the types are automatically inferred
from the first 100 values of the SFrame.
Returns
-------
out : SFrame
A new SFrame that contains the newly stacked column(s).
Examples
---------
Suppose 'sa' is an SArray of dict type:
>>> sa = turicreate.SArray([{'a':3, 'cat':2},
... {'a':1, 'the':2},
... {'the':1, 'dog':3},
... {}])
[{'a': 3, 'cat': 2}, {'a': 1, 'the': 2}, {'the': 1, 'dog': 3}, {}]
Stack would stack all keys in one column and all values in another
column:
>>> sa.stack(new_column_name=['word', 'count'])
+------+-------+
| word | count |
+------+-------+
| a | 3 |
| cat | 2 |
| a | 1 |
| the | 2 |
| the | 1 |
| dog | 3 |
| None | None |
+------+-------+
[7 rows x 2 columns]
Observe that since the fourth dictionary is empty, a row of missing values
is inserted. To drop that row, set drop_na=True in the parameters to stack.
""" |
from .sframe import SFrame as _SFrame
return _SFrame({'SArray': self}).stack('SArray',
new_column_name=new_column_name,
drop_na=drop_na,
new_column_type=new_column_type) |
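For the list-typed case described above, a small sketch (assuming turicreate is importable):

import turicreate as tc

sa = tc.SArray([[1, 2], [3], []])
# One new column is created; each list element becomes its own row.
print(sa.stack(new_column_name='value'))
# Empty lists yield a row of missing values; drop them explicitly if unwanted:
print(sa.stack(new_column_name='value', drop_na=True))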
<SYSTEM_TASK:>
Sort all values in this SArray.
<END_TASK>
<USER_TASK:>
Description:
def sort(self, ascending=True):
"""
Sort all values in this SArray.
Sort only works for SArrays of type str, int, float, and datetime.datetime;
otherwise a TypeError will be raised. Creates a new, sorted SArray.
Parameters
----------
ascending : boolean, optional
If True, the SArray values are sorted in ascending order; otherwise,
in descending order.
Returns
-------
out: SArray
Examples
--------
>>> sa = SArray([3,2,1])
>>> sa.sort()
dtype: int
Rows: 3
[1, 2, 3]
""" |
from .sframe import SFrame as _SFrame
if self.dtype not in (int, float, str, datetime.datetime):
raise TypeError("Only sarray with type (int, float, str, datetime.datetime) can be sorted")
sf = _SFrame()
sf['a'] = self
return sf.sort('a', ascending)['a'] |
<SYSTEM_TASK:>
Calculate a new SArray of the sum of different subsets over this
<END_TASK>
<USER_TASK:>
Description:
def rolling_sum(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the sum of different subsets over this
SArray.
Also known as a "moving sum" or "running sum". The subset that
the sum is calculated over is defined as an inclusive range relative
to the position of each value in the SArray, using `window_start` and
`window_end`. For a better understanding of this, see the examples
below.
Parameters
----------
window_start : int
The start of the subset to calculate the sum relative to the
current value.
window_end : int
The end of the subset to calculate the sum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the sum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling sum with a window including the previous 2 entries including
the current:
>>> sa.rolling_sum(-2,0)
dtype: int
Rows: 5
[None, None, 6, 9, 12]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3)
0 NaN
1 NaN
2 6
3 9
4 12
dtype: float64
Same rolling sum operation, but 2 minimum observations:
>>> sa.rolling_sum(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 3, 6, 9, 12]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3, min_periods=2)
0 NaN
1 3
2 6
3 9
4 12
dtype: float64
A rolling sum with a size of 3, centered around the current:
>>> sa.rolling_sum(-1,1)
dtype: int
Rows: 5
[None, 6, 9, 12, None]
Pandas equivalent:
>>> pandas.rolling_sum(series, 3, center=True)
0 NaN
1 6
2 9
3 12
4 NaN
dtype: float64
A rolling sum with a window including the current and the 2 entries
following:
>>> sa.rolling_sum(0,2)
dtype: int
Rows: 5
[6, 9, 12, None, None]
A rolling sum with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_sum(-2,-1)
dtype: int
Rows: 5
[None, None, 3, 5, 7]
""" |
min_observations = self.__check_min_observations(min_observations)
agg_op = None
if self.dtype is array.array:
agg_op = '__builtin__vector__sum__'
else:
agg_op = '__builtin__sum__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations)) |
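The inclusive window semantics are easy to restate in plain Python; this sketch illustrates only the indexing (it ignores min_observations and missing values, which the library handles separately):

def rolling_sum_sketch(values, window_start, window_end):
    # For each index i, sum values[i + window_start .. i + window_end] inclusive;
    # windows that run off either end of the array yield None.
    out = []
    n = len(values)
    for i in range(n):
        lo, hi = i + window_start, i + window_end
        if lo < 0 or hi >= n:
            out.append(None)
        else:
            out.append(sum(values[lo:hi + 1]))
    return out

# rolling_sum_sketch([1, 2, 3, 4, 5], -2, 0) -> [None, None, 6, 9, 12]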
<SYSTEM_TASK:>
Calculate a new SArray of the maximum value of different subsets over
<END_TASK>
<USER_TASK:>
Description:
def rolling_max(self, window_start, window_end, min_observations=None):
"""
Calculate a new SArray of the maximum value of different subsets over
this SArray.
The subset that the maximum is calculated over is defined as an
inclusive range relative to the position of each value in the SArray,
using `window_start` and `window_end`. For a better understanding of
this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the maximum relative to the
current value.
window_end : int
The end of the subset to calculate the maximum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the maximum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling max with a window including the previous 2 entries including
the current:
>>> sa.rolling_max(-2,0)
dtype: int
Rows: 5
[None, None, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3)
0 NaN
1 NaN
2 3
3 4
4 5
dtype: float64
Same rolling max operation, but 2 minimum observations:
>>> sa.rolling_max(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 2, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, min_periods=2)
0 NaN
1 2
2 3
3 4
4 5
dtype: float64
A rolling max with a size of 3, centered around the current:
>>> sa.rolling_max(-1,1)
dtype: int
Rows: 5
[None, 3, 4, 5, None]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, center=True)
0 NaN
1 3
2 4
3 5
4 NaN
dtype: float64
A rolling max with a window including the current and the 2 entries
following:
>>> sa.rolling_max(0,2)
dtype: int
Rows: 5
[3, 4, 5, None, None]
A rolling max with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_max(-2,-1)
dtype: int
Rows: 5
[None, None, 2, 3, 4]
""" |
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__max__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations)) |
<SYSTEM_TASK:>
Count the number of non-NULL values of different subsets over this
<END_TASK>
<USER_TASK:>
Description:
def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position of each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
""" |
agg_op = '__builtin__nonnull__count__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0)) |
<SYSTEM_TASK:>
Return the cumulative sum of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_sum(self):
"""
Return the cumulative sum of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
sum of all the elements preceding and including it. The SArray is
expected to be of numeric type (int, float), or a numeric vector type.
Returns
-------
out : sarray[int, float, array.array]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
- For SArray's of type array.array, all entries are expected to
be of the same size.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa.cumulative_sum()
dtype: int
Rows: 5
[1, 3, 6, 10, 15]
""" |
from .. import extensions
agg_op = "__builtin__cum_sum__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
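The note that missing values are ignored can be illustrated with a plain-Python sketch of one plausible reading (the library may fill the position of a missing value differently):

def cumulative_sum_sketch(values):
    out, running = [], 0
    for v in values:
        if v is not None:
            running += v
        # The missing value contributes nothing but still occupies a position.
        out.append(running)
    return out

# cumulative_sum_sketch([1, None, 3]) -> [1, 1, 4]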
<SYSTEM_TASK:>
Return the cumulative mean of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_mean(self):
"""
Return the cumulative mean of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
mean value of all the elements preceding and including it. The SArray
is expected to be of numeric type (int, float), or a numeric vector
type.
Returns
-------
out : SArray[float, array.array]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
- For SArray's of type array.array, all entries are expected to
be of the same size.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 5])
>>> sa.cumulative_mean()
dtype: float
Rows: 5
[1, 1.5, 2, 2.5, 3]
""" |
from .. import extensions
agg_op = "__builtin__cum_avg__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
<SYSTEM_TASK:>
Return the cumulative minimum value of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_min(self):
"""
Return the cumulative minimum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
minimum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_min()
dtype: int
Rows: 5
[1, 1, 1, 1, 0]
""" |
from .. import extensions
agg_op = "__builtin__cum_min__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
<SYSTEM_TASK:>
Return the cumulative maximum value of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_max(self):
"""
Return the cumulative maximum value of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
maximum value of all the elements preceding and including it. The
SArray is expected to be of numeric type (int, float).
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 0, 3, 4, 2])
>>> sa.cumulative_max()
dtype: int
Rows: 5
[1, 1, 3, 4, 4]
""" |
from .. import extensions
agg_op = "__builtin__cum_max__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
<SYSTEM_TASK:>
Return the cumulative standard deviation of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_std(self):
"""
Return the cumulative standard deviation of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
standard deviation of all the elements preceding and including it. The
SArray is expected to be of numeric type, or a numeric vector type.
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_std()
dtype: float
Rows: 5
[0.0, 0.5, 0.816496580927726, 1.118033988749895, 1.4142135623730951]
""" |
from .. import extensions
agg_op = "__builtin__cum_std__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
<SYSTEM_TASK:>
Return the cumulative variance of the elements in the SArray.
<END_TASK>
<USER_TASK:>
Description:
def cumulative_var(self):
"""
Return the cumulative variance of the elements in the SArray.
Returns an SArray where each element in the output corresponds to the
variance of all the elements preceding and including it. The SArray is
expected to be of numeric type, or a numeric vector type.
Returns
-------
out : SArray[int, float]
Notes
-----
- Missing values are ignored while performing the cumulative
aggregate operation.
Examples
--------
>>> sa = SArray([1, 2, 3, 4, 0])
>>> sa.cumulative_var()
dtype: float
Rows: 5
[0.0, 0.25, 0.6666666666666666, 1.25, 2.0]
""" |
from .. import extensions
agg_op = "__builtin__cum_var__"
return SArray(_proxy = self.__proxy__.builtin_cumulative_aggregate(agg_op)) |
<SYSTEM_TASK:>
Converts protobuf message to JSON format.
<END_TASK>
<USER_TASK:>
Description:
def MessageToJson(message,
including_default_value_fields=False,
preserving_proto_field_name=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A string containing the JSON formatted protocol buffer message.
""" |
printer = _Printer(including_default_value_fields,
preserving_proto_field_name)
return printer.ToJsonString(message) |
<SYSTEM_TASK:>
Converts protobuf message to a JSON dictionary.
<END_TASK>
<USER_TASK:>
Description:
def MessageToDict(message,
including_default_value_fields=False,
preserving_proto_field_name=False):
"""Converts protobuf message to a JSON dictionary.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
preserving_proto_field_name: If True, use the original proto field
names as defined in the .proto file. If False, convert the field
names to lowerCamelCase.
Returns:
A dict representation of the JSON formatted protocol buffer message.
""" |
printer = _Printer(including_default_value_fields,
preserving_proto_field_name)
# pylint: disable=protected-access
return printer._MessageToJsonObject(message) |
<SYSTEM_TASK:>
Parses a JSON dictionary representation into a message.
<END_TASK>
<USER_TASK:>
Description:
def ParseDict(js_dict, message, ignore_unknown_fields=False):
"""Parses a JSON dictionary representation into a message.
Args:
js_dict: Dict representation of a JSON message.
message: A protocol buffer message to merge into.
ignore_unknown_fields: If True, do not raise errors for unknown fields.
Returns:
The same message passed as argument.
""" |
parser = _Parser(ignore_unknown_fields)
parser.ConvertMessage(js_dict, message)
return message |
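A round-trip sketch using a compiled message type that ships with the protobuf package (descriptor_pb2.FieldDescriptorProto is used here purely for illustration):

from google.protobuf import descriptor_pb2
from google.protobuf.json_format import MessageToDict, ParseDict

msg = descriptor_pb2.FieldDescriptorProto(name='my_field', number=7)
as_dict = MessageToDict(msg)            # e.g. {'name': 'my_field', 'number': 7}
restored = descriptor_pb2.FieldDescriptorProto()
ParseDict(as_dict, restored)
assert restored.name == 'my_field' and restored.number == 7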
<SYSTEM_TASK:>
Convert a single scalar field value.
<END_TASK>
<USER_TASK:>
Description:
def _ConvertScalarFieldValue(value, field, require_str=False):
"""Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
require_str: If True, the field value must be a str.
Returns:
The converted scalar field value
Raises:
ParseError: In case of conversion problems.
""" |
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_str)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
return base64.b64decode(value)
else:
# Checking for unpaired surrogates appears to be unreliable,
# depending on the specific Python version, so we check manually.
if _UNPAIRED_SURROGATE_PATTERN.search(value):
raise ParseError('Unpaired surrogate')
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
# Convert an enum value.
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
try:
number = int(value)
enum_value = field.enum_type.values_by_number.get(number, None)
except ValueError:
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
if enum_value is None:
raise ParseError('Invalid enum value {0} for enum type {1}.'.format(
value, field.enum_type.full_name))
return enum_value.number |
<SYSTEM_TASK:>
Convert an integer.
<END_TASK>
<USER_TASK:>
Description:
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
""" |
if isinstance(value, float) and not value.is_integer():
raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
if isinstance(value, six.text_type) and value.find(' ') != -1:
raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
return int(value) |
<SYSTEM_TASK:>
Convert a boolean value.
<END_TASK>
<USER_TASK:>
Description:
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
""" |
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value |
<SYSTEM_TASK:>
Converts message to an object according to Proto3 JSON Specification.
<END_TASK>
<USER_TASK:>
Description:
def _MessageToJsonObject(self, message):
"""Converts message to an object according to Proto3 JSON Specification.""" |
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
return self._WrapperMessageToJsonObject(message)
if full_name in _WKTJSONMETHODS:
return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
js = {}
return self._RegularMessageToJsonObject(message, js) |
<SYSTEM_TASK:>
Convert a JSON object into a message.
<END_TASK>
<USER_TASK:>
Description:
def ConvertMessage(self, value, message):
"""Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
Raises:
ParseError: In case of conversion problems.
""" |
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value, message)
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
else:
self._ConvertFieldValuePair(value, message) |
<SYSTEM_TASK:>
Convert a JSON representation into Wrapper message.
<END_TASK>
<USER_TASK:>
Description:
def _ConvertWrapperMessage(self, value, message):
"""Convert a JSON representation into Wrapper message.""" |
field = message.DESCRIPTOR.fields_by_name['value']
setattr(message, 'value', _ConvertScalarFieldValue(value, field)) |
<SYSTEM_TASK:>
Convert map field value for a message map field.
<END_TASK>
<USER_TASK:>
Description:
def _ConvertMapFieldValue(self, value, message, field):
"""Convert map field value for a message map field.
Args:
value: A JSON object to convert the map field value.
message: A protocol message to record the converted data.
field: The descriptor of the map field to be converted.
Raises:
ParseError: In case of conversion problems.
""" |
if not isinstance(value, dict):
raise ParseError(
'Map field {0} must be in a dict which is {1}.'.format(
field.name, value))
key_field = field.message_type.fields_by_name['key']
value_field = field.message_type.fields_by_name['value']
for key in value:
key_value = _ConvertScalarFieldValue(key, key_field, True)
if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
self.ConvertMessage(value[key], getattr(
message, field.name)[key_value])
else:
getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
value[key], value_field) |
<SYSTEM_TASK:>
Plots the data in `x` on the X axis and the data in `y` on the Y axis
<END_TASK>
<USER_TASK:>
Description:
def categorical_heatmap(x, y, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots the data in `x` on the X axis and the data in `y` on the Y axis
in a 2d categorical heatmap, and returns the resulting Plot object.
The function supports SArrays of dtype str.
Parameters
----------
x : SArray
The data to plot on the X axis of the categorical heatmap.
Must be a string SArray.
y : SArray
The data to plot on the Y axis of the categorical heatmap.
Must be a string SArray of the same length as `x`.
xlabel : str (optional)
The text label for the X axis. Defaults to "X".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Y".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class: Plot object that is the categorical heatmap.
Examples
--------
Make a categorical heatmap.
>>> x = turicreate.SArray(['1','2','3','4','5'])
>>> y = turicreate.SArray(['a','b','c','d','e'])
>>> catheat = turicreate.visualization.categorical_heatmap(x, y)
""" |
if (not isinstance(x, tc.data_structures.sarray.SArray) or
not isinstance(y, tc.data_structures.sarray.SArray) or
x.dtype != str or y.dtype != str):
raise ValueError("turicreate.visualization.categorical_heatmap supports " +
"SArrays of dtype: str")
# legit input
title = _get_title(title)
plt_ref = tc.extensions.plot_categorical_heatmap(x, y,
xlabel, ylabel, title)
return Plot(plt_ref) |
<SYSTEM_TASK:>
Plots a columnwise summary of the sframe provided as input,
<END_TASK>
<USER_TASK:>
Description:
def columnwise_summary(sf):
"""
Plots a columnwise summary of the sframe provided as input,
and returns the resulting Plot object.
The function supports SFrames.
Parameters
----------
sf : SFrame
The data to get a columnwise summary for.
Returns
-------
out : Plot
A :class: Plot object that is the columnwise summary plot.
Examples
--------
Make a columnwise summary of an SFrame.
>>> x = turicreate.SArray([1,2,3,4,5])
>>> s = turicreate.SArray(['a','b','c','a','a'])
>>> sf_test = turicreate.SFrame([x,x,x,x,s,s,s,x,s,x,s,s,s,x,x])
>>> colsum = turicreate.visualization.columnwise_summary(sf_test)
""" |
if not isinstance(sf, tc.data_structures.sframe.SFrame):
raise ValueError("turicreate.visualization.columnwise_summary " +
"supports SFrame")
plt_ref = tc.extensions.plot_columnwise_summary(sf)
return Plot(plt_ref) |
<SYSTEM_TASK:>
Plots a histogram of the sarray provided as input, and returns the
<END_TASK>
<USER_TASK:>
Description:
def histogram(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots a histogram of the sarray provided as input, and returns the
resulting Plot object.
The function supports numeric SArrays with dtypes int or float.
Parameters
----------
sa : SArray
The data to get a histogram for. Must be numeric (int/float).
xlabel : str (optional)
The text label for the X axis. Defaults to "Values".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Count".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class: Plot object that is the histogram.
Examples
--------
Make a histogram of an SArray.
>>> x = turicreate.SArray([1,2,3,4,5,1,1,1,1,2,2,3,2,3,1,1,1,4])
>>> hist = turicreate.visualization.histogram(x)
""" |
if (not isinstance(sa, tc.data_structures.sarray.SArray) or
sa.dtype not in [int, float]):
raise ValueError("turicreate.visualization.histogram supports " +
"SArrays of dtypes: int, float")
title = _get_title(title)
plt_ref = tc.extensions.plot_histogram(sa,
xlabel, ylabel, title)
return Plot(plt_ref) |
<SYSTEM_TASK:>
Plots an item frequency of the sarray provided as input, and returns the
<END_TASK>
<USER_TASK:>
Description:
def item_frequency(sa, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT, title=LABEL_DEFAULT):
"""
Plots an item frequency of the sarray provided as input, and returns the
resulting Plot object.
The function supports SArrays with dtype str.
Parameters
----------
sa : SArray
The data to get an item frequency for. Must have dtype str
xlabel : str (optional)
The text label for the X axis. Defaults to "Values".
ylabel : str (optional)
The text label for the Y axis. Defaults to "Count".
title : str (optional)
The title of the plot. Defaults to LABEL_DEFAULT. If the value is
LABEL_DEFAULT, the title will be "<xlabel> vs. <ylabel>". If the value
is None, the title will be omitted. Otherwise, the string passed in as the
title will be used as the plot title.
Returns
-------
out : Plot
A :class: Plot object that is the item frequency plot.
Examples
--------
Make an item frequency of an SArray.
>>> x = turicreate.SArray(['a','ab','acd','ab','a','a','a','ab','cd'])
>>> ifplt = turicreate.visualization.item_frequency(x)
""" |
if (not isinstance(sa, tc.data_structures.sarray.SArray) or
sa.dtype != str):
raise ValueError("turicreate.visualization.item_frequency supports " +
"SArrays of dtype str")
title = _get_title(title)
plt_ref = tc.extensions.plot_item_frequency(sa,
xlabel, ylabel, title)
return Plot(plt_ref) |
<SYSTEM_TASK:>
Parses the input file and returns C code and corresponding header file.
<END_TASK>
<USER_TASK:>
Description:
def Parse(factory, file):
"""
Parses the input file and returns C code and corresponding header file.
""" |
entities = []
while 1:
# Just gets the whole struct nicely formatted
data = GetNextStruct(file)
if not data:
break
entities.extend(ProcessStruct(factory, data))
return entities |
<SYSTEM_TASK:>
Creates the name inside an enumeration for distinguishing data
<END_TASK>
<USER_TASK:>
Description:
def EntryTagName(self, entry):
"""Creates the name inside an enumeration for distinguishing data
types.""" |
name = "%s_%s" % (self._name, entry.Name())
return name.upper() |
<SYSTEM_TASK:>
Takes an array, add indentation to each entry and prints it.
<END_TASK>
<USER_TASK:>
Description:
def PrintIndented(self, file, ident, code):
"""Takes an array, add indentation to each entry and prints it.""" |
for entry in code:
print >>file, '%s%s' % (ident, entry) |
<SYSTEM_TASK:>
Compute the K-core decomposition of the graph. Return a model object with
<END_TASK>
<USER_TASK:>
Description:
def create(graph, kmin=0, kmax=10, verbose=True):
"""
Compute the K-core decomposition of the graph. Return a model object with
total number of cores as well as the core id for each vertex in the graph.
Parameters
----------
graph : SGraph
The graph on which to compute the k-core decomposition.
kmin : int, optional
Minimum core id. Vertices having smaller core id than `kmin` will be
assigned with core_id = `kmin`.
kmax : int, optional
Maximum core id. Vertices having larger core id than `kmax` will be
assigned with core_id=`kmax`.
verbose : bool, optional
If True, print progress updates.
Returns
-------
out : KcoreModel
References
----------
- Alvarez-Hamelin, J.I., et al. (2005) `K-Core Decomposition: A Tool for the
Visualization of Large Networks <http://arxiv.org/abs/cs/0504107>`_.
Examples
--------
If given an :class:`~turicreate.SGraph` ``g``, we can create
a :class:`~turicreate.kcore.KcoreModel` as follows:
>>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/email-Enron.txt.gz', format='snap')
>>> kc = turicreate.kcore.create(g)
We can obtain the ``core id`` corresponding to each vertex in the graph
``g`` using:
>>> kcore_id = kc['core_id'] # SFrame
We can add the new core id field to the original graph g using:
>>> g.vertices['core_id'] = kc['graph'].vertices['core_id']
Note that the task above does not require a join because the vertex
ordering is preserved through ``create()``.
See Also
--------
KcoreModel
""" |
from turicreate._cython.cy_server import QuietProgress
if not isinstance(graph, _SGraph):
raise TypeError('graph input must be a SGraph object.')
opts = {'graph': graph.__proxy__, 'kmin': kmin, 'kmax': kmax}
with QuietProgress(verbose):
params = _tc.extensions._toolkits.graph.kcore.create(opts)
return KcoreModel(params['model']) |
<SYSTEM_TASK:>
Raise an error if an option is not supported.
<END_TASK>
<USER_TASK:>
Description:
def raise_error_unsupported_categorical_option(option_name, option_value, layer_type, layer_name):
"""
Raise an error if an option is not supported.
""" |
raise RuntimeError("Unsupported option %s=%s in layer %s(%s)" % (option_name, option_value,
layer_type, layer_name)) |
<SYSTEM_TASK:>
Given a list of class labels and a list of output_features, validate the
<END_TASK>
<USER_TASK:>
Description:
def process_or_validate_classifier_output_features(
output_features, class_labels, supports_class_scores = True):
"""
Given a list of class labels and a list of output_features, validate the
list and return a valid version of output_features with all the correct
data type information included.
""" |
def raise_error(msg):
raise ValueError("Classifier error: %s" % msg)
class_labels = list(class_labels)
# First, we need to determine the type of the classes.
_int_types = _integer_types + (bool, _np.bool_, _np.int32, _np.int64)
if all(isinstance(cl, _int_types) for cl in class_labels):
output_class_type = datatypes.Int64()
elif all(isinstance(cl, _string_types) for cl in class_labels):
output_class_type = datatypes.String()
else:
raise ValueError('Class labels must be all of type int or all of type string.')
if output_features is None:
out = [("classLabel", output_class_type)]
if supports_class_scores:
out += [("classProbability", datatypes.Dictionary(output_class_type))]
elif isinstance(output_features, _string_types):
out = [(output_features, output_class_type)]
if supports_class_scores:
out += [("classProbability", datatypes.Dictionary(output_class_type))]
elif (isinstance(output_features, (list, tuple))
and all(isinstance(fn, _string_types) for fn in output_features)
and len(output_features) == 2):
if supports_class_scores:
out = [(output_features[0], output_class_type),
(output_features[1], datatypes.Dictionary(output_class_type))]
else:
raise ValueError("Classifier model (as trained) does not support output scores for classes.")
elif is_valid_feature_list(output_features):
output_features = [(k, datatypes._normalize_datatype(dt)) for k, dt in output_features]
if len(output_features) == 1 or not supports_class_scores:
if not output_features[0][1] == output_class_type:
raise ValueError("Type of output class feature does not match type of class labels.")
else:
# Make sure the first two output features specified give the output
# class field and the output class scores dictionary field
if (isinstance(output_features[0][1], datatypes.Dictionary)
and isinstance(output_features[1][1], output_class_type)):
output_features[0], output_features[1] = output_features[1], output_features[0]
if not isinstance(output_features[1][1], datatypes.Dictionary):
raise_error("Output features class scores should be dictionary type.")
if output_features[1][1].key_type != output_class_type:
raise_error("Class scores dictionary key type does not match type of class labels.")
if output_features[0][1] != output_class_type:
raise_error("Specified type of output class does not match type of class labels.")
# NOTE: We are intentionally allowing the case where additional fields are allowed
# beyond the original two features.
out = output_features
else:
raise_error("Form of output features not recognized")
return out |
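A sketch of the default path (output_features=None) described above, assuming this function and the surrounding `datatypes` module are importable from the same codebase:

# Integer class labels with no explicit output_features fall through to the
# default names, per the first branch above:
out = process_or_validate_classifier_output_features(None, class_labels=[0, 1, 2])
# -> [('classLabel', datatypes.Int64()),
#     ('classProbability', datatypes.Dictionary(datatypes.Int64()))]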
<SYSTEM_TASK:>
Outputs the last `num` elements that were appended either by `append` or
<END_TASK>
<USER_TASK:>
Description:
def read_history(self, num=10, segment=0):
"""
Outputs the last `num` elements that were appended either by `append` or
`append_multiple`.
Returns
-------
out : list
""" |
if num < 0:
num = 0
if segment < 0:
raise TypeError("segment must be >= 0")
return self._builder.read_history(num, segment) |
<SYSTEM_TASK:>
Returns the first element of 'property-sets' which is a subset of
<END_TASK>
<USER_TASK:>
Description:
def find_satisfied_condition(conditions, ps):
"""Returns the first element of 'property-sets' which is a subset of
'properties', or None if no such element exists.""" |
assert is_iterable_typed(conditions, property_set.PropertySet)
assert isinstance(ps, property_set.PropertySet)
for condition in conditions:
found_all = True
for i in condition.all():
if i.value:
found = i.value in ps.get(i.feature)
else:
# Handle value-less properties like '<architecture>' (compare with
# '<architecture>x86').
# If $(i) is a value-less property it should match default
# value of an optional property. See the first line in the
# example below:
#
# property set properties result
# <a> <b>foo <b>foo match
# <a> <b>foo <a>foo <b>foo no match
# <a>foo <b>foo <b>foo no match
# <a>foo <b>foo <a>foo <b>foo match
found = not ps.get(i.feature)
found_all = found_all and found
if found_all:
return condition
return None |
<SYSTEM_TASK:>
Adds a new flag setting with the specified values.
<END_TASK>
<USER_TASK:>
Description:
def __add_flag (rule_or_module, variable_name, condition, values):
""" Adds a new flag setting with the specified values.
Does no checking.
""" |
assert isinstance(rule_or_module, basestring)
assert isinstance(variable_name, basestring)
assert is_iterable_typed(condition, property_set.PropertySet)
assert is_iterable(values) and all(
isinstance(v, (basestring, type(None))) for v in values)
f = Flag(variable_name, values, condition, rule_or_module)
# Grab the name of the module
m = __re_first_segment.match (rule_or_module)
assert m
module = m.group(1)
__module_flags.setdefault(module, []).append(f)
__flags.setdefault(rule_or_module, []).append(f) |
<SYSTEM_TASK:>
If 'path' is relative, it is rooted at 'root'. Otherwise, it's unchanged.
<END_TASK>
<USER_TASK:>
Description:
def root (path, root):
""" If 'path' is relative, it is rooted at 'root'. Otherwise, it's unchanged.
""" |
if os.path.isabs (path):
return path
else:
return os.path.join (root, path) |
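Two illustrative calls (POSIX-style paths assumed):

# A relative path is re-rooted; an absolute path passes through unchanged.
print(root('src/main.cpp', '/home/user/project'))   # /home/user/project/src/main.cpp
print(root('/usr/include/stdio.h', '/home/user'))   # /usr/include/stdio.h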
<SYSTEM_TASK:>
Recursive version of GLOB. Builds the glob of files while
<END_TASK>
<USER_TASK:>
Description:
def glob_tree(roots, patterns, exclude_patterns=None):
"""Recursive version of GLOB. Builds the glob of files while
also searching in the subdirectories of the given roots. An
optional set of exclusion patterns will filter out the
matching entries from the result. The exclusions also apply
to the subdirectory scanning, such that directories that
match the exclusion patterns will not be searched.""" |
if not exclude_patterns:
exclude_patterns = []
result = glob(roots, patterns, exclude_patterns)
subdirs = [s for s in glob(roots, ["*"], exclude_patterns) if s != "." and s != ".." and os.path.isdir(s)]
if subdirs:
result.extend(glob_tree(subdirs, patterns, exclude_patterns))
return result |
<SYSTEM_TASK:>
Recursive version of GLOB which glob sall parent directories
<END_TASK>
<USER_TASK:>
Description:
def glob_in_parents(dir, patterns, upper_limit=None):
"""Recursive version of GLOB which glob sall parent directories
of dir until the first match is found. Returns an empty result if no match
is found""" |
assert(isinstance(dir, str))
assert(isinstance(patterns, list))
result = []
absolute_dir = os.path.join(os.getcwd(), dir)
absolute_dir = os.path.normpath(absolute_dir)
while absolute_dir:
new_dir = os.path.split(absolute_dir)[0]
if new_dir == absolute_dir:
break
result = glob([new_dir], patterns)
if result:
break
absolute_dir = new_dir
return result |
<SYSTEM_TASK:>
Recursively walks each thing in val, opening lists and dictionaries,
<END_TASK>
<USER_TASK:>
Description:
def _wrap_function_return(val):
"""
Recursively walks each thing in val, opening lists and dictionaries,
converting all occurrences of UnityGraphProxy to an SGraph,
UnitySFrameProxy to SFrame, and UnitySArrayProxy to SArray.
""" |
if type(val) is _UnityGraphProxy:
return _SGraph(_proxy = val)
elif type(val) is _UnitySFrameProxy:
return _SFrame(_proxy = val)
elif type(val) is _UnitySArrayProxy:
return _SArray(_proxy = val)
elif type(val) is _UnityModel:
# we need to cast it up to the appropriate type
uid = val.get_uid()
if uid in class_uid_to_class:
return class_uid_to_class[uid](_proxy=val)
else:
return val
elif type(val) is list:
return [_wrap_function_return(i) for i in val]
elif type(val) is dict:
return dict( (i, _wrap_function_return(val[i])) for i in val)
else:
return val |
<SYSTEM_TASK:>
Dispatches arguments to a toolkit function.
<END_TASK>
<USER_TASK:>
Description:
def _run_toolkit_function(fnname, arguments, args, kwargs):
"""
Dispatches arguments to a toolkit function.
Parameters
----------
fnname : string
The toolkit function to run
arguments : list[string]
The list of all the arguments the function takes.
args : list
The arguments that were passed
kwargs : dictionary
The keyword arguments that were passed
""" |
# scan for all the arguments in args
num_args_got = len(args) + len(kwargs)
num_args_required = len(arguments)
if num_args_got != num_args_required:
raise TypeError("Expecting " + str(num_args_required) + " arguments, got " + str(num_args_got))
## fill the dict first with the regular args
argument_dict = {}
for i in range(len(args)):
argument_dict[arguments[i]] = args[i]
# now fill with the kwargs.
for k in kwargs.keys():
if k in argument_dict:
raise TypeError("Got multiple values for keyword argument '" + k + "'")
argument_dict[k] = kwargs[k]
# unwrap it
with cython_context():
ret = _get_unity().run_toolkit(fnname, argument_dict)
# handle errors
if not ret[0]:
if len(ret[1]) > 0:
raise _ToolkitError(ret[1])
else:
raise _ToolkitError("Toolkit failed with unknown error")
ret = _wrap_function_return(ret[2])
if type(ret) is dict and 'return_value' in ret:
return ret['return_value']
else:
return ret |
<SYSTEM_TASK:>
Publishes all functions and classes registered in unity_server.
<END_TASK>
<USER_TASK:>
Description:
def _publish():
import copy
"""
Publishes all functions and classes registered in unity_server.
The functions and classes will appear in the module turicreate.extensions
""" |
unity = _get_unity()
fnlist = unity.list_toolkit_functions()
# Loop through all the functions and inject it into
# turicreate.extensions.[blah]
# Note that [blah] may be somemodule.somefunction
# and so the injection has to be
# turicreate.extensions.somemodule.somefunction
for fn in fnlist:
props = unity.describe_toolkit_function(fn)
# quit if there is nothing we can process
if 'arguments' not in props:
continue
arguments = props['arguments']
newfunc = _make_injected_function(fn, arguments)
newfunc.__doc__ = "Name: " + fn + "\nParameters: " + str(arguments) + "\n"
if 'documentation' in props:
newfunc.__doc__ += props['documentation'] + "\n"
newfunc.__dict__['__glmeta__'] = {'extension_name':fn}
modpath = fn.split('.')
# walk the module tree
mod = _thismodule
for path in modpath[:-1]:
try:
getattr(mod, path)
except:
_setattr_wrapper(mod, path, _types.ModuleType(name=path))
mod = getattr(mod, path)
_setattr_wrapper(mod, modpath[-1], newfunc)
# Repeat for classes
tkclasslist = unity.list_toolkit_classes()
for tkclass in tkclasslist:
m = unity.describe_toolkit_class(tkclass)
# of v2 type
if not ('functions' in m and 'get_properties' in m and 'set_properties' in m and 'uid' in m):
continue
# create a new class
if _version_info.major == 3:
new_class = _ToolkitClass.__dict__.copy()
del new_class['__dict__']
del new_class['__weakref__']
else:
new_class = copy.deepcopy(_ToolkitClass.__dict__)
new_class['__init__'] = _types.FunctionType(new_class['__init__'].__code__,
new_class['__init__'].__globals__,
name='__init__',
argdefs=(),
closure=())
# rewrite the init method to add the toolkit class name so it will
# default construct correctly
new_class['__init__'].tkclass_name = tkclass
newclass = _class_type(tkclass, (), new_class)
setattr(newclass, '__glmeta__', {'extension_name':tkclass})
class_uid_to_class[m['uid']] = newclass
modpath = tkclass.split('.')
# walk the module tree
mod = _thismodule
for path in modpath[:-1]:
try:
getattr(mod, path)
except:
_setattr_wrapper(mod, path, _types.ModuleType(name=path))
mod = getattr(mod, path)
_setattr_wrapper(mod, modpath[-1], newclass) |
<SYSTEM_TASK:>
Given a toolkit function name, return the argument list
<END_TASK>
<USER_TASK:>
Description:
def _get_argument_list_from_toolkit_function_name(fn):
"""
Given a toolkit function name, return the argument list
""" |
unity = _get_unity()
fnprops = unity.describe_toolkit_function(fn)
argnames = fnprops['arguments']
return argnames |
<SYSTEM_TASK:>
Print lines of input along with output.
<END_TASK>
<USER_TASK:>
Description:
def main():
"""
Print lines of input along with output.
""" |
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n') |
<SYSTEM_TASK:>
Returns a type checker for a message field of the specified types.
<END_TASK>
<USER_TASK:>
Description:
def GetTypeChecker(field):
"""Returns a type checker for a message field of the specified types.
Args:
field: FieldDescriptor object for this field.
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
""" |
if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field.type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
if SupportsOpenEnums(field):
# When open enums are supported, any int32 can be assigned.
return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
else:
return EnumValueChecker(field.enum_type)
return _VALUE_CHECKERS[field.cpp_type] |
<SYSTEM_TASK:>
Type check the provided value and return it.
<END_TASK>
<USER_TASK:>
Description:
def CheckValue(self, proposed_value):
"""Type check the provided value and return it.
The returned value might have been normalized to another type.
""" |
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
return proposed_value |
<SYSTEM_TASK:>
Escape a bytes string for use in an ascii protocol buffer.
<END_TASK>
<USER_TASK:>
Description:
def CEscape(text, as_utf8):
"""Escape a bytes string for use in an ascii protocol buffer.
text.encode('string_escape') does not seem to satisfy our needs as it
encodes unprintable characters using two-digit hex escapes whereas our
C++ unescaping function allows hex escapes to be any length. So,
"\0011".encode('string_escape') ends up being "\\x011", which will be
decoded in C++ as a single-character string with char code 0x11.
Args:
text: A byte string to be escaped
as_utf8: Specifies if result should be returned in UTF-8 encoding
Returns:
Escaped string
""" |
# PY3 hack: make Ord work for str and bytes:
# //platforms/networking/data uses unicode here, hence basestring.
Ord = ord if isinstance(text, six.string_types) else lambda x: x
if as_utf8:
return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text)
return ''.join(_cescape_byte_to_str[Ord(c)] for c in text) |
<SYSTEM_TASK:>
Unescape a text string with C-style escape sequences to UTF-8 bytes.
<END_TASK>
<USER_TASK:>
Description:
def CUnescape(text):
"""Unescape a text string with C-style escape sequences to UTF-8 bytes.""" |
def ReplaceHex(m):
# Only replace the match if the number of leading back slashes is odd. i.e.
# the slash itself is not escaped.
if len(m.group(1)) & 1:
return m.group(1) + 'x0' + m.group(2)
return m.group(0)
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
if str is bytes: # PY2
return result.decode('string_escape')
result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
return (result.encode('ascii') # Make it bytes to allow decode.
.decode('unicode_escape')
# Make it bytes again to return the proper type.
.encode('raw_unicode_escape')) |
<SYSTEM_TASK:>
Specifies that targets with suffix from 'suffixes' have the type 'type'.
<END_TASK>
<USER_TASK:>
Description:
def register_suffixes (suffixes, type):
""" Specifies that targets with suffix from 'suffixes' have the type 'type'.
If a different type is already specified for any of the suffixes, an error is raised.
""" |
assert is_iterable_typed(suffixes, basestring)
assert isinstance(type, basestring)
for s in suffixes:
if s in __suffixes_to_types:
old_type = __suffixes_to_types [s]
if old_type != type:
raise BaseException ('Attempting to specify type for suffix "%s"\nOld type: "%s", New type "%s"' % (s, old_type, type))
else:
__suffixes_to_types [s] = type |
<SYSTEM_TASK:>
Sets a scanner class that will be used for this 'type'.
<END_TASK>
<USER_TASK:>
Description:
def set_scanner (type, scanner):
""" Sets a scanner class that will be used for this 'type'.
""" |
if __debug__:
from .scanner import Scanner
assert isinstance(type, basestring)
assert issubclass(scanner, Scanner)
validate (type)
__types [type]['scanner'] = scanner |
<SYSTEM_TASK:>
Returns a scanner instance appropriate to 'type' and 'property_set'.
<END_TASK>
<USER_TASK:>
Description:
def get_scanner (type, prop_set):
""" Returns a scanner instance appropriate to 'type' and 'property_set'.
""" |
if __debug__:
from .property_set import PropertySet
assert isinstance(type, basestring)
assert isinstance(prop_set, PropertySet)
if registered (type):
scanner_type = __types [type]['scanner']
if scanner_type:
return scanner.get (scanner_type, prop_set.raw ())
pass
return None |
<SYSTEM_TASK:>
Returns type and all of its bases, in the order of their distance from type.
<END_TASK>
<USER_TASK:>
Description:
def all_bases (type):
""" Returns type and all of its bases, in the order of their distance from type.
""" |
assert isinstance(type, basestring)
result = []
while type:
result.append (type)
type = __types [type]['base']
return result |
<SYSTEM_TASK:>
Returns type and all classes that derive from it, in the order of their distance from type.
<END_TASK>
<USER_TASK:>
Description:
def all_derived (type):
""" Returns type and all classes that derive from it, in the order of their distance from type.
""" |
assert isinstance(type, basestring)
result = [type]
for d in __types [type]['derived']:
result.extend (all_derived (d))
return result |
<SYSTEM_TASK:>
Returns true if 'type' is 'base' or has 'base' as its direct or indirect base.
<END_TASK>
<USER_TASK:>
Description:
def is_derived (type, base):
""" Returns true if 'type' is 'base' or has 'base' as its direct or indirect base.
""" |
assert isinstance(type, basestring)
assert isinstance(base, basestring)
# TODO: this isn't very efficient, especially for bases close to type
if base in all_bases (type):
return True
else:
return False |
<SYSTEM_TASK:>
Same as is_derived. Should be removed.
<END_TASK>
<USER_TASK:>
Description:
def is_subtype (type, base):
""" Same as is_derived. Should be removed.
""" |
assert isinstance(type, basestring)
assert isinstance(base, basestring)
# TODO: remove this method
return is_derived (type, base) |
<SYSTEM_TASK:>
Returns suffix that should be used when generating target of 'type',
<END_TASK>
<USER_TASK:>
Description:
def generated_target_ps(is_suffix, type, prop_set):
""" Returns suffix that should be used when generating target of 'type',
with the specified properties. If no suffix was specified for
'type', returns the suffix for the base type, if any.
""" |
if __debug__:
from .property_set import PropertySet
assert isinstance(is_suffix, (int, bool))
assert isinstance(type, basestring)
assert isinstance(prop_set, PropertySet)
key = (is_suffix, type, prop_set)
v = __target_suffixes_cache.get(key, None)
if not v:
v = generated_target_ps_real(is_suffix, type, prop_set.raw())
__target_suffixes_cache [key] = v
return v |
<SYSTEM_TASK:>
Returns file type given it's name. If there are several dots in filename,
<END_TASK>
<USER_TASK:>
Description:
def type(filename):
""" Returns file type given it's name. If there are several dots in filename,
tries each suffix. E.g. for name of "file.so.1.2" suffixes "2", "1", and
"so" will be tried.
""" |
assert isinstance(filename, basestring)
while 1:
filename, suffix = os.path.splitext (filename)
if not suffix: return None
suffix = suffix[1:]
if suffix in __suffixes_to_types:
return __suffixes_to_types[suffix] |
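A hedged sketch of the suffix fallback, using register_suffixes from above (the type name 'SHARED_LIB' is only an example label):

# Hypothetical registration; in practice this usually happens via register().
register_suffixes(['so'], 'SHARED_LIB')

# "libfoo.so.1.2" tries the suffixes "2", then "1", then "so"; only "so" is
# registered, so the lookup resolves to 'SHARED_LIB'.
assert type('libfoo.so.1.2') == 'SHARED_LIB'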
<SYSTEM_TASK:>
Register the given type on the specified OSes, or on remaining OSes
<END_TASK>
<USER_TASK:>
Description:
def register_type (type, suffixes, base_type = None, os = []):
""" Register the given type on the specified OSes, or on remaining OSes
if os is not specified. This rule is injected into each of the type
modules for the sake of convenience.
""" |
assert isinstance(type, basestring)
assert is_iterable_typed(suffixes, basestring)
assert isinstance(base_type, basestring) or base_type is None
assert is_iterable_typed(os, basestring)
if registered (type):
return
if not os or os_name () in os:
register (type, suffixes, base_type) |
<SYSTEM_TASK:>
Pretty print a list to be readable.
<END_TASK>
<USER_TASK:>
Description:
def pretty_print_list(lst, name = 'features', repr_format=True):
""" Pretty print a list to be readable.
""" |
if not lst or len(lst) < 8:
if repr_format:
return lst.__repr__()
else:
return ', '.join(map(str, lst))
else:
topk = ', '.join(map(str, lst[:3]))
if repr_format:
lst_separator = "["
lst_end_separator = "]"
else:
lst_separator = ""
lst_end_separator = ""
return "{start}{topk}, ... {last}{end} (total {size} {name})".format(\
topk = topk, last = lst[-1], name = name, size = len(lst),
start = lst_separator, end = lst_end_separator) |
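Expected behaviour for short and long lists, following the two branches above:

# Short lists (fewer than 8 items) are shown in full.
print(pretty_print_list([1, 2, 3]))                  # [1, 2, 3]
# Longer lists show the first three items, the last item, and a count.
print(pretty_print_list(list(range(20)), name='items', repr_format=False))
# 0, 1, 2, ... 19 (total 20 items)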
<SYSTEM_TASK:>
Convert a flatten layer from keras to coreml.
<END_TASK>
<USER_TASK:>
Description:
def convert_flatten(builder, layer, input_names, output_names, keras_layer):
"""Convert a flatten layer from keras to coreml.
Parameters
----------
keras_layer: layer
A keras layer object.
builder: NeuralNetworkBuilder
A neural network builder object.
""" |
input_name, output_name = (input_names[0], output_names[0])
# blob_order == 0 if the input blob needs not be rearranged
# blob_order == 1 if the input blob needs to be rearranged
blob_order = 0
# keras_layer.input.shape has a "?" (Dimension[None]) at the front, so a 3D tensor
# with an unknown batch size appears as 4D
if len(keras_layer.input.shape) == 4:
blob_order = 1
builder.add_flatten(name=layer, mode=blob_order, input_name=input_name, output_name=output_name) |
<SYSTEM_TASK:>
Get float property from the DMatrix.
<END_TASK>
<USER_TASK:>
Description:
def get_float_info(self, field):
"""Get float property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of float information of the data
""" |
length = ctypes.c_ulong()
ret = ctypes.POINTER(ctypes.c_float)()
_check_call(_LIB.XGDMatrixGetFloatInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.float32) |
<SYSTEM_TASK:>
Get unsigned integer property from the DMatrix.
<END_TASK>
<USER_TASK:>
Description:
def get_uint_info(self, field):
"""Get unsigned integer property from the DMatrix.
Parameters
----------
field: str
The field name of the information
Returns
-------
info : array
a numpy array of unsigned integer information of the data
""" |
length = ctypes.c_ulong()
ret = ctypes.POINTER(ctypes.c_uint)()
_check_call(_LIB.XGDMatrixGetUIntInfo(self.handle,
c_str(field),
ctypes.byref(length),
ctypes.byref(ret)))
return ctypes2numpy(ret, length.value, np.uint32) |
<SYSTEM_TASK:>
Save DMatrix to an XGBoost buffer.
<END_TASK>
<USER_TASK:>
Description:
def save_binary(self, fname, silent=True):
"""Save DMatrix to an XGBoost buffer.
Parameters
----------
fname : string
Name of the output buffer file.
silent : bool (optional; default: True)
If set, the output is suppressed.
""" |
_check_call(_LIB.XGDMatrixSaveBinary(self.handle,
c_str(fname),
int(silent))) |
<SYSTEM_TASK:>
Get the number of rows in the DMatrix.
<END_TASK>
<USER_TASK:>
Description:
def num_row(self):
"""Get the number of rows in the DMatrix.
Returns
-------
number of rows : int
""" |
ret = ctypes.c_ulong()
_check_call(_LIB.XGDMatrixNumRow(self.handle,
ctypes.byref(ret)))
return ret.value |
<SYSTEM_TASK:>
Slice the DMatrix and return a new DMatrix that only contains `rindex`.
<END_TASK>
<USER_TASK:>
Description:
def slice(self, rindex):
"""Slice the DMatrix and return a new DMatrix that only contains `rindex`.
Parameters
----------
rindex : list
List of indices to be selected.
Returns
-------
res : DMatrix
A new DMatrix containing only selected indices.
""" |
res = DMatrix(None, feature_names=self.feature_names)
res.handle = ctypes.c_void_p()
_check_call(_LIB.XGDMatrixSliceDMatrix(self.handle,
c_array(ctypes.c_int, rindex),
len(rindex),
ctypes.byref(res.handle)))
return res |
<SYSTEM_TASK:>
Update for one iteration, with objective function calculated internally.
<END_TASK>
<USER_TASK:>
Description:
def update(self, dtrain, iteration, fobj=None):
"""
Update for one iteration, with objective function calculated internally.
Parameters
----------
dtrain : DMatrix
Training data.
iteration : int
Current iteration number.
fobj : function
Customized objective function.
""" |
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
if fobj is None:
_check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration, dtrain.handle))
else:
pred = self.predict(dtrain)
grad, hess = fobj(pred, dtrain)
self.boost(dtrain, grad, hess) |
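A custom objective passed as `fobj` is expected to return the first- and second-order gradients for the current predictions. A minimal sketch for squared error, shown only to illustrate the interface (`get_label` is the standard DMatrix accessor for the training labels):
import numpy as np

def squared_error_obj(pred, dtrain):
    # Gradient and hessian of 0.5 * (pred - label)^2 with respect to pred.
    label = dtrain.get_label()
    grad = pred - label
    hess = np.ones(len(pred))
    return grad, hess

# booster.update(dtrain, iteration=0, fobj=squared_error_obj)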
<SYSTEM_TASK:>
Boost the booster for one iteration, with customized gradient statistics.
<END_TASK>
<USER_TASK:>
Description:
def boost(self, dtrain, grad, hess):
"""
Boost the booster for one iteration, with customized gradient statistics.
Parameters
----------
dtrain : DMatrix
The training DMatrix.
grad : list
The first order of gradient.
hess : list
The second order of gradient.
""" |
if len(grad) != len(hess):
raise ValueError('grad / hess length mismatch: {} / {}'.format(len(grad), len(hess)))
if not isinstance(dtrain, DMatrix):
raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
self._validate_features(dtrain)
_check_call(_LIB.XGBoosterBoostOneIter(self.handle, dtrain.handle,
c_array(ctypes.c_float, grad),
c_array(ctypes.c_float, hess),
len(grad))) |
<SYSTEM_TASK:>
Evaluate a set of data.
<END_TASK>
<USER_TASK:>
Description:
def eval_set(self, evals, iteration=0, feval=None):
# pylint: disable=invalid-name
"""Evaluate a set of data.
Parameters
----------
evals : list of tuples (DMatrix, string)
List of items to be evaluated.
iteration : int
Current iteration.
feval : function
Custom evaluation function.
Returns
-------
result: str
Evaluation result string.
""" |
if feval is None:
for d in evals:
if not isinstance(d[0], DMatrix):
raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))
if not isinstance(d[1], STRING_TYPES):
raise TypeError('expected string, got {}'.format(type(d[1]).__name__))
self._validate_features(d[0])
dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
msg = ctypes.c_char_p()
_check_call(_LIB.XGBoosterEvalOneIter(self.handle, iteration,
dmats, evnames, len(evals),
ctypes.byref(msg)))
return msg.value
else:
res = '[%d]' % iteration
for dmat, evname in evals:
name, val = feval(self.predict(dmat), dmat)
res += '\t%s-%s:%f' % (evname, name, val)
return res |
<SYSTEM_TASK:>
Save the model to an in-memory buffer representation
<END_TASK>
<USER_TASK:>
Description:
def save_raw(self):
"""
Save the model to an in-memory buffer representation
Returns
-------
an in-memory buffer representation of the model
""" |
length = ctypes.c_ulong()
cptr = ctypes.POINTER(ctypes.c_char)()
_check_call(_LIB.XGBoosterGetModelRaw(self.handle,
ctypes.byref(length),
ctypes.byref(cptr)))
return ctypes2buffer(cptr, length.value) |
<SYSTEM_TASK:>
Dump model into a text file.
<END_TASK>
<USER_TASK:>
Description:
def dump_model(self, fout, fmap='', with_stats=False):
"""
Dump model into a text file.
Parameters
----------
fout : string
Output file name.
fmap : string, optional
Name of the file containing feature map names.
with_stats : bool (optional)
Controls whether the split statistics are output.
""" |
if isinstance(fout, STRING_TYPES):
fout = open(fout, 'w')
need_close = True
else:
need_close = False
ret = self.get_dump(fmap, with_stats)
for i in range(len(ret)):
fout.write('booster[{}]:\n'.format(i))
fout.write(ret[i])
if need_close:
fout.close() |
<SYSTEM_TASK:>
Returns the dump of the model as a list of strings.
<END_TASK>
<USER_TASK:>
Description:
def get_dump(self, fmap='', with_stats=False):
"""
Returns the dump of the model as a list of strings.
""" |
length = ctypes.c_ulong()
sarr = ctypes.POINTER(ctypes.c_char_p)()
if self.feature_names is not None and fmap == '':
flen = int(len(self.feature_names))
fname = from_pystr_to_cstr(self.feature_names)
if self.feature_types is None:
# use quantitative as default
# {'q': quantitative, 'i': indicator}
ftype = from_pystr_to_cstr(['q'] * flen)
else:
ftype = from_pystr_to_cstr(self.feature_types)
_check_call(_LIB.XGBoosterDumpModelWithFeatures(self.handle,
flen,
fname,
ftype,
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
else:
if fmap != '' and not os.path.exists(fmap):
raise ValueError("No such file: {0}".format(fmap))
_check_call(_LIB.XGBoosterDumpModel(self.handle,
c_str(fmap),
int(with_stats),
ctypes.byref(length),
ctypes.byref(sarr)))
res = from_cstr_to_pystr(sarr, length)
return res |
<SYSTEM_TASK:>
Get feature importance of each feature.
<END_TASK>
<USER_TASK:>
Description:
def get_fscore(self, fmap=''):
"""Get feature importance of each feature.
Parameters
----------
fmap: str (optional)
The name of feature map file
""" |
trees = self.get_dump(fmap)
fmap = {}
for tree in trees:
for line in tree.split('\n'):
arr = line.split('[')
if len(arr) == 1:
continue
fid = arr[1].split(']')[0]
fid = fid.split('<')[0]
if fid not in fmap:
fmap[fid] = 1
else:
fmap[fid] += 1
return fmap |
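The counting logic above relies on the text dump format, where each split line names the feature inside square brackets. An illustrative walk-through with a made-up dump line:
# How one dump line is reduced to a feature id:
line = '0:[f2<2.45] yes=1,no=2,missing=1'
arr = line.split('[')           # ['0:', 'f2<2.45] yes=1,no=2,missing=1']
fid = arr[1].split(']')[0]      # 'f2<2.45'
fid = fid.split('<')[0]         # 'f2'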
<SYSTEM_TASK:>
Matches all elements of 'list' against the 'pattern'
<END_TASK>
<USER_TASK:>
Description:
def transform (list, pattern, indices = [1]):
""" Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches.
""" |
result = []
for e in list:
m = re.match (pattern, e)
if m:
for i in indices:
result.append (m.group (i))
return result |
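For example, extracting the stem of every '.cpp' entry (an illustrative call against the function above):
>>> transform(['foo.cpp', 'bar.h', 'baz.cpp'], r'(.*)\.cpp$')
['foo', 'baz']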
<SYSTEM_TASK:>
Replaces occurrences of a match string in a given
<END_TASK>
<USER_TASK:>
Description:
def replace(s, pattern, replacement):
"""Replaces occurrences of a match string in a given
string and returns the new string. The match string
can be a regex expression.
Args:
s (str): the string to modify
pattern (str): the search expression
replacement (str): the string to replace each match with
""" |
# the replacement string may contain invalid backreferences (like \1 or \g)
# which will cause python's regex to blow up. Since this should emulate
# the jam version exactly and the jam version didn't support
# backreferences, this version shouldn't either. re.sub
# allows replacement to be a callable; this is being used
# to simply return the replacement string and avoid the hassle
# of worrying about backreferences within the string.
def _replacement(matchobj):
return replacement
return re.sub(pattern, _replacement, s) |
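For example, stripping a version suffix from a library name (an illustrative call against the function above):
>>> replace('libfoo.so.1.2', r'\.so\..*$', '.so')
'libfoo.so'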
<SYSTEM_TASK:>
Replaces occurrences of a match string in a given list of strings and returns
<END_TASK>
<USER_TASK:>
Description:
def replace_list(items, match, replacement):
"""Replaces occurrences of a match string in a given list of strings and returns
a list of new strings. The match string can be a regex expression.
Args:
items (list): the list of strings to modify.
match (str): the search expression.
replacement (str): the string to replace with.
""" |
return [replace(item, match, replacement) for item in items] |
<SYSTEM_TASK:>
Create a topic model from the given data set. A topic model assumes each
<END_TASK>
<USER_TASK:>
Description:
def create(dataset,
num_topics=10,
initial_topics=None,
alpha=None,
beta=.1,
num_iterations=10,
num_burnin=5,
associations=None,
verbose=False,
print_interval=10,
validation_set=None,
method='auto'):
"""
Create a topic model from the given data set. A topic model assumes each
document is a mixture of a set of topics, where for each topic some words
are more likely than others. This method learns such a topic model for the
given document collection.
Parameters
----------
dataset : SArray of type dict or SFrame with a single column of type dict
A bag of words representation of a document corpus.
Each element is a dictionary representing a single document, where
the keys are words and the values are the number of times that word
occurs in that document.
num_topics : int, optional
The number of topics to learn.
initial_topics : SFrame, optional
An SFrame with a column of unique words representing the vocabulary
and a column of dense vectors representing
probability of that word given each topic. When provided,
these values are used to initialize the algorithm.
alpha : float, optional
Hyperparameter that controls the diversity of topics in a document.
Smaller values encourage fewer topics per document.
Provided value must be positive. Default value is 50/num_topics.
beta : float, optional
Hyperparameter that controls the diversity of words in a topic.
Smaller values encourage fewer words per topic. Provided value
must be positive.
num_iterations : int, optional
The number of iterations to perform.
num_burnin : int, optional
The number of iterations to perform when inferring the topics for
documents at prediction time.
verbose : bool, optional
When True, print most probable words for each topic while printing
progress.
print_interval : int, optional
The number of iterations to wait between progress reports.
associations : SFrame, optional
An SFrame with two columns named "word" and "topic" containing words
and the topic id that the word should be associated with. These words
are not considered during learning.
validation_set : SArray of type dict or SFrame with a single column
A bag of words representation of a document corpus, similar to the
format required for `dataset`. This will be used to monitor model
performance during training. Each document in the provided validation
set is randomly split: the first portion is used estimate which topic
each document belongs to, and the second portion is used to estimate
the model's performance at predicting the unseen words in the test data.
method : {'cgs', 'alias'}, optional
The algorithm used for learning the model.
- *cgs:* Collapsed Gibbs sampling
- *alias:* AliasLDA method.
Returns
-------
out : TopicModel
A fitted topic model. This can be used with
:py:func:`~TopicModel.get_topics()` and
:py:func:`~TopicModel.predict()`. While fitting is in progress, several
metrics are shown, including:
+------------------+---------------------------------------------------+
| Field | Description |
+==================+===================================================+
| Elapsed Time | The number of elapsed seconds. |
+------------------+---------------------------------------------------+
| Tokens/second | The number of unique words processed per second |
+------------------+---------------------------------------------------+
| Est. Perplexity | An estimate of the model's ability to model the |
| | training data. See the documentation on evaluate. |
+------------------+---------------------------------------------------+
See Also
--------
TopicModel, TopicModel.get_topics, TopicModel.predict,
turicreate.SArray.dict_trim_by_keys, TopicModel.evaluate
References
----------
- `Wikipedia - Latent Dirichlet allocation
<http://en.wikipedia.org/wiki/Latent_Dirichlet_allocation>`_
- Alias method: Li, A. et al. (2014) `Reducing the Sampling Complexity of
Topic Models. <http://www.sravi.org/pubs/fastlda-kdd2014.pdf>`_.
KDD 2014.
Examples
--------
The following example includes an SArray of documents, where
each element represents a document in "bag of words" representation
-- a dictionary with word keys and whose values are the number of times
that word occurred in the document:
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nytimes')
Once in this form, it is straightforward to learn a topic model.
>>> m = turicreate.topic_model.create(docs)
It is also easy to create a new topic model from an old one -- whether
it was created using Turi Create or another package.
>>> m2 = turicreate.topic_model.create(docs, initial_topics=m['topics'])
To manually fix several words to always be assigned to a topic, use
the `associations` argument. The following will ensure that topic 0
has the most probability for each of the provided words:
>>> from turicreate import SFrame
>>> associations = SFrame({'word':['hurricane', 'wind', 'storm'],
'topic': [0, 0, 0]})
>>> m = turicreate.topic_model.create(docs,
associations=associations)
More advanced usage allows you to control aspects of the model and the
learning method.
>>> import turicreate as tc
>>> m = tc.topic_model.create(docs,
num_topics=20, # number of topics
num_iterations=10, # algorithm parameters
alpha=.01, beta=.1) # hyperparameters
To evaluate the model's ability to generalize, we can create a train/test
split where a portion of the words in each document are held out from
training.
>>> train, test = tc.text_analytics.random_split(docs, .8)
>>> m = tc.topic_model.create(train)
>>> results = m.evaluate(test)
>>> print results['perplexity']
""" |
dataset = _check_input(dataset)
_check_categorical_option_type("method", method, ['auto', 'cgs', 'alias'])
if method == 'cgs' or method == 'auto':
model_name = 'cgs_topic_model'
else:
model_name = 'alias_topic_model'
# If associations are provided, check they are in the proper format
if associations is None:
associations = _turicreate.SFrame({'word': [], 'topic': []})
if isinstance(associations, _turicreate.SFrame) and \
associations.num_rows() > 0:
assert set(associations.column_names()) == set(['word', 'topic']), \
"Provided associations must be an SFrame containing a word column\
and a topic column."
assert associations['word'].dtype == str, \
"Words must be strings."
assert associations['topic'].dtype == int, \
"Topic ids must be of int type."
if alpha is None:
alpha = float(50) / num_topics
if validation_set is not None:
_check_input(validation_set) # Must be a single column
if isinstance(validation_set, _turicreate.SFrame):
column_name = validation_set.column_names()[0]
validation_set = validation_set[column_name]
(validation_train, validation_test) = _random_split(validation_set)
else:
validation_train = _SArray()
validation_test = _SArray()
opts = {'model_name': model_name,
'data': dataset,
'num_topics': num_topics,
'num_iterations': num_iterations,
'print_interval': print_interval,
'alpha': alpha,
'beta': beta,
'num_burnin': num_burnin,
'associations': associations}
# Initialize the model with basic parameters
response = _turicreate.extensions._text.topicmodel_init(opts)
m = TopicModel(response['model'])
# If initial_topics provided, load it into the model
if isinstance(initial_topics, _turicreate.SFrame):
assert set(['vocabulary', 'topic_probabilities']) == \
set(initial_topics.column_names()), \
"The provided initial_topics does not have the proper format, \
e.g. wrong column names."
observed_topics = initial_topics['topic_probabilities'].apply(lambda x: len(x))
assert all(observed_topics == num_topics), \
"Provided num_topics value does not match the number of provided initial_topics."
# Rough estimate of total number of words
weight = len(dataset) * 1000
opts = {'model': m.__proxy__,
'topics': initial_topics['topic_probabilities'],
'vocabulary': initial_topics['vocabulary'],
'weight': weight}
response = _turicreate.extensions._text.topicmodel_set_topics(opts)
m = TopicModel(response['model'])
# Train the model on the given data set and retrieve predictions
opts = {'model': m.__proxy__,
'data': dataset,
'verbose': verbose,
'validation_train': validation_train,
'validation_test': validation_test}
response = _turicreate.extensions._text.topicmodel_train(opts)
m = TopicModel(response['model'])
return m |
<SYSTEM_TASK:>
Compute the perplexity of a set of test documents given a set
<END_TASK>
<USER_TASK:>
Description:
def perplexity(test_data, predictions, topics, vocabulary):
"""
Compute the perplexity of a set of test documents given a set
of predicted topics.
Let theta be the matrix of document-topic probabilities, where
theta_ik = p(topic k | document i). Let Phi be the matrix of term-topic
probabilities, where phi_jk = p(word j | topic k).
Then for each word in each document, we compute for a given word w
and document d
.. math::
p(word | \theta[doc_id,:], \phi[word_id,:]) =
\sum_k \theta[doc_id, k] * \phi[word_id, k]
We compute the log-likelihood to be:
.. math::
l(D) = \sum_{i \in D} \sum_{j \in D_i} count_{i,j} * \log Pr(word_{i,j} | \theta, \phi)
and perplexity to be
.. math::
\exp \{ - l(D) / \sum_i \sum_j count_{i,j} \}
Parameters
----------
test_data : SArray of type dict or SFrame with a single column of type dict
Documents in bag-of-words format.
predictions : SArray
An SArray of vector type, where each vector contains estimates of the
probability that this document belongs to each of the topics.
This must have the same size as test_data; otherwise an exception
occurs. This can be the output of
:py:func:`~turicreate.topic_model.TopicModel.predict`, for example.
topics : SFrame
An SFrame containing two columns: 'vocabulary' and 'topic_probabilities'.
The value returned by m['topics'] is a valid input for this argument,
where m is a trained :py:class:`~turicreate.topic_model.TopicModel`.
vocabulary : SArray
An SArray of words to use. All words in test_data that are not in this
vocabulary will be ignored.
Notes
-----
For more details, see equations 13-16 of [PattersonTeh2013].
References
----------
.. [PERP] `Wikipedia - perplexity <http://en.wikipedia.org/wiki/Perplexity>`_
.. [PattersonTeh2013] Patterson, Teh. `"Stochastic Gradient Riemannian
Langevin Dynamics on the Probability Simplex"
<http://www.stats.ox.ac.uk/~teh/research/compstats/PatTeh2013a.pdf>`_
NIPS, 2013.
Examples
--------
>>> from turicreate import topic_model
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = topic_model.create(train_data)
>>> pred = m.predict(train_data)
>>> topics = m['topics']
>>> p = topic_model.perplexity(test_data, pred,
topics['topic_probabilities'],
topics['vocabulary'])
>>> p
1720.7 # lower values are better
""" |
test_data = _check_input(test_data)
assert isinstance(predictions, _SArray), \
"Predictions must be an SArray of vector type."
assert predictions.dtype == _array.array, \
"Predictions must be probabilities. Try using m.predict() with " + \
"output_type='probability'."
opts = {'test_data': test_data,
'predictions': predictions,
'topics': topics,
'vocabulary': vocabulary}
response = _turicreate.extensions._text.topicmodel_get_perplexity(opts)
return response['perplexity'] |
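The formula above can be checked by hand on a toy example. A minimal numpy sketch (not the toolkit implementation) with one document, two words, and two topics:
import numpy as np

theta = np.array([[0.6, 0.4]])        # p(topic k | doc i)
phi = np.array([[0.7, 0.1],           # phi[j, k] = p(word j | topic k)
                [0.3, 0.9]])
counts = np.array([[2, 1]])           # word counts per document
probs = theta.dot(phi.T)              # p(word j | doc i) = sum_k theta[i, k] * phi[j, k]
loglik = np.sum(counts * np.log(probs))
toy_perplexity = np.exp(-loglik / counts.sum())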
<SYSTEM_TASK:>
Get the words associated with a given topic. The score column is the
<END_TASK>
<USER_TASK:>
Description:
def get_topics(self, topic_ids=None, num_words=5, cdf_cutoff=1.0,
output_type='topic_probabilities'):
"""
Get the words associated with a given topic. The score column is the
probability of choosing that word given that you have chosen a
particular topic.
Parameters
----------
topic_ids : list of int, optional
The topics to retrieve words. Topic ids are zero-based.
Throws an error if greater than or equal to m['num_topics'], or
if the requested topic name is not present.
num_words : int, optional
The number of words to show.
cdf_cutoff : float, optional
Allows one to only show the most probable words whose cumulative
probability is below this cutoff. For example if there exist
three words where
.. math::
p(word_1 | topic_k) = .1
p(word_2 | topic_k) = .2
p(word_3 | topic_k) = .05
then setting :math:`cdf_{cutoff}=.3` would return only
:math:`word_1` and :math:`word_2` since
:math:`p(word_1 | topic_k) + p(word_2 | topic_k) <= cdf_{cutoff}`
output_type : {'topic_probabilities' | 'topic_words'}, optional
Determine the type of desired output. See below.
Returns
-------
out : SFrame
If output_type is 'topic_probabilities', then the returned value is
an SFrame with a column of words ranked by a column of scores for
each topic. Otherwise, the returned value is a SArray where
each element is a list of the most probable words for each topic.
Examples
--------
Get the highest ranked words for all topics.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs,
num_iterations=50)
>>> m.get_topics()
+-------+----------+-----------------+
| topic | word | score |
+-------+----------+-----------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 1 | function | 0.0482834508265 |
| 1 | input | 0.0456270024091 |
| 1 | point | 0.0302662839454 |
| 1 | result | 0.0239474934631 |
| 1 | problem | 0.0231750116011 |
| ... | ... | ... |
+-------+----------+-----------------+
Get the highest ranked words for topics 0 and 1 and show 15 words per
topic.
>>> m.get_topics([0, 1], num_words=15)
+-------+----------+------------------+
| topic | word | score |
+-------+----------+------------------+
| 0 | cell | 0.028974400831 |
| 0 | input | 0.0259470208503 |
| 0 | image | 0.0215721599763 |
| 0 | visual | 0.0173635081992 |
| 0 | object | 0.0172447874156 |
| 0 | response | 0.0139740298286 |
| 0 | layer | 0.0122585145062 |
| 0 | features | 0.0115343177265 |
| 0 | feature | 0.0103530459301 |
| 0 | spatial | 0.00823387994361 |
| ... | ... | ... |
+-------+----------+------------------+
If one wants to instead just get the top words per topic, one may
change the format of the output as follows.
>>> topics = m.get_topics(output_type='topic_words')
dtype: list
Rows: 10
[['cell', 'image', 'input', 'object', 'visual'],
['algorithm', 'data', 'learning', 'method', 'set'],
['function', 'input', 'point', 'problem', 'result'],
['model', 'output', 'pattern', 'set', 'unit'],
['action', 'learning', 'net', 'problem', 'system'],
['error', 'function', 'network', 'parameter', 'weight'],
['information', 'level', 'neural', 'threshold', 'weight'],
['control', 'field', 'model', 'network', 'neuron'],
['hidden', 'layer', 'system', 'training', 'vector'],
['component', 'distribution', 'local', 'model', 'optimal']]
""" |
_check_categorical_option_type('output_type', output_type,
['topic_probabilities', 'topic_words'])
if topic_ids is None:
topic_ids = list(range(self._get('num_topics')))
assert isinstance(topic_ids, list), \
"The provided topic_ids is not a list."
if any([type(x) == str for x in topic_ids]):
raise ValueError("Only integer topic_ids can be used at this point in time.")
if not all([x >= 0 and x < self.num_topics for x in topic_ids]):
raise ValueError("Topic id values must be non-negative and less than the " + \
"number of topics used to fit the model.")
opts = {'model': self.__proxy__,
'topic_ids': topic_ids,
'num_words': num_words,
'cdf_cutoff': cdf_cutoff}
response = _turicreate.extensions._text.topicmodel_get_topic(opts)
ret = response['top_words']
def sort_wordlist_by_prob(z):
words = sorted(z.items(), key=_operator.itemgetter(1), reverse=True)
return [word for (word, prob) in words]
if output_type != 'topic_probabilities':
ret = ret.groupby('topic',
{'word': _turicreate.aggregate.CONCAT('word', 'score')})
words = ret.sort('topic')['word'].apply(sort_wordlist_by_prob)
ret = _SFrame({'words': words})
return ret |
<SYSTEM_TASK:>
Use the model to predict topics for each document. The provided
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset, output_type='assignment', num_burnin=None):
"""
Use the model to predict topics for each document. The provided
`dataset` should be an SArray object where each element is a dict
representing a single document in bag-of-words format, where keys
are words and values are their corresponding counts. If `dataset` is
an SFrame, then it must contain a single column of dict type.
The current implementation will make inferences about each document
given its estimates of the topics learned when creating the model.
This is done via Gibbs sampling.
Parameters
----------
dataset : SArray, SFrame of type dict
A set of documents to use for making predictions.
output_type : str, optional
The type of output desired. This can either be
- assignment: the returned values are integers in [0, num_topics)
- probability: each returned prediction is a vector with length
num_topics, where element k represents the probability that
document belongs to topic k.
num_burnin : int, optional
The number of iterations of Gibbs sampling to perform when
inferring the topics for documents at prediction time.
If provided this will override the burnin value set during
training.
Returns
-------
out : SArray
See Also
--------
evaluate
Examples
--------
Make predictions about which topic each document belongs to.
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> m = turicreate.topic_model.create(docs)
>>> pred = m.predict(docs)
If one is interested in the probability of each topic
>>> pred = m.predict(docs, output_type='probability')
Notes
-----
For each unique word w in a document d, we sample an assignment to
topic k with probability proportional to
.. math::
p(z_{dw} = k) \propto (n_{d,k} + \\alpha) * \Phi_{w,k}
where
- :math:`W` is the size of the vocabulary,
- :math:`n_{d,k}` is the number of other times we have assigned a word in
document d to topic :math:`k`,
- :math:`\Phi_{w,k}` is the probability under the model of choosing word
:math:`w` given the word is of topic :math:`k`. This is the matrix
returned by calling `m['topics']`.
This represents a collapsed Gibbs sampler for the document assignments
while we keep the topics learned during training fixed.
This process is done in parallel across all documents, five times per
document.
""" |
dataset = _check_input(dataset)
if num_burnin is None:
num_burnin = self.num_burnin
opts = {'model': self.__proxy__,
'data': dataset,
'num_burnin': num_burnin}
response = _turicreate.extensions._text.topicmodel_predict(opts)
preds = response['predictions']
# Get most likely topic if probabilities are not requested
if output_type not in ['probability', 'probabilities', 'prob']:
# equivalent to numpy.argmax(x)
preds = preds.apply(lambda x: max(_izip(x, _xrange(len(x))))[1])
return preds |
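The argmax trick used above works on plain lists without numpy: pairing each probability with its index and taking the max returns the index of the largest value. Illustrative only:
x = [0.1, 0.7, 0.2]
most_likely_topic = max(zip(x, range(len(x))))[1]   # -> 1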
<SYSTEM_TASK:>
Estimate the model's ability to predict new data. Imagine you have a
<END_TASK>
<USER_TASK:>
Description:
def evaluate(self, train_data, test_data=None, metric='perplexity'):
"""
Estimate the model's ability to predict new data. Imagine you have a
corpus of books. One common approach to evaluating topic models is to
train on the first half of all of the books and see how well the model
predicts the second half of each book.
This method returns a metric called perplexity, which is related to the
likelihood of observing these words under the given model. See
:py:func:`~turicreate.topic_model.perplexity` for more details.
The provided `train_data` and `test_data` must have the same length,
i.e., both data sets must have the same number of documents; the model
will use train_data to estimate which topic the document belongs to, and
this is used to estimate the model's performance at predicting the
unseen words in the test data.
See :py:func:`~turicreate.topic_model.TopicModel.predict` for details
on how these predictions are made, and see
:py:func:`~turicreate.text_analytics.random_split` for a helper function
that can be used for making train/test splits.
Parameters
----------
train_data : SArray or SFrame
A set of documents to predict topics for.
test_data : SArray or SFrame, optional
A set of documents to evaluate performance on.
By default this will set to be the same as train_data.
metric : str
The chosen metric to use for evaluating the topic model.
Currently only 'perplexity' is supported.
Returns
-------
out : dict
The set of estimated evaluation metrics.
See Also
--------
predict, turicreate.toolkits.text_analytics.random_split
Examples
--------
>>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text')
>>> train_data, test_data = turicreate.text_analytics.random_split(docs)
>>> m = turicreate.topic_model.create(train_data)
>>> m.evaluate(train_data, test_data)
{'perplexity': 2467.530370396021}
""" |
train_data = _check_input(train_data)
if test_data is None:
test_data = train_data
else:
test_data = _check_input(test_data)
predictions = self.predict(train_data, output_type='probability')
topics = self.topics
ret = {}
ret['perplexity'] = perplexity(test_data,
predictions,
topics['topic_probabilities'],
topics['vocabulary'])
return ret |
<SYSTEM_TASK:>
Performs some sanity checks on the SFrame provided as input to
<END_TASK>
<USER_TASK:>
Description:
def _raise_error_if_not_drawing_classifier_input_sframe(
dataset, feature, target):
"""
Performs some sanity checks on the SFrame provided as input to
`turicreate.drawing_classifier.create` and raises a ToolkitError
if something in the dataset is missing or wrong.
""" |
from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe
_raise_error_if_not_sframe(dataset)
if feature not in dataset.column_names():
raise _ToolkitError("Feature column '%s' does not exist" % feature)
if target not in dataset.column_names():
raise _ToolkitError("Target column '%s' does not exist" % target)
if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list):
raise _ToolkitError("Feature column must contain images"
+ " or stroke-based drawings encoded as lists of strokes"
+ " where each stroke is a list of points and"
+ " each point is stored as a dictionary")
if dataset[target].dtype != int and dataset[target].dtype != str:
raise _ToolkitError("Target column contains " + str(dataset[target].dtype)
+ " but it must contain strings or integers to represent"
+ " labels for drawings.")
if len(dataset) == 0:
raise _ToolkitError("Input Dataset is empty!") |
<SYSTEM_TASK:>
Predict with probabilities. The core prediction part that both
<END_TASK>
<USER_TASK:>
Description:
def _predict_with_probabilities(self, input_dataset, batch_size=None,
verbose=True):
"""
Predict with probabilities. The core prediction part that both
`evaluate` and `predict` share.
Returns an SFrame with two columns, self.target, and "probability".
The column with column name, self.target, contains the predictions made
by the model for the provided dataset.
The "probabilities" column contains the probabilities for each class
that the model predicted for the data provided to the function.
""" |
from .._mxnet import _mxnet_utils
import mxnet as _mx
from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter
is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)
dataset = _extensions._drawing_classifier_prepare_data(
input_dataset, self.feature) if is_stroke_input else input_dataset
batch_size = self.batch_size if batch_size is None else batch_size
loader = _SFrameClassifierIter(dataset, batch_size,
class_to_index=self._class_to_index,
feature_column=self.feature,
target_column=self.target,
load_labels=False,
shuffle=False,
iterations=1)
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
index = 0
last_time = 0
done = False
from turicreate import SArrayBuilder
from array import array
classes = self.classes
all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))
all_probabilities_builder = SArrayBuilder(dtype=array)
for batch in loader:
if batch.pad is not None:
size = batch_size - batch.pad
batch_data = _mx.nd.slice_axis(batch.data[0],
axis=0, begin=0, end=size)
else:
batch_data = batch.data[0]
size = batch_size
num_devices = min(batch_data.shape[0], len(ctx))
split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)
for data in split_data:
z = self._model(data).asnumpy()
predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))
split_length = z.shape[0]
all_predicted_builder.append_multiple(predicted)
all_probabilities_builder.append_multiple(z.tolist())
index += split_length
if index == dataset_size - 1:
done = True
cur_time = _time.time()
# Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5
and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n = index + 1,
max_n = dataset_size,
width = len(str(dataset_size))))
last_time = cur_time
return (_tc.SFrame({self.target: all_predicted_builder.close(),
'probability': all_probabilities_builder.close()})) |
<SYSTEM_TASK:>
Predict on an SFrame or SArray of drawings, or on a single drawing.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, data, output_type='class', batch_size=None, verbose=True):
"""
Predict on an SFrame or SArray of drawings, or on a single drawing.
Parameters
----------
data : SFrame | SArray | tc.Image | list
The drawing(s) on which to perform drawing classification.
If dataset is an SFrame, it must have a column with the same name
as the feature column during training. Additional columns are
ignored.
If the data is a single drawing, it can be either of type tc.Image,
in which case it is a bitmap-based drawing input,
or of type list, in which case it is a stroke-based drawing input.
output_type : {'probability', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. Label ordering is dictated by the ``classes``
member variable.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
verbose : bool, optional
If True, prints prediction progress.
Returns
-------
out : SArray
An SArray with model predictions. Each element corresponds to
a drawing and contains a single value corresponding to the
predicted label. Each prediction will have type integer or string
depending on the type of the classes the model was trained on.
If `data` is a single drawing, the return value will be a single
prediction.
See Also
--------
evaluate
Examples
--------
.. sourcecode:: python
# Make predictions
>>> pred = model.predict(data)
# Print predictions, for a better overview
>>> print(pred)
dtype: int
Rows: 10
[3, 4, 3, 3, 4, 5, 8, 8, 8, 4]
""" |
_tkutl._check_categorical_option_type("output_type", output_type,
["probability", "class", "probability_vector"])
if isinstance(data, _tc.SArray):
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: data
}),
batch_size,
verbose
)
elif isinstance(data, _tc.SFrame):
predicted = self._predict_with_probabilities(data, batch_size, verbose)
else:
# single input
predicted = self._predict_with_probabilities(
_tc.SFrame({
self.feature: [data]
}),
batch_size,
verbose
)
if output_type == "class":
return predicted[self.target]
elif output_type == "probability":
_class_to_index = self._class_to_index
target = self.target
return predicted.apply(
lambda row: row["probability"][_class_to_index[row[target]]])
else:
assert (output_type == "probability_vector")
return predicted["probability"] |
<SYSTEM_TASK:>
Return an SFrame containing a bag of words representation of each column.
<END_TASK>
<USER_TASK:>
Description:
def _BOW_FEATURE_EXTRACTOR(sf, target=None):
"""
Return an SFrame containing a bag of words representation of each column.
""" |
if isinstance(sf, dict):
out = _tc.SArray([sf]).unpack('')
elif isinstance(sf, _tc.SFrame):
out = sf.__copy__()
else:
raise ValueError("Unrecognized input to feature extractor.")
for f in _get_str_columns(out):
if target != f:
out[f] = _tc.text_analytics.count_words(out[f])
return out |
<SYSTEM_TASK:>
Returns a list of names of columns that are string type.
<END_TASK>
<USER_TASK:>
Description:
def _get_str_columns(sf):
"""
Returns a list of names of columns that are string type.
""" |
return [name for name in sf.column_names() if sf[name].dtype == str] |
<SYSTEM_TASK:>
Return predictions for ``dataset``, using the trained model.
<END_TASK>
<USER_TASK:>
Description:
def predict(self, dataset, output_type='class'):
"""
Return predictions for ``dataset``, using the trained model.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
Returns
-------
out : SArray
An SArray with model predictions.
See Also
----------
create, evaluate, classify
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> m.predict(dataset)
""" |
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.predict(f(dataset, target), output_type=output_type) |
<SYSTEM_TASK:>
Return a classification, for each example in the ``dataset``, using the
<END_TASK>
<USER_TASK:>
Description:
def classify(self, dataset):
"""
Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as both class
labels and probabilities that the predicted value is the
associated label.
Parameters
----------
dataset : SFrame
dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and probabilities.
See Also
----------
create, evaluate, predict
Examples
--------
>>> import turicreate as tc
>>> dataset = tc.SFrame({'rating': [1, 5], 'text': ['hate it', 'love it']})
>>> m = tc.text_classifier.create(dataset, 'rating', features=['text'])
>>> output = m.classify(dataset)
""" |
m = self.__proxy__['classifier']
target = self.__proxy__['target']
f = _BOW_FEATURE_EXTRACTOR
return m.classify(f(dataset, target)) |
<SYSTEM_TASK:>
Takes an SVM regression model and produces a starting spec using the parts
<END_TASK>
<USER_TASK:>
Description:
def _generate_base_svm_regression_spec(model):
"""
Takes an SVM regression model and produces a starting spec using the parts
that are shared between all SVMs.
""" |
if not(_HAS_SKLEARN):
raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.')
spec = _Model_pb2.Model()
spec.specificationVersion = SPECIFICATION_VERSION
svm = spec.supportVectorRegressor
_set_kernel(model, svm)
svm.rho = -model.intercept_[0]
for i in range(len(model._dual_coef_)):
for cur_alpha in model._dual_coef_[i]:
svm.coefficients.alpha.append(cur_alpha)
for cur_src_vector in model.support_vectors_:
cur_dest_vector = svm.denseSupportVectors.vectors.add()
for i in cur_src_vector:
cur_dest_vector.values.append(i)
return spec |
<SYSTEM_TASK:>
Verify that the given extension handle is valid.
<END_TASK>
<USER_TASK:>
Description:
def _VerifyExtensionHandle(message, extension_handle):
"""Verify that the given extension handle is valid.""" |
if not isinstance(extension_handle, _FieldDescriptor):
raise KeyError('HasExtension() expects an extension handle, got: %s' %
extension_handle)
if not extension_handle.is_extension:
raise KeyError('"%s" is not an extension.' % extension_handle.full_name)
if not extension_handle.containing_type:
raise KeyError('"%s" is missing a containing_type.'
% extension_handle.full_name)
if extension_handle.containing_type is not message.DESCRIPTOR:
raise KeyError('Extension "%s" extends message type "%s", but this '
'message is of type "%s".' %
(extension_handle.full_name,
extension_handle.containing_type.full_name,
message.DESCRIPTOR.full_name)) |
<SYSTEM_TASK:>
Sets class-level attributes for all enum fields defined in this message.
<END_TASK>
<USER_TASK:>
Description:
def _AddEnumValues(descriptor, cls):
"""Sets class-level attributes for all enum fields defined in this message.
Also exports a class-level object that can name enum values.
Args:
descriptor: Descriptor object for this message type.
cls: Class we're constructing for this message type.
""" |
for enum_type in descriptor.enum_types:
setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
for enum_value in enum_type.values:
setattr(cls, enum_value.name, enum_value.number) |
<SYSTEM_TASK:>
Returns a function which returns a default value for a field.
<END_TASK>
<USER_TASK:>
Description:
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
""" |
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# We can't look at _concrete_class yet since it might not have
# been set. (Depends on order in which we initialize the classes).
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
# _concrete_class may not yet be initialized.
message_type = field.message_type
def MakeSubMessageDefault(message):
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return field.default_value
return MakeScalarDefault |
<SYSTEM_TASK:>
Re-raise the currently-handled TypeError with the field name added.
<END_TASK>
<USER_TASK:>
Description:
def _ReraiseTypeErrorWithFieldName(message_name, field_name):
"""Re-raise the currently-handled TypeError with the field name added.""" |
exc = sys.exc_info()[1]
if len(exc.args) == 1 and type(exc) is TypeError:
# simple TypeError; add field name to exception message
exc = TypeError('%s for field %s.%s' % (str(exc), message_name, field_name))
# re-raise possibly-amended exception with original traceback:
six.reraise(type(exc), exc, sys.exc_info()[2]) |
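The same amend-and-reraise pattern can be written without six on Python 3; a standalone sketch (not the protobuf implementation):
import sys

def reraise_with_field_name(message_name, field_name):
    exc = sys.exc_info()[1]
    if len(exc.args) == 1 and type(exc) is TypeError:
        exc = TypeError('%s for field %s.%s' % (exc, message_name, field_name))
    # re-raise the possibly-amended exception with the original traceback
    raise exc.with_traceback(sys.exc_info()[2])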
<SYSTEM_TASK:>
Returns a field descriptor by field name.
<END_TASK>
<USER_TASK:>
Description:
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
""" |
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message %s has no "%s" field.' %
(message_descriptor.name, field_name)) |
<SYSTEM_TASK:>
Adds a public property for a nonrepeated, scalar protocol message field.
<END_TASK>
<USER_TASK:>
Description:
def _AddPropertiesForNonRepeatedScalarField(field, cls):
"""Adds a public property for a nonrepeated, scalar protocol message field.
Clients can use this property to get and directly set the value of the field.
Note that when the client sets the value of a field by using this property,
all necessary "has" bits are set as a side-effect, and we also perform
type-checking.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
""" |
proto_field_name = field.name
property_name = _PropertyName(proto_field_name)
type_checker = type_checkers.GetTypeChecker(field)
default_value = field.default_value
valid_values = set()
is_proto3 = field.containing_type.syntax == "proto3"
def getter(self):
# TODO(protobuf-team): This may be broken since there may not be
# default_value. Combine with has_default_value somehow.
return self._fields.get(field, default_value)
getter.__module__ = None
getter.__doc__ = 'Getter for %s.' % proto_field_name
clear_when_set_to_default = is_proto3 and not field.containing_oneof
def field_setter(self, new_value):
# pylint: disable=protected-access
# Testing the value for truthiness captures all of the proto3 defaults
# (0, 0.0, enum 0, and False).
new_value = type_checker.CheckValue(new_value)
if clear_when_set_to_default and not new_value:
self._fields.pop(field, None)
else:
self._fields[field] = new_value
# Check _cached_byte_size_dirty inline to improve performance, since scalar
# setters are called frequently.
if not self._cached_byte_size_dirty:
self._Modified()
if field.containing_oneof:
def setter(self, new_value):
field_setter(self, new_value)
self._UpdateOneofState(field)
else:
setter = field_setter
setter.__module__ = None
setter.__doc__ = 'Setter for %s.' % proto_field_name
# Add a property to encapsulate the getter/setter.
doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
setattr(cls, property_name, property(getter, setter, doc=doc)) |
<SYSTEM_TASK:>
Unpacks Any message and returns the unpacked message.
<END_TASK>
<USER_TASK:>
Description:
def _InternalUnpackAny(msg):
"""Unpacks Any message and returns the unpacked message.
This internal method is different from public Any Unpack method which takes
the target message as an argument. The _InternalUnpackAny method does not have
the target message type and needs to find the message type in the descriptor pool.
Args:
msg: An Any message to be unpacked.
Returns:
The unpacked message.
""" |
# TODO(amauryfa): Don't use the factory of generated messages.
# To make Any work with custom factories, use the message factory of the
# parent message.
# pylint: disable=g-import-not-at-top
from google.protobuf import symbol_database
factory = symbol_database.Default()
type_url = msg.type_url
if not type_url:
return None
# TODO(haberman): For now we just strip the hostname. Better logic will be
# required.
type_name = type_url.split('/')[-1]
descriptor = factory.pool.FindMessageTypeByName(type_name)
if descriptor is None:
return None
message_class = factory.GetPrototype(descriptor)
message = message_class()
message.ParseFromString(msg.value)
return message |
<SYSTEM_TASK:>
Returns the number of bytes needed to serialize a non-repeated element.
<END_TASK>
<USER_TASK:>
Description:
def _BytesForNonRepeatedElement(value, field_number, field_type):
"""Returns the number of bytes needed to serialize a non-repeated element.
The returned byte count includes space for tag information and any
other additional space associated with serializing value.
Args:
value: Value we're serializing.
field_number: Field number of this value. (Since the field number
is stored as part of a varint-encoded tag, this has an impact
on the total bytes required to serialize the value).
field_type: The type of the field. One of the TYPE_* constants
within FieldDescriptor.
""" |
try:
fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
return fn(field_number, value)
except KeyError:
raise message_mod.EncodeError('Unrecognized field type: %d' % field_type) |
<SYSTEM_TASK:>
Adds the IsInitialized and FindInitializationError methods to the
<END_TASK>
<USER_TASK:>
Description:
def _AddIsInitializedMethod(message_descriptor, cls):
"""Adds the IsInitialized and FindInitializationError methods to the
protocol message class.""" |
required_fields = [field for field in message_descriptor.fields
if field.label == _FieldDescriptor.LABEL_REQUIRED]
def IsInitialized(self, errors=None):
"""Checks if all required fields of a message are set.
Args:
errors: A list which, if provided, will be populated with the field
paths of all missing required fields.
Returns:
True iff the specified message has all required fields set.
"""
# Performance is critical so we avoid HasField() and ListFields().
for field in required_fields:
if (field not in self._fields or
(field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
not self._fields[field]._is_present_in_parent)):
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
for field, value in list(self._fields.items()): # dict can change size!
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.label == _FieldDescriptor.LABEL_REPEATED:
if (field.message_type.has_options and
field.message_type.GetOptions().map_entry):
continue
for element in value:
if not element.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
elif value._is_present_in_parent and not value.IsInitialized():
if errors is not None:
errors.extend(self.FindInitializationErrors())
return False
return True
cls.IsInitialized = IsInitialized
def FindInitializationErrors(self):
"""Finds required fields which are not initialized.
Returns:
A list of strings. Each string is a path to an uninitialized field from
the top-level message, e.g. "foo.bar[5].baz".
"""
errors = [] # simplify things
for field in required_fields:
if not self.HasField(field.name):
errors.append(field.name)
for field, value in self.ListFields():
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
if field.is_extension:
name = "(%s)" % field.full_name
else:
name = field.name
if _IsMapField(field):
if _IsMessageMapField(field):
for key in value:
element = value[key]
prefix = "%s[%s]." % (name, key)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
# ScalarMaps can't have any initialization errors.
pass
elif field.label == _FieldDescriptor.LABEL_REPEATED:
for i in range(len(value)):
element = value[i]
prefix = "%s[%d]." % (name, i)
sub_errors = element.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
else:
prefix = name + "."
sub_errors = value.FindInitializationErrors()
errors += [prefix + error for error in sub_errors]
return errors
cls.FindInitializationErrors = FindInitializationErrors |
<SYSTEM_TASK:>
Adds implementations of all Message methods to cls.
<END_TASK>
<USER_TASK:>
Description:
def _AddMessageMethods(message_descriptor, cls):
"""Adds implementations of all Message methods to cls.""" |
_AddListFieldsMethod(message_descriptor, cls)
_AddHasFieldMethod(message_descriptor, cls)
_AddClearFieldMethod(message_descriptor, cls)
if message_descriptor.is_extendable:
_AddClearExtensionMethod(cls)
_AddHasExtensionMethod(cls)
_AddEqualsMethod(message_descriptor, cls)
_AddStrMethod(message_descriptor, cls)
_AddReprMethod(message_descriptor, cls)
_AddUnicodeMethod(message_descriptor, cls)
_AddByteSizeMethod(message_descriptor, cls)
_AddSerializeToStringMethod(message_descriptor, cls)
_AddSerializePartialToStringMethod(message_descriptor, cls)
_AddMergeFromStringMethod(message_descriptor, cls)
_AddIsInitializedMethod(message_descriptor, cls)
_AddMergeFromMethod(cls)
_AddWhichOneofMethod(message_descriptor, cls)
_AddReduceMethod(cls)
# Adds methods which do not depend on cls.
cls.Clear = _Clear
cls.DiscardUnknownFields = _DiscardUnknownFields
cls._SetListener = _SetListener |
<SYSTEM_TASK:>
Adds implementation of private helper methods to cls.
<END_TASK>
<USER_TASK:>
Description:
def _AddPrivateHelperMethods(message_descriptor, cls):
"""Adds implementation of private helper methods to cls.""" |
def Modified(self):
"""Sets the _cached_byte_size_dirty bit to true,
and propagates this to our listener iff this was a state change.
"""
# Note: Some callers check _cached_byte_size_dirty before calling
# _Modified() as an extra optimization. So, if this method is ever
# changed such that it does stuff even when _cached_byte_size_dirty is
# already true, the callers need to be updated.
if not self._cached_byte_size_dirty:
self._cached_byte_size_dirty = True
self._listener_for_children.dirty = True
self._is_present_in_parent = True
self._listener.Modified()
def _UpdateOneofState(self, field):
"""Sets field as the active field in its containing oneof.
Will also delete currently active field in the oneof, if it is different
from the argument. Does not mark the message as modified.
"""
other_field = self._oneofs.setdefault(field.containing_oneof, field)
if other_field is not field:
del self._fields[other_field]
self._oneofs[field.containing_oneof] = field
cls._Modified = Modified
cls.SetInParent = Modified
cls._UpdateOneofState = _UpdateOneofState |