Dataset columns:

| column | type |
|---|---|
| identifier | string (1-155 chars) |
| parameters | string (2-6.09k chars) |
| docstring | string (11-63.4k chars) |
| docstring_summary | string (0-63.4k chars) |
| function | string (29-99.8k chars) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 distinct value) |
| docstring_language | string (2-7 chars) |
| docstring_language_predictions | string (18-23 chars) |
| is_langid_reliable | string (2 distinct values) |
BaseFigure._get_child_props | (self, child)

```python
def _get_child_props(self, child):
    """
    Return the properties dict for a child trace or child layout

    Note: this method must match the name/signature of one on
    BasePlotlyType

    Parameters
    ----------
    child : BaseTraceType | BaseLayoutType

    Returns
    -------
    dict
    """
    # Try to find index of child as a trace
    # -------------------------------------
    if isinstance(child, BaseTraceType):
        # ### Child is a trace ###
        trace_index = child._trace_ind
        return self._data[trace_index]

    # Child is the layout
    # -------------------
    elif child is self.layout:
        return self._layout

    # Unknown child
    # -------------
    else:
        raise ValueError("Unrecognized child: %s" % child)
```

start_point: (1907, 4) | end_point: (1937, 62) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._get_child_prop_defaults | (self, child)

```python
def _get_child_prop_defaults(self, child):
    """
    Return the default properties dict for a child trace or child layout

    Note: this method must match the name/signature of one on
    BasePlotlyType

    Parameters
    ----------
    child : BaseTraceType | BaseLayoutType

    Returns
    -------
    dict
    """
    # Child is a trace
    # ----------------
    if isinstance(child, BaseTraceType):
        trace_index = child._trace_ind
        return self._data_defaults[trace_index]

    # Child is the layout
    # -------------------
    elif child is self.layout:
        return self._layout_defaults

    # Unknown child
    # -------------
    else:
        raise ValueError("Unrecognized child: %s" % child)
```

start_point: (1939, 4) | end_point: (1968, 62) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._init_child_props | (self, child)

```python
def _init_child_props(self, child):
    """
    Initialize the properties dict for a child trace or layout

    Note: this method must match the name/signature of one on
    BasePlotlyType

    Parameters
    ----------
    child : BaseTraceType | BaseLayoutType

    Returns
    -------
    None
    """
    # The layout and traces dicts are initialized when the figure is
    # constructed and when new traces are added to the figure
    pass
```

start_point: (1970, 4) | end_point: (1987, 12) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.layout | (self)

```python
def layout(self):
    """
    The `layout` property of the figure

    Returns
    -------
    plotly.graph_objs.Layout
    """
    return self["layout"]
```

start_point: (2012, 4) | end_point: (2020, 29) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.plotly_relayout | (self, relayout_data, **kwargs)

```python
def plotly_relayout(self, relayout_data, **kwargs):
    """
    Perform a Plotly relayout operation on the figure's layout

    Parameters
    ----------
    relayout_data : dict
        Dict of layout updates

        dict keys are strings that specify the properties to be updated.
        Nested properties are expressed by joining successive keys on
        '.' characters (e.g. 'xaxis.range')

        dict values are the values to use to update the layout.

    Returns
    -------
    None
    """
    # Handle source_view_id
    # ---------------------
    # If not None, the source_view_id is the UID of the frontend
    # Plotly.js view that initially triggered this relayout operation
    # (e.g. the user clicked on the toolbar to change the drag mode
    # from zoom to pan). We pass this UID along so that the frontend
    # views can determine whether they need to apply the relayout
    # operation on themselves.
    if "source_view_id" in kwargs:
        msg_kwargs = {"source_view_id": kwargs["source_view_id"]}
    else:
        msg_kwargs = {}

    # Perform relayout operation on layout dict
    # -----------------------------------------
    relayout_changes = self._perform_plotly_relayout(relayout_data)

    if relayout_changes:
        # The relayout operation resulted in a change to some layout
        # properties, so we dispatch change callbacks and send the
        # relayout message to the frontend (if any)
        self._send_relayout_msg(relayout_changes, **msg_kwargs)
        self._dispatch_layout_change_callbacks(relayout_changes)
```

start_point: (2051, 4) | end_point: (2093, 68) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
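
The relayout path above reduces to two steps: apply each dotted key-path assignment to a nested dict, and keep only the assignments that actually changed a stored value. A minimal standalone sketch of that idea (an illustration only; plotly's real `_set_in` also handles array indices such as `'shapes[0].x0'`):

```python
# Standalone sketch of the relayout idea above: apply dotted key-path
# assignments to a nested dict, keeping only assignments that changed
# something. Illustration only, not plotly's actual _set_in.
def set_in(d, key_path_str, value):
    *parents, leaf = key_path_str.split(".")
    for key in parents:
        d = d.setdefault(key, {})  # create intermediate dicts as needed
    changed = d.get(leaf) != value
    d[leaf] = value
    return changed

layout = {"xaxis": {"range": [0, 1]}}
relayout_data = {"xaxis.range": [0, 5], "title": "demo"}

relayout_changes = {}
for key_path_str, v in relayout_data.items():
    if set_in(layout, key_path_str, v):
        relayout_changes[key_path_str] = v

print(relayout_changes)  # {'xaxis.range': [0, 5], 'title': 'demo'}
print(layout)            # {'xaxis': {'range': [0, 5]}, 'title': 'demo'}
```
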
BaseFigure._perform_plotly_relayout | (self, relayout_data)

```python
def _perform_plotly_relayout(self, relayout_data):
    """
    Perform a relayout operation on the figure's layout data and return
    the changes that were applied

    Parameters
    ----------
    relayout_data : dict[str, any]
        See the docstring for plotly_relayout

    Returns
    -------
    relayout_changes: dict[str, any]
        Subset of relayout_data including only the keys / values that
        resulted in a change to the figure's layout data
    """
    # Initialize relayout changes
    # ---------------------------
    # This will be a subset of the relayout_data including only the
    # keys / values that are changed in the figure's layout data
    relayout_changes = {}

    # Process each key
    # ----------------
    for key_path_str, v in relayout_data.items():

        if not BaseFigure._is_key_path_compatible(key_path_str, self.layout):
            raise ValueError(
                """
Invalid property path '{key_path_str}' for layout
""".format(
                    key_path_str=key_path_str
                )
            )

        # Apply set operation on the layout dict
        val_changed = BaseFigure._set_in(self._layout, key_path_str, v)

        if val_changed:
            relayout_changes[key_path_str] = v

    return relayout_changes
```

start_point: (2095, 4) | end_point: (2136, 31) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._is_key_path_compatible | (key_path_str, plotly_obj)

```python
def _is_key_path_compatible(key_path_str, plotly_obj):
    """
    Return whether the specified key path string is compatible with
    the specified plotly object for the purpose of relayout/restyle
    operation
    """
    # Convert string to tuple of path components
    # e.g. 'foo[0].bar[1]' -> ('foo', 0, 'bar', 1)
    key_path_tuple = BaseFigure._str_to_dict_path(key_path_str)

    # Remove trailing integer component
    # e.g. ('foo', 0, 'bar', 1) -> ('foo', 0, 'bar')
    # We do this because it's fine for relayout/restyle to create new
    # elements in the final array in the path.
    if isinstance(key_path_tuple[-1], int):
        key_path_tuple = key_path_tuple[:-1]

    # Test whether modified key path tuple is in plotly_obj
    return key_path_tuple in plotly_obj
```

start_point: (2139, 4) | end_point: (2158, 43) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
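
`_str_to_dict_path` itself is not included in this dump; the comment above documents its contract (`'foo[0].bar[1]' -> ('foo', 0, 'bar', 1)`). A hedged re-implementation of just that contract, for experimenting with the compatibility check outside plotly (the real implementation may differ):

```python
import re

# Sketch of the documented contract of BaseFigure._str_to_dict_path,
# inferred from the comment above. Assumes well-formed paths.
def str_to_dict_path(key_path_str):
    parts = []
    for token in key_path_str.split("."):
        # split 'bar[1]' into the name 'bar' and the index 1
        m = re.fullmatch(r"(\w+)((?:\[\d+\])*)", token)
        parts.append(m.group(1))
        parts.extend(int(i) for i in re.findall(r"\[(\d+)\]", m.group(2)))
    return tuple(parts)

assert str_to_dict_path("foo[0].bar[1]") == ("foo", 0, "bar", 1)
assert str_to_dict_path("xaxis.range") == ("xaxis", "range")
```
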
BaseFigure._relayout_child | (self, child, key_path_str, val)

```python
def _relayout_child(self, child, key_path_str, val):
    """
    Process relayout operation on child layout object

    Parameters
    ----------
    child : BaseLayoutType
        The figure's layout
    key_path_str :
        A key path string (e.g. 'foo.bar[0]')
    val
        Relayout value

    Returns
    -------
    None
    """
    # Validate input
    # --------------
    assert child is self.layout

    # Not in batch mode
    # -----------------
    # Dispatch change callbacks and send relayout message
    if not self._in_batch_mode:
        relayout_msg = {key_path_str: val}
        self._send_relayout_msg(relayout_msg)
        self._dispatch_layout_change_callbacks(relayout_msg)

    # In batch mode
    # -------------
    # Add key_path_str/val to saved batch edits
    else:
        self._batch_layout_edits[key_path_str] = val
```

start_point: (2160, 4) | end_point: (2194, 56) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._build_dispatch_plan | (key_path_strs)

```python
def _build_dispatch_plan(key_path_strs):
    """
    Build a dispatch plan for a list of key path strings

    A dispatch plan is a dict:
        - *from* path tuples that reference an object that has descendants
          that are referenced in `key_path_strs`.
        - *to* sets of tuples that correspond to descendants of the object
          above.

    Parameters
    ----------
    key_path_strs : list[str]
        List of key path strings. For example:
        ['xaxis.rangeselector.font.color', 'xaxis.rangeselector.bgcolor']

    Returns
    -------
    dispatch_plan: dict[tuple[str|int], set[tuple[str|int]]]

    Examples
    --------
    >>> key_path_strs = ['xaxis.rangeselector.font.color',
    ...                  'xaxis.rangeselector.bgcolor']
    >>> BaseFigure._build_dispatch_plan(key_path_strs)  # doctest: +SKIP
    {(): {('xaxis',),
          ('xaxis', 'rangeselector'),
          ('xaxis', 'rangeselector', 'bgcolor'),
          ('xaxis', 'rangeselector', 'font'),
          ('xaxis', 'rangeselector', 'font', 'color')},
     ('xaxis',): {('rangeselector',),
                  ('rangeselector', 'bgcolor'),
                  ('rangeselector', 'font'),
                  ('rangeselector', 'font', 'color')},
     ('xaxis', 'rangeselector'): {('bgcolor',),
                                  ('font',),
                                  ('font', 'color')},
     ('xaxis', 'rangeselector', 'font'): {('color',)}}
    """
    dispatch_plan = {}

    for key_path_str in key_path_strs:

        key_path = BaseFigure._str_to_dict_path(key_path_str)
        key_path_so_far = ()
        keys_left = key_path

        # Iterate down the key path
        for next_key in key_path:
            if key_path_so_far not in dispatch_plan:
                dispatch_plan[key_path_so_far] = set()

            to_add = [keys_left[: i + 1] for i in range(len(keys_left))]
            dispatch_plan[key_path_so_far].update(to_add)

            key_path_so_far = key_path_so_far + (next_key,)
            keys_left = keys_left[1:]

    return dispatch_plan
```

start_point: (2199, 4) | end_point: (2260, 28) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
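
The plan construction above only depends on `_str_to_dict_path`; substituting a plain `str.split('.')` makes it runnable on its own. A standalone sketch assuming plain dotted key paths with no array indices:

```python
# Standalone sketch of the dispatch-plan construction above,
# assuming plain dotted key paths with no array indices.
def build_dispatch_plan(key_path_strs):
    plan = {}
    for key_path_str in key_path_strs:
        key_path = tuple(key_path_str.split("."))
        so_far, left = (), key_path
        for next_key in key_path:
            plan.setdefault(so_far, set())
            plan[so_far].update(left[: i + 1] for i in range(len(left)))
            so_far += (next_key,)
            left = left[1:]
    return plan

plan = build_dispatch_plan(["xaxis.rangeselector.bgcolor"])
assert plan[("xaxis",)] == {("rangeselector",), ("rangeselector", "bgcolor")}
assert ("xaxis", "rangeselector") in plan[()]
```
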
BaseFigure._dispatch_layout_change_callbacks | (self, relayout_data)

```python
def _dispatch_layout_change_callbacks(self, relayout_data):
    """
    Dispatch property change callbacks given relayout_data

    Parameters
    ----------
    relayout_data : dict[str, any]
        See docstring for plotly_relayout.

    Returns
    -------
    None
    """
    # Build dispatch plan
    # -------------------
    key_path_strs = list(relayout_data.keys())
    dispatch_plan = BaseFigure._build_dispatch_plan(key_path_strs)

    # Dispatch changes to each layout object
    # --------------------------------------
    for path_tuple, changed_paths in dispatch_plan.items():
        if path_tuple in self.layout:
            dispatch_obj = self.layout[path_tuple]
            if isinstance(dispatch_obj, BasePlotlyType):
                dispatch_obj._dispatch_change_callbacks(changed_paths)
```

start_point: (2262, 4) | end_point: (2286, 74) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._dispatch_trace_change_callbacks | (self, restyle_data, trace_indexes)

```python
def _dispatch_trace_change_callbacks(self, restyle_data, trace_indexes):
    """
    Dispatch property change callbacks given restyle_data

    Parameters
    ----------
    restyle_data : dict[str, any]
        See docstring for plotly_restyle.
    trace_indexes : list[int]
        List of trace indexes that the restyle operation applied to

    Returns
    -------
    None
    """
    # Build dispatch plan
    # -------------------
    key_path_strs = list(restyle_data.keys())
    dispatch_plan = BaseFigure._build_dispatch_plan(key_path_strs)

    # Dispatch changes to each object in each trace
    # ---------------------------------------------
    for path_tuple, changed_paths in dispatch_plan.items():
        for trace_ind in trace_indexes:
            trace = self.data[trace_ind]
            if path_tuple in trace:
                dispatch_obj = trace[path_tuple]
                if isinstance(dispatch_obj, BasePlotlyType):
                    dispatch_obj._dispatch_change_callbacks(changed_paths)
```

start_point: (2288, 4) | end_point: (2318, 78) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.frames | (self)

```python
def frames(self):
    """
    The `frames` property is a tuple of the figure's frame objects

    Returns
    -------
    tuple[plotly.graph_objs.Frame]
    """
    return self["frames"]
```

start_point: (2323, 4) | end_point: (2331, 29) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.plotly_update | (self, restyle_data=None, relayout_data=None, trace_indexes=None, **kwargs)

```python
def plotly_update(
    self, restyle_data=None, relayout_data=None, trace_indexes=None, **kwargs
):
    """
    Perform a Plotly update operation on the figure.

    Note: This operation both mutates and returns the figure

    Parameters
    ----------
    restyle_data : dict
        Traces update specification. See the docstring for the
        `plotly_restyle` method for details
    relayout_data : dict
        Layout update specification. See the docstring for the
        `plotly_relayout` method for details
    trace_indexes :
        Trace index, or list of trace indexes, that the update operation
        applies to. Defaults to all trace indexes.

    Returns
    -------
    BaseFigure
        None
    """
    # Handle source_view_id
    # ---------------------
    # If not None, the source_view_id is the UID of the frontend
    # Plotly.js view that initially triggered this update operation
    # (e.g. the user clicked a button that triggered an update
    # operation). We pass this UID along so that the frontend views can
    # determine whether they need to apply the update operation on
    # themselves.
    if "source_view_id" in kwargs:
        msg_kwargs = {"source_view_id": kwargs["source_view_id"]}
    else:
        msg_kwargs = {}

    # Perform update operation
    # ------------------------
    # This updates the _data and _layout dicts, and returns the changes
    # to the traces (restyle_changes) and layout (relayout_changes)
    (
        restyle_changes,
        relayout_changes,
        trace_indexes,
    ) = self._perform_plotly_update(
        restyle_data=restyle_data,
        relayout_data=relayout_data,
        trace_indexes=trace_indexes,
    )

    # Send update message
    # -------------------
    # Send a plotly_update message to the frontend (if any)
    if restyle_changes or relayout_changes:
        self._send_update_msg(
            restyle_data=restyle_changes,
            relayout_data=relayout_changes,
            trace_indexes=trace_indexes,
            **msg_kwargs
        )

    # Dispatch changes
    # ----------------
    # ### Dispatch restyle changes ###
    if restyle_changes:
        self._dispatch_trace_change_callbacks(restyle_changes, trace_indexes)

    # ### Dispatch relayout changes ###
    if relayout_changes:
        self._dispatch_layout_change_callbacks(relayout_changes)
```

start_point: (2344, 4) | end_point: (2416, 68) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.batch_update | (self)

```python
def batch_update(self):
    """
    A context manager that batches up trace and layout assignment
    operations into a single plotly_update message that is executed when
    the context exits.

    Examples
    --------
    For example, suppose we have a figure widget, `fig`, with a single
    trace.

    >>> import plotly.graph_objs as go
    >>> fig = go.FigureWidget(data=[{'y': [3, 4, 2]}])

    If we want to update the xaxis range, the yaxis range, and the
    marker color, we could do so using a series of three property
    assignments as follows:

    >>> fig.layout.xaxis.range = [0, 5]
    >>> fig.layout.yaxis.range = [0, 10]
    >>> fig.data[0].marker.color = 'green'

    This will work, however it will result in three messages being
    sent to the front end (two relayout messages for the axis range
    updates followed by one restyle message for the marker color
    update). This can cause the plot to appear to stutter as the
    three updates are applied incrementally.

    We can avoid this problem by performing these three assignments in a
    `batch_update` context as follows:

    >>> with fig.batch_update():
    ...     fig.layout.xaxis.range = [0, 5]
    ...     fig.layout.yaxis.range = [0, 10]
    ...     fig.data[0].marker.color = 'green'

    Now, these three property updates will be sent to the frontend in a
    single update message, and they will be applied by the front end
    simultaneously.
    """
    if self._in_batch_mode is True:
        yield
    else:
        try:
            self._in_batch_mode = True
            yield
        finally:
            # ### Disable batch mode ###
            self._in_batch_mode = False

            # ### Build plotly_update params ###
            (
                restyle_data,
                relayout_data,
                trace_indexes,
            ) = self._build_update_params_from_batch()

            # ### Call plotly_update ###
            self.plotly_update(
                restyle_data=restyle_data,
                relayout_data=relayout_data,
                trace_indexes=trace_indexes,
            )

            # ### Clear out saved batch edits ###
            self._batch_layout_edits.clear()
            self._batch_trace_edits.clear()
```

start_point: (2480, 4) | end_point: (2546, 47) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
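
The pattern above, buffering writes while a flag is set and flushing once on exit, is independent of plotly internals. A minimal sketch with a hypothetical `send` callback standing in for the frontend message channel:

```python
# Minimal sketch of the batch-update pattern above: property writes are
# buffered while in batch mode and flushed as one message on exit.
# `send` is a hypothetical stand-in for the frontend channel.
from contextlib import contextmanager

class Batched:
    def __init__(self, send):
        self._send = send
        self._in_batch = False
        self._edits = {}

    def set(self, key, value):
        if self._in_batch:
            self._edits[key] = value   # buffer the edit
        else:
            self._send({key: value})   # send immediately

    @contextmanager
    def batch_update(self):
        self._in_batch = True
        try:
            yield
        finally:
            self._in_batch = False
            if self._edits:
                self._send(dict(self._edits))  # one combined message
                self._edits.clear()

messages = []
b = Batched(messages.append)
with b.batch_update():
    b.set("xaxis.range", [0, 5])
    b.set("yaxis.range", [0, 10])
assert len(messages) == 1  # both edits arrived in a single message
```

plotly's version additionally pivots the buffered trace edits into `restyle_data` form, as `_build_update_params_from_batch` below shows.
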
BaseFigure._build_update_params_from_batch | (self)

```python
def _build_update_params_from_batch(self):
    """
    Convert `_batch_trace_edits` and `_batch_layout_edits` into the
    `restyle_data`, `relayout_data`, and `trace_indexes` params accepted
    by the `plotly_update` method.

    Returns
    -------
    (dict, dict, list[int])
    """
    # Handle Style / Trace Indexes
    # ----------------------------
    batch_style_commands = self._batch_trace_edits
    trace_indexes = sorted(set([trace_ind for trace_ind in batch_style_commands]))

    all_props = sorted(
        set(
            [
                prop
                for trace_style in self._batch_trace_edits.values()
                for prop in trace_style
            ]
        )
    )

    # Initialize restyle_data dict with all values undefined
    restyle_data = {
        prop: [Undefined for _ in range(len(trace_indexes))] for prop in all_props
    }

    # Fill in values
    for trace_ind, trace_style in batch_style_commands.items():
        for trace_prop, trace_val in trace_style.items():
            restyle_trace_index = trace_indexes.index(trace_ind)
            restyle_data[trace_prop][restyle_trace_index] = trace_val

    # Handle Layout
    # -------------
    relayout_data = self._batch_layout_edits

    # Return plotly_update params
    # ---------------------------
    return restyle_data, relayout_data, trace_indexes
```

start_point: (2548, 4) | end_point: (2591, 57) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
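
The pivot performed above (per-trace edit dicts into per-property lists aligned to sorted trace indexes) can be shown with plain dicts; here `UNDEF` is a stand-in sentinel for plotly's `Undefined`:

```python
# Sketch of the pivot above: per-trace edit dicts become per-property
# lists aligned with sorted trace indexes. UNDEF stands in for plotly's
# Undefined sentinel.
UNDEF = object()

batch_trace_edits = {2: {"marker.size": 20}, 0: {"marker.size": 10, "opacity": 0.5}}

trace_indexes = sorted(batch_trace_edits)
all_props = sorted({p for style in batch_trace_edits.values() for p in style})

restyle_data = {p: [UNDEF] * len(trace_indexes) for p in all_props}
for trace_ind, style in batch_trace_edits.items():
    for prop, val in style.items():
        restyle_data[prop][trace_indexes.index(trace_ind)] = val

assert restyle_data["marker.size"] == [10, 20]
assert restyle_data["opacity"][1] is UNDEF  # trace 2 never set opacity
```
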
BaseFigure.batch_animate | (self, duration=500, easing="cubic-in-out")

```python
def batch_animate(self, duration=500, easing="cubic-in-out"):
    """
    Context manager to animate trace / layout updates

    Parameters
    ----------
    duration : number
        The duration of the transition, in milliseconds.
        If equal to zero, updates are synchronous.
    easing : string
        The easing function used for the transition.
        One of:
            - linear
            - quad
            - cubic
            - sin
            - exp
            - circle
            - elastic
            - back
            - bounce
            - linear-in
            - quad-in
            - cubic-in
            - sin-in
            - exp-in
            - circle-in
            - elastic-in
            - back-in
            - bounce-in
            - linear-out
            - quad-out
            - cubic-out
            - sin-out
            - exp-out
            - circle-out
            - elastic-out
            - back-out
            - bounce-out
            - linear-in-out
            - quad-in-out
            - cubic-in-out
            - sin-in-out
            - exp-in-out
            - circle-in-out
            - elastic-in-out
            - back-in-out
            - bounce-in-out

    Examples
    --------
    Suppose we have a figure widget, `fig`, with a single trace.

    >>> import plotly.graph_objs as go
    >>> fig = go.FigureWidget(data=[{'y': [3, 4, 2]}])

    1) Animate a change in the xaxis and yaxis ranges using default
    duration and easing parameters.

    >>> with fig.batch_animate():
    ...     fig.layout.xaxis.range = [0, 5]
    ...     fig.layout.yaxis.range = [0, 10]

    2) Animate a change in the size and color of the trace's markers
    over 2 seconds using the elastic-in-out easing method

    >>> with fig.batch_animate(duration=2000, easing='elastic-in-out'):
    ...     fig.data[0].marker.color = 'green'
    ...     fig.data[0].marker.size = 20
    """
    # Validate inputs
    # ---------------
    duration = self._animation_duration_validator.validate_coerce(duration)
    easing = self._animation_easing_validator.validate_coerce(easing)

    if self._in_batch_mode is True:
        yield
    else:
        try:
            self._in_batch_mode = True
            yield
        finally:
            # Exit batch mode
            # ---------------
            self._in_batch_mode = False

            # Apply batch animate
            # -------------------
            self._perform_batch_animate(
                {
                    "transition": {"duration": duration, "easing": easing},
                    "frame": {"duration": duration},
                }
            )
```

start_point: (2594, 4) | end_point: (2688, 17) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._perform_batch_animate | (self, animation_opts)

```python
def _perform_batch_animate(self, animation_opts):
    """
    Perform the batch animate operation

    This method should be called when the batch_animate() context
    manager exits.

    Parameters
    ----------
    animation_opts : dict
        Animation options as accepted by frontend Plotly.animation command

    Returns
    -------
    None
    """
    # Apply commands to internal dictionaries as an update
    # ----------------------------------------------------
    (
        restyle_data,
        relayout_data,
        trace_indexes,
    ) = self._build_update_params_from_batch()

    (
        restyle_changes,
        relayout_changes,
        trace_indexes,
    ) = self._perform_plotly_update(restyle_data, relayout_data, trace_indexes)

    # Convert style / trace_indexes into animate form
    # -----------------------------------------------
    if self._batch_trace_edits:
        animate_styles, animate_trace_indexes = zip(
            *[
                (trace_style, trace_index)
                for trace_index, trace_style in self._batch_trace_edits.items()
            ]
        )
    else:
        animate_styles, animate_trace_indexes = {}, []

    animate_layout = copy(self._batch_layout_edits)

    # Send animate message
    # --------------------
    # Sends animate message to the front end (if any)
    self._send_animate_msg(
        styles_data=list(animate_styles),
        relayout_data=animate_layout,
        trace_indexes=list(animate_trace_indexes),
        animation_opts=animation_opts,
    )

    # Clear batched commands
    # ----------------------
    self._batch_layout_edits.clear()
    self._batch_trace_edits.clear()

    # Dispatch callbacks
    # ------------------
    # ### Dispatch restyle changes ###
    if restyle_changes:
        self._dispatch_trace_change_callbacks(restyle_changes, trace_indexes)

    # ### Dispatch relayout changes ###
    if relayout_changes:
        self._dispatch_layout_change_callbacks(relayout_changes)
```

start_point: (2690, 4) | end_point: (2757, 68) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.to_dict | (self)

```python
def to_dict(self):
    """
    Convert figure to a dictionary

    Note: the dictionary includes the properties explicitly set by the
    user; it does not include default values of unspecified properties

    Returns
    -------
    dict
    """
    # Handle data
    # -----------
    data = deepcopy(self._data)

    # Handle layout
    # -------------
    layout = deepcopy(self._layout)

    # Handle frames
    # -------------
    # Frame key is only added if there are any frames
    res = {"data": data, "layout": layout}
    frames = deepcopy([frame._props for frame in self._frame_objs])
    if frames:
        res["frames"] = frames

    return res
```

start_point: (2761, 4) | end_point: (2788, 18) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure.to_plotly_json | (self)

```python
def to_plotly_json(self):
    """
    Convert figure to a JSON representation as a Python dict

    Returns
    -------
    dict
    """
    return self.to_dict()
```

start_point: (2790, 4) | end_point: (2798, 29) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
BaseFigure._to_ordered_dict | (d, skip_uid=False)

```python
def _to_ordered_dict(d, skip_uid=False):
    """
    Static helper for converting dict or list to structure of ordered
    dictionaries
    """
    if isinstance(d, dict):
        # d is a dict
        result = collections.OrderedDict()
        for key in sorted(d.keys()):
            if skip_uid and key == "uid":
                continue
            else:
                result[key] = BaseFigure._to_ordered_dict(d[key], skip_uid=skip_uid)

    elif isinstance(d, list) and d and isinstance(d[0], dict):
        # d is a list of dicts
        result = [BaseFigure._to_ordered_dict(el, skip_uid=skip_uid) for el in d]
    else:
        result = d

    return result
```

start_point: (2801, 4) | end_point: (2821, 21) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
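
An illustrative check of the helper's expected behavior, assuming `BaseFigure` is importable (the `plotly.basedatatypes` path may vary by plotly version):

```python
import collections
from plotly.basedatatypes import BaseFigure

d = {"b": 1, "a": {"uid": "xyz", "z": [{"k": 2, "j": 3}]}}
res = BaseFigure._to_ordered_dict(d, skip_uid=True)

assert isinstance(res, collections.OrderedDict)
assert list(res) == ["a", "b"]               # keys sorted
assert "uid" not in res["a"]                 # uid keys skipped
assert list(res["a"]["z"][0]) == ["j", "k"]  # recurses into lists of dicts
```
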
remove_first_sv | (emb, first_sv)

```python
def remove_first_sv(emb, first_sv):
    """
    Projects out the first singular value (first_sv) from the embedding (emb).

    Inputs:
      emb: torch Tensor shape (glove_dim)
      first_sv: torch Tensor shape (glove_dim)

    Returns:
      new emb: torch Tensor shape (glove_dim)
    """
    # Calculate dot prod of emb and first_sv using torch.mm:
    # (1, glove_dim) x (glove_dim, 1) -> (1,1) -> float
    dot_prod = torch.mm(torch.unsqueeze(emb, 0), torch.unsqueeze(first_sv, 1)).item()
    return emb - first_sv * dot_prod
```

start_point: (222, 0) | end_point: (236, 36) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
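
A quick numeric check of the function above, assuming `first_sv` is unit-norm (rows of V returned by SVD are): after projecting out that direction, the result is orthogonal to it.

```python
# Numeric check of remove_first_sv: after projecting out a unit-norm
# direction, the result has no remaining component along it.
import torch

torch.manual_seed(0)
glove_dim = 300
emb = torch.randn(glove_dim)
first_sv = torch.randn(glove_dim)
first_sv = first_sv / first_sv.norm()  # make the direction unit-norm

out = remove_first_sv(emb, first_sv)
assert abs(torch.dot(out, first_sv).item()) < 1e-5
```
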
get_word_counts | (opt, count_inputs)

```python
def get_word_counts(opt, count_inputs):
    """
    Goes through the dataset specified in opt, returns word counts and all utterances.

    Inputs:
      count_inputs: If True, include both input and reply when counting words and
        utterances. Otherwise, only include reply text.

    Returns:
      word_counter: a Counter mapping each word to the total number of times it appears
      total_count: int. total word count, i.e. the sum of the counts for each word
      all_utts: list of strings. all the utterances that were used for counting words
    """
    # Create repeat label agent and assign it to the specified task
    agent = RepeatLabelAgent(opt)
    world = create_task(opt, agent)

    # Count word frequency for all words in dataset
    word_counter = Counter()
    total_count = 0
    all_utts = []
    log_timer = TimeLogger()
    while True:
        world.parley()

        # Count words in reply
        reply = world.acts[0].get('labels', world.acts[0].get('eval_labels'))[0]
        words = reply.split()
        word_counter.update(words)
        total_count += len(words)
        all_utts.append(reply)

        # Optionally count words in input text
        if count_inputs:
            input = world.acts[0]['text']
            input = input.split('\n')[-1]  # e.g. in ConvAI2, this removes persona
            words = input.split()
            word_counter.update(words)
            total_count += len(words)
            all_utts.append(input)

        if log_timer.time() > opt['log_every_n_secs']:
            text, _log = log_timer.log(world.total_parleys, world.num_examples())
            print(text)

        if world.epoch_done():
            print('EPOCH DONE')
            break

    assert total_count == sum(word_counter.values())
    return word_counter, total_count, all_utts
```

start_point: (239, 0) | end_point: (290, 46) | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False
learn_arora | (opt)

```python
def learn_arora(opt):
    """
    Go through ConvAI2 data and collect word counts, thus compute the unigram
    probability distribution. Use those probs to compute weighted sentence embeddings
    for all utterances, thus compute first principal component.

    Save all info to arora.pkl file.
    """
    arora_file = os.path.join(opt['datapath'], 'controllable_dialogue', 'arora.pkl')

    opt['task'] = 'fromfile:parlaiformat'
    opt['log_every_n_secs'] = 2

    print('Getting word counts from ConvAI2 train set...')
    opt['datatype'] = 'train:ordered'
    opt['fromfile_datapath'] = os.path.join(
        opt['datapath'], 'controllable_dialogue', 'ConvAI2_parlaiformat', 'train.txt'
    )
    # Don't include inputs because the ConvAI2 train set reverses every convo,
    # so each input already appears as a reply:
    word_counter_train, total_count_train, all_utts_train = get_word_counts(
        opt, count_inputs=False
    )

    print('Getting word counts from ConvAI2 val set...')
    opt['datatype'] = 'valid'
    opt['fromfile_datapath'] = os.path.join(
        opt['datapath'], 'controllable_dialogue', 'ConvAI2_parlaiformat', 'valid.txt'
    )
    # Do include inputs because the ConvAI2 val set doesn't reverse convos:
    word_counter_valid, total_count_valid, all_utts_valid = get_word_counts(
        opt, count_inputs=True
    )

    # Merge word counts
    word_counter = word_counter_train
    for word, count in word_counter_valid.items():
        word_counter[word] += count
    total_count = total_count_train + total_count_valid

    # Merge all_utts
    all_utts = all_utts_train + all_utts_valid

    # Compute unigram prob for every word
    print("Computing unigram probs for all words...")
    word2prob = {w: c / total_count for w, c in word_counter.items()}

    # Settings for sentence embedder
    arora_a = 0.0001
    glove_name = '840B'
    glove_dim = 300

    # Embed every sentence, without removing first singular value
    print('Embedding all sentences...')
    sent_embedder = SentenceEmbedder(
        word2prob,
        arora_a,
        glove_name,
        glove_dim,
        first_sv=None,
        data_path=opt['datapath'],
    )
    utt_embs = []
    log_timer = TimeLogger()
    for n, utt in enumerate(all_utts):
        utt_emb = sent_embedder.embed_sent(utt.split(), rem_first_sv=False)
        utt_embs.append(utt_emb)
        if log_timer.time() > opt['log_every_n_secs']:
            text, _log = log_timer.log(n, len(all_utts))
            print(text)

    # Use SVD to calculate singular vector
    # https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html
    print('Calculating SVD...')
    utt_embs = np.stack(utt_embs, axis=0)  # shape (num_embs, glove_dim)
    U, s, V = np.linalg.svd(utt_embs, full_matrices=False)
    first_sv = V[0, :]  # first row of V. shape (glove_dim)

    # Remove singular vector from all embs to get complete Arora-style sent embs
    print('Removing singular vec from all sentence embeddings...')
    utt_embs_adj = [
        remove_first_sv(torch.Tensor(emb), torch.Tensor(first_sv)).numpy()
        for emb in utt_embs
    ]  # list of np arrays shape (glove_dim)

    # Make dict mapping ConvAI2 dataset utterances to Arora sent emb
    # We save this to file for convenience (e.g. if you want to inspect)
    utt2emb = {utt: emb for (utt, emb) in zip(all_utts, utt_embs_adj)}

    # Save unigram distribution, first singular value, hyperparameter value for a,
    # info about GloVe vectors used, and full dict of utt->emb to file
    print("Saving Arora embedding info to %s..." % arora_file)
    with PathManager.open(arora_file, "wb") as f:
        pickle.dump(
            {
                'word2prob': word2prob,  # dict: string to float between 0 and 1
                'first_sv': first_sv,  # np array shape (glove_dim)
                'arora_a': arora_a,  # float, 0.0001
                'glove_name': glove_name,  # string, '840B'
                'glove_dim': glove_dim,  # int, 300
                'utt2emb': utt2emb,  # dict: string to np array shape (glove_dim)
            },
            f,
        )
```
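
`SentenceEmbedder` is defined elsewhere in the project, but the weighted-average step it performs before the singular-vector removal above follows the Arora-style (SIF) formula emb(s) = (1/|s|) * sum over words w of a / (a + p(w)) * vec(w). A sketch of that formula with toy vectors standing in for GloVe; this is an illustration, not the project's implementation:

```python
# Sketch of the Arora-style (SIF) weighted average computed before
# singular-vector removal. Toy random vectors stand in for GloVe.
import numpy as np

arora_a = 0.0001
word2prob = {"the": 0.05, "cat": 0.001, "sat": 0.002}
word2vec = {w: np.random.RandomState(i).randn(300) for i, w in enumerate(word2prob)}

def embed_sent(words):
    # rare words (small p(w)) get weights near 1; frequent words near 0
    weights = np.array([arora_a / (arora_a + word2prob[w]) for w in words])
    vecs = np.stack([word2vec[w] for w in words])  # (len(words), 300)
    return (weights[:, None] * vecs).mean(axis=0)  # (300,)

emb = embed_sent("the cat sat".split())
print(emb.shape)  # (300,)
```

With `first_sv` computed from the stacked embeddings via SVD as above, `remove_first_sv` then yields the final sentence embeddings.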
"# info about GloVe vectors used, and full dict of utt->emb to file",
"print",
"(",
"\"Saving Arora embedding info to %s...\"",
"%",
"arora_file",
")",
"with",
"PathManager",
".",
"open",
"(",
"arora_file",
",",
"\"wb\"",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"{",
"'word2prob'",
":",
"word2prob",
",",
"# dict: string to float between 0 and 1",
"'first_sv'",
":",
"first_sv",
",",
"# np array shape (glove_dim)",
"'arora_a'",
":",
"arora_a",
",",
"# float, 0.0001",
"'glove_name'",
":",
"glove_name",
",",
"# string, '840B'",
"'glove_dim'",
":",
"glove_dim",
",",
"# int, 300",
"'utt2emb'",
":",
"utt2emb",
",",
"# dict: string to np array shape (glove_dim)",
"}",
",",
"f",
",",
")"
] | [
293,
0
] | [
395,
9
] | python | en | ['en', 'error', 'th'] | False |
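The `remove_first_sv` helper called in the body above is defined elsewhere in this module. For reference, here is a minimal sketch of the standard Arora et al. (2017) common-component removal it presumably implements — subtracting each embedding's projection onto the first singular vector; the signature is inferred from the call sites above, not taken from the module itself:

```python
import torch

def remove_first_sv(emb, first_sv):
    # Sketch only, not the module's actual definition.
    # v <- v - (u . v) u, where u is the first right singular vector from
    # the SVD above (rows of V are orthonormal, so no renormalization).
    projection = torch.dot(emb, first_sv) * first_sv
    return emb - projection
```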
load_arora | (opt) |
Load the data in the arora.pkl file in data/controllable_dialogue.
|
Load the data in the arora.pkl file in data/controllable_dialogue.
| def load_arora(opt):
"""
Load the data in the arora.pkl file in data/controllable_dialogue.
"""
arora_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'arora.pkl')
print("Loading Arora embedding info from %s..." % arora_fp)
with PathManager.open(arora_fp, "rb") as f:
data = pickle.load(f)
print("Done loading arora info.")
return data | [
"def",
"load_arora",
"(",
"opt",
")",
":",
"arora_fp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"opt",
"[",
"'datapath'",
"]",
",",
"CONTROLLABLE_DIR",
",",
"'arora.pkl'",
")",
"print",
"(",
"\"Loading Arora embedding info from %s...\"",
"%",
"arora_fp",
")",
"with",
"PathManager",
".",
"open",
"(",
"arora_fp",
",",
"\"rb\"",
")",
"as",
"f",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"print",
"(",
"\"Done loading arora info.\"",
")",
"return",
"data"
] | [
398,
0
] | [
407,
15
] | python | en | ['en', 'error', 'th'] | False |
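A hedged usage sketch tying this loader to the `SentenceEmbedder` class documented below; the dict keys come straight from what `learn_arora` pickles:

```python
# Hypothetical wiring: rebuild a SentenceEmbedder from the saved arora.pkl.
data = load_arora(opt)
embedder = SentenceEmbedder(
    data['word2prob'],
    data['arora_a'],
    data['glove_name'],
    data['glove_dim'],
    first_sv=data['first_sv'],
    data_path=opt['datapath'],
)
sent_emb = embedder.embed_sent('hello there'.split())  # removes first_sv by default
```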
SentenceEmbedder.__init__ | (self, word2prob, arora_a, glove_name, glove_dim, first_sv, data_path) |
Inputs:
word2prob: dict mapping words to their unigram probs
arora_a: a float. The constant (called "a" in the paper)
used to compute Arora sentence embeddings.
glove_name: the version of GloVe to use, e.g. '840B'
glove_dim: the dimension of the GloVe embeddings to use, e.g. 300
first_sv: np array shape (glove_dim). The first singular vector,
used to compute Arora sentence embeddings. Can be None.
data_path: The data path (we will use this to download glove)
|
Inputs:
word2prob: dict mapping words to their unigram probs
arora_a: a float. The constant (called "a" in the paper)
used to compute Arora sentence embeddings.
glove_name: the version of GloVe to use, e.g. '840B'
glove_dim: the dimension of the GloVe embeddings to use, e.g. 300
first_sv: np array shape (glove_dim). The first singular vector,
used to compute Arora sentence embeddings. Can be None.
data_path: The data path (we will use this to download glove)
| def __init__(self, word2prob, arora_a, glove_name, glove_dim, first_sv, data_path):
"""
Inputs:
word2prob: dict mapping words to their unigram probs
arora_a: a float. The constant (called "a" in the paper)
used to compute Arora sentence embeddings.
glove_name: the version of GloVe to use, e.g. '840B'
glove_dim: the dimension of the GloVe embeddings to use, e.g. 300
first_sv: np array shape (glove_dim). The first singular vector,
used to compute Arora sentence embeddings. Can be None.
data_path: The data path (we will use this to download glove)
"""
self.word2prob = word2prob
self.arora_a = arora_a
self.glove_name = glove_name
self.glove_dim = glove_dim
self.first_sv = first_sv
self.data_path = data_path
if self.first_sv is not None:
self.first_sv = torch.tensor(self.first_sv) # convert to torch tensor
self.min_word_prob = min(word2prob.values()) # prob of rarest word
self.tt_embs = None # will be torchtext.vocab.GloVe object
self.emb_matrix = None # will be np array shape (vocab_size, glove_dim)
# Initialize a cache, which holds up to 64 sentences, along with their
# corresponding word similarity scores (i.e. cosine sim for every word in the
# vocab). This enables us to repeatedly retrieve sims for sentences we have
# already processed (useful for batched beam search).
self.cache_limit = 64
self.cache_sent2sims = {} # maps sent to sims. holds up to cache_limit.
self.cache_sentqueue = deque() | [
"def",
"__init__",
"(",
"self",
",",
"word2prob",
",",
"arora_a",
",",
"glove_name",
",",
"glove_dim",
",",
"first_sv",
",",
"data_path",
")",
":",
"self",
".",
"word2prob",
"=",
"word2prob",
"self",
".",
"arora_a",
"=",
"arora_a",
"self",
".",
"glove_name",
"=",
"glove_name",
"self",
".",
"glove_dim",
"=",
"glove_dim",
"self",
".",
"first_sv",
"=",
"first_sv",
"self",
".",
"data_path",
"=",
"data_path",
"if",
"self",
".",
"first_sv",
"is",
"not",
"None",
":",
"self",
".",
"first_sv",
"=",
"torch",
".",
"tensor",
"(",
"self",
".",
"first_sv",
")",
"# convert to torch tensor",
"self",
".",
"min_word_prob",
"=",
"min",
"(",
"word2prob",
".",
"values",
"(",
")",
")",
"# prob of rarest word",
"self",
".",
"tt_embs",
"=",
"None",
"# will be torchtext.vocab.GloVe object",
"self",
".",
"emb_matrix",
"=",
"None",
"# will be np array shape (vocab_size, glove_dim)",
"# Initialize a cache, which holds up to 64 sentences, along with their",
"# corresponding word similarity scores (i.e. cosine sim for every word in the",
"# vocab). This enables us to repeatedly retrieve sims for sentences we have",
"# already processed (useful for batched beam search).",
"self",
".",
"cache_limit",
"=",
"64",
"self",
".",
"cache_sent2sims",
"=",
"{",
"}",
"# maps sent to sims. holds up to cache_limit.",
"self",
".",
"cache_sentqueue",
"=",
"deque",
"(",
")"
] | [
37,
4
] | [
68,
38
] | python | en | ['en', 'error', 'th'] | False |
SentenceEmbedder.get_glove_embs | (self) |
Loads torchtext GloVe embs from file and stores in self.tt_embs.
|
Loads torchtext GloVe embs from file and stores in self.tt_embs.
| def get_glove_embs(self):
"""
Loads torchtext GloVe embs from file and stores in self.tt_embs.
"""
if not hasattr(self, 'glove_cache'):
self.glove_cache = modelzoo_path(self.data_path, 'models:glove_vectors')
print('Loading torchtext GloVe embs (for Arora sentence embs)...')
self.tt_embs = vocab.GloVe(
name=self.glove_name, dim=self.glove_dim, cache=self.glove_cache
)
print('Finished loading torchtext GloVe embs') | [
"def",
"get_glove_embs",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'glove_cache'",
")",
":",
"self",
".",
"glove_cache",
"=",
"modelzoo_path",
"(",
"self",
".",
"data_path",
",",
"'models:glove_vectors'",
")",
"print",
"(",
"'Loading torchtext GloVe embs (for Arora sentence embs)...'",
")",
"self",
".",
"tt_embs",
"=",
"vocab",
".",
"GloVe",
"(",
"name",
"=",
"self",
".",
"glove_name",
",",
"dim",
"=",
"self",
".",
"glove_dim",
",",
"cache",
"=",
"self",
".",
"glove_cache",
")",
"print",
"(",
"'Finished loading torchtext GloVe embs'",
")"
] | [
70,
4
] | [
80,
54
] | python | en | ['en', 'error', 'th'] | False |
SentenceEmbedder.get_emb_matrix | (self, dictionary) |
Construct an embedding matrix containing pretrained GloVe vectors for all words
in dictionary, and store in self.emb_matrix. This is needed for response-
relatedness weighted decoding.
Inputs:
dictionary: ParlAI dictionary
|
Construct an embedding matrix containing pretrained GloVe vectors for all words
in dictionary, and store in self.emb_matrix. This is needed for response-
relatedness weighted decoding. | def get_emb_matrix(self, dictionary):
"""
Construct an embedding matrix containing pretrained GloVe vectors for all words
in dictionary, and store in self.emb_matrix. This is needed for response-
relatedness weighted decoding.
Inputs:
dictionary: ParlAI dictionary
"""
print(
'Constructing GloVe emb matrix for response-relatedness weighted '
'decoding...'
)
self.emb_matrix = []
oov_indices = [] # list of dictionary indices for all OOV words
for idx in range(len(dictionary)):
word = dictionary[idx]
if word in self.tt_embs.stoi:
word_emb = self.tt_embs.vectors[self.tt_embs.stoi[word]]
else:
# If word is OOV, enter a zero vector instead.
# This means that the cosine similarity will always be zero.
word_emb = torch.zeros(self.glove_dim)
oov_indices.append(idx)
self.emb_matrix.append(word_emb)
self.emb_matrix = np.stack(self.emb_matrix) # (vocab_size, glove_dim)
print(
'Done constructing GloVe emb matrix; found %i OOVs of %i words'
% (len(oov_indices), len(dictionary))
)
# Get the norm of each of the word vectors. This is needed for cosine sims.
# self.emb_matrix_norm is a np array shape (vocab_size)
self.emb_matrix_norm = np.linalg.norm(self.emb_matrix, axis=1)
# For the OOV words which have zero vectors,
# set the norm to 1.0 so we don't have divide-by-zero errors
for idx in oov_indices:
self.emb_matrix_norm[idx] = 1.0 | [
"def",
"get_emb_matrix",
"(",
"self",
",",
"dictionary",
")",
":",
"print",
"(",
"'Constructing GloVe emb matrix for response-relatedness weighted '",
"'decoding...'",
")",
"self",
".",
"emb_matrix",
"=",
"[",
"]",
"oov_indices",
"=",
"[",
"]",
"# list of dictionary indices for all OOV words",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"dictionary",
")",
")",
":",
"word",
"=",
"dictionary",
"[",
"idx",
"]",
"if",
"word",
"in",
"self",
".",
"tt_embs",
".",
"stoi",
":",
"word_emb",
"=",
"self",
".",
"tt_embs",
".",
"vectors",
"[",
"self",
".",
"tt_embs",
".",
"stoi",
"[",
"word",
"]",
"]",
"else",
":",
"# If word is OOV, enter a zero vector instead.",
"# This means that the cosine similarity will always be zero.",
"word_emb",
"=",
"torch",
".",
"zeros",
"(",
"self",
".",
"glove_dim",
")",
"oov_indices",
".",
"append",
"(",
"idx",
")",
"self",
".",
"emb_matrix",
".",
"append",
"(",
"word_emb",
")",
"self",
".",
"emb_matrix",
"=",
"np",
".",
"stack",
"(",
"self",
".",
"emb_matrix",
")",
"# (vocab_size, glove_dim)",
"print",
"(",
"'Done constructing GloVe emb matrix; found %i OOVs of %i words'",
"%",
"(",
"len",
"(",
"oov_indices",
")",
",",
"len",
"(",
"dictionary",
")",
")",
")",
"# Get the norm of each of the word vectors. This is needed for cosine sims.",
"# self.emb_matrix_norm is a np array shape (vocab_size)",
"self",
".",
"emb_matrix_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"emb_matrix",
",",
"axis",
"=",
"1",
")",
"# For the OOV words which have zero vectors,",
"# set the norm to 1.0 so we don't have divide-by-zero errors",
"for",
"idx",
"in",
"oov_indices",
":",
"self",
".",
"emb_matrix_norm",
"[",
"idx",
"]",
"=",
"1.0"
] | [
82,
4
] | [
120,
43
] | python | en | ['en', 'error', 'th'] | False |
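The norm override at the end is worth spelling out: an OOV row is all zeros, so its dot product with any sentence embedding is 0, and dividing by a forced norm of 1.0 yields a clean cosine of 0 rather than a 0/0 NaN. A small numeric check of that design choice (dimension assumed to be the 300-dim GloVe setting used above):

```python
import numpy as np

oov_row = np.zeros(300)          # an OOV word's (zeroed) embedding row
sent_emb = np.random.randn(300)  # any sentence embedding
# With the true norm (0.0) this would be 0/0 = nan; with the norm forced
# to 1.0 it is exactly 0.0, i.e. "no similarity" for OOV words.
sim = oov_row.dot(sent_emb) / (1.0 * np.linalg.norm(sent_emb))
assert sim == 0.0
```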
SentenceEmbedder.get_word_sims | (self, sent, sent_emb, dictionary) |
Given a sentence and its Arora-style sentence embedding, compute the cosine
similarities to it, for all words in the dictionary.
Inputs:
sent: string. Used only for caching lookup purposes.
sent_emb: torch Tensor shape (glove_dim).
dictionary: ParlAI dictionary
Returns:
sims: torch Tensor shape (vocab_size), containing the cosine sims.
|
Given a sentence and its Arora-style sentence embedding, compute the cosine
similarities to it, for all words in the dictionary. | def get_word_sims(self, sent, sent_emb, dictionary):
"""
Given a sentence and its Arora-style sentence embedding, compute the cosine
similarities to it, for all words in the dictionary.
Inputs:
sent: string. Used only for caching lookup purposes.
sent_emb: torch Tensor shape (glove_dim).
dictionary: ParlAI dictionary
Returns:
sims: torch Tensor shape (vocab_size), containing the cosine sims.
"""
# If we haven't initialized the GloVe emb matrix yet, do so
if self.emb_matrix is None:
self.get_emb_matrix(dictionary)
# If we have already computed sims for this sentence, return it
if sent in self.cache_sent2sims:
sims = self.cache_sent2sims[sent]
return sims
# Compute the cosine similarities. Implementation from here:
# https://codereview.stackexchange.com/questions/55717/efficient-numpy-cosine-distance-calculation
dotted = self.emb_matrix.dot(sent_emb) # shape (vocab_size)
sent_emb_norm = np.linalg.norm(sent_emb) # norm of the sent emb. scalar
norms = np.multiply(self.emb_matrix_norm, sent_emb_norm) # shape (vocab_size)
sims = np.divide(dotted, norms) # divide dot prods by norms. shape (vocab_size)
sims = torch.tensor(sims) # convert to torch Tensor, shape (vocab_size)
# Cache sims in self.cache_sent2sims
self.cache_sentqueue.append(sent) # append sent to right
self.cache_sent2sims[sent] = sims # add (sent, sims) pair to cache
if len(self.cache_sentqueue) > self.cache_limit:
to_remove = self.cache_sentqueue.popleft() # remove from left
del self.cache_sent2sims[to_remove] # remove from cache
assert len(self.cache_sent2sims) == len(self.cache_sentqueue)
assert len(self.cache_sent2sims) <= self.cache_limit
return sims | [
"def",
"get_word_sims",
"(",
"self",
",",
"sent",
",",
"sent_emb",
",",
"dictionary",
")",
":",
"# If we haven't initialized the GloVe emb matrix yet, do so",
"if",
"self",
".",
"emb_matrix",
"is",
"None",
":",
"self",
".",
"get_emb_matrix",
"(",
"dictionary",
")",
"# If we have already computed sims for this sentence, return it",
"if",
"sent",
"in",
"self",
".",
"cache_sent2sims",
":",
"sims",
"=",
"self",
".",
"cache_sent2sims",
"[",
"sent",
"]",
"return",
"sims",
"# Compute the cosine similarities. Implementation from here:",
"# https://codereview.stackexchange.com/questions/55717/efficient-numpy-cosine-distance-calculation",
"dotted",
"=",
"self",
".",
"emb_matrix",
".",
"dot",
"(",
"sent_emb",
")",
"# shape (vocab_size)",
"sent_emb_norm",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"sent_emb",
")",
"# norm of the sent emb. scalar",
"norms",
"=",
"np",
".",
"multiply",
"(",
"self",
".",
"emb_matrix_norm",
",",
"sent_emb_norm",
")",
"# shape (vocab_size)",
"sims",
"=",
"np",
".",
"divide",
"(",
"dotted",
",",
"norms",
")",
"# divide dot prods by norms. shape (vocab_size)",
"sims",
"=",
"torch",
".",
"tensor",
"(",
"sims",
")",
"# convert to torch Tensor, shape (vocab_size)",
"# Cache sims in self.cache_sent2sims",
"self",
".",
"cache_sentqueue",
".",
"append",
"(",
"sent",
")",
"# append sent to right",
"self",
".",
"cache_sent2sims",
"[",
"sent",
"]",
"=",
"sims",
"# add (sent, sims) pair to cache",
"if",
"len",
"(",
"self",
".",
"cache_sentqueue",
")",
">",
"self",
".",
"cache_limit",
":",
"to_remove",
"=",
"self",
".",
"cache_sentqueue",
".",
"popleft",
"(",
")",
"# remove from left",
"del",
"self",
".",
"cache_sent2sims",
"[",
"to_remove",
"]",
"# remove from cache",
"assert",
"len",
"(",
"self",
".",
"cache_sent2sims",
")",
"==",
"len",
"(",
"self",
".",
"cache_sentqueue",
")",
"assert",
"len",
"(",
"self",
".",
"cache_sent2sims",
")",
"<=",
"self",
".",
"cache_limit",
"return",
"sims"
] | [
122,
4
] | [
161,
19
] | python | en | ['en', 'error', 'th'] | False |
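In matrix form, the vectorized computation above is plain row-wise cosine similarity between the embedding matrix $E$ (shape vocab_size × glove_dim) and the sentence embedding $s$:

$$\mathrm{sims}_i \;=\; \frac{E_i \cdot s}{\lVert E_i \rVert \, \lVert s \rVert}, \qquad i = 1, \dots, \text{vocab\_size}$$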
SentenceEmbedder.embed_sent | (self, sent, rem_first_sv=True) |
Produce an Arora-style sentence embedding for a given sentence.
Inputs:
sent: tokenized sentence; a list of strings
rem_first_sv: If True, remove the first singular vector when you compute the
sentence embeddings. Otherwise, don't remove it.
Returns:
sent_emb: tensor length glove_dim, or None.
If sent_emb is None, that's because all of the words were OOV for GloVe.
|
Produce an Arora-style sentence embedding for a given sentence. | def embed_sent(self, sent, rem_first_sv=True):
"""
Produce an Arora-style sentence embedding for a given sentence.
Inputs:
sent: tokenized sentence; a list of strings
rem_first_sv: If True, remove the first singular vector when you compute the
sentence embeddings. Otherwise, don't remove it.
Returns:
sent_emb: tensor length glove_dim, or None.
If sent_emb is None, that's because all of the words were OOV for GloVe.
"""
# If we haven't loaded the torchtext GloVe embeddings, do so
if self.tt_embs is None:
self.get_glove_embs()
# Lookup glove embeddings for words
tokens = [t for t in sent if t in self.tt_embs.stoi] # in-vocab tokens
# glove_oov_tokens = [t for t in sent if t not in self.tt_embs.stoi]
# if len(glove_oov_tokens)>0:
# print("WARNING: tokens OOV for glove: ", glove_oov_tokens)
if len(tokens) == 0:
print(
'WARNING: tried to embed utterance %s but all tokens are OOV for '
'GloVe. Returning embedding=None' % sent
)
return None
word_embs = [
self.tt_embs.vectors[self.tt_embs.stoi[t]] for t in tokens
] # list of torch Tensors shape (glove_dim)
# Get unigram probabilities for the words. If we don't have a word in word2prob,
# assume it's as rare as the rarest word in word2prob.
unigram_probs = [
self.word2prob[t] if t in self.word2prob else self.min_word_prob
for t in tokens
] # list of floats
# word2prob_oov_tokens = [t for t in tokens if t not in self.word2prob]
# if len(word2prob_oov_tokens)>0:
# print('WARNING: tokens OOV for word2prob, so assuming they are '
# 'maximally rare: ', word2prob_oov_tokens)
# Calculate the weighted average of the word embeddings
smooth_inverse_freqs = [
self.arora_a / (self.arora_a + p) for p in unigram_probs
] # list of floats
sent_emb = sum(
[word_emb * wt for (word_emb, wt) in zip(word_embs, smooth_inverse_freqs)]
) / len(
word_embs
) # torch Tensor shape (glove_dim)
# Remove the first singular value from sent_emb
if rem_first_sv:
sent_emb = remove_first_sv(sent_emb, self.first_sv)
return sent_emb | [
"def",
"embed_sent",
"(",
"self",
",",
"sent",
",",
"rem_first_sv",
"=",
"True",
")",
":",
"# If we haven't loaded the torchtext GloVe embeddings, do so",
"if",
"self",
".",
"tt_embs",
"is",
"None",
":",
"self",
".",
"get_glove_embs",
"(",
")",
"# Lookup glove embeddings for words",
"tokens",
"=",
"[",
"t",
"for",
"t",
"in",
"sent",
"if",
"t",
"in",
"self",
".",
"tt_embs",
".",
"stoi",
"]",
"# in-vocab tokens",
"# glove_oov_tokens = [t for t in sent if t not in self.tt_embs.stoi]",
"# if len(glove_oov_tokens)>0:",
"# print(\"WARNING: tokens OOV for glove: \", glove_oov_tokens)",
"if",
"len",
"(",
"tokens",
")",
"==",
"0",
":",
"print",
"(",
"'WARNING: tried to embed utterance %s but all tokens are OOV for '",
"'GloVe. Returning embedding=None'",
"%",
"sent",
")",
"return",
"None",
"word_embs",
"=",
"[",
"self",
".",
"tt_embs",
".",
"vectors",
"[",
"self",
".",
"tt_embs",
".",
"stoi",
"[",
"t",
"]",
"]",
"for",
"t",
"in",
"tokens",
"]",
"# list of torch Tensors shape (glove_dim)",
"# Get unigram probabilities for the words. If we don't have a word in word2prob,",
"# assume it's as rare as the rarest word in word2prob.",
"unigram_probs",
"=",
"[",
"self",
".",
"word2prob",
"[",
"t",
"]",
"if",
"t",
"in",
"self",
".",
"word2prob",
"else",
"self",
".",
"min_word_prob",
"for",
"t",
"in",
"tokens",
"]",
"# list of floats",
"# word2prob_oov_tokens = [t for t in tokens if t not in self.word2prob]",
"# if len(word2prob_oov_tokens)>0:",
"# print('WARNING: tokens OOV for word2prob, so assuming they are '",
"# 'maximally rare: ', word2prob_oov_tokens)",
"# Calculate the weighted average of the word embeddings",
"smooth_inverse_freqs",
"=",
"[",
"self",
".",
"arora_a",
"/",
"(",
"self",
".",
"arora_a",
"+",
"p",
")",
"for",
"p",
"in",
"unigram_probs",
"]",
"# list of floats",
"sent_emb",
"=",
"sum",
"(",
"[",
"word_emb",
"*",
"wt",
"for",
"(",
"word_emb",
",",
"wt",
")",
"in",
"zip",
"(",
"word_embs",
",",
"smooth_inverse_freqs",
")",
"]",
")",
"/",
"len",
"(",
"word_embs",
")",
"# torch Tensor shape (glove_dim)",
"# Remove the first singular value from sent_emb",
"if",
"rem_first_sv",
":",
"sent_emb",
"=",
"remove_first_sv",
"(",
"sent_emb",
",",
"self",
".",
"first_sv",
")",
"return",
"sent_emb"
] | [
163,
4
] | [
219,
23
] | python | en | ['en', 'error', 'th'] | False |
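The weighted average in the body is exactly the smooth-inverse-frequency (SIF) embedding of Arora et al. (2017); with $p(w)$ the unigram probability of word $w$ and $a$ the `arora_a` constant:

$$v_S \;=\; \frac{1}{|S|} \sum_{w \in S} \frac{a}{a + p(w)}\, v_w$$

followed, when `rem_first_sv=True`, by removal of the common component $v_S \leftarrow v_S - u u^{\top} v_S$, where $u$ is the stored first singular vector.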
TestPing.test_init | (self) | Test initialization. | Test initialization. | def test_init(self):
"""Test initialization."""
assert self.test_ping.comment == self.test_comment
assert self.test_ping.response_requested == self.test_response_requested | [
"def",
"test_init",
"(",
"self",
")",
":",
"assert",
"self",
".",
"test_ping",
".",
"comment",
"==",
"self",
".",
"test_comment",
"assert",
"self",
".",
"test_ping",
".",
"response_requested",
"==",
"self",
".",
"test_response_requested"
] | [
16,
4
] | [
19,
80
] | python | co | ['es', 'co', 'en'] | False |
TestPing.test_deserialize | (self, mock_ping_schema_load) |
Test deserialization.
|
Test deserialization.
| def test_deserialize(self, mock_ping_schema_load):
"""
Test deserialization.
"""
obj = {"obj": "obj"}
msg = Ping.deserialize(obj)
mock_ping_schema_load.assert_called_once_with(obj)
assert msg is mock_ping_schema_load.return_value | [
"def",
"test_deserialize",
"(",
"self",
",",
"mock_ping_schema_load",
")",
":",
"obj",
"=",
"{",
"\"obj\"",
":",
"\"obj\"",
"}",
"msg",
"=",
"Ping",
".",
"deserialize",
"(",
"obj",
")",
"mock_ping_schema_load",
".",
"assert_called_once_with",
"(",
"obj",
")",
"assert",
"msg",
"is",
"mock_ping_schema_load",
".",
"return_value"
] | [
26,
4
] | [
35,
56
] | python | en | ['en', 'error', 'th'] | False |
TestPing.test_serialize | (self, mock_ping_schema_load) |
Test serialization.
|
Test serialization.
| def test_serialize(self, mock_ping_schema_load):
"""
Test serialization.
"""
msg_dict = self.test_ping.serialize()
mock_ping_schema_load.assert_called_once_with(self.test_ping)
assert msg_dict is mock_ping_schema_load.return_value | [
"def",
"test_serialize",
"(",
"self",
",",
"mock_ping_schema_load",
")",
":",
"msg_dict",
"=",
"self",
".",
"test_ping",
".",
"serialize",
"(",
")",
"mock_ping_schema_load",
".",
"assert_called_once_with",
"(",
"self",
".",
"test_ping",
")",
"assert",
"msg_dict",
"is",
"mock_ping_schema_load",
".",
"return_value"
] | [
38,
4
] | [
45,
61
] | python | en | ['en', 'error', 'th'] | False |
setup | (bot) |
Mandatory function to add the Cog to the bot.
|
Mandatory function to add the Cog to the bot.
| def setup(bot):
"""
Mandatory function to add the Cog to the bot.
"""
bot.add_cog(GeneralDebugCog(bot)) | [
"def",
"setup",
"(",
"bot",
")",
":",
"bot",
".",
"add_cog",
"(",
"GeneralDebugCog",
"(",
"bot",
")",
")"
] | [
579,
0
] | [
583,
37
] | python | en | ['en', 'error', 'th'] | False |
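For context, this hook follows the discord.py 1.x extension convention: the extension loader imports the module and then calls its `setup` function. A sketch, with a hypothetical module path:

```python
# Somewhere in the bot's startup code (discord.py 1.x, sync setup):
bot.load_extension('cogs.general_debug')  # imports the module, then calls setup(bot)
```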
ErrorY.array | (self) |
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
|
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series | def array(self):
"""
Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"] | [
"def",
"array",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"array\"",
"]"
] | [
30,
4
] | [
42,
28
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.arrayminus | (self) |
Sets the data corresponding to the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
|
Sets the data corresponding to the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series | def arrayminus(self):
"""
Sets the data corresponding to the length of each error bar in the
bottom (left) direction for vertical (horizontal) bars. Values
are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"] | [
"def",
"arrayminus",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"arrayminus\"",
"]"
] | [
51,
4
] | [
64,
33
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.arrayminussrc | (self) |
Sets the source reference on Chart Studio Cloud for arrayminus.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
|
Sets the source reference on Chart Studio Cloud for arrayminus.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object | def arrayminussrc(self):
"""
Sets the source reference on Chart Studio Cloud for arrayminus.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"] | [
"def",
"arrayminussrc",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"arrayminussrc\"",
"]"
] | [
73,
4
] | [
85,
36
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.arraysrc | (self) |
Sets the source reference on Chart Studio Cloud for array.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
|
Sets the source reference on Chart Studio Cloud for array.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object | def arraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for array.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"] | [
"def",
"arraysrc",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"arraysrc\"",
"]"
] | [
94,
4
] | [
105,
31
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.color | (self) |
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
|
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen | def color(self):
"""
Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"] | [
"def",
"color",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"color\"",
"]"
] | [
114,
4
] | [
164,
28
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.symmetric | (self) |
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
|
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False) | def symmetric(self):
"""
Determines whether or not the error bars have the same length
in both directions (top/bottom for vertical bars, left/right for
horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"] | [
"def",
"symmetric",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"symmetric\"",
"]"
] | [
173,
4
] | [
186,
32
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.thickness | (self) |
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
|
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf] | def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"] | [
"def",
"thickness",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"thickness\"",
"]"
] | [
195,
4
] | [
206,
32
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.traceref | (self) |
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
|
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807] | def traceref(self):
"""
The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"] | [
"def",
"traceref",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"traceref\"",
"]"
] | [
215,
4
] | [
225,
31
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.tracerefminus | (self) |
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
|
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807] | def tracerefminus(self):
"""
The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"] | [
"def",
"tracerefminus",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"tracerefminus\"",
"]"
] | [
234,
4
] | [
244,
36
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.type | (self) |
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the sqaure of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
|
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the sqaure of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data'] | def type(self):
"""
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value. Set this
constant in `value`. If "percent", the bar lengths correspond
to a percentage of underlying data. Set this percentage in
`value`. If "sqrt", the bar lengths correspond to the sqaure of
the underlying data. If "data", the bar lengths are set with
data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"] | [
"def",
"type",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"type\"",
"]"
] | [
253,
4
] | [
271,
27
] | python | en | ['en', 'error', 'th'] | False |
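For concreteness, a sketch of the four `type` modes as dicts that could each be passed as a bar trace's `error_y` (the per-point array is illustrative):

```python
err_plus = [0.2, 0.4, 0.3]  # hypothetical per-point bar lengths
modes = [
    dict(type='percent', value=10),     # bar length = 10% of each y value
    dict(type='constant', value=0.5),   # every bar is 0.5 data units long
    dict(type='sqrt'),                  # bar length = sqrt(y)
    dict(type='data', array=err_plus),  # explicit per-point lengths
]
```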
ErrorY.value | (self) |
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
|
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf] | def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"] | [
"def",
"value",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"value\"",
"]"
] | [
280,
4
] | [
293,
28
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.valueminus | (self) |
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
|
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf] | def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"] | [
"def",
"valueminus",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"valueminus\"",
"]"
] | [
302,
4
] | [
316,
33
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.visible | (self) |
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
|
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False) | def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"] | [
"def",
"visible",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"visible\"",
"]"
] | [
325,
4
] | [
336,
30
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.width | (self) |
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
|
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf] | def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"] | [
"def",
"width",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"width\"",
"]"
] | [
345,
4
] | [
357,
28
] | python | en | ['en', 'error', 'th'] | False |
ErrorY.__init__ | (
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
) |
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.ErrorY`
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus.
arraysrc
Sets the source reference on Chart Studio Cloud for
array.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
|
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.ErrorY`
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus.
arraysrc
Sets the source reference on Chart Studio Cloud for
array.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref | def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.bar.ErrorY`
array
Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
Sets the data corresponding to the length of each error
bar in the bottom (left) direction for vertical
(horizontal) bars. Values are plotted relative to the
underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
arrayminus.
arraysrc
Sets the source reference on Chart Studio Cloud for
array.
color
Sets the stroke color of the error bars.
symmetric
Determines whether or not the error bars have the same
length in both directions (top/bottom for vertical bars,
left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
Determines the rule used to generate the error bars. If
"constant", the bar lengths are of a constant value.
Set this constant in `value`. If "percent", the bar
lengths correspond to a percentage of underlying data.
Set this percentage in `value`. If "sqrt", the bar
lengths correspond to the square of the underlying
data. If "data", the bar lengths are set with data set
`array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super(ErrorY, self).__init__("error_y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.bar.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.ErrorY`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"array",
"=",
"None",
",",
"arrayminus",
"=",
"None",
",",
"arrayminussrc",
"=",
"None",
",",
"arraysrc",
"=",
"None",
",",
"color",
"=",
"None",
",",
"symmetric",
"=",
"None",
",",
"thickness",
"=",
"None",
",",
"traceref",
"=",
"None",
",",
"tracerefminus",
"=",
"None",
",",
"type",
"=",
"None",
",",
"value",
"=",
"None",
",",
"valueminus",
"=",
"None",
",",
"visible",
"=",
"None",
",",
"width",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"ErrorY",
",",
"self",
")",
".",
"__init__",
"(",
"\"error_y\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.bar.ErrorY \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.bar.ErrorY`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"array\"",
",",
"None",
")",
"_v",
"=",
"array",
"if",
"array",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"array\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"arrayminus\"",
",",
"None",
")",
"_v",
"=",
"arrayminus",
"if",
"arrayminus",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"arrayminus\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"arrayminussrc\"",
",",
"None",
")",
"_v",
"=",
"arrayminussrc",
"if",
"arrayminussrc",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"arrayminussrc\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"arraysrc\"",
",",
"None",
")",
"_v",
"=",
"arraysrc",
"if",
"arraysrc",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"arraysrc\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"color\"",
",",
"None",
")",
"_v",
"=",
"color",
"if",
"color",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"color\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"symmetric\"",
",",
"None",
")",
"_v",
"=",
"symmetric",
"if",
"symmetric",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"symmetric\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"thickness\"",
",",
"None",
")",
"_v",
"=",
"thickness",
"if",
"thickness",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"thickness\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"traceref\"",
",",
"None",
")",
"_v",
"=",
"traceref",
"if",
"traceref",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"traceref\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"tracerefminus\"",
",",
"None",
")",
"_v",
"=",
"tracerefminus",
"if",
"tracerefminus",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"tracerefminus\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"type\"",
",",
"None",
")",
"_v",
"=",
"type",
"if",
"type",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"type\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"value\"",
",",
"None",
")",
"_v",
"=",
"value",
"if",
"value",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"value\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"valueminus\"",
",",
"None",
")",
"_v",
"=",
"valueminus",
"if",
"valueminus",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"valueminus\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"visible\"",
",",
"None",
")",
"_v",
"=",
"visible",
"if",
"visible",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"visible\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"width\"",
",",
"None",
")",
"_v",
"=",
"width",
"if",
"width",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"width\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
423,
4
] | [
600,
34
] | python | en | ['en', 'error', 'th'] | False |
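Taken together, a minimal end-to-end example of these settings on a bar trace (data values are illustrative):

```python
import plotly.graph_objects as go

fig = go.Figure(
    go.Bar(
        x=['a', 'b', 'c'],
        y=[3.1, 2.4, 4.8],
        error_y=dict(
            type='data',                  # lengths come from `array`
            array=[0.2, 0.4, 0.3],        # upper bar lengths
            symmetric=False,
            arrayminus=[0.1, 0.2, 0.1],   # lower bar lengths
            color='crimson',
            thickness=1.5,
            width=4,
        ),
    )
)
fig.show()
```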
MessageSocketHandler.open | (self) |
Opens a websocket and assigns a random UUID that is stored in the class-level
`subs` variable.
|
Opens a websocket and assigns a random UUID that is stored in the class-level
`subs` variable.
| def open(self):
"""
Opens a websocket and assigns a random UUID that is stored in the class-level
`subs` variable.
"""
if self.sid not in self.subs.values():
self.subs[self.sid] = self
self.set_nodelay(True) | [
"def",
"open",
"(",
"self",
")",
":",
"if",
"self",
".",
"sid",
"not",
"in",
"self",
".",
"subs",
".",
"values",
"(",
")",
":",
"self",
".",
"subs",
"[",
"self",
".",
"sid",
"]",
"=",
"self",
"self",
".",
"set_nodelay",
"(",
"True",
")"
] | [
31,
4
] | [
38,
34
] | python | en | ['en', 'error', 'th'] | False |
MessageSocketHandler.on_close | (self) |
Runs when a socket is closed.
|
Runs when a socket is closed.
| def on_close(self):
"""
Runs when a socket is closed.
"""
if self.sid in self.subs:
del self.subs[self.sid] | [
"def",
"on_close",
"(",
"self",
")",
":",
"if",
"self",
".",
"sid",
"in",
"self",
".",
"subs",
":",
"del",
"self",
".",
"subs",
"[",
"self",
".",
"sid",
"]"
] | [
40,
4
] | [
45,
35
] | python | en | ['en', 'error', 'th'] | False |
MessageSocketHandler.on_message | (self, message_text) |
Callback that runs when a new message is received from a client. See the
chat_service README for the resultant message structure.
Args:
message_text: A stringified JSON object with a text or attachment key.
`text` should contain a string message and `attachment` is a dict.
See `WebsocketAgent.put_data` for more information about the
attachment dict structure.
|
Callback that runs when a new message is received from a client. See the
chat_service README for the resultant message structure. | def on_message(self, message_text):
"""
Callback that runs when a new message is received from a client. See the
chat_service README for the resultant message structure.
Args:
message_text: A stringified JSON object with a text or attachment key.
`text` should contain a string message and `attachment` is a dict.
See `WebsocketAgent.put_data` for more information about the
attachment dict structure.
"""
logging.info('websocket message from client: {}'.format(message_text))
message = json.loads(message_text)
message_history = message.get('message_history', [])
#self.sid = message.get('user_id')
#self.subs[self.sid] = self
message = {
'text': message.get('text', ''),
'message_history': message_history,
'payload': message.get('payload'),
'sender': {'id': self.sid},
'recipient': {'id': 0},
}
self.message_callback(message) | [
"def",
"on_message",
"(",
"self",
",",
"message_text",
")",
":",
"logging",
".",
"info",
"(",
"'websocket message from client: {}'",
".",
"format",
"(",
"message_text",
")",
")",
"message",
"=",
"json",
".",
"loads",
"(",
"message_text",
")",
"message_history",
"=",
"message",
".",
"get",
"(",
"'message_history'",
",",
"[",
"]",
")",
"#self.sid = message.get('user_id')",
"#self.subs[self.sid] = self",
"message",
"=",
"{",
"'text'",
":",
"message",
".",
"get",
"(",
"'text'",
",",
"''",
")",
",",
"'message_history'",
":",
"message_history",
",",
"'payload'",
":",
"message",
".",
"get",
"(",
"'payload'",
")",
",",
"'sender'",
":",
"{",
"'id'",
":",
"self",
".",
"sid",
"}",
",",
"'recipient'",
":",
"{",
"'id'",
":",
"0",
"}",
",",
"}",
"self",
".",
"message_callback",
"(",
"message",
")"
] | [
47,
4
] | [
72,
38
] | python | en | ['en', 'error', 'th'] | False |
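For orientation, a minimal sketch of the client side of this handler. The host, port, and endpoint path below are assumptions (they are not given in the row above), and the websocket-client package is assumed to be installed:
import json
import websocket  # pip install websocket-client

# URL is a placeholder; the real path/port come from the server config.
ws = websocket.create_connection("ws://localhost:8080/websocket")
ws.send(json.dumps({
    "text": "hello",        # becomes message['text'] in on_message
    "message_history": [],  # becomes message['message_history']
    "payload": None,        # becomes message['payload']
}))
ws.close()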
run_migrations_offline | () | Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
| Run migrations in 'offline' mode. | def run_migrations_offline():
"""Run migrations in 'offline' mode.ec
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
include_object=include_object
)
with context.begin_transaction():
context.run_migrations() | [
"def",
"run_migrations_offline",
"(",
")",
":",
"url",
"=",
"config",
".",
"get_main_option",
"(",
"\"sqlalchemy.url\"",
")",
"context",
".",
"configure",
"(",
"url",
"=",
"url",
",",
"target_metadata",
"=",
"target_metadata",
",",
"literal_binds",
"=",
"True",
",",
"dialect_opts",
"=",
"{",
"\"paramstyle\"",
":",
"\"named\"",
"}",
",",
"include_object",
"=",
"include_object",
")",
"with",
"context",
".",
"begin_transaction",
"(",
")",
":",
"context",
".",
"run_migrations",
"(",
")"
] | [
37,
0
] | [
59,
32
] | python | en | ['en', 'nl', 'it'] | False |
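Both migration entry points pass an `include_object` callable defined elsewhere in env.py. A hypothetical version using Alembic's documented hook signature, shown only to make the wiring concrete (the filtering rule is an invented example):
def include_object(object, name, type_, reflected, compare_to):
    # Alembic calls this once per schema object; returning False
    # excludes the object from migration/autogenerate consideration.
    if type_ == "table" and name.startswith("tmp_"):  # example rule, an assumption
        return False
    return True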
run_migrations_online | () | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
| Run migrations in 'online' mode. | def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata, include_object=include_object
)
with context.begin_transaction():
context.run_migrations() | [
"def",
"run_migrations_online",
"(",
")",
":",
"connectable",
"=",
"engine_from_config",
"(",
"config",
".",
"get_section",
"(",
"config",
".",
"config_ini_section",
")",
",",
"prefix",
"=",
"\"sqlalchemy.\"",
",",
"poolclass",
"=",
"pool",
".",
"NullPool",
",",
")",
"with",
"connectable",
".",
"connect",
"(",
")",
"as",
"connection",
":",
"context",
".",
"configure",
"(",
"connection",
"=",
"connection",
",",
"target_metadata",
"=",
"target_metadata",
",",
"include_object",
"=",
"include_object",
")",
"with",
"context",
".",
"begin_transaction",
"(",
")",
":",
"context",
".",
"run_migrations",
"(",
")"
] | [
62,
0
] | [
81,
36
] | python | en | ['en', 'nl', 'it'] | False |
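These two functions are normally selected by the stock dispatch at the bottom of Alembic's env.py template, which is worth keeping in mind when reading them together:
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()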
Projection.type | (self) |
Sets the projection type. The projection type could be either
"perspective" or "orthographic". The default is "perspective".
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perspective', 'orthographic']
Returns
-------
Any
|
Sets the projection type. The projection type could be either
"perspective" or "orthographic". The default is "perspective".
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perspective', 'orthographic'] | def type(self):
"""
Sets the projection type. The projection type could be either
"perspective" or "orthographic". The default is "perspective".
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['perspective', 'orthographic']
Returns
-------
Any
"""
return self["type"] | [
"def",
"type",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"type\"",
"]"
] | [
15,
4
] | [
28,
27
] | python | en | ['en', 'error', 'th'] | False |
Projection.__init__ | (self, arg=None, type=None, **kwargs) |
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.c
amera.Projection`
type
Sets the projection type. The projection type could be
either "perspective" or "orthographic". The default is
"perspective".
Returns
-------
Projection
|
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.c
amera.Projection`
type
Sets the projection type. The projection type could be
either "perspective" or "orthographic". The default is
"perspective". | def __init__(self, arg=None, type=None, **kwargs):
"""
Construct a new Projection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.scene.c
amera.Projection`
type
Sets the projection type. The projection type could be
either "perspective" or "orthographic". The default is
"perspective".
Returns
-------
Projection
"""
super(Projection, self).__init__("projection")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.camera.Projection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.camera.Projection`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"type",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Projection",
",",
"self",
")",
".",
"__init__",
"(",
"\"projection\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.layout.scene.camera.Projection \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.layout.scene.camera.Projection`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"type\"",
",",
"None",
")",
"_v",
"=",
"type",
"if",
"type",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"type\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
45,
4
] | [
104,
34
] | python | en | ['en', 'error', 'th'] | False |
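A short usage sketch for this class (the figure contents are illustrative only):
import plotly.graph_objects as go

fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1], mode="lines"))
# Assign the constructed object on the camera...
fig.layout.scene.camera.projection = go.layout.scene.camera.Projection(type="orthographic")
# ...or reach the same property via plotly's magic-underscore shorthand:
fig.update_layout(scene_camera_projection_type="orthographic")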
Stream.maxpoints | (self) |
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
|
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000] | def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"] | [
"def",
"maxpoints",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"maxpoints\"",
"]"
] | [
15,
4
] | [
28,
32
] | python | en | ['en', 'error', 'th'] | False |
Stream.token | (self) |
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
|
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string | def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"] | [
"def",
"token",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"token\"",
"]"
] | [
37,
4
] | [
50,
28
] | python | en | ['en', 'error', 'th'] | False |
Stream.__init__ | (self, arg=None, maxpoints=None, token=None, **kwargs) |
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.funnel.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
|
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.funnel.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details. | def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.funnel.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.funnel.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"maxpoints",
"=",
"None",
",",
"token",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Stream",
",",
"self",
")",
".",
"__init__",
"(",
"\"stream\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.funnel.Stream \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.funnel.Stream`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"maxpoints\"",
",",
"None",
")",
"_v",
"=",
"maxpoints",
"if",
"maxpoints",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"maxpoints\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"token\"",
",",
"None",
")",
"_v",
"=",
"token",
"if",
"token",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"token\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
72,
4
] | [
139,
34
] | python | en | ['en', 'error', 'th'] | False |
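A usage sketch; the token value is a placeholder that would come from your Chart Studio settings page, and the funnel data is illustrative:
import plotly.graph_objects as go

trace = go.Funnel(
    y=["visits", "signups", "purchases"],
    x=[120, 60, 20],
    # maxpoints=50 keeps only the newest 50 streamed points on the plot
    stream=go.funnel.Stream(token="YOUR-STREAM-TOKEN", maxpoints=50),
)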
Title.font | (self) |
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.yaxis.title.Font
|
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size | def font(self):
"""
Sets this axis' title font. Note that the title's font used to
be customized by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.scene.yaxis.title.Font
"""
return self["font"] | [
"def",
"font",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"font\"",
"]"
] | [
15,
4
] | [
53,
27
] | python | en | ['en', 'error', 'th'] | False |
Title.text | (self) |
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
|
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string | def text(self):
"""
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"] | [
"def",
"text",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"text\"",
"]"
] | [
62,
4
] | [
76,
27
] | python | en | ['en', 'error', 'th'] | False |
Title.__init__ | (self, arg=None, font=None, text=None, **kwargs) |
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.yaxis.Title`
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
|
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.yaxis.Title`
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated. | def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.yaxis.Title`
font
Sets this axis' title font. Note that the title's font
used to be customized by the now deprecated `titlefont`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.yaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.yaxis.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"font",
"=",
"None",
",",
"text",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Title",
",",
"self",
")",
".",
"__init__",
"(",
"\"title\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.layout.scene.yaxis.Title \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.layout.scene.yaxis.Title`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"font\"",
",",
"None",
")",
"_v",
"=",
"font",
"if",
"font",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"font\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"text\"",
",",
"None",
")",
"_v",
"=",
"text",
"if",
"text",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"text\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
98,
4
] | [
166,
34
] | python | en | ['en', 'error', 'th'] | False |
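A usage sketch setting the scene y-axis title through this class (axis label and font choices are illustrative):
import plotly.graph_objects as go

fig = go.Figure(go.Scatter3d(x=[0, 1], y=[0, 1], z=[0, 1]))
fig.update_layout(
    scene=dict(
        yaxis=dict(
            title=go.layout.scene.yaxis.Title(
                text="Depth (m)",
                font=dict(family="Courier New", size=14),
            )
        )
    )
)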
SelfChatWorld.init_contexts | (self, shared=None) |
Override to load or instantiate contexts to be used to seed the self chat.
|
Override to load or instantiate contexts to be used to seed the self chat.
| def init_contexts(self, shared=None) -> None:
"""
Override to load or instantiate contexts to be used to seed the self chat.
"""
pass | [
"def",
"init_contexts",
"(",
"self",
",",
"shared",
"=",
"None",
")",
"->",
"None",
":",
"pass"
] | [
61,
4
] | [
65,
12
] | python | en | ['en', 'error', 'th'] | False |
SelfChatWorld.get_contexts | (self) |
Override to return a pair of contexts with which to seed the self chat episode.
This function will be called before the first turn of every episode.
|
Override to return a pair of contexts with which to seed the self chat episode. | def get_contexts(self):
"""
Override to return a pair of contexts with which to seed the self chat episode.
This function will be called before the first turn of every episode.
"""
return ['Hi!', ''] | [
"def",
"get_contexts",
"(",
"self",
")",
":",
"return",
"[",
"'Hi!'",
",",
"''",
"]"
] | [
67,
4
] | [
73,
26
] | python | en | ['en', 'error', 'th'] | False |
SelfChatWorld.init_openers | (self) |
Override to load or instantiate opening messages to be used to seed the self
chat.
|
Override to load or instantiate opening messages to be used to seed the self
chat.
| def init_openers(self) -> None:
"""
Override to load or instantiate opening messages to be used to seed the self
chat.
"""
if self.opt.get('seed_messages_from_task'):
self._openers = load_openers(self.opt) | [
"def",
"init_openers",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"opt",
".",
"get",
"(",
"'seed_messages_from_task'",
")",
":",
"self",
".",
"_openers",
"=",
"load_openers",
"(",
"self",
".",
"opt",
")"
] | [
75,
4
] | [
81,
50
] | python | en | ['en', 'error', 'th'] | False |
SelfChatWorld.get_openers | (self, episode_num: int) |
Override to return one or more opening messages with which to seed the self chat
episode.
The return value should be an array of strings, each string being a message in
response to the string before it.
|
Override to return one or more opening messages with which to seed the self chat
episode. | def get_openers(self, episode_num: int) -> Optional[List[str]]:
"""
Override to return one or more opening messages with which to seed the self chat
episode.
The return value should be an array of strings, each string being a message in
response to the string before it.
"""
if self._openers:
return [random.choice(self._openers)]
return None | [
"def",
"get_openers",
"(",
"self",
",",
"episode_num",
":",
"int",
")",
"->",
"Optional",
"[",
"List",
"[",
"str",
"]",
"]",
":",
"if",
"self",
".",
"_openers",
":",
"return",
"[",
"random",
".",
"choice",
"(",
"self",
".",
"_openers",
")",
"]",
"return",
"None"
] | [
83,
4
] | [
93,
19
] | python | en | ['en', 'error', 'th'] | False |
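A hypothetical subclass sketch showing how these hooks compose; the class name and persona strings are invented for illustration and are not part of the source above:
import random

class PersonaSelfChatWorld(SelfChatWorld):
    def init_contexts(self, shared=None) -> None:
        # loaded once, then reused across episodes
        self._context_pairs = [('I love hiking.', 'I collect stamps.')]

    def get_contexts(self):
        # called before the first turn of every episode
        return list(random.choice(self._context_pairs))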
Textfont.color | (self) |
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
|
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen | def color(self):
"""
Sets the text font color of selected points.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"] | [
"def",
"color",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"color\"",
"]"
] | [
15,
4
] | [
65,
28
] | python | en | ['en', 'error', 'th'] | False |
Textfont.__init__ | (self, arg=None, color=None, **kwargs) |
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
|
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.selected.Textfont`
color
Sets the text font color of selected points. | def __init__(self, arg=None, color=None, **kwargs):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatterternary
.selected.Textfont`
color
Sets the text font color of selected points.
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.selected.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.selected.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"color",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Textfont",
",",
"self",
")",
".",
"__init__",
"(",
"\"textfont\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.scatterternary.selected.Textfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatterternary.selected.Textfont`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"color\"",
",",
"None",
")",
"_v",
"=",
"color",
"if",
"color",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"color\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
80,
4
] | [
137,
34
] | python | en | ['en', 'error', 'th'] | False |
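A usage sketch; selecting points on the rendered figure (e.g. with the lasso tool) would then recolor their text labels. The ternary data is illustrative:
import plotly.graph_objects as go

fig = go.Figure(go.Scatterternary(
    a=[0.2, 0.5, 0.3], b=[0.3, 0.2, 0.5], c=[0.5, 0.3, 0.2],
    mode="markers+text", text=["p1", "p2", "p3"],
    selected=dict(textfont=go.scatterternary.selected.Textfont(color="crimson")),
))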
parse_requirements | (fname='requirements.txt', with_version=True) | Parse the package dependencies listed in a requirements file but strip
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
| Parse the package dependencies listed in a requirements file but strip
specific versioning information. | def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently platform_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages | [
"def",
"parse_requirements",
"(",
"fname",
"=",
"'requirements.txt'",
",",
"with_version",
"=",
"True",
")",
":",
"import",
"sys",
"from",
"os",
".",
"path",
"import",
"exists",
"import",
"re",
"require_fpath",
"=",
"fname",
"def",
"parse_line",
"(",
"line",
")",
":",
"\"\"\"Parse information from a line in a requirements text file.\"\"\"",
"if",
"line",
".",
"startswith",
"(",
"'-r '",
")",
":",
"# Allow specifying requirements in other files",
"target",
"=",
"line",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"for",
"info",
"in",
"parse_require_file",
"(",
"target",
")",
":",
"yield",
"info",
"else",
":",
"info",
"=",
"{",
"'line'",
":",
"line",
"}",
"if",
"line",
".",
"startswith",
"(",
"'-e '",
")",
":",
"info",
"[",
"'package'",
"]",
"=",
"line",
".",
"split",
"(",
"'#egg='",
")",
"[",
"1",
"]",
"elif",
"'@git+'",
"in",
"line",
":",
"info",
"[",
"'package'",
"]",
"=",
"line",
"else",
":",
"# Remove versioning from the package",
"pat",
"=",
"'('",
"+",
"'|'",
".",
"join",
"(",
"[",
"'>='",
",",
"'=='",
",",
"'>'",
"]",
")",
"+",
"')'",
"parts",
"=",
"re",
".",
"split",
"(",
"pat",
",",
"line",
",",
"maxsplit",
"=",
"1",
")",
"parts",
"=",
"[",
"p",
".",
"strip",
"(",
")",
"for",
"p",
"in",
"parts",
"]",
"info",
"[",
"'package'",
"]",
"=",
"parts",
"[",
"0",
"]",
"if",
"len",
"(",
"parts",
")",
">",
"1",
":",
"op",
",",
"rest",
"=",
"parts",
"[",
"1",
":",
"]",
"if",
"';'",
"in",
"rest",
":",
"# Handle platform specific dependencies",
"# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies",
"version",
",",
"platform_deps",
"=",
"map",
"(",
"str",
".",
"strip",
",",
"rest",
".",
"split",
"(",
"';'",
")",
")",
"info",
"[",
"'platform_deps'",
"]",
"=",
"platform_deps",
"else",
":",
"version",
"=",
"rest",
"# NOQA",
"info",
"[",
"'version'",
"]",
"=",
"(",
"op",
",",
"version",
")",
"yield",
"info",
"def",
"parse_require_file",
"(",
"fpath",
")",
":",
"with",
"open",
"(",
"fpath",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line",
"and",
"not",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"for",
"info",
"in",
"parse_line",
"(",
"line",
")",
":",
"yield",
"info",
"def",
"gen_packages_items",
"(",
")",
":",
"if",
"exists",
"(",
"require_fpath",
")",
":",
"for",
"info",
"in",
"parse_require_file",
"(",
"require_fpath",
")",
":",
"parts",
"=",
"[",
"info",
"[",
"'package'",
"]",
"]",
"if",
"with_version",
"and",
"'version'",
"in",
"info",
":",
"parts",
".",
"extend",
"(",
"info",
"[",
"'version'",
"]",
")",
"if",
"not",
"sys",
".",
"version",
".",
"startswith",
"(",
"'3.4'",
")",
":",
"# apparently package_deps are broken in 3.4",
"platform_deps",
"=",
"info",
".",
"get",
"(",
"'platform_deps'",
")",
"if",
"platform_deps",
"is",
"not",
"None",
":",
"parts",
".",
"append",
"(",
"';'",
"+",
"platform_deps",
")",
"item",
"=",
"''",
".",
"join",
"(",
"parts",
")",
"yield",
"item",
"packages",
"=",
"list",
"(",
"gen_packages_items",
"(",
")",
")",
"return",
"packages"
] | [
114,
0
] | [
189,
19
] | python | en | ['en', 'en', 'en'] | True |
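Typical call site inside the same setup.py (the package metadata below is a placeholder):
from setuptools import setup

setup(
    name="mypackage",
    version="0.1.0",
    install_requires=parse_requirements('requirements.txt'),
)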
Line.color | (self) |
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
|
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen | def color(self):
"""
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"] | [
"def",
"color",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"color\"",
"]"
] | [
15,
4
] | [
66,
28
] | python | en | ['en', 'error', 'th'] | False |
Line.dash | (self) |
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
|
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.) | def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"] | [
"def",
"dash",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"dash\"",
"]"
] | [
75,
4
] | [
92,
27
] | python | en | ['en', 'error', 'th'] | False |
Line.smoothing | (self) |
Sets the amount of smoothing for the contour lines, where 0
corresponds to no smoothing.
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
|
Sets the amount of smoothing for the contour lines, where 0
corresponds to no smoothing.
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3] | def smoothing(self):
"""
Sets the amount of smoothing for the contour lines, where 0
corresponds to no smoothing.
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"] | [
"def",
"smoothing",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"smoothing\"",
"]"
] | [
101,
4
] | [
113,
32
] | python | en | ['en', 'error', 'th'] | False |
Line.width | (self) |
Sets the contour line width (in px). Defaults to 0.5 when
`contours.type` is "levels". Defaults to 2 when `contours.type`
is "constraint".
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
|
Sets the contour line width (in px). Defaults to 0.5 when
`contours.type` is "levels". Defaults to 2 when `contours.type`
is "constraint".
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf] | def width(self):
"""
Sets the contour line width (in px). Defaults to 0.5 when
`contours.type` is "levels". Defaults to 2 when `contours.type`
is "constraint".
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"] | [
"def",
"width",
"(",
"self",
")",
":",
"return",
"self",
"[",
"\"width\"",
"]"
] | [
122,
4
] | [
135,
28
] | python | en | ['en', 'error', 'th'] | False |
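A usage sketch combining the four line properties; `coloring="none"` is used so the fixed line color actually takes effect (per the note on `color` above). The z data is illustrative:
import plotly.graph_objects as go

fig = go.Figure(go.Contour(
    z=[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
    contours=dict(coloring="none"),  # draw contour lines only
    line=go.contour.Line(color="black", width=1, dash="dot", smoothing=1.0),
))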
Line.__init__ | (
self, arg=None, color=None, dash=None, smoothing=None, width=None, **kwargs
) |
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.contour.Line`
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width (in px). Defaults to 0.5
when `contours.type` is "levels". Defaults to 2 when
`contours.type` is "constraint".
Returns
-------
Line
|
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.contour.Line`
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width (in px). Defaults to 0.5
when `contours.type` is "levels". Defaults to 2 when
`contours.type` is "constraint". | def __init__(
self, arg=None, color=None, dash=None, smoothing=None, width=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.contour.Line`
color
Sets the color of the contour level. Has no effect if
`contours.coloring` is set to "lines".
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour lines,
where 0 corresponds to no smoothing.
width
Sets the contour line width (in px). Defaults to 0.5
when `contours.type` is "levels". Defaults to 2 when
`contours.type` is "constraint".
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.contour.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("dash", None)
_v = dash if dash is not None else _v
if _v is not None:
self["dash"] = _v
_v = arg.pop("smoothing", None)
_v = smoothing if smoothing is not None else _v
if _v is not None:
self["smoothing"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False | [
"def",
"__init__",
"(",
"self",
",",
"arg",
"=",
"None",
",",
"color",
"=",
"None",
",",
"dash",
"=",
"None",
",",
"smoothing",
"=",
"None",
",",
"width",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"Line",
",",
"self",
")",
".",
"__init__",
"(",
"\"line\"",
")",
"if",
"\"_parent\"",
"in",
"kwargs",
":",
"self",
".",
"_parent",
"=",
"kwargs",
"[",
"\"_parent\"",
"]",
"return",
"# Validate arg",
"# ------------",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"{",
"}",
"elif",
"isinstance",
"(",
"arg",
",",
"self",
".",
"__class__",
")",
":",
"arg",
"=",
"arg",
".",
"to_plotly_json",
"(",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"dict",
")",
":",
"arg",
"=",
"_copy",
".",
"copy",
"(",
"arg",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nThe first argument to the plotly.graph_objs.contour.Line \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.contour.Line`\"\"\"",
")",
"# Handle skip_invalid",
"# -------------------",
"self",
".",
"_skip_invalid",
"=",
"kwargs",
".",
"pop",
"(",
"\"skip_invalid\"",
",",
"False",
")",
"self",
".",
"_validate",
"=",
"kwargs",
".",
"pop",
"(",
"\"_validate\"",
",",
"True",
")",
"# Populate data dict with properties",
"# ----------------------------------",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"color\"",
",",
"None",
")",
"_v",
"=",
"color",
"if",
"color",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"color\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"dash\"",
",",
"None",
")",
"_v",
"=",
"dash",
"if",
"dash",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"dash\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"smoothing\"",
",",
"None",
")",
"_v",
"=",
"smoothing",
"if",
"smoothing",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"smoothing\"",
"]",
"=",
"_v",
"_v",
"=",
"arg",
".",
"pop",
"(",
"\"width\"",
",",
"None",
")",
"_v",
"=",
"width",
"if",
"width",
"is",
"not",
"None",
"else",
"_v",
"if",
"_v",
"is",
"not",
"None",
":",
"self",
"[",
"\"width\"",
"]",
"=",
"_v",
"# Process unknown kwargs",
"# ----------------------",
"self",
".",
"_process_kwargs",
"(",
"*",
"*",
"dict",
"(",
"arg",
",",
"*",
"*",
"kwargs",
")",
")",
"# Reset skip_invalid",
"# ------------------",
"self",
".",
"_skip_invalid",
"=",
"False"
] | [
163,
4
] | [
246,
34
] | python | en | ['en', 'error', 'th'] | False |
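A minimal usage sketch for this constructor (the data and styling values below are arbitrary illustrations, not defaults):
import plotly.graph_objects as go

# Style the contour lines of a Contour trace via contour.Line
fig = go.Figure(
    go.Contour(
        z=[[1, 2, 3], [3, 2, 1], [1, 3, 2]],
        line=go.contour.Line(color="white", dash="dash", smoothing=1.0, width=1),
    )
)
fig.show()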
MenuRequest.__init__ | (self, **kwargs) | Initialize a menu request object. | Initialize a menu request object. | def __init__(self, **kwargs):
"""Initialize a menu request object."""
super(MenuRequest, self).__init__(**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
"MenuRequest",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"*",
"kwargs",
")"
] | [
19,
4
] | [
21,
51
] | python | en | ['en', 'co', 'en'] | True |
set_mapbox_access_token | (token) |
Arguments:
token: A Mapbox token to be used in `plotly.express.scatter_mapbox` and \
`plotly.express.line_mapbox` figures. See \
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/ for more details
|
Arguments:
token: A Mapbox token to be used in `plotly.express.scatter_mapbox` and \
`plotly.express.line_mapbox` figures. See \
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/ for more details
| def set_mapbox_access_token(token):
"""
Arguments:
token: A Mapbox token to be used in `plotly.express.scatter_mapbox` and \
`plotly.express.line_mapbox` figures. See \
https://docs.mapbox.com/help/how-mapbox-works/access-tokens/ for more details
"""
global MAPBOX_TOKEN
MAPBOX_TOKEN = token | [
"def",
"set_mapbox_access_token",
"(",
"token",
")",
":",
"global",
"MAPBOX_TOKEN",
"MAPBOX_TOKEN",
"=",
"token"
] | [
71,
0
] | [
79,
24
] | python | en | ['en', 'error', 'th'] | False |
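A short usage sketch; the token file path is a placeholder for wherever you keep your own Mapbox token:
import plotly.express as px

px.set_mapbox_access_token(open(".mapbox_token").read())  # hypothetical token file
df = px.data.carshare()  # bundled example dataset
fig = px.scatter_mapbox(df, lat="centroid_lat", lon="centroid_lon", size="car_hours")
fig.show()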
get_trendline_results | (fig) |
Extracts fit statistics for trendlines (when applied to figures generated with
the `trendline` argument set to `"ols"`).
Arguments:
fig: the output of a `plotly.express` charting call
Returns:
A `pandas.DataFrame` with a column "px_fit_results" containing the `statsmodels`
results objects, along with columns identifying the subset of the data the
trendline was fit on.
|
Extracts fit statistics for trendlines (when applied to figures generated with
the `trendline` argument set to `"ols"`). | def get_trendline_results(fig):
"""
Extracts fit statistics for trendlines (when applied to figures generated with
the `trendline` argument set to `"ols"`).
Arguments:
fig: the output of a `plotly.express` charting call
Returns:
A `pandas.DataFrame` with a column "px_fit_results" containing the `statsmodels`
results objects, along with columns identifying the subset of the data the
trendline was fit on.
"""
return fig._px_trendlines | [
"def",
"get_trendline_results",
"(",
"fig",
")",
":",
"return",
"fig",
".",
"_px_trendlines"
] | [
82,
0
] | [
94,
29
] | python | en | ['en', 'error', 'th'] | False |
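A usage sketch, assuming the bundled `tips` example dataset:
import plotly.express as px

df = px.data.tips()
fig = px.scatter(df, x="total_bill", y="tip", trendline="ols")
results = px.get_trendline_results(fig)
print(results.px_fit_results.iloc[0].summary())  # full statsmodels OLS summary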
invert_label | (args, column) | Invert mapping.
Find the key corresponding to the value `column` in the dict args["labels"].
Returns `column` if the value does not exist.
| Invert mapping.
Find the key corresponding to the value `column` in the dict args["labels"].
Returns `column` if the value does not exist.
| def invert_label(args, column):
"""Invert mapping.
Find the key corresponding to the value `column` in the dict args["labels"].
Returns `column` if the value does not exist.
"""
reversed_labels = {value: key for (key, value) in args["labels"].items()}
try:
return reversed_labels[column]
except Exception:
return column | [
"def",
"invert_label",
"(",
"args",
",",
"column",
")",
":",
"reversed_labels",
"=",
"{",
"value",
":",
"key",
"for",
"(",
"key",
",",
"value",
")",
"in",
"args",
"[",
"\"labels\"",
"]",
".",
"items",
"(",
")",
"}",
"try",
":",
"return",
"reversed_labels",
"[",
"column",
"]",
"except",
"Exception",
":",
"return",
"column"
] | [
119,
0
] | [
128,
21
] | python | en | ['en', 'en', 'en'] | False |
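A tiny sketch of the helper's behavior; the labels dict below is made up:
args = {"labels": {"gdpPercap": "GDP per capita"}}  # hypothetical labels mapping

invert_label(args, "GDP per capita")  # -> "gdpPercap"
invert_label(args, "lifeExp")         # -> "lifeExp" (not a mapped value, returned as-is)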
make_trace_kwargs | (args, trace_spec, trace_data, mapping_labels, sizeref) | Populates a dict with arguments to update trace
Parameters
----------
args : dict
args to be used for the trace
trace_spec : NamedTuple
which kind of trace to be used (has constructor, marginal etc.
attributes)
trace_data : pandas DataFrame
the subset of the data used to build this trace
mapping_labels : dict
to be used for hovertemplate
sizeref : float
marker sizeref
Returns
-------
trace_patch : dict
dict to be used to update trace
fit_results : dict
fit information to be used for trendlines
| Populates a dict with arguments to update trace | def make_trace_kwargs(args, trace_spec, trace_data, mapping_labels, sizeref):
"""Populates a dict with arguments to update trace
Parameters
----------
args : dict
args to be used for the trace
trace_spec : NamedTuple
which kind of trace to be used (has constructor, marginal etc.
attributes)
trace_data : pandas DataFrame
the subset of the data used to build this trace
mapping_labels : dict
to be used for hovertemplate
sizeref : float
marker sizeref
Returns
-------
trace_patch : dict
dict to be used to update trace
fit_results : dict
fit information to be used for trendlines
"""
if "line_close" in args and args["line_close"]:
trace_data = trace_data.append(trace_data.iloc[0])
trace_patch = trace_spec.trace_patch.copy() or {}
fit_results = None
hover_header = ""
custom_data_len = 0
for attr_name in trace_spec.attrs:
attr_value = args[attr_name]
attr_label = get_decorated_label(args, attr_value, attr_name)
if attr_name == "dimensions":
dims = [
(name, column)
for (name, column) in trace_data.iteritems()
if ((not attr_value) or (name in attr_value))
and (
trace_spec.constructor != go.Parcoords
or _is_continuous(args["data_frame"], name)
)
and (
trace_spec.constructor != go.Parcats
or (attr_value is not None and name in attr_value)
or len(args["data_frame"][name].unique())
<= args["dimensions_max_cardinality"]
)
]
trace_patch["dimensions"] = [
dict(label=get_label(args, name), values=column.values)
for (name, column) in dims
]
if trace_spec.constructor == go.Splom:
for d in trace_patch["dimensions"]:
d["axis"] = dict(matches=True)
mapping_labels["%{xaxis.title.text}"] = "%{x}"
mapping_labels["%{yaxis.title.text}"] = "%{y}"
elif (
attr_value is not None
or (trace_spec.constructor == go.Histogram and attr_name in ["x", "y"])
or (
trace_spec.constructor in [go.Histogram2d, go.Histogram2dContour]
and attr_name == "z"
)
):
if attr_name == "size":
if "marker" not in trace_patch:
trace_patch["marker"] = dict()
trace_patch["marker"]["size"] = trace_data[attr_value]
trace_patch["marker"]["sizemode"] = "area"
trace_patch["marker"]["sizeref"] = sizeref
mapping_labels[attr_label] = "%{marker.size}"
elif attr_name == "marginal_x":
if trace_spec.constructor == go.Histogram:
mapping_labels["count"] = "%{y}"
elif attr_name == "marginal_y":
if trace_spec.constructor == go.Histogram:
mapping_labels["count"] = "%{x}"
elif attr_name == "trendline":
if (
attr_value in ["ols", "lowess"]
and args["x"]
and args["y"]
and len(trace_data[[args["x"], args["y"]]].dropna()) > 1
):
import statsmodels.api as sm
# sorting is bad but trace_specs with "trendline" have no other attrs
sorted_trace_data = trace_data.sort_values(by=args["x"])
y = sorted_trace_data[args["y"]].values
x = sorted_trace_data[args["x"]].values
x_is_date = False
if x.dtype.type == np.datetime64:
x = x.astype(int) / 10 ** 9 # convert to unix epoch seconds
x_is_date = True
elif x.dtype.type == np.object_:
try:
x = x.astype(np.float64)
except ValueError:
raise ValueError(
"Could not convert value of 'x' ('%s') into a numeric type. "
"If 'x' contains stringified dates, please convert to a datetime column."
% args["x"]
)
if y.dtype.type == np.object_:
try:
y = y.astype(np.float64)
except ValueError:
raise ValueError(
"Could not convert value of 'y' into a numeric type."
)
if attr_value == "lowess":
# missing ='drop' is the default value for lowess but not for OLS (None)
# we force it here in case statsmodels change their defaults
trendline = sm.nonparametric.lowess(y, x, missing="drop")
trace_patch["x"] = trendline[:, 0]
trace_patch["y"] = trendline[:, 1]
hover_header = "<b>LOWESS trendline</b><br><br>"
elif attr_value == "ols":
fit_results = sm.OLS(
y, sm.add_constant(x), missing="drop"
).fit()
trace_patch["y"] = fit_results.predict()
trace_patch["x"] = x[
np.logical_not(np.logical_or(np.isnan(y), np.isnan(x)))
]
hover_header = "<b>OLS trendline</b><br>"
if len(fit_results.params) == 2:
hover_header += "%s = %g * %s + %g<br>" % (
args["y"],
fit_results.params[1],
args["x"],
fit_results.params[0],
)
else:
hover_header += "%s = %g<br>" % (
args["y"],
fit_results.params[0],
)
hover_header += (
"R<sup>2</sup>=%f<br><br>" % fit_results.rsquared
)
if x_is_date:
trace_patch["x"] = pd.to_datetime(trace_patch["x"] * 10 ** 9)
mapping_labels[get_label(args, args["x"])] = "%{x}"
mapping_labels[get_label(args, args["y"])] = "%{y} <b>(trend)</b>"
elif attr_name.startswith("error"):
error_xy = attr_name[:7]
arr = "arrayminus" if attr_name.endswith("minus") else "array"
if error_xy not in trace_patch:
trace_patch[error_xy] = {}
trace_patch[error_xy][arr] = trace_data[attr_value]
elif attr_name == "custom_data":
trace_patch["customdata"] = trace_data[attr_value].values
custom_data_len = len(attr_value) # number of custom data columns
elif attr_name == "hover_name":
if trace_spec.constructor not in [
go.Histogram,
go.Histogram2d,
go.Histogram2dContour,
]:
trace_patch["hovertext"] = trace_data[attr_value]
if hover_header == "":
hover_header = "<b>%{hovertext}</b><br><br>"
elif attr_name == "hover_data":
if trace_spec.constructor not in [
go.Histogram,
go.Histogram2d,
go.Histogram2dContour,
]:
hover_is_dict = isinstance(attr_value, dict)
for col in attr_value:
if hover_is_dict and not attr_value[col]:
continue
try:
position = args["custom_data"].index(col)
except (ValueError, AttributeError, KeyError):
position = custom_data_len
custom_data_len += 1
if "customdata" in trace_patch:
trace_patch["customdata"] = np.hstack(
(
trace_patch["customdata"],
trace_data[col].values[:, None],
)
)
else:
trace_patch["customdata"] = trace_data[col].values[
:, None
]
attr_label_col = get_decorated_label(args, col, None)
mapping_labels[attr_label_col] = "%%{customdata[%d]}" % (
position
)
elif attr_name == "color":
if trace_spec.constructor in [go.Choropleth, go.Choroplethmapbox]:
trace_patch["z"] = trace_data[attr_value]
trace_patch["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%{z}"
elif trace_spec.constructor in [
go.Sunburst,
go.Treemap,
go.Pie,
go.Funnelarea,
]:
if "marker" not in trace_patch:
trace_patch["marker"] = dict()
if args.get("color_is_continuous"):
trace_patch["marker"]["colors"] = trace_data[attr_value]
trace_patch["marker"]["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%{color}"
else:
trace_patch["marker"]["colors"] = []
if args["color_discrete_map"] is not None:
mapping = args["color_discrete_map"].copy()
else:
mapping = {}
for cat in trace_data[attr_value]:
if mapping.get(cat) is None:
mapping[cat] = args["color_discrete_sequence"][
len(mapping) % len(args["color_discrete_sequence"])
]
trace_patch["marker"]["colors"].append(mapping[cat])
else:
colorable = "marker"
if trace_spec.constructor in [go.Parcats, go.Parcoords]:
colorable = "line"
if colorable not in trace_patch:
trace_patch[colorable] = dict()
trace_patch[colorable]["color"] = trace_data[attr_value]
trace_patch[colorable]["coloraxis"] = "coloraxis1"
mapping_labels[attr_label] = "%%{%s.color}" % colorable
elif attr_name == "animation_group":
trace_patch["ids"] = trace_data[attr_value]
elif attr_name == "locations":
trace_patch[attr_name] = trace_data[attr_value]
mapping_labels[attr_label] = "%{location}"
elif attr_name == "values":
trace_patch[attr_name] = trace_data[attr_value]
_label = "value" if attr_label == "values" else attr_label
mapping_labels[_label] = "%{value}"
elif attr_name == "parents":
trace_patch[attr_name] = trace_data[attr_value]
_label = "parent" if attr_label == "parents" else attr_label
mapping_labels[_label] = "%{parent}"
elif attr_name == "ids":
trace_patch[attr_name] = trace_data[attr_value]
_label = "id" if attr_label == "ids" else attr_label
mapping_labels[_label] = "%{id}"
elif attr_name == "names":
if trace_spec.constructor in [
go.Sunburst,
go.Treemap,
go.Pie,
go.Funnelarea,
]:
trace_patch["labels"] = trace_data[attr_value]
_label = "label" if attr_label == "names" else attr_label
mapping_labels[_label] = "%{label}"
else:
trace_patch[attr_name] = trace_data[attr_value]
else:
if attr_value:
trace_patch[attr_name] = trace_data[attr_value]
mapping_labels[attr_label] = "%%{%s}" % attr_name
if trace_spec.constructor not in [
go.Parcoords,
go.Parcats,
]:
# Modify mapping_labels according to hover_data keys
# if hover_data is a dict
mapping_labels_copy = OrderedDict(mapping_labels)
if args["hover_data"] and isinstance(args["hover_data"], dict):
for k, v in mapping_labels.items():
# We need to invert the mapping here
k_args = invert_label(args, k)
if k_args in args["hover_data"]:
formatter = args["hover_data"][k_args][0]
if formatter:
if isinstance(formatter, str):
mapping_labels_copy[k] = v.replace("}", "%s}" % formatter)
else:
_ = mapping_labels_copy.pop(k)
hover_lines = [k + "=" + v for k, v in mapping_labels_copy.items()]
trace_patch["hovertemplate"] = hover_header + "<br>".join(hover_lines)
trace_patch["hovertemplate"] += "<extra></extra>"
return trace_patch, fit_results | [
"def",
"make_trace_kwargs",
"(",
"args",
",",
"trace_spec",
",",
"trace_data",
",",
"mapping_labels",
",",
"sizeref",
")",
":",
"if",
"\"line_close\"",
"in",
"args",
"and",
"args",
"[",
"\"line_close\"",
"]",
":",
"trace_data",
"=",
"trace_data",
".",
"append",
"(",
"trace_data",
".",
"iloc",
"[",
"0",
"]",
")",
"trace_patch",
"=",
"trace_spec",
".",
"trace_patch",
".",
"copy",
"(",
")",
"or",
"{",
"}",
"fit_results",
"=",
"None",
"hover_header",
"=",
"\"\"",
"custom_data_len",
"=",
"0",
"for",
"attr_name",
"in",
"trace_spec",
".",
"attrs",
":",
"attr_value",
"=",
"args",
"[",
"attr_name",
"]",
"attr_label",
"=",
"get_decorated_label",
"(",
"args",
",",
"attr_value",
",",
"attr_name",
")",
"if",
"attr_name",
"==",
"\"dimensions\"",
":",
"dims",
"=",
"[",
"(",
"name",
",",
"column",
")",
"for",
"(",
"name",
",",
"column",
")",
"in",
"trace_data",
".",
"iteritems",
"(",
")",
"if",
"(",
"(",
"not",
"attr_value",
")",
"or",
"(",
"name",
"in",
"attr_value",
")",
")",
"and",
"(",
"trace_spec",
".",
"constructor",
"!=",
"go",
".",
"Parcoords",
"or",
"_is_continuous",
"(",
"args",
"[",
"\"data_frame\"",
"]",
",",
"name",
")",
")",
"and",
"(",
"trace_spec",
".",
"constructor",
"!=",
"go",
".",
"Parcats",
"or",
"(",
"attr_value",
"is",
"not",
"None",
"and",
"name",
"in",
"attr_value",
")",
"or",
"len",
"(",
"args",
"[",
"\"data_frame\"",
"]",
"[",
"name",
"]",
".",
"unique",
"(",
")",
")",
"<=",
"args",
"[",
"\"dimensions_max_cardinality\"",
"]",
")",
"]",
"trace_patch",
"[",
"\"dimensions\"",
"]",
"=",
"[",
"dict",
"(",
"label",
"=",
"get_label",
"(",
"args",
",",
"name",
")",
",",
"values",
"=",
"column",
".",
"values",
")",
"for",
"(",
"name",
",",
"column",
")",
"in",
"dims",
"]",
"if",
"trace_spec",
".",
"constructor",
"==",
"go",
".",
"Splom",
":",
"for",
"d",
"in",
"trace_patch",
"[",
"\"dimensions\"",
"]",
":",
"d",
"[",
"\"axis\"",
"]",
"=",
"dict",
"(",
"matches",
"=",
"True",
")",
"mapping_labels",
"[",
"\"%{xaxis.title.text}\"",
"]",
"=",
"\"%{x}\"",
"mapping_labels",
"[",
"\"%{yaxis.title.text}\"",
"]",
"=",
"\"%{y}\"",
"elif",
"(",
"attr_value",
"is",
"not",
"None",
"or",
"(",
"trace_spec",
".",
"constructor",
"==",
"go",
".",
"Histogram",
"and",
"attr_name",
"in",
"[",
"\"x\"",
",",
"\"y\"",
"]",
")",
"or",
"(",
"trace_spec",
".",
"constructor",
"in",
"[",
"go",
".",
"Histogram2d",
",",
"go",
".",
"Histogram2dContour",
"]",
"and",
"attr_name",
"==",
"\"z\"",
")",
")",
":",
"if",
"attr_name",
"==",
"\"size\"",
":",
"if",
"\"marker\"",
"not",
"in",
"trace_patch",
":",
"trace_patch",
"[",
"\"marker\"",
"]",
"=",
"dict",
"(",
")",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"size\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"sizemode\"",
"]",
"=",
"\"area\"",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"sizeref\"",
"]",
"=",
"sizeref",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%{marker.size}\"",
"elif",
"attr_name",
"==",
"\"marginal_x\"",
":",
"if",
"trace_spec",
".",
"constructor",
"==",
"go",
".",
"Histogram",
":",
"mapping_labels",
"[",
"\"count\"",
"]",
"=",
"\"%{y}\"",
"elif",
"attr_name",
"==",
"\"marginal_y\"",
":",
"if",
"trace_spec",
".",
"constructor",
"==",
"go",
".",
"Histogram",
":",
"mapping_labels",
"[",
"\"count\"",
"]",
"=",
"\"%{x}\"",
"elif",
"attr_name",
"==",
"\"trendline\"",
":",
"if",
"(",
"attr_value",
"in",
"[",
"\"ols\"",
",",
"\"lowess\"",
"]",
"and",
"args",
"[",
"\"x\"",
"]",
"and",
"args",
"[",
"\"y\"",
"]",
"and",
"len",
"(",
"trace_data",
"[",
"[",
"args",
"[",
"\"x\"",
"]",
",",
"args",
"[",
"\"y\"",
"]",
"]",
"]",
".",
"dropna",
"(",
")",
")",
">",
"1",
")",
":",
"import",
"statsmodels",
".",
"api",
"as",
"sm",
"# sorting is bad but trace_specs with \"trendline\" have no other attrs",
"sorted_trace_data",
"=",
"trace_data",
".",
"sort_values",
"(",
"by",
"=",
"args",
"[",
"\"x\"",
"]",
")",
"y",
"=",
"sorted_trace_data",
"[",
"args",
"[",
"\"y\"",
"]",
"]",
".",
"values",
"x",
"=",
"sorted_trace_data",
"[",
"args",
"[",
"\"x\"",
"]",
"]",
".",
"values",
"x_is_date",
"=",
"False",
"if",
"x",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"datetime64",
":",
"x",
"=",
"x",
".",
"astype",
"(",
"int",
")",
"/",
"10",
"**",
"9",
"# convert to unix epoch seconds",
"x_is_date",
"=",
"True",
"elif",
"x",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"object_",
":",
"try",
":",
"x",
"=",
"x",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Could not convert value of 'x' ('%s') into a numeric type. \"",
"\"If 'x' contains stringified dates, please convert to a datetime column.\"",
"%",
"args",
"[",
"\"x\"",
"]",
")",
"if",
"y",
".",
"dtype",
".",
"type",
"==",
"np",
".",
"object_",
":",
"try",
":",
"y",
"=",
"y",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Could not convert value of 'y' into a numeric type.\"",
")",
"if",
"attr_value",
"==",
"\"lowess\"",
":",
"# missing ='drop' is the default value for lowess but not for OLS (None)",
"# we force it here in case statsmodels change their defaults",
"trendline",
"=",
"sm",
".",
"nonparametric",
".",
"lowess",
"(",
"y",
",",
"x",
",",
"missing",
"=",
"\"drop\"",
")",
"trace_patch",
"[",
"\"x\"",
"]",
"=",
"trendline",
"[",
":",
",",
"0",
"]",
"trace_patch",
"[",
"\"y\"",
"]",
"=",
"trendline",
"[",
":",
",",
"1",
"]",
"hover_header",
"=",
"\"<b>LOWESS trendline</b><br><br>\"",
"elif",
"attr_value",
"==",
"\"ols\"",
":",
"fit_results",
"=",
"sm",
".",
"OLS",
"(",
"y",
",",
"sm",
".",
"add_constant",
"(",
"x",
")",
",",
"missing",
"=",
"\"drop\"",
")",
".",
"fit",
"(",
")",
"trace_patch",
"[",
"\"y\"",
"]",
"=",
"fit_results",
".",
"predict",
"(",
")",
"trace_patch",
"[",
"\"x\"",
"]",
"=",
"x",
"[",
"np",
".",
"logical_not",
"(",
"np",
".",
"logical_or",
"(",
"np",
".",
"isnan",
"(",
"y",
")",
",",
"np",
".",
"isnan",
"(",
"x",
")",
")",
")",
"]",
"hover_header",
"=",
"\"<b>OLS trendline</b><br>\"",
"if",
"len",
"(",
"fit_results",
".",
"params",
")",
"==",
"2",
":",
"hover_header",
"+=",
"\"%s = %g * %s + %g<br>\"",
"%",
"(",
"args",
"[",
"\"y\"",
"]",
",",
"fit_results",
".",
"params",
"[",
"1",
"]",
",",
"args",
"[",
"\"x\"",
"]",
",",
"fit_results",
".",
"params",
"[",
"0",
"]",
",",
")",
"else",
":",
"hover_header",
"+=",
"\"%s = %g<br>\"",
"%",
"(",
"args",
"[",
"\"y\"",
"]",
",",
"fit_results",
".",
"params",
"[",
"0",
"]",
",",
")",
"hover_header",
"+=",
"(",
"\"R<sup>2</sup>=%f<br><br>\"",
"%",
"fit_results",
".",
"rsquared",
")",
"if",
"x_is_date",
":",
"trace_patch",
"[",
"\"x\"",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"trace_patch",
"[",
"\"x\"",
"]",
"*",
"10",
"**",
"9",
")",
"mapping_labels",
"[",
"get_label",
"(",
"args",
",",
"args",
"[",
"\"x\"",
"]",
")",
"]",
"=",
"\"%{x}\"",
"mapping_labels",
"[",
"get_label",
"(",
"args",
",",
"args",
"[",
"\"y\"",
"]",
")",
"]",
"=",
"\"%{y} <b>(trend)</b>\"",
"elif",
"attr_name",
".",
"startswith",
"(",
"\"error\"",
")",
":",
"error_xy",
"=",
"attr_name",
"[",
":",
"7",
"]",
"arr",
"=",
"\"arrayminus\"",
"if",
"attr_name",
".",
"endswith",
"(",
"\"minus\"",
")",
"else",
"\"array\"",
"if",
"error_xy",
"not",
"in",
"trace_patch",
":",
"trace_patch",
"[",
"error_xy",
"]",
"=",
"{",
"}",
"trace_patch",
"[",
"error_xy",
"]",
"[",
"arr",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"elif",
"attr_name",
"==",
"\"custom_data\"",
":",
"trace_patch",
"[",
"\"customdata\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
".",
"values",
"custom_data_len",
"=",
"len",
"(",
"attr_value",
")",
"# number of custom data columns",
"elif",
"attr_name",
"==",
"\"hover_name\"",
":",
"if",
"trace_spec",
".",
"constructor",
"not",
"in",
"[",
"go",
".",
"Histogram",
",",
"go",
".",
"Histogram2d",
",",
"go",
".",
"Histogram2dContour",
",",
"]",
":",
"trace_patch",
"[",
"\"hovertext\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"if",
"hover_header",
"==",
"\"\"",
":",
"hover_header",
"=",
"\"<b>%{hovertext}</b><br><br>\"",
"elif",
"attr_name",
"==",
"\"hover_data\"",
":",
"if",
"trace_spec",
".",
"constructor",
"not",
"in",
"[",
"go",
".",
"Histogram",
",",
"go",
".",
"Histogram2d",
",",
"go",
".",
"Histogram2dContour",
",",
"]",
":",
"hover_is_dict",
"=",
"isinstance",
"(",
"attr_value",
",",
"dict",
")",
"for",
"col",
"in",
"attr_value",
":",
"if",
"hover_is_dict",
"and",
"not",
"attr_value",
"[",
"col",
"]",
":",
"continue",
"try",
":",
"position",
"=",
"args",
"[",
"\"custom_data\"",
"]",
".",
"index",
"(",
"col",
")",
"except",
"(",
"ValueError",
",",
"AttributeError",
",",
"KeyError",
")",
":",
"position",
"=",
"custom_data_len",
"custom_data_len",
"+=",
"1",
"if",
"\"customdata\"",
"in",
"trace_patch",
":",
"trace_patch",
"[",
"\"customdata\"",
"]",
"=",
"np",
".",
"hstack",
"(",
"(",
"trace_patch",
"[",
"\"customdata\"",
"]",
",",
"trace_data",
"[",
"col",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
",",
")",
")",
"else",
":",
"trace_patch",
"[",
"\"customdata\"",
"]",
"=",
"trace_data",
"[",
"col",
"]",
".",
"values",
"[",
":",
",",
"None",
"]",
"attr_label_col",
"=",
"get_decorated_label",
"(",
"args",
",",
"col",
",",
"None",
")",
"mapping_labels",
"[",
"attr_label_col",
"]",
"=",
"\"%%{customdata[%d]}\"",
"%",
"(",
"position",
")",
"elif",
"attr_name",
"==",
"\"color\"",
":",
"if",
"trace_spec",
".",
"constructor",
"in",
"[",
"go",
".",
"Choropleth",
",",
"go",
".",
"Choroplethmapbox",
"]",
":",
"trace_patch",
"[",
"\"z\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"trace_patch",
"[",
"\"coloraxis\"",
"]",
"=",
"\"coloraxis1\"",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%{z}\"",
"elif",
"trace_spec",
".",
"constructor",
"in",
"[",
"go",
".",
"Sunburst",
",",
"go",
".",
"Treemap",
",",
"go",
".",
"Pie",
",",
"go",
".",
"Funnelarea",
",",
"]",
":",
"if",
"\"marker\"",
"not",
"in",
"trace_patch",
":",
"trace_patch",
"[",
"\"marker\"",
"]",
"=",
"dict",
"(",
")",
"if",
"args",
".",
"get",
"(",
"\"color_is_continuous\"",
")",
":",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"colors\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"coloraxis\"",
"]",
"=",
"\"coloraxis1\"",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%{color}\"",
"else",
":",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"colors\"",
"]",
"=",
"[",
"]",
"if",
"args",
"[",
"\"color_discrete_map\"",
"]",
"is",
"not",
"None",
":",
"mapping",
"=",
"args",
"[",
"\"color_discrete_map\"",
"]",
".",
"copy",
"(",
")",
"else",
":",
"mapping",
"=",
"{",
"}",
"for",
"cat",
"in",
"trace_data",
"[",
"attr_value",
"]",
":",
"if",
"mapping",
".",
"get",
"(",
"cat",
")",
"is",
"None",
":",
"mapping",
"[",
"cat",
"]",
"=",
"args",
"[",
"\"color_discrete_sequence\"",
"]",
"[",
"len",
"(",
"mapping",
")",
"%",
"len",
"(",
"args",
"[",
"\"color_discrete_sequence\"",
"]",
")",
"]",
"trace_patch",
"[",
"\"marker\"",
"]",
"[",
"\"colors\"",
"]",
".",
"append",
"(",
"mapping",
"[",
"cat",
"]",
")",
"else",
":",
"colorable",
"=",
"\"marker\"",
"if",
"trace_spec",
".",
"constructor",
"in",
"[",
"go",
".",
"Parcats",
",",
"go",
".",
"Parcoords",
"]",
":",
"colorable",
"=",
"\"line\"",
"if",
"colorable",
"not",
"in",
"trace_patch",
":",
"trace_patch",
"[",
"colorable",
"]",
"=",
"dict",
"(",
")",
"trace_patch",
"[",
"colorable",
"]",
"[",
"\"color\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"trace_patch",
"[",
"colorable",
"]",
"[",
"\"coloraxis\"",
"]",
"=",
"\"coloraxis1\"",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%%{%s.color}\"",
"%",
"colorable",
"elif",
"attr_name",
"==",
"\"animation_group\"",
":",
"trace_patch",
"[",
"\"ids\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"elif",
"attr_name",
"==",
"\"locations\"",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%{location}\"",
"elif",
"attr_name",
"==",
"\"values\"",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"_label",
"=",
"\"value\"",
"if",
"attr_label",
"==",
"\"values\"",
"else",
"attr_label",
"mapping_labels",
"[",
"_label",
"]",
"=",
"\"%{value}\"",
"elif",
"attr_name",
"==",
"\"parents\"",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"_label",
"=",
"\"parent\"",
"if",
"attr_label",
"==",
"\"parents\"",
"else",
"attr_label",
"mapping_labels",
"[",
"_label",
"]",
"=",
"\"%{parent}\"",
"elif",
"attr_name",
"==",
"\"ids\"",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"_label",
"=",
"\"id\"",
"if",
"attr_label",
"==",
"\"ids\"",
"else",
"attr_label",
"mapping_labels",
"[",
"_label",
"]",
"=",
"\"%{id}\"",
"elif",
"attr_name",
"==",
"\"names\"",
":",
"if",
"trace_spec",
".",
"constructor",
"in",
"[",
"go",
".",
"Sunburst",
",",
"go",
".",
"Treemap",
",",
"go",
".",
"Pie",
",",
"go",
".",
"Funnelarea",
",",
"]",
":",
"trace_patch",
"[",
"\"labels\"",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"_label",
"=",
"\"label\"",
"if",
"attr_label",
"==",
"\"names\"",
"else",
"attr_label",
"mapping_labels",
"[",
"_label",
"]",
"=",
"\"%{label}\"",
"else",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"else",
":",
"if",
"attr_value",
":",
"trace_patch",
"[",
"attr_name",
"]",
"=",
"trace_data",
"[",
"attr_value",
"]",
"mapping_labels",
"[",
"attr_label",
"]",
"=",
"\"%%{%s}\"",
"%",
"attr_name",
"if",
"trace_spec",
".",
"constructor",
"not",
"in",
"[",
"go",
".",
"Parcoords",
",",
"go",
".",
"Parcats",
",",
"]",
":",
"# Modify mapping_labels according to hover_data keys",
"# if hover_data is a dict",
"mapping_labels_copy",
"=",
"OrderedDict",
"(",
"mapping_labels",
")",
"if",
"args",
"[",
"\"hover_data\"",
"]",
"and",
"isinstance",
"(",
"args",
"[",
"\"hover_data\"",
"]",
",",
"dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"mapping_labels",
".",
"items",
"(",
")",
":",
"# We need to invert the mapping here",
"k_args",
"=",
"invert_label",
"(",
"args",
",",
"k",
")",
"if",
"k_args",
"in",
"args",
"[",
"\"hover_data\"",
"]",
":",
"formatter",
"=",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k_args",
"]",
"[",
"0",
"]",
"if",
"formatter",
":",
"if",
"isinstance",
"(",
"formatter",
",",
"str",
")",
":",
"mapping_labels_copy",
"[",
"k",
"]",
"=",
"v",
".",
"replace",
"(",
"\"}\"",
",",
"\"%s}\"",
"%",
"formatter",
")",
"else",
":",
"_",
"=",
"mapping_labels_copy",
".",
"pop",
"(",
"k",
")",
"hover_lines",
"=",
"[",
"k",
"+",
"\"=\"",
"+",
"v",
"for",
"k",
",",
"v",
"in",
"mapping_labels_copy",
".",
"items",
"(",
")",
"]",
"trace_patch",
"[",
"\"hovertemplate\"",
"]",
"=",
"hover_header",
"+",
"\"<br>\"",
".",
"join",
"(",
"hover_lines",
")",
"trace_patch",
"[",
"\"hovertemplate\"",
"]",
"+=",
"\"<extra></extra>\"",
"return",
"trace_patch",
",",
"fit_results"
] | [
195,
0
] | [
486,
35
] | python | en | ['en', 'en', 'en'] | True |
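The hovertemplate assembly at the end of this function can be shown in isolation; the label/reference pairs below are invented for illustration:
from collections import OrderedDict

hover_header = "<b>%{hovertext}</b><br><br>"
mapping_labels = OrderedDict([("GDP per capita", "%{x}"), ("life expectancy", "%{y}")])

hover_lines = [k + "=" + v for k, v in mapping_labels.items()]
hovertemplate = hover_header + "<br>".join(hover_lines) + "<extra></extra>"
# 'hovertemplate' is now:
# '<b>%{hovertext}</b><br><br>GDP per capita=%{x}<br>life expectancy=%{y}<extra></extra>'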
_get_reserved_col_names | (args) |
This function builds a set of columns of the data_frame argument used
as arguments, either as str/int arguments or given as columns
(pandas series type).
|
This function builds a set of columns of the data_frame argument used
as arguments, either as str/int arguments or given as columns
(pandas series type).
| def _get_reserved_col_names(args):
"""
This function builds a set of columns of the data_frame argument used
as arguments, either as str/int arguments or given as columns
(pandas series type).
"""
df = args["data_frame"]
reserved_names = set()
for field in args:
if field not in all_attrables:
continue
names = args[field] if field in array_attrables else [args[field]]
if names is None:
continue
for arg in names:
if arg is None:
continue
elif isinstance(arg, str): # no need to add ints since kw arg are not ints
reserved_names.add(arg)
elif isinstance(arg, pd.Series):
arg_name = arg.name
if arg_name and hasattr(df, arg_name):
in_df = arg is df[arg_name]
if in_df:
reserved_names.add(arg_name)
elif arg is df.index and arg.name is not None:
reserved_names.add(arg.name)
return reserved_names | [
"def",
"_get_reserved_col_names",
"(",
"args",
")",
":",
"df",
"=",
"args",
"[",
"\"data_frame\"",
"]",
"reserved_names",
"=",
"set",
"(",
")",
"for",
"field",
"in",
"args",
":",
"if",
"field",
"not",
"in",
"all_attrables",
":",
"continue",
"names",
"=",
"args",
"[",
"field",
"]",
"if",
"field",
"in",
"array_attrables",
"else",
"[",
"args",
"[",
"field",
"]",
"]",
"if",
"names",
"is",
"None",
":",
"continue",
"for",
"arg",
"in",
"names",
":",
"if",
"arg",
"is",
"None",
":",
"continue",
"elif",
"isinstance",
"(",
"arg",
",",
"str",
")",
":",
"# no need to add ints since kw arg are not ints",
"reserved_names",
".",
"add",
"(",
"arg",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"pd",
".",
"Series",
")",
":",
"arg_name",
"=",
"arg",
".",
"name",
"if",
"arg_name",
"and",
"hasattr",
"(",
"df",
",",
"arg_name",
")",
":",
"in_df",
"=",
"arg",
"is",
"df",
"[",
"arg_name",
"]",
"if",
"in_df",
":",
"reserved_names",
".",
"add",
"(",
"arg_name",
")",
"elif",
"arg",
"is",
"df",
".",
"index",
"and",
"arg",
".",
"name",
"is",
"not",
"None",
":",
"reserved_names",
".",
"add",
"(",
"arg",
".",
"name",
")",
"return",
"reserved_names"
] | [
953,
0
] | [
981,
25
] | python | en | ['en', 'error', 'th'] | False |
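A hedged sketch of what the helper reserves, assuming "x" and "y" appear in the module-level `all_attrables` list:
import pandas as pd

df = pd.DataFrame({"year": [2000, 2001], "gdp": [1.0, 1.1]})
args = {"data_frame": df, "x": "year", "y": df["gdp"]}
# "year" is reserved because it names a column directly; "gdp" is reserved
# because args["y"] is the very Series object df["gdp"].
# _get_reserved_col_names(args) would return {"year", "gdp"}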
_is_col_list | (df_input, arg) | Returns True if arg looks like it's a list of columns or references to columns
in df_input, and False otherwise (in which case it's assumed to be a single column
or reference to a column).
| Returns True if arg looks like it's a list of columns or references to columns
in df_input, and False otherwise (in which case it's assumed to be a single column
or reference to a column).
| def _is_col_list(df_input, arg):
"""Returns True if arg looks like it's a list of columns or references to columns
in df_input, and False otherwise (in which case it's assumed to be a single column
or reference to a column).
"""
if arg is None or isinstance(arg, str) or isinstance(arg, int):
return False
if isinstance(arg, pd.MultiIndex):
return False # just to keep existing behaviour for now
try:
iter(arg)
except TypeError:
return False # not iterable
for c in arg:
if isinstance(c, str) or isinstance(c, int):
if df_input is None or c not in df_input.columns:
return False
else:
try:
iter(c)
except TypeError:
return False # not iterable
return True | [
"def",
"_is_col_list",
"(",
"df_input",
",",
"arg",
")",
":",
"if",
"arg",
"is",
"None",
"or",
"isinstance",
"(",
"arg",
",",
"str",
")",
"or",
"isinstance",
"(",
"arg",
",",
"int",
")",
":",
"return",
"False",
"if",
"isinstance",
"(",
"arg",
",",
"pd",
".",
"MultiIndex",
")",
":",
"return",
"False",
"# just to keep existing behaviour for now",
"try",
":",
"iter",
"(",
"arg",
")",
"except",
"TypeError",
":",
"return",
"False",
"# not iterable",
"for",
"c",
"in",
"arg",
":",
"if",
"isinstance",
"(",
"c",
",",
"str",
")",
"or",
"isinstance",
"(",
"c",
",",
"int",
")",
":",
"if",
"df_input",
"is",
"None",
"or",
"c",
"not",
"in",
"df_input",
".",
"columns",
":",
"return",
"False",
"else",
":",
"try",
":",
"iter",
"(",
"c",
")",
"except",
"TypeError",
":",
"return",
"False",
"# not iterable",
"return",
"True"
] | [
984,
0
] | [
1006,
15
] | python | en | ['en', 'en', 'en'] | True |
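Expected behavior, illustrated (runs with the function and pandas in scope):
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
_is_col_list(df, ["a", "b"])       # True: every entry is an existing column name
_is_col_list(df, ["a", "z"])       # False: "z" is not a column of df
_is_col_list(df, "a")              # False: a single column reference, not a list
_is_col_list(None, [[1, 2], [3]])  # True: a list of array-like columns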
_isinstance_listlike | (x) | Returns True if x is an iterable which can be transformed into a pandas Series,
False for the other types of possible values of a `hover_data` dict.
A tuple of length 2 is a special case corresponding to a (format, data) tuple.
| Returns True if x is an iterable which can be transformed into a pandas Series,
False for the other types of possible values of a `hover_data` dict.
A tuple of length 2 is a special case corresponding to a (format, data) tuple.
| def _isinstance_listlike(x):
"""Returns True if x is an iterable which can be transformed into a pandas Series,
False for the other types of possible values of a `hover_data` dict.
A tuple of length 2 is a special case corresponding to a (format, data) tuple.
"""
if (
isinstance(x, str)
or (isinstance(x, tuple) and len(x) == 2)
or isinstance(x, bool)
or x is None
):
return False
else:
return True | [
"def",
"_isinstance_listlike",
"(",
"x",
")",
":",
"if",
"(",
"isinstance",
"(",
"x",
",",
"str",
")",
"or",
"(",
"isinstance",
"(",
"x",
",",
"tuple",
")",
"and",
"len",
"(",
"x",
")",
"==",
"2",
")",
"or",
"isinstance",
"(",
"x",
",",
"bool",
")",
"or",
"x",
"is",
"None",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | [
1009,
0
] | [
1022,
19
] | python | en | ['en', 'en', 'en'] | True |
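Expected behavior, illustrated:
_isinstance_listlike([1, 2, 3])          # True: can become a pandas Series
_isinstance_listlike("total_bill")       # False: a string column reference
_isinstance_listlike((":.2f", [1, 2]))   # False: a length-2 tuple is (format, data)
_isinstance_listlike(False)              # False: a bool toggles a hover column
_isinstance_listlike(None)               # False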
process_args_into_dataframe | (args, wide_mode, var_name, value_name) |
After this function runs, the `all_attrables` keys of `args` all contain only
references to columns of `df_output`. This function handles the extraction of data
from `args["attrable"]` and column-name-generation as appropriate, and adds the
data to `df_output` and then replaces `args["attrable"]` with the appropriate
reference.
|
After this function runs, the `all_attrables` keys of `args` all contain only
references to columns of `df_output`. This function handles the extraction of data
from `args["attrable"]` and column-name-generation as appropriate, and adds the
data to `df_output` and then replaces `args["attrable"]` with the appropriate
reference.
| def process_args_into_dataframe(args, wide_mode, var_name, value_name):
"""
After this function runs, the `all_attrables` keys of `args` all contain only
references to columns of `df_output`. This function handles the extraction of data
from `args["attrable"]` and column-name-generation as appropriate, and adds the
data to `df_output` and then replaces `args["attrable"]` with the appropriate
reference.
"""
df_input = args["data_frame"]
df_provided = df_input is not None
df_output = pd.DataFrame()
constants = dict()
ranges = list()
wide_id_vars = set()
reserved_names = _get_reserved_col_names(args) if df_provided else set()
# Case of functions with a "dimensions" kw: scatter_matrix, parcats, parcoords
if "dimensions" in args and args["dimensions"] is None:
if not df_provided:
raise ValueError(
"No data were provided. Please provide data either with the `data_frame` or with the `dimensions` argument."
)
else:
df_output[df_input.columns] = df_input[df_input.columns]
# hover_data is a dict
hover_data_is_dict = (
"hover_data" in args
and args["hover_data"]
and isinstance(args["hover_data"], dict)
)
# If dict, convert all values of hover_data to tuples to simplify processing
if hover_data_is_dict:
for k in args["hover_data"]:
if _isinstance_listlike(args["hover_data"][k]):
args["hover_data"][k] = (True, args["hover_data"][k])
if not isinstance(args["hover_data"][k], tuple):
args["hover_data"][k] = (args["hover_data"][k], None)
if df_provided and args["hover_data"][k][1] is not None and k in df_input:
raise ValueError(
"Ambiguous input: values for '%s' appear both in hover_data and data_frame"
% k
)
# Loop over possible arguments
for field_name in all_attrables:
# Massaging variables
argument_list = (
[args.get(field_name)]
if field_name not in array_attrables
else args.get(field_name)
)
# argument not specified, continue
if argument_list is None or argument_list == [None]:
continue
# Argument name: field_name if the argument is not a list
# Else we give names like ["hover_data_0, hover_data_1"] etc.
field_list = (
[field_name]
if field_name not in array_attrables
else [field_name + "_" + str(i) for i in range(len(argument_list))]
)
# argument_list and field_list ready, iterate over them
# Core of the loop starts here
for i, (argument, field) in enumerate(zip(argument_list, field_list)):
length = len(df_output)
if argument is None:
continue
col_name = None
# Case of multiindex
if isinstance(argument, pd.MultiIndex):
raise TypeError(
"Argument '%s' is a pandas MultiIndex. "
"pandas MultiIndex is not supported by plotly express "
"at the moment." % field
)
# ----------------- argument is a special value ----------------------
if isinstance(argument, Constant) or isinstance(argument, Range):
col_name = _check_name_not_reserved(
str(argument.label) if argument.label is not None else field,
reserved_names,
)
if isinstance(argument, Constant):
constants[col_name] = argument.value
else:
ranges.append(col_name)
# ----------------- argument is likely a col name ----------------------
elif isinstance(argument, str) or not hasattr(argument, "__len__"):
if (
field_name == "hover_data"
and hover_data_is_dict
and args["hover_data"][str(argument)][1] is not None
):
# hover_data has onboard data
# previously-checked to have no name-conflict with data_frame
col_name = str(argument)
real_argument = args["hover_data"][col_name][1]
if length and len(real_argument) != length:
raise ValueError(
"All arguments should have the same length. "
"The length of hover_data key `%s` is %d, whereas the "
"length of previously-processed arguments %s is %d"
% (
argument,
len(real_argument),
str(list(df_output.columns)),
length,
)
)
if hasattr(real_argument, "values"):
df_output[col_name] = real_argument.values
else:
df_output[col_name] = np.array(real_argument)
elif not df_provided:
raise ValueError(
"String or int arguments are only possible when a "
"DataFrame or an array is provided in the `data_frame` "
"argument. No DataFrame was provided, but argument "
"'%s' is of type str or int." % field
)
# Check validity of column name
elif argument not in df_input.columns:
if wide_mode and argument in (value_name, var_name):
continue
else:
err_msg = (
"Value of '%s' is not the name of a column in 'data_frame'. "
"Expected one of %s but received: %s"
% (field, str(list(df_input.columns)), argument)
)
if argument == "index":
err_msg += "\n To use the index, pass it in directly as `df.index`."
raise ValueError(err_msg)
elif length and len(df_input[argument]) != length:
raise ValueError(
"All arguments should have the same length. "
"The length of column argument `df[%s]` is %d, whereas the "
"length of previously-processed arguments %s is %d"
% (
field,
len(df_input[argument]),
str(list(df_output.columns)),
length,
)
)
else:
col_name = str(argument)
df_output[col_name] = df_input[argument].values
# ----------------- argument is likely a column / array / list.... -------
else:
if df_provided and hasattr(argument, "name"):
if argument is df_input.index:
if argument.name is None or argument.name in df_input:
col_name = "index"
else:
col_name = argument.name
col_name = _escape_col_name(
df_input, col_name, [var_name, value_name]
)
else:
if (
argument.name is not None
and argument.name in df_input
and argument is df_input[argument.name]
):
col_name = argument.name
if col_name is None: # numpy array, list...
col_name = _check_name_not_reserved(field, reserved_names)
if length and len(argument) != length:
raise ValueError(
"All arguments should have the same length. "
"The length of argument `%s` is %d, whereas the "
"length of previously-processed arguments %s is %d"
% (field, len(argument), str(list(df_output.columns)), length)
)
if hasattr(argument, "values"):
df_output[str(col_name)] = argument.values
else:
df_output[str(col_name)] = np.array(argument)
# Finally, update argument with column name now that column exists
assert col_name is not None, (
"Data-frame processing failure, likely due to a internal bug. "
"Please report this to "
"https://github.com/plotly/plotly.py/issues/new and we will try to "
"replicate and fix it."
)
if field_name not in array_attrables:
args[field_name] = str(col_name)
elif isinstance(args[field_name], dict):
pass
else:
args[field_name][i] = str(col_name)
if field_name != "wide_variable":
wide_id_vars.add(str(col_name))
for col_name in ranges:
df_output[col_name] = range(len(df_output))
for col_name in constants:
df_output[col_name] = constants[col_name]
return df_output, wide_id_vars | [
"def",
"process_args_into_dataframe",
"(",
"args",
",",
"wide_mode",
",",
"var_name",
",",
"value_name",
")",
":",
"df_input",
"=",
"args",
"[",
"\"data_frame\"",
"]",
"df_provided",
"=",
"df_input",
"is",
"not",
"None",
"df_output",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"constants",
"=",
"dict",
"(",
")",
"ranges",
"=",
"list",
"(",
")",
"wide_id_vars",
"=",
"set",
"(",
")",
"reserved_names",
"=",
"_get_reserved_col_names",
"(",
"args",
")",
"if",
"df_provided",
"else",
"set",
"(",
")",
"# Case of functions with a \"dimensions\" kw: scatter_matrix, parcats, parcoords",
"if",
"\"dimensions\"",
"in",
"args",
"and",
"args",
"[",
"\"dimensions\"",
"]",
"is",
"None",
":",
"if",
"not",
"df_provided",
":",
"raise",
"ValueError",
"(",
"\"No data were provided. Please provide data either with the `data_frame` or with the `dimensions` argument.\"",
")",
"else",
":",
"df_output",
"[",
"df_input",
".",
"columns",
"]",
"=",
"df_input",
"[",
"df_input",
".",
"columns",
"]",
"# hover_data is a dict",
"hover_data_is_dict",
"=",
"(",
"\"hover_data\"",
"in",
"args",
"and",
"args",
"[",
"\"hover_data\"",
"]",
"and",
"isinstance",
"(",
"args",
"[",
"\"hover_data\"",
"]",
",",
"dict",
")",
")",
"# If dict, convert all values of hover_data to tuples to simplify processing",
"if",
"hover_data_is_dict",
":",
"for",
"k",
"in",
"args",
"[",
"\"hover_data\"",
"]",
":",
"if",
"_isinstance_listlike",
"(",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
")",
":",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
"=",
"(",
"True",
",",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
")",
"if",
"not",
"isinstance",
"(",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
",",
"tuple",
")",
":",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
"=",
"(",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
",",
"None",
")",
"if",
"df_provided",
"and",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"k",
"]",
"[",
"1",
"]",
"is",
"not",
"None",
"and",
"k",
"in",
"df_input",
":",
"raise",
"ValueError",
"(",
"\"Ambiguous input: values for '%s' appear both in hover_data and data_frame\"",
"%",
"k",
")",
"# Loop over possible arguments",
"for",
"field_name",
"in",
"all_attrables",
":",
"# Massaging variables",
"argument_list",
"=",
"(",
"[",
"args",
".",
"get",
"(",
"field_name",
")",
"]",
"if",
"field_name",
"not",
"in",
"array_attrables",
"else",
"args",
".",
"get",
"(",
"field_name",
")",
")",
"# argument not specified, continue",
"if",
"argument_list",
"is",
"None",
"or",
"argument_list",
"is",
"[",
"None",
"]",
":",
"continue",
"# Argument name: field_name if the argument is not a list",
"# Else we give names like [\"hover_data_0, hover_data_1\"] etc.",
"field_list",
"=",
"(",
"[",
"field_name",
"]",
"if",
"field_name",
"not",
"in",
"array_attrables",
"else",
"[",
"field_name",
"+",
"\"_\"",
"+",
"str",
"(",
"i",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"argument_list",
")",
")",
"]",
")",
"# argument_list and field_list ready, iterate over them",
"# Core of the loop starts here",
"for",
"i",
",",
"(",
"argument",
",",
"field",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"argument_list",
",",
"field_list",
")",
")",
":",
"length",
"=",
"len",
"(",
"df_output",
")",
"if",
"argument",
"is",
"None",
":",
"continue",
"col_name",
"=",
"None",
"# Case of multiindex",
"if",
"isinstance",
"(",
"argument",
",",
"pd",
".",
"MultiIndex",
")",
":",
"raise",
"TypeError",
"(",
"\"Argument '%s' is a pandas MultiIndex. \"",
"\"pandas MultiIndex is not supported by plotly express \"",
"\"at the moment.\"",
"%",
"field",
")",
"# ----------------- argument is a special value ----------------------",
"if",
"isinstance",
"(",
"argument",
",",
"Constant",
")",
"or",
"isinstance",
"(",
"argument",
",",
"Range",
")",
":",
"col_name",
"=",
"_check_name_not_reserved",
"(",
"str",
"(",
"argument",
".",
"label",
")",
"if",
"argument",
".",
"label",
"is",
"not",
"None",
"else",
"field",
",",
"reserved_names",
",",
")",
"if",
"isinstance",
"(",
"argument",
",",
"Constant",
")",
":",
"constants",
"[",
"col_name",
"]",
"=",
"argument",
".",
"value",
"else",
":",
"ranges",
".",
"append",
"(",
"col_name",
")",
"# ----------------- argument is likely a col name ----------------------",
"elif",
"isinstance",
"(",
"argument",
",",
"str",
")",
"or",
"not",
"hasattr",
"(",
"argument",
",",
"\"__len__\"",
")",
":",
"if",
"(",
"field_name",
"==",
"\"hover_data\"",
"and",
"hover_data_is_dict",
"and",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"str",
"(",
"argument",
")",
"]",
"[",
"1",
"]",
"is",
"not",
"None",
")",
":",
"# hover_data has onboard data",
"# previously-checked to have no name-conflict with data_frame",
"col_name",
"=",
"str",
"(",
"argument",
")",
"real_argument",
"=",
"args",
"[",
"\"hover_data\"",
"]",
"[",
"col_name",
"]",
"[",
"1",
"]",
"if",
"length",
"and",
"len",
"(",
"real_argument",
")",
"!=",
"length",
":",
"raise",
"ValueError",
"(",
"\"All arguments should have the same length. \"",
"\"The length of hover_data key `%s` is %d, whereas the \"",
"\"length of previously-processed arguments %s is %d\"",
"%",
"(",
"argument",
",",
"len",
"(",
"real_argument",
")",
",",
"str",
"(",
"list",
"(",
"df_output",
".",
"columns",
")",
")",
",",
"length",
",",
")",
")",
"if",
"hasattr",
"(",
"real_argument",
",",
"\"values\"",
")",
":",
"df_output",
"[",
"col_name",
"]",
"=",
"real_argument",
".",
"values",
"else",
":",
"df_output",
"[",
"col_name",
"]",
"=",
"np",
".",
"array",
"(",
"real_argument",
")",
"elif",
"not",
"df_provided",
":",
"raise",
"ValueError",
"(",
"\"String or int arguments are only possible when a \"",
"\"DataFrame or an array is provided in the `data_frame` \"",
"\"argument. No DataFrame was provided, but argument \"",
"\"'%s' is of type str or int.\"",
"%",
"field",
")",
"# Check validity of column name",
"elif",
"argument",
"not",
"in",
"df_input",
".",
"columns",
":",
"if",
"wide_mode",
"and",
"argument",
"in",
"(",
"value_name",
",",
"var_name",
")",
":",
"continue",
"else",
":",
"err_msg",
"=",
"(",
"\"Value of '%s' is not the name of a column in 'data_frame'. \"",
"\"Expected one of %s but received: %s\"",
"%",
"(",
"field",
",",
"str",
"(",
"list",
"(",
"df_input",
".",
"columns",
")",
")",
",",
"argument",
")",
")",
"if",
"argument",
"==",
"\"index\"",
":",
"err_msg",
"+=",
"\"\\n To use the index, pass it in directly as `df.index`.\"",
"raise",
"ValueError",
"(",
"err_msg",
")",
"elif",
"length",
"and",
"len",
"(",
"df_input",
"[",
"argument",
"]",
")",
"!=",
"length",
":",
"raise",
"ValueError",
"(",
"\"All arguments should have the same length. \"",
"\"The length of column argument `df[%s]` is %d, whereas the \"",
"\"length of previously-processed arguments %s is %d\"",
"%",
"(",
"field",
",",
"len",
"(",
"df_input",
"[",
"argument",
"]",
")",
",",
"str",
"(",
"list",
"(",
"df_output",
".",
"columns",
")",
")",
",",
"length",
",",
")",
")",
"else",
":",
"col_name",
"=",
"str",
"(",
"argument",
")",
"df_output",
"[",
"col_name",
"]",
"=",
"df_input",
"[",
"argument",
"]",
".",
"values",
"# ----------------- argument is likely a column / array / list.... -------",
"else",
":",
"if",
"df_provided",
"and",
"hasattr",
"(",
"argument",
",",
"\"name\"",
")",
":",
"if",
"argument",
"is",
"df_input",
".",
"index",
":",
"if",
"argument",
".",
"name",
"is",
"None",
"or",
"argument",
".",
"name",
"in",
"df_input",
":",
"col_name",
"=",
"\"index\"",
"else",
":",
"col_name",
"=",
"argument",
".",
"name",
"col_name",
"=",
"_escape_col_name",
"(",
"df_input",
",",
"col_name",
",",
"[",
"var_name",
",",
"value_name",
"]",
")",
"else",
":",
"if",
"(",
"argument",
".",
"name",
"is",
"not",
"None",
"and",
"argument",
".",
"name",
"in",
"df_input",
"and",
"argument",
"is",
"df_input",
"[",
"argument",
".",
"name",
"]",
")",
":",
"col_name",
"=",
"argument",
".",
"name",
"if",
"col_name",
"is",
"None",
":",
"# numpy array, list...",
"col_name",
"=",
"_check_name_not_reserved",
"(",
"field",
",",
"reserved_names",
")",
"if",
"length",
"and",
"len",
"(",
"argument",
")",
"!=",
"length",
":",
"raise",
"ValueError",
"(",
"\"All arguments should have the same length. \"",
"\"The length of argument `%s` is %d, whereas the \"",
"\"length of previously-processed arguments %s is %d\"",
"%",
"(",
"field",
",",
"len",
"(",
"argument",
")",
",",
"str",
"(",
"list",
"(",
"df_output",
".",
"columns",
")",
")",
",",
"length",
")",
")",
"if",
"hasattr",
"(",
"argument",
",",
"\"values\"",
")",
":",
"df_output",
"[",
"str",
"(",
"col_name",
")",
"]",
"=",
"argument",
".",
"values",
"else",
":",
"df_output",
"[",
"str",
"(",
"col_name",
")",
"]",
"=",
"np",
".",
"array",
"(",
"argument",
")",
"# Finally, update argument with column name now that column exists",
"assert",
"col_name",
"is",
"not",
"None",
",",
"(",
"\"Data-frame processing failure, likely due to a internal bug. \"",
"\"Please report this to \"",
"\"https://github.com/plotly/plotly.py/issues/new and we will try to \"",
"\"replicate and fix it.\"",
")",
"if",
"field_name",
"not",
"in",
"array_attrables",
":",
"args",
"[",
"field_name",
"]",
"=",
"str",
"(",
"col_name",
")",
"elif",
"isinstance",
"(",
"args",
"[",
"field_name",
"]",
",",
"dict",
")",
":",
"pass",
"else",
":",
"args",
"[",
"field_name",
"]",
"[",
"i",
"]",
"=",
"str",
"(",
"col_name",
")",
"if",
"field_name",
"!=",
"\"wide_variable\"",
":",
"wide_id_vars",
".",
"add",
"(",
"str",
"(",
"col_name",
")",
")",
"for",
"col_name",
"in",
"ranges",
":",
"df_output",
"[",
"col_name",
"]",
"=",
"range",
"(",
"len",
"(",
"df_output",
")",
")",
"for",
"col_name",
"in",
"constants",
":",
"df_output",
"[",
"col_name",
"]",
"=",
"constants",
"[",
"col_name",
"]",
"return",
"df_output",
",",
"wide_id_vars"
] | [
1031,
0
] | [
1236,
34
] | python | en | ['en', 'error', 'th'] | False |
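A hedged sketch of the in-place rewriting contract; the call is commented out because it depends on module globals such as `all_attrables`:
import numpy as np

args = {"data_frame": None, "x": np.array([1, 2, 3]), "y": [4, 5, 6], "hover_data": None}
# df_output, wide_id_vars = process_args_into_dataframe(args, False, "variable", "value")
# Afterwards `args` holds only column references into `df_output`:
#   args["x"] == "x", args["y"] == "y"
#   list(df_output.columns) == ["x", "y"]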
build_dataframe | (args, constructor) |
Constructs a dataframe and modifies `args` in-place.
The argument values in `args` can be either strings corresponding to
existing columns of a dataframe, or data arrays (lists, numpy arrays,
pandas columns, series).
Parameters
----------
args : OrderedDict
arguments passed to the px function and subsequently modified
constructor : graph_object trace class
the trace type selected for this figure
|
Constructs a dataframe and modifies `args` in-place. | def build_dataframe(args, constructor):
"""
Constructs a dataframe and modifies `args` in-place.
The argument values in `args` can be either strings corresponding to
existing columns of a dataframe, or data arrays (lists, numpy arrays,
pandas columns, series).
Parameters
----------
args : OrderedDict
arguments passed to the px function and subsequently modified
constructor : graph_object trace class
the trace type selected for this figure
"""
# make copies of all the fields via dict() and list()
for field in args:
if field in array_attrables and args[field] is not None:
args[field] = (
dict(args[field])
if isinstance(args[field], dict)
else list(args[field])
)
# Cast data_frame argument to DataFrame (it could be a numpy array, dict etc.)
df_provided = args["data_frame"] is not None
if df_provided and not isinstance(args["data_frame"], pd.DataFrame):
args["data_frame"] = pd.DataFrame(args["data_frame"])
df_input = args["data_frame"]
# now we handle special cases like wide-mode or x-xor-y specification
# by rearranging args to tee things up for process_args_into_dataframe to work
no_x = args.get("x", None) is None
no_y = args.get("y", None) is None
wide_x = False if no_x else _is_col_list(df_input, args["x"])
wide_y = False if no_y else _is_col_list(df_input, args["y"])
wide_mode = False
var_name = None # will likely be "variable" in wide_mode
wide_cross_name = None # will likely be "index" in wide_mode
value_name = None # will likely be "value" in wide_mode
hist2d_types = [go.Histogram2d, go.Histogram2dContour]
if constructor in cartesians:
if wide_x and wide_y:
raise ValueError(
"Cannot accept list of column references or list of columns for both `x` and `y`."
)
if df_provided and no_x and no_y:
wide_mode = True
if isinstance(df_input.columns, pd.MultiIndex):
raise TypeError(
"Data frame columns is a pandas MultiIndex. "
"pandas MultiIndex is not supported by plotly express "
"at the moment."
)
args["wide_variable"] = list(df_input.columns)
var_name = df_input.columns.name
if var_name in [None, "value", "index"] or var_name in df_input:
var_name = "variable"
if constructor == go.Funnel:
wide_orientation = args.get("orientation", None) or "h"
else:
wide_orientation = args.get("orientation", None) or "v"
args["orientation"] = wide_orientation
args["wide_cross"] = None
elif wide_x != wide_y:
wide_mode = True
args["wide_variable"] = args["y"] if wide_y else args["x"]
if df_provided and args["wide_variable"] is df_input.columns:
var_name = df_input.columns.name
if isinstance(args["wide_variable"], pd.Index):
args["wide_variable"] = list(args["wide_variable"])
if var_name in [None, "value", "index"] or (
df_provided and var_name in df_input
):
var_name = "variable"
if constructor == go.Histogram:
wide_orientation = "v" if wide_x else "h"
else:
wide_orientation = "v" if wide_y else "h"
args["y" if wide_y else "x"] = None
args["wide_cross"] = None
if not no_x and not no_y:
wide_cross_name = "__x__" if wide_y else "__y__"
if wide_mode:
value_name = _escape_col_name(df_input, "value", [])
var_name = _escape_col_name(df_input, var_name, [])
missing_bar_dim = None
if constructor in [go.Scatter, go.Bar, go.Funnel] + hist2d_types:
if not wide_mode and (no_x != no_y):
for ax in ["x", "y"]:
if args.get(ax, None) is None:
args[ax] = df_input.index if df_provided else Range()
if constructor == go.Bar:
missing_bar_dim = ax
else:
if args["orientation"] is None:
args["orientation"] = "v" if ax == "x" else "h"
if wide_mode and wide_cross_name is None:
if no_x != no_y and args["orientation"] is None:
args["orientation"] = "v" if no_x else "h"
if df_provided:
if isinstance(df_input.index, pd.MultiIndex):
raise TypeError(
"Data frame index is a pandas MultiIndex. "
"pandas MultiIndex is not supported by plotly express "
"at the moment."
)
args["wide_cross"] = df_input.index
else:
args["wide_cross"] = Range(
label=_escape_col_name(df_input, "index", [var_name, value_name])
)
no_color = False
if type(args.get("color", None)) == str and args["color"] == NO_COLOR:
no_color = True
args["color"] = None
# now that things have been prepped, we do the systematic rewriting of `args`
df_output, wide_id_vars = process_args_into_dataframe(
args, wide_mode, var_name, value_name
)
# now that `df_output` exists and `args` contains only references, we complete
# the special-case and wide-mode handling by further rewriting args and/or mutating
# df_output
count_name = _escape_col_name(df_output, "count", [var_name, value_name])
if not wide_mode and missing_bar_dim and constructor == go.Bar:
# now that we've populated df_output, we check to see if the non-missing
# dimension is categorical: if so, then setting the missing dimension to a
# constant 1 is a less-insane thing to do than setting it to the index by
# default and we let the normal auto-orientation-code do its thing later
other_dim = "x" if missing_bar_dim == "y" else "y"
if not _is_continuous(df_output, args[other_dim]):
args[missing_bar_dim] = count_name
df_output[count_name] = 1
else:
# on the other hand, if the non-missing dimension is continuous, then we
# can use this information to override the normal auto-orientation code
if args["orientation"] is None:
args["orientation"] = "v" if missing_bar_dim == "x" else "h"
if constructor in hist2d_types:
del args["orientation"]
if wide_mode:
# at this point, `df_output` is semi-long/semi-wide, but we know which columns
# are which, so we melt it and reassign `args` to refer to the newly-tidy
# columns, keeping track of various names and manglings set up above
wide_value_vars = [c for c in args["wide_variable"] if c not in wide_id_vars]
del args["wide_variable"]
if wide_cross_name == "__x__":
wide_cross_name = args["x"]
elif wide_cross_name == "__y__":
wide_cross_name = args["y"]
else:
wide_cross_name = args["wide_cross"]
del args["wide_cross"]
dtype = None
for v in wide_value_vars:
v_dtype = df_output[v].dtype.kind
v_dtype = "number" if v_dtype in ["i", "f", "u"] else v_dtype
if dtype is None:
dtype = v_dtype
elif dtype != v_dtype:
raise ValueError(
"Plotly Express cannot process wide-form data with columns of different type."
)
df_output = df_output.melt(
id_vars=wide_id_vars,
value_vars=wide_value_vars,
var_name=var_name,
value_name=value_name,
)
assert len(df_output.columns) == len(set(df_output.columns)), (
"Wide-mode name-inference failure, likely due to a internal bug. "
"Please report this to "
"https://github.com/plotly/plotly.py/issues/new and we will try to "
"replicate and fix it."
)
df_output[var_name] = df_output[var_name].astype(str)
orient_v = wide_orientation == "v"
if constructor in [go.Scatter, go.Funnel] + hist2d_types:
args["x" if orient_v else "y"] = wide_cross_name
args["y" if orient_v else "x"] = value_name
if constructor != go.Histogram2d:
args["color"] = args["color"] or var_name
if "line_group" in args:
args["line_group"] = args["line_group"] or var_name
if constructor == go.Bar:
if _is_continuous(df_output, value_name):
args["x" if orient_v else "y"] = wide_cross_name
args["y" if orient_v else "x"] = value_name
args["color"] = args["color"] or var_name
else:
args["x" if orient_v else "y"] = value_name
args["y" if orient_v else "x"] = count_name
df_output[count_name] = 1
args["color"] = args["color"] or var_name
if constructor in [go.Violin, go.Box]:
args["x" if orient_v else "y"] = wide_cross_name or var_name
args["y" if orient_v else "x"] = value_name
if constructor == go.Histogram:
args["x" if orient_v else "y"] = value_name
args["y" if orient_v else "x"] = wide_cross_name
args["color"] = args["color"] or var_name
if no_color:
args["color"] = None
args["data_frame"] = df_output
    return args
function_tokens: (omitted — token-by-token duplicate of build_dataframe above) | start_point: [1239, 0] | end_point: [1454, 15] | python | en | ['en', 'error', 'th'] | False |
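For illustration, a minimal sketch of the wide-mode branch above as seen from the public API (column manglings and defaults are internal details):

import pandas as pd
import plotly.express as px

# Passing a whole frame with neither x nor y triggers wide mode: the frame
# is melted into "variable"/"value" columns, the index becomes the cross
# dimension, and color defaults to the variable column.
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
fig = px.scatter(df)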
process_dataframe_hierarchy | (args) |
Build dataframe for sunburst or treemap when the path argument is provided.
| def process_dataframe_hierarchy(args):
"""
Build dataframe for sunburst or treemap when the path argument is provided.
"""
df = args["data_frame"]
path = args["path"][::-1]
_check_dataframe_all_leaves(df[path[::-1]])
discrete_color = False
new_path = []
for col_name in path:
new_col_name = col_name + "_path_copy"
new_path.append(new_col_name)
df[new_col_name] = df[col_name]
path = new_path
# ------------ Define aggregation functions --------------------------------
def aggfunc_discrete(x):
uniques = x.unique()
if len(uniques) == 1:
return uniques[0]
else:
return "(?)"
agg_f = {}
aggfunc_color = None
if args["values"]:
try:
df[args["values"]] = pd.to_numeric(df[args["values"]])
except ValueError:
raise ValueError(
"Column `%s` of `df` could not be converted to a numerical data type."
% args["values"]
)
if args["color"]:
if args["color"] == args["values"]:
new_value_col_name = args["values"] + "_sum"
df[new_value_col_name] = df[args["values"]]
args["values"] = new_value_col_name
count_colname = args["values"]
else:
# we need a count column for the first groupby and the weighted mean of color
# trick to be sure the col name is unused: take the sum of existing names
count_colname = (
"count"
if "count" not in df.columns
else "".join([str(el) for el in list(df.columns)])
)
# we can modify df because it's a copy of the px argument
df[count_colname] = 1
args["values"] = count_colname
agg_f[count_colname] = "sum"
if args["color"]:
if not _is_continuous(df, args["color"]):
aggfunc_color = aggfunc_discrete
discrete_color = True
else:
def aggfunc_continuous(x):
return np.average(x, weights=df.loc[x.index, count_colname])
aggfunc_color = aggfunc_continuous
agg_f[args["color"]] = aggfunc_color
# Other columns (for color, hover_data, custom_data etc.)
cols = list(set(df.columns).difference(path))
for col in cols: # for hover_data, custom_data etc.
if col not in agg_f:
agg_f[col] = aggfunc_discrete
# Avoid collisions with reserved names - columns in the path have been copied already
cols = list(set(cols) - set(["labels", "parent", "id"]))
# ----------------------------------------------------------------------------
df_all_trees = pd.DataFrame(columns=["labels", "parent", "id"] + cols)
# Set column type here (useful for continuous vs discrete colorscale)
for col in cols:
df_all_trees[col] = df_all_trees[col].astype(df[col].dtype)
for i, level in enumerate(path):
df_tree = pd.DataFrame(columns=df_all_trees.columns)
dfg = df.groupby(path[i:]).agg(agg_f)
dfg = dfg.reset_index()
# Path label massaging
df_tree["labels"] = dfg[level].copy().astype(str)
df_tree["parent"] = ""
df_tree["id"] = dfg[level].copy().astype(str)
if i < len(path) - 1:
j = i + 1
while j < len(path):
df_tree["parent"] = (
dfg[path[j]].copy().astype(str) + "/" + df_tree["parent"]
)
df_tree["id"] = dfg[path[j]].copy().astype(str) + "/" + df_tree["id"]
j += 1
df_tree["parent"] = df_tree["parent"].str.rstrip("/")
if cols:
df_tree[cols] = dfg[cols]
df_all_trees = df_all_trees.append(df_tree, ignore_index=True)
    # we want to make sure that (?) is the first color of the sequence
if args["color"] and discrete_color:
sort_col_name = "sort_color_if_discrete_color"
while sort_col_name in df_all_trees.columns:
sort_col_name += "0"
df_all_trees[sort_col_name] = df[args["color"]].astype(str)
df_all_trees = df_all_trees.sort_values(by=sort_col_name)
# Now modify arguments
args["data_frame"] = df_all_trees
args["path"] = None
args["ids"] = "id"
args["names"] = "labels"
args["parents"] = "parent"
if args["color"]:
if not args["hover_data"]:
args["hover_data"] = [args["color"]]
elif isinstance(args["hover_data"], dict):
if not args["hover_data"].get(args["color"]):
args["hover_data"][args["color"]] = (True, None)
else:
args["hover_data"].append(args["color"])
    return args
function_tokens: (omitted — token-by-token duplicate of process_dataframe_hierarchy above) | start_point: [1481, 0] | end_point: [1603, 15] | python | en | ['en', 'error', 'th'] | False |
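A short usage sketch of the hierarchy handling above via the public API (the tips dataset ships with plotly express):

import plotly.express as px

# `path` drives the processing above: synthetic "labels"/"parent"/"id"
# columns are built level by level and `values` is summed up the tree.
df = px.data.tips()
fig = px.sunburst(df, path=["day", "time", "sex"], values="total_bill")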
process_dataframe_timeline | (args) |
Massage input for bar traces for px.timeline()
| def process_dataframe_timeline(args):
"""
Massage input for bar traces for px.timeline()
"""
args["is_timeline"] = True
if args["x_start"] is None or args["x_end"] is None:
raise ValueError("Both x_start and x_end are required")
try:
x_start = pd.to_datetime(args["data_frame"][args["x_start"]])
x_end = pd.to_datetime(args["data_frame"][args["x_end"]])
except (ValueError, TypeError):
raise TypeError(
"Both x_start and x_end must refer to data convertible to datetimes."
)
# note that we are not adding any columns to the data frame here, so no risk of overwrite
args["data_frame"][args["x_end"]] = (x_end - x_start).astype("timedelta64[ms]")
args["x"] = args["x_end"]
del args["x_end"]
args["base"] = args["x_start"]
del args["x_start"]
    return args
function_tokens: (omitted — token-by-token duplicate of process_dataframe_timeline above) | start_point: [1606, 0] | end_point: [1628, 15] | python | en | ['en', 'error', 'th'] | False |
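A minimal sketch of the timeline massaging above from the caller's side:

import pandas as pd
import plotly.express as px

# x_start/x_end are parsed as datetimes; the bar length becomes the duration
# in milliseconds and the bar base becomes the start time.
df = pd.DataFrame(dict(Task=["build", "test"],
                       Start=["2021-01-01", "2021-01-04"],
                       Finish=["2021-01-03", "2021-01-08"]))
fig = px.timeline(df, x_start="Start", x_end="Finish", y="Task")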
get_orderings | (args, grouper, grouped) |
`orders` is the user-supplied ordering (with the remaining data-frame-supplied
ordering appended if the column is used for grouping). It includes anything the user
gave, for any variable, including values not present in the dataset. It is used
downstream to set e.g. `categoryarray` for cartesian axes
`group_names` is the set of groups, ordered by the order above
`group_values` is a subset of `orders` in both keys and values. It contains a key
for every grouped mapping and its values are the sorted *data* values for these
mappings.
| def get_orderings(args, grouper, grouped):
"""
`orders` is the user-supplied ordering (with the remaining data-frame-supplied
ordering appended if the column is used for grouping). It includes anything the user
gave, for any variable, including values not present in the dataset. It is used
downstream to set e.g. `categoryarray` for cartesian axes
`group_names` is the set of groups, ordered by the order above
`group_values` is a subset of `orders` in both keys and values. It contains a key
for every grouped mapping and its values are the sorted *data* values for these
mappings.
"""
orders = {} if "category_orders" not in args else args["category_orders"].copy()
group_names = []
group_values = {}
for group_name in grouped.groups:
if len(grouper) == 1:
group_name = (group_name,)
group_names.append(group_name)
for col in grouper:
if col != one_group:
uniques = args["data_frame"][col].unique()
if col not in orders:
orders[col] = list(uniques)
else:
for val in uniques:
if val not in orders[col]:
orders[col].append(val)
group_values[col] = sorted(uniques, key=orders[col].index)
for i, col in reversed(list(enumerate(grouper))):
if col != one_group:
group_names = sorted(
group_names,
key=lambda g: orders[col].index(g[i]) if g[i] in orders[col] else -1,
)
    return orders, group_names, group_values
function_tokens: (omitted — token-by-token duplicate of get_orderings above) | start_point: [1784, 0] | end_point: [1822, 44] | python | en | ['en', 'error', 'th'] | False |
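A sketch of how the `orders` mapping above is populated from user input:

import plotly.express as px

# category_orders seeds `orders`; values not listed are appended in the
# order they appear in the data, and the result drives `categoryarray`.
df = px.data.tips()
fig = px.box(df, x="day", y="total_bill",
             category_orders={"day": ["Thur", "Fri", "Sat", "Sun"]})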
AcuteEvalBuilder.rebuild_core | (self) |
Rebuild the frontend for this task.
| def rebuild_core(self):
"""
Rebuild the frontend for this task.
"""
return_dir = os.getcwd()
os.chdir(FRONTEND_SOURCE_DIR)
if os.path.exists(FRONTEND_BUILD_DIR):
shutil.rmtree(FRONTEND_BUILD_DIR)
packages_installed = subprocess.call(["npm", "install"])
if packages_installed != 0:
raise Exception(
"please make sure npm is installed, otherwise view "
"the above error for more info."
)
webpack_complete = subprocess.call(["npm", "run", "dev"])
if webpack_complete != 0:
raise Exception(
"Webpack appears to have failed to build your "
"frontend. See the above error for more information."
)
    os.chdir(return_dir)
function_tokens: (omitted — token-by-token duplicate of rebuild_core above) | start_point: [26, 4] | end_point: [46, 28] | python | en | ['en', 'error', 'th'] | False |
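The return-code pattern used above, as a standalone sketch:

import subprocess

# subprocess.call returns the exit status; anything nonzero is surfaced as
# an exception instead of silently continuing with a stale bundle.
status = subprocess.call(["npm", "--version"])
if status != 0:
    raise Exception("npm does not appear to be installed or on PATH.")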
AcuteEvalBuilder.build_in_dir | (self, build_dir: str) |
Build the frontend if it doesn't exist, then copy into the server directory.
| def build_in_dir(self, build_dir: str):
"""
Build the frontend if it doesn't exist, then copy into the server directory.
"""
# Only build this task if it hasn't already been built
if True: # not os.path.exists(FRONTEND_BUILD_DIR):
self.rebuild_core()
# Copy the built core and the given task file to the target path
bundle_js_file = os.path.join(FRONTEND_BUILD_DIR, "bundle.js")
target_resource_dir = os.path.join(build_dir, "static")
target_path = os.path.join(target_resource_dir, "bundle.js")
shutil.copy2(bundle_js_file, target_path)
copied_static_file = os.path.join(
FRONTEND_SOURCE_DIR, "src", "static", "index.html"
)
target_path = os.path.join(target_resource_dir, "index.html")
shutil.copy2(copied_static_file, target_path)
# Write a built file confirmation
with open(os.path.join(build_dir, self.BUILT_FILE), "w+") as built_file:
    built_file.write(self.BUILT_MESSAGE)
function_tokens: (omitted — token-by-token duplicate of build_in_dir above) | start_point: [48, 4] | end_point: [70, 48] | python | en | ['en', 'error', 'th'] | False |
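A sketch of the BUILT_FILE sentinel idea implied above; the file name here is an assumption, not the class's actual constant:

import os

# Skip the npm build when a marker file says this directory was already built.
def already_built(build_dir: str, built_file: str = ".built") -> bool:
    return os.path.exists(os.path.join(build_dir, built_file))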
AcuteEvalBuilder.task_dir_is_valid | (task_dir: str) |
Acute eval is always valid; we don't have any special resources.
| def task_dir_is_valid(task_dir: str) -> bool:
"""
Acute eval is always valid; we don't have any special resources.
"""
    return True
function_tokens: (omitted — token-by-token duplicate of task_dir_is_valid above) | start_point: [74, 4] | end_point: [78, 19] | python | en | ['en', 'error', 'th'] | False |
BartModel.output | (self, tensor: torch.Tensor) |
Compute output logits.
Override standard TGM output to _not_ prevent generation of BOS.
| def output(self, tensor: torch.Tensor) -> torch.Tensor:
"""
Compute output logits.
Override standard TGM output to _not_ prevent generation of BOS.
"""
# project back to vocabulary
output = F.linear(tensor, self.embeddings.weight)
    return output
function_tokens: (omitted — token-by-token duplicate of output above) | start_point: [18, 4] | end_point: [26, 21] | python | en | ['en', 'error', 'th'] | False |
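The weight-tied projection above in isolation (shapes are illustrative):

import torch
import torch.nn.functional as F

# Hidden states are projected back through the shared embedding matrix to
# produce vocabulary logits, with no separate output layer.
emb = torch.nn.Embedding(num_embeddings=1000, embedding_dim=64)
hidden = torch.randn(2, 7, 64)           # (batch, sequence, model dim)
logits = F.linear(hidden, emb.weight)    # (batch, sequence, vocab)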
BartModel._get_initial_forced_decoder_input | (self, bsz: int, inputs: torch.LongTensor) |
Return initial input to the decoder.
Override TGA._get_initial_forced_decoder_input to seed EOS BOS.
:param bsz:
batchsize
:param inputs:
inputs to decode
:return initial_input:
initial input for the decoder.
| def _get_initial_forced_decoder_input(self, bsz: int, inputs: torch.LongTensor):
"""
Return initial input to the decoder.
Override TGA._get_initial_forced_decoder_input to seed EOS BOS.
:param bsz:
batchsize
:param inputs:
inputs to decode
:return initial_input:
initial input for the decoder.
"""
tens = (
torch.LongTensor([self.END_IDX, self.START_IDX])
.to(inputs)
.detach()
.expand(bsz, 2)
)
    return torch.cat([tens, inputs], 1)
function_tokens: (omitted — token-by-token duplicate of _get_initial_forced_decoder_input above) | start_point: [28, 4] | end_point: [48, 43] | python | en | ['en', 'error', 'th'] | False |
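A small sketch of the EOS-then-BOS seeding above; the token ids are stand-ins, not BART's real configuration:

import torch

END_IDX, START_IDX = 2, 0
inputs = torch.LongTensor([[5, 6], [7, 8]])                  # (bsz, seq)
tens = torch.LongTensor([END_IDX, START_IDX]).expand(2, 2)   # (bsz, 2)
seeded = torch.cat([tens, inputs], 1)   # [[2, 0, 5, 6], [2, 0, 7, 8]]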
BartModel.reorder_decoder_incremental_state | (
self,
incremental_state: Dict[str, Any],
inds: Union[List[int], torch.LongTensor],
) |
Incremental state is weird to handle when we seed the decoder with two inputs
initially.
| def reorder_decoder_incremental_state(
self,
incremental_state: Dict[str, Any],
inds: Union[List[int], torch.LongTensor],
) -> Optional[Dict[str, Any]]:
"""
Incremental state is weird to handle when we seed the decoder with two inputs
initially.
"""
# we only have this method called when it's actually being used
assert incremental_state is not None
assert len(incremental_state) > 0
for incr_state_l in incremental_state.values():
assert 'self_attn' in incr_state_l
assert 'prev_mask' in incr_state_l['self_attn']
self_attn_mask = incr_state_l['self_attn']['prev_mask']
# check this is on the very first run with incremental state
if self_attn_mask.ndim == 3 and tuple(self_attn_mask.shape[1:]) == (2, 2):
# cut off the inappropriate incremental state
incr_state_l['self_attn']['prev_mask'] = self_attn_mask[:, -1:, :]
    return super().reorder_decoder_incremental_state(incremental_state, inds)
function_tokens: (omitted — token-by-token duplicate of reorder_decoder_incremental_state above) | start_point: [50, 4] | end_point: [72, 81] | python | en | ['en', 'error', 'th'] | False |
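The mask trim performed above, in isolation:

import torch

# After the two-token seeding, the cached self-attention mask is
# (bsz, 2, 2); only its last row should survive into later one-token steps.
prev_mask = torch.ones(4, 2, 2)
trimmed = prev_mask[:, -1:, :]   # shape (4, 1, 2)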
setup_interweb_args | (shared) |
Build and parse CLI opts.
| def setup_interweb_args(shared):
"""
Build and parse CLI opts.
"""
parser = setup_args()
parser.description = 'Interactive chat with a model in a web browser'
parser.add_argument('--port', type=int, default=PORT, help='Port to listen on.')
parser.add_argument(
'--host',
default=HOST_NAME,
type=str,
help='Host from which allow requests, use 0.0.0.0 to allow all IPs',
)
    return parser
function_tokens: (omitted — token-by-token duplicate of setup_interweb_args above) | start_point: [237, 0] | end_point: [250, 17] | python | en | ['en', 'error', 'th'] | False |
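A hedged sketch of driving the parser; real invocations also pass model arguments, since setup_args() pulls in ParlAI's full option set:

parser = setup_interweb_args(shared=None)
opt = parser.parse_args(["--port", "8080", "--host", "0.0.0.0"])
print(opt["port"], opt["host"])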
MyHandler.do_HEAD | (self) |
Handle HEAD requests.
| def do_HEAD(self):
"""
Handle HEAD requests.
"""
self.send_response(200)
self.send_header('Content-type', 'text/html')
    self.end_headers()
function_tokens: (omitted — token-by-token duplicate of do_HEAD above) | start_point: [179, 4] | end_point: [185, 26] | python | en | ['en', 'error', 'th'] | False |
MyHandler.do_POST | (self) |
Handle POST request, especially replying to a chat message.
| def do_POST(self):
"""
Handle POST request, especially replying to a chat message.
"""
if self.path == '/interact':
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
model_response = self._interactive_running(
SHARED.get('opt'), body.decode('utf-8')
)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
json_str = json.dumps(model_response)
self.wfile.write(bytes(json_str, 'utf-8'))
elif self.path == '/reset':
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
SHARED['agent'].reset()
self.wfile.write(bytes("{}", 'utf-8'))
else:
    return self._respond({'status': 500})
function_tokens: (omitted — token-by-token duplicate of do_POST above) | start_point: [187, 4] | end_point: [210, 49] | python | en | ['en', 'error', 'th'] | False |
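Exercising the /interact route above from a client; the response fields depend on the wrapped agent, so the shape here is an assumption:

import requests

resp = requests.post("http://localhost:8080/interact", data="Hello!")
print(resp.json())   # e.g. {"id": "Model", "text": "..."}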
MyHandler.do_GET | (self) |
Respond to GET request, especially the initial load.
| def do_GET(self):
"""
Respond to GET request, especially the initial load.
"""
paths = {
'/': {'status': 200},
'/favicon.ico': {'status': 202}, # Need for chrome
}
if self.path in paths:
self._respond(paths[self.path])
else:
    self._respond({'status': 500})
function_tokens: (omitted — token-by-token duplicate of do_GET above) | start_point: [212, 4] | end_point: [223, 42] | python | en | ['en', 'error', 'th'] | False |
ModelTester.cloud_segmentation_test | (self, net, test_loader, config, num_votes=30, debug=False) |
Test method for cloud segmentation models
| def cloud_segmentation_test(self, net, test_loader, config, num_votes=30, debug=False):
"""
Test method for cloud segmentation models
"""
############
# Initialize
############
    # Choose test smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
test_smooth = 0.95
test_radius_ratio = 0.7
softmax = torch.nn.Softmax(1)
# Number of classes including ignored labels
nc_tot = test_loader.dataset.num_classes
# Number of classes predicted by the model
nc_model = config.num_classes
print("Expected class #'s ", nc_tot, nc_model)
print(test_loader.dataset.input_labels)
# Initiate global prediction over test clouds
self.test_probs = [np.zeros((l.shape[0], nc_model)) for l in test_loader.dataset.input_labels]
# Test saving path
if config.saving:
test_path = join('test', config.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
if not exists(join(test_path, 'predictions')):
makedirs(join(test_path, 'predictions'))
if not exists(join(test_path, 'probs')):
makedirs(join(test_path, 'probs'))
if not exists(join(test_path, 'potentials')):
makedirs(join(test_path, 'potentials'))
else:
test_path = None
# If on validation directly compute score
if test_loader.dataset.set == 'validation':
val_proportions = np.zeros(nc_model, dtype=np.float32)
i = 0
for label_value in test_loader.dataset.label_values:
if label_value not in test_loader.dataset.ignored_labels:
val_proportions[i] = np.sum([np.sum(labels == label_value)
for labels in test_loader.dataset.validation_labels])
i += 1
else:
val_proportions = None
#####################
# Network predictions
#####################
test_epoch = 0
last_min = -0.5
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
# Start test loop
while True:
print('Initialize workers')
for i, batch in enumerate(test_loader):
# New time
t = t[-1:]
t += [time.time()]
if i == 0:
print('Done in {:.1f}s'.format(t[1] - t[0]))
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
t += [time.time()]
# Get probs and labels
stacked_probs = softmax(outputs).cpu().detach().numpy()
s_points = batch.points[0].cpu().numpy()
lengths = batch.lengths[0].cpu().numpy()
in_inds = batch.input_inds.cpu().numpy()
cloud_inds = batch.cloud_inds.cpu().numpy()
if 'cuda' in self.device.type:
torch.cuda.synchronize(self.device)
# Get predictions and labels per instance
# ***************************************
i0 = 0
for b_i, length in enumerate(lengths):
# Get prediction
points = s_points[i0:i0 + length]
probs = stacked_probs[i0:i0 + length]
inds = in_inds[i0:i0 + length]
c_i = cloud_inds[b_i]
if 0 < test_radius_ratio < 1:
mask = np.sum(points ** 2, axis=1) < (test_radius_ratio * config.in_radius) ** 2
inds = inds[mask]
probs = probs[mask]
# Update current probs in whole cloud
self.test_probs[c_i][inds] = test_smooth * self.test_probs[c_i][inds] + (1 - test_smooth) * probs
i0 += length
# Average timing
t += [time.time()]
if i < 2:
mean_dt = np.array(t[1:]) - np.array(t[:-1])
else:
mean_dt = 0.9 * mean_dt + 0.1 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})'
print(message.format(test_epoch, i,
100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
1000 * (mean_dt[2])))
        # Update minimum of potentials
new_min = torch.min(test_loader.dataset.min_potentials)
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min))
#print([np.mean(pots) for pots in test_loader.dataset.potentials])
# Save predicted cloud
if last_min + 1 < new_min:
# Update last_min
last_min += 1
# Show vote results (On subcloud so it is not the good values here)
if test_loader.dataset.set == 'validation':
print('\nConfusion on sub clouds')
Confs = []
for i, file_path in enumerate(test_loader.dataset.files):
# Insert false columns for ignored labels
probs = np.array(self.test_probs[i], copy=True)
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
if label_value in test_loader.dataset.ignored_labels:
probs = np.insert(probs, l_ind, 0, axis=1)
print(probs.shape)
print(probs[0])
print(np.argmax(probs,axis=1)[0])
print(test_loader.dataset.label_values)
# Predicted labels
preds = test_loader.dataset.label_values[np.argmax(probs, axis=1)].astype(np.int32)
# Targets
targets = test_loader.dataset.input_labels[i].astype(np.int32)
print(np.unique(preds))
print(preds[0])
print(np.unique(targets))
print(targets)
# Confs
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)]
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0).astype(np.float32)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
if label_value in test_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
# Rescale with the right number of point per class
C *= np.expand_dims(val_proportions / (np.sum(C, axis=1) + 1e-6), 1)
# Compute IoUs
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print(s + '\n')
# Save real IoU once in a while
if int(np.ceil(new_min)) % 10 == 0:
# Project predictions
print('\nReproject Vote #{:d}'.format(int(np.floor(new_min))))
t1 = time.time()
proj_probs = []
for i, file_path in enumerate(test_loader.dataset.files):
print(i, file_path, test_loader.dataset.test_proj[i].shape, self.test_probs[i].shape)
print(test_loader.dataset.test_proj[i].dtype, np.max(test_loader.dataset.test_proj[i]))
print(test_loader.dataset.test_proj[i][:5])
# Reproject probs on the evaluations points
probs = self.test_probs[i][test_loader.dataset.test_proj[i], :]
proj_probs += [probs]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Show vote results
if test_loader.dataset.set == 'validation':
print('Confusion on full clouds')
t1 = time.time()
Confs = []
for i, file_path in enumerate(test_loader.dataset.files):
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
if label_value in test_loader.dataset.ignored_labels:
proj_probs[i] = np.insert(proj_probs[i], l_ind, 0, axis=1)
# Get the predicted labels
preds = test_loader.dataset.label_values[np.argmax(proj_probs[i], axis=1)].astype(np.int32)
# Confusion
targets = test_loader.dataset.validation_labels[i].astype(np.int32)
Confs += [fast_confusion(targets, preds, test_loader.dataset.label_values)]
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
# Regroup confusions
C = np.sum(np.stack(Confs), axis=0)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
if label_value in test_loader.dataset.ignored_labels:
C = np.delete(C, l_ind, axis=0)
C = np.delete(C, l_ind, axis=1)
IoUs = IoU_from_confusions(C)
mIoU = np.mean(IoUs)
s = '{:5.2f} | '.format(100 * mIoU)
for IoU in IoUs:
s += '{:5.2f} '.format(100 * IoU)
print('-' * len(s))
print(s)
print('-' * len(s) + '\n')
# Save predictions
print('Saving clouds')
t1 = time.time()
for i, file_path in enumerate(test_loader.dataset.files):
# Get file
points = test_loader.dataset.load_evaluation_points(file_path)
# Get the predicted labels
# valid_labels = np.array([label for label in test_loader.dataset.label_values if label not in test_loader.dataset.ignored_labels])
preds = test_loader.dataset.valid_labels[np.argmax(proj_probs[i], axis=1)].astype(np.int32)
# Save plys
cloud_name = file_path.split('/')[-1]
test_name = join(test_path, 'predictions', cloud_name)
# write_ply(test_name,
# [points, preds],
# ['x', 'y', 'z', 'preds'])
dimnames = 'X,Y,Z,Classification'
dimformats = 'f8,f8,f8,u1'
foo = np.core.records.fromarrays(np.vstack((points.T,preds.T)),names=dimnames,formats=dimformats)
write_las(test_name, foo)
# test_name2 = join(test_path, 'probs', cloud_name)
# prob_names = ['_'.join(test_loader.dataset.label_to_names[label].split())
# for label in test_loader.dataset.label_values]
# write_ply(test_name2,
# [points, proj_probs[i]],
# ['x', 'y', 'z'] + prob_names)
# Save potentials
pot_points = np.array(test_loader.dataset.pot_trees[i].data, copy=False)
pot_name = join(test_path, 'potentials', cloud_name)
pots = test_loader.dataset.potentials[i].numpy().astype(np.float32)
dimnames = 'X,Y,Z,Potentials'
dimformats = 'f8,f8,f8,f8'
foo = np.core.records.fromarrays(np.vstack((pot_points.T,pots.T)),names=dimnames,formats=dimformats)
write_las(pot_name, foo)
# write_ply(pot_name,
# [pot_points.astype(np.float32), pots],
# ['x', 'y', 'z', 'pots'])
# Save ascii preds
if test_loader.dataset.set == 'test':
if test_loader.dataset.name.startswith('Semantic3D'):
ascii_name = join(test_path, 'predictions', test_loader.dataset.ascii_files[cloud_name])
else:
ascii_name = join(test_path, 'predictions', cloud_name[:-4] + '.txt')
np.savetxt(ascii_name, preds, fmt='%d')
t2 = time.time()
print('Done in {:.1f} s\n'.format(t2 - t1))
test_epoch += 1
# Break when reaching number of desired votes
if last_min > num_votes:
break
return | [
"def",
"cloud_segmentation_test",
"(",
"self",
",",
"net",
",",
"test_loader",
",",
"config",
",",
"num_votes",
"=",
"30",
",",
"debug",
"=",
"False",
")",
":",
"############",
"# Initialize",
"############",
"# Choose test smoothing parameter (0 for no smothing, 0.99 for big smoothing)",
"test_smooth",
"=",
"0.95",
"test_radius_ratio",
"=",
"0.7",
"softmax",
"=",
"torch",
".",
"nn",
".",
"Softmax",
"(",
"1",
")",
"# Number of classes including ignored labels",
"nc_tot",
"=",
"test_loader",
".",
"dataset",
".",
"num_classes",
"# Number of classes predicted by the model",
"nc_model",
"=",
"config",
".",
"num_classes",
"print",
"(",
"\"Expected class #'s \"",
",",
"nc_tot",
",",
"nc_model",
")",
"print",
"(",
"test_loader",
".",
"dataset",
".",
"input_labels",
")",
"# Initiate global prediction over test clouds",
"self",
".",
"test_probs",
"=",
"[",
"np",
".",
"zeros",
"(",
"(",
"l",
".",
"shape",
"[",
"0",
"]",
",",
"nc_model",
")",
")",
"for",
"l",
"in",
"test_loader",
".",
"dataset",
".",
"input_labels",
"]",
"# Test saving path",
"if",
"config",
".",
"saving",
":",
"test_path",
"=",
"join",
"(",
"'test'",
",",
"config",
".",
"saving_path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"if",
"not",
"exists",
"(",
"test_path",
")",
":",
"makedirs",
"(",
"test_path",
")",
"if",
"not",
"exists",
"(",
"join",
"(",
"test_path",
",",
"'predictions'",
")",
")",
":",
"makedirs",
"(",
"join",
"(",
"test_path",
",",
"'predictions'",
")",
")",
"if",
"not",
"exists",
"(",
"join",
"(",
"test_path",
",",
"'probs'",
")",
")",
":",
"makedirs",
"(",
"join",
"(",
"test_path",
",",
"'probs'",
")",
")",
"if",
"not",
"exists",
"(",
"join",
"(",
"test_path",
",",
"'potentials'",
")",
")",
":",
"makedirs",
"(",
"join",
"(",
"test_path",
",",
"'potentials'",
")",
")",
"else",
":",
"test_path",
"=",
"None",
"# If on validation directly compute score",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"val_proportions",
"=",
"np",
".",
"zeros",
"(",
"nc_model",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"i",
"=",
"0",
"for",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"label_values",
":",
"if",
"label_value",
"not",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"val_proportions",
"[",
"i",
"]",
"=",
"np",
".",
"sum",
"(",
"[",
"np",
".",
"sum",
"(",
"labels",
"==",
"label_value",
")",
"for",
"labels",
"in",
"test_loader",
".",
"dataset",
".",
"validation_labels",
"]",
")",
"i",
"+=",
"1",
"else",
":",
"val_proportions",
"=",
"None",
"#####################",
"# Network predictions",
"#####################",
"test_epoch",
"=",
"0",
"last_min",
"=",
"-",
"0.5",
"t",
"=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"last_display",
"=",
"time",
".",
"time",
"(",
")",
"mean_dt",
"=",
"np",
".",
"zeros",
"(",
"1",
")",
"# Start test loop",
"while",
"True",
":",
"print",
"(",
"'Initialize workers'",
")",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"test_loader",
")",
":",
"# New time",
"t",
"=",
"t",
"[",
"-",
"1",
":",
"]",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"if",
"i",
"==",
"0",
":",
"print",
"(",
"'Done in {:.1f}s'",
".",
"format",
"(",
"t",
"[",
"1",
"]",
"-",
"t",
"[",
"0",
"]",
")",
")",
"if",
"'cuda'",
"in",
"self",
".",
"device",
".",
"type",
":",
"batch",
".",
"to",
"(",
"self",
".",
"device",
")",
"# Forward pass",
"outputs",
"=",
"net",
"(",
"batch",
",",
"config",
")",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"# Get probs and labels",
"stacked_probs",
"=",
"softmax",
"(",
"outputs",
")",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
"s_points",
"=",
"batch",
".",
"points",
"[",
"0",
"]",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"lengths",
"=",
"batch",
".",
"lengths",
"[",
"0",
"]",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"in_inds",
"=",
"batch",
".",
"input_inds",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"cloud_inds",
"=",
"batch",
".",
"cloud_inds",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"if",
"'cuda'",
"in",
"self",
".",
"device",
".",
"type",
":",
"torch",
".",
"cuda",
".",
"synchronize",
"(",
"self",
".",
"device",
")",
"# Get predictions and labels per instance",
"# ***************************************",
"i0",
"=",
"0",
"for",
"b_i",
",",
"length",
"in",
"enumerate",
"(",
"lengths",
")",
":",
"# Get prediction",
"points",
"=",
"s_points",
"[",
"i0",
":",
"i0",
"+",
"length",
"]",
"probs",
"=",
"stacked_probs",
"[",
"i0",
":",
"i0",
"+",
"length",
"]",
"inds",
"=",
"in_inds",
"[",
"i0",
":",
"i0",
"+",
"length",
"]",
"c_i",
"=",
"cloud_inds",
"[",
"b_i",
"]",
"if",
"0",
"<",
"test_radius_ratio",
"<",
"1",
":",
"mask",
"=",
"np",
".",
"sum",
"(",
"points",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"<",
"(",
"test_radius_ratio",
"*",
"config",
".",
"in_radius",
")",
"**",
"2",
"inds",
"=",
"inds",
"[",
"mask",
"]",
"probs",
"=",
"probs",
"[",
"mask",
"]",
"# Update current probs in whole cloud",
"self",
".",
"test_probs",
"[",
"c_i",
"]",
"[",
"inds",
"]",
"=",
"test_smooth",
"*",
"self",
".",
"test_probs",
"[",
"c_i",
"]",
"[",
"inds",
"]",
"+",
"(",
"1",
"-",
"test_smooth",
")",
"*",
"probs",
"i0",
"+=",
"length",
"# Average timing",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"if",
"i",
"<",
"2",
":",
"mean_dt",
"=",
"np",
".",
"array",
"(",
"t",
"[",
"1",
":",
"]",
")",
"-",
"np",
".",
"array",
"(",
"t",
"[",
":",
"-",
"1",
"]",
")",
"else",
":",
"mean_dt",
"=",
"0.9",
"*",
"mean_dt",
"+",
"0.1",
"*",
"(",
"np",
".",
"array",
"(",
"t",
"[",
"1",
":",
"]",
")",
"-",
"np",
".",
"array",
"(",
"t",
"[",
":",
"-",
"1",
"]",
")",
")",
"# Display",
"if",
"(",
"t",
"[",
"-",
"1",
"]",
"-",
"last_display",
")",
">",
"1.0",
":",
"last_display",
"=",
"t",
"[",
"-",
"1",
"]",
"message",
"=",
"'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f})'",
"print",
"(",
"message",
".",
"format",
"(",
"test_epoch",
",",
"i",
",",
"100",
"*",
"i",
"/",
"config",
".",
"validation_size",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"0",
"]",
")",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"1",
"]",
")",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"2",
"]",
")",
")",
")",
"# Update minimum od potentials",
"new_min",
"=",
"torch",
".",
"min",
"(",
"test_loader",
".",
"dataset",
".",
"min_potentials",
")",
"print",
"(",
"'Test epoch {:d}, end. Min potential = {:.1f}'",
".",
"format",
"(",
"test_epoch",
",",
"new_min",
")",
")",
"#print([np.mean(pots) for pots in test_loader.dataset.potentials])",
"# Save predicted cloud",
"if",
"last_min",
"+",
"1",
"<",
"new_min",
":",
"# Update last_min",
"last_min",
"+=",
"1",
"# Show vote results (On subcloud so it is not the good values here)",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"print",
"(",
"'\\nConfusion on sub clouds'",
")",
"Confs",
"=",
"[",
"]",
"for",
"i",
",",
"file_path",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"files",
")",
":",
"# Insert false columns for ignored labels",
"probs",
"=",
"np",
".",
"array",
"(",
"self",
".",
"test_probs",
"[",
"i",
"]",
",",
"copy",
"=",
"True",
")",
"for",
"l_ind",
",",
"label_value",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"probs",
"=",
"np",
".",
"insert",
"(",
"probs",
",",
"l_ind",
",",
"0",
",",
"axis",
"=",
"1",
")",
"print",
"(",
"probs",
".",
"shape",
")",
"print",
"(",
"probs",
"[",
"0",
"]",
")",
"print",
"(",
"np",
".",
"argmax",
"(",
"probs",
",",
"axis",
"=",
"1",
")",
"[",
"0",
"]",
")",
"print",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
"# Predicted labels",
"preds",
"=",
"test_loader",
".",
"dataset",
".",
"label_values",
"[",
"np",
".",
"argmax",
"(",
"probs",
",",
"axis",
"=",
"1",
")",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Targets",
"targets",
"=",
"test_loader",
".",
"dataset",
".",
"input_labels",
"[",
"i",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"print",
"(",
"np",
".",
"unique",
"(",
"preds",
")",
")",
"print",
"(",
"preds",
"[",
"0",
"]",
")",
"print",
"(",
"np",
".",
"unique",
"(",
"targets",
")",
")",
"print",
"(",
"targets",
")",
"# Confs",
"Confs",
"+=",
"[",
"fast_confusion",
"(",
"targets",
",",
"preds",
",",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
"]",
"# Regroup confusions",
"C",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"stack",
"(",
"Confs",
")",
",",
"axis",
"=",
"0",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"# Remove ignored labels from confusions",
"for",
"l_ind",
",",
"label_value",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
")",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"C",
"=",
"np",
".",
"delete",
"(",
"C",
",",
"l_ind",
",",
"axis",
"=",
"0",
")",
"C",
"=",
"np",
".",
"delete",
"(",
"C",
",",
"l_ind",
",",
"axis",
"=",
"1",
")",
"# Rescale with the right number of point per class",
"C",
"*=",
"np",
".",
"expand_dims",
"(",
"val_proportions",
"/",
"(",
"np",
".",
"sum",
"(",
"C",
",",
"axis",
"=",
"1",
")",
"+",
"1e-6",
")",
",",
"1",
")",
"# Compute IoUs",
"IoUs",
"=",
"IoU_from_confusions",
"(",
"C",
")",
"mIoU",
"=",
"np",
".",
"mean",
"(",
"IoUs",
")",
"s",
"=",
"'{:5.2f} | '",
".",
"format",
"(",
"100",
"*",
"mIoU",
")",
"for",
"IoU",
"in",
"IoUs",
":",
"s",
"+=",
"'{:5.2f} '",
".",
"format",
"(",
"100",
"*",
"IoU",
")",
"print",
"(",
"s",
"+",
"'\\n'",
")",
"# Save real IoU once in a while",
"if",
"int",
"(",
"np",
".",
"ceil",
"(",
"new_min",
")",
")",
"%",
"10",
"==",
"0",
":",
"# Project predictions",
"print",
"(",
"'\\nReproject Vote #{:d}'",
".",
"format",
"(",
"int",
"(",
"np",
".",
"floor",
"(",
"new_min",
")",
")",
")",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"proj_probs",
"=",
"[",
"]",
"for",
"i",
",",
"file_path",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"files",
")",
":",
"print",
"(",
"i",
",",
"file_path",
",",
"test_loader",
".",
"dataset",
".",
"test_proj",
"[",
"i",
"]",
".",
"shape",
",",
"self",
".",
"test_probs",
"[",
"i",
"]",
".",
"shape",
")",
"print",
"(",
"test_loader",
".",
"dataset",
".",
"test_proj",
"[",
"i",
"]",
".",
"dtype",
",",
"np",
".",
"max",
"(",
"test_loader",
".",
"dataset",
".",
"test_proj",
"[",
"i",
"]",
")",
")",
"print",
"(",
"test_loader",
".",
"dataset",
".",
"test_proj",
"[",
"i",
"]",
"[",
":",
"5",
"]",
")",
"# Reproject probs on the evaluations points",
"probs",
"=",
"self",
".",
"test_probs",
"[",
"i",
"]",
"[",
"test_loader",
".",
"dataset",
".",
"test_proj",
"[",
"i",
"]",
",",
":",
"]",
"proj_probs",
"+=",
"[",
"probs",
"]",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'Done in {:.1f} s\\n'",
".",
"format",
"(",
"t2",
"-",
"t1",
")",
")",
"# Show vote results",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"print",
"(",
"'Confusion on full clouds'",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"Confs",
"=",
"[",
"]",
"for",
"i",
",",
"file_path",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"files",
")",
":",
"# Insert false columns for ignored labels",
"for",
"l_ind",
",",
"label_value",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"proj_probs",
"[",
"i",
"]",
"=",
"np",
".",
"insert",
"(",
"proj_probs",
"[",
"i",
"]",
",",
"l_ind",
",",
"0",
",",
"axis",
"=",
"1",
")",
"# Get the predicted labels",
"preds",
"=",
"test_loader",
".",
"dataset",
".",
"label_values",
"[",
"np",
".",
"argmax",
"(",
"proj_probs",
"[",
"i",
"]",
",",
"axis",
"=",
"1",
")",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Confusion",
"targets",
"=",
"test_loader",
".",
"dataset",
".",
"validation_labels",
"[",
"i",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"Confs",
"+=",
"[",
"fast_confusion",
"(",
"targets",
",",
"preds",
",",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
"]",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'Done in {:.1f} s\\n'",
".",
"format",
"(",
"t2",
"-",
"t1",
")",
")",
"# Regroup confusions",
"C",
"=",
"np",
".",
"sum",
"(",
"np",
".",
"stack",
"(",
"Confs",
")",
",",
"axis",
"=",
"0",
")",
"# Remove ignored labels from confusions",
"for",
"l_ind",
",",
"label_value",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
")",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"C",
"=",
"np",
".",
"delete",
"(",
"C",
",",
"l_ind",
",",
"axis",
"=",
"0",
")",
"C",
"=",
"np",
".",
"delete",
"(",
"C",
",",
"l_ind",
",",
"axis",
"=",
"1",
")",
"IoUs",
"=",
"IoU_from_confusions",
"(",
"C",
")",
"mIoU",
"=",
"np",
".",
"mean",
"(",
"IoUs",
")",
"s",
"=",
"'{:5.2f} | '",
".",
"format",
"(",
"100",
"*",
"mIoU",
")",
"for",
"IoU",
"in",
"IoUs",
":",
"s",
"+=",
"'{:5.2f} '",
".",
"format",
"(",
"100",
"*",
"IoU",
")",
"print",
"(",
"'-'",
"*",
"len",
"(",
"s",
")",
")",
"print",
"(",
"s",
")",
"print",
"(",
"'-'",
"*",
"len",
"(",
"s",
")",
"+",
"'\\n'",
")",
"# Save predictions",
"print",
"(",
"'Saving clouds'",
")",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"for",
"i",
",",
"file_path",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"files",
")",
":",
"# Get file",
"points",
"=",
"test_loader",
".",
"dataset",
".",
"load_evaluation_points",
"(",
"file_path",
")",
"# Get the predicted labels",
"# valid_labels = np.array([label for label in test_loader.dataset.label_values if label not in test_loader.dataset.ignored_labels])",
"preds",
"=",
"test_loader",
".",
"dataset",
".",
"valid_labels",
"[",
"np",
".",
"argmax",
"(",
"proj_probs",
"[",
"i",
"]",
",",
"axis",
"=",
"1",
")",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Save plys",
"cloud_name",
"=",
"file_path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"test_name",
"=",
"join",
"(",
"test_path",
",",
"'predictions'",
",",
"cloud_name",
")",
"# write_ply(test_name,",
"# [points, preds],",
"# ['x', 'y', 'z', 'preds'])",
"dimnames",
"=",
"'X,Y,Z,Classification'",
"dimformats",
"=",
"'f8,f8,f8,u1'",
"foo",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromarrays",
"(",
"np",
".",
"vstack",
"(",
"(",
"points",
".",
"T",
",",
"preds",
".",
"T",
")",
")",
",",
"names",
"=",
"dimnames",
",",
"formats",
"=",
"dimformats",
")",
"write_las",
"(",
"test_name",
",",
"foo",
")",
"# test_name2 = join(test_path, 'probs', cloud_name)",
"# prob_names = ['_'.join(test_loader.dataset.label_to_names[label].split())",
"# for label in test_loader.dataset.label_values]",
"# write_ply(test_name2,",
"# [points, proj_probs[i]],",
"# ['x', 'y', 'z'] + prob_names)",
"# Save potentials",
"pot_points",
"=",
"np",
".",
"array",
"(",
"test_loader",
".",
"dataset",
".",
"pot_trees",
"[",
"i",
"]",
".",
"data",
",",
"copy",
"=",
"False",
")",
"pot_name",
"=",
"join",
"(",
"test_path",
",",
"'potentials'",
",",
"cloud_name",
")",
"pots",
"=",
"test_loader",
".",
"dataset",
".",
"potentials",
"[",
"i",
"]",
".",
"numpy",
"(",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"dimnames",
"=",
"'X,Y,Z,Potentials'",
"dimformats",
"=",
"'f8,f8,f8,f8'",
"foo",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromarrays",
"(",
"np",
".",
"vstack",
"(",
"(",
"pot_points",
".",
"T",
",",
"pots",
".",
"T",
")",
")",
",",
"names",
"=",
"dimnames",
",",
"formats",
"=",
"dimformats",
")",
"write_las",
"(",
"pot_name",
",",
"foo",
")",
"# write_ply(pot_name,",
"# [pot_points.astype(np.float32), pots],",
"# ['x', 'y', 'z', 'pots'])",
"# Save ascii preds",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'test'",
":",
"if",
"test_loader",
".",
"dataset",
".",
"name",
".",
"startswith",
"(",
"'Semantic3D'",
")",
":",
"ascii_name",
"=",
"join",
"(",
"test_path",
",",
"'predictions'",
",",
"test_loader",
".",
"dataset",
".",
"ascii_files",
"[",
"cloud_name",
"]",
")",
"else",
":",
"ascii_name",
"=",
"join",
"(",
"test_path",
",",
"'predictions'",
",",
"cloud_name",
"[",
":",
"-",
"4",
"]",
"+",
"'.txt'",
")",
"np",
".",
"savetxt",
"(",
"ascii_name",
",",
"preds",
",",
"fmt",
"=",
"'%d'",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"'Done in {:.1f} s\\n'",
".",
"format",
"(",
"t2",
"-",
"t1",
")",
")",
"test_epoch",
"+=",
"1",
"# Break when reaching number of desired votes",
"if",
"last_min",
">",
"num_votes",
":",
"break",
"return"
] | [
179,
4
] | [
489,
14
] | python | en | ['en', 'error', 'th'] | False |
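For reference, the per-class IoU that IoU_from_confusions extracts from the regrouped confusion matrix C in the record above can be sketched as follows. This is a minimal sketch, assuming rows of C hold ground-truth classes and columns hold predictions; the project's actual helper may differ in details such as the epsilon used.

import numpy as np

def iou_from_confusion(C):
    # True positives sit on the diagonal of the confusion matrix.
    tp = np.diagonal(C).astype(np.float64)
    # Union for a class = all points labelled as it + all points predicted as it - TP.
    union = C.sum(axis=1) + C.sum(axis=0) - tp
    # A small epsilon avoids division by zero for classes absent from the scene.
    return tp / (union + 1e-6)

The mIoU printed by the method is then simply np.mean of these per-class values, computed after ignored labels have been deleted from C and the rows rescaled by the validation class proportions.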
ModelTester.slam_segmentation_test | (self, net, test_loader, config, num_votes=100, debug=True) |
Test method for slam segmentation models
|
Test method for slam segmentation models
| def slam_segmentation_test(self, net, test_loader, config, num_votes=100, debug=True):
"""
Test method for slam segmentation models
"""
############
# Initialize
############
# Choose validation smoothing parameter (0 for no smoothing, 0.99 for big smoothing)
test_smooth = 0.5
last_min = -0.5
softmax = torch.nn.Softmax(1)
# Number of classes including ignored labels
nc_tot = test_loader.dataset.num_classes
nc_model = net.C
# Test saving path
test_path = None
report_path = None
if config.saving:
test_path = join('test', config.saving_path.split('/')[-1])
if not exists(test_path):
makedirs(test_path)
report_path = join(test_path, 'reports')
if not exists(report_path):
makedirs(report_path)
if test_loader.dataset.set == 'validation':
for folder in ['val_predictions', 'val_probs']:
if not exists(join(test_path, folder)):
makedirs(join(test_path, folder))
else:
for folder in ['predictions', 'probs']:
if not exists(join(test_path, folder)):
makedirs(join(test_path, folder))
# Init validation container
all_f_preds = []
all_f_labels = []
if test_loader.dataset.set == 'validation':
for i, seq_frames in enumerate(test_loader.dataset.frames):
all_f_preds.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])
all_f_labels.append([np.zeros((0,), dtype=np.int32) for _ in seq_frames])
#####################
# Network predictions
#####################
predictions = []
targets = []
test_epoch = 0
t = [time.time()]
last_display = time.time()
mean_dt = np.zeros(1)
# Start test loop
while True:
print('Initialize workers')
for i, batch in enumerate(test_loader):
# New time
t = t[-1:]
t += [time.time()]
if i == 0:
print('Done in {:.1f}s'.format(t[1] - t[0]))
if 'cuda' in self.device.type:
batch.to(self.device)
# Forward pass
outputs = net(batch, config)
# Get probs and labels
stk_probs = softmax(outputs).cpu().detach().numpy()
lengths = batch.lengths[0].cpu().numpy()
f_inds = batch.frame_inds.cpu().numpy()
r_inds_list = batch.reproj_inds
r_mask_list = batch.reproj_masks
labels_list = batch.val_labels
if 'cuda' in self.device.type:
torch.cuda.synchronize(self.device)
t += [time.time()]
# Get predictions and labels per instance
# ***************************************
i0 = 0
for b_i, length in enumerate(lengths):
# Get prediction
probs = stk_probs[i0:i0 + length]
proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i]
frame_labels = labels_list[b_i]
s_ind = f_inds[b_i, 0]
f_ind = f_inds[b_i, 1]
# Project predictions on the frame points
proj_probs = probs[proj_inds]
# Safety check in case there is only one point:
if proj_probs.ndim < 2:
proj_probs = np.expand_dims(proj_probs, 0)
# Save probs in a binary file (uint8 format for lighter weight)
seq_name = test_loader.dataset.sequences[s_ind]
if test_loader.dataset.set == 'validation':
folder = 'val_probs'
pred_folder = 'val_predictions'
else:
folder = 'probs'
pred_folder = 'predictions'
filename = '{:s}_{:07d}.npy'.format(seq_name, f_ind)
filepath = join(test_path, folder, filename)
if exists(filepath):
frame_probs_uint8 = np.load(filepath)
else:
frame_probs_uint8 = np.zeros((proj_mask.shape[0], nc_model), dtype=np.uint8)
frame_probs = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255
frame_probs = test_smooth * frame_probs + (1 - test_smooth) * proj_probs
frame_probs_uint8[proj_mask, :] = (frame_probs * 255).astype(np.uint8)
np.save(filepath, frame_probs_uint8)
# Save some predictions in ply format for visual inspection
if test_loader.dataset.set == 'validation':
# Insert false columns for ignored labels
frame_probs_uint8_bis = frame_probs_uint8.copy()
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
if label_value in test_loader.dataset.ignored_labels:
frame_probs_uint8_bis = np.insert(frame_probs_uint8_bis, l_ind, 0, axis=1)
# Predicted labels
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8_bis,
axis=1)].astype(np.int32)
# Save some of the frame preds
if f_ind % 20 == 0:
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')
frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4))
predpath = join(test_path, pred_folder, filename[:-4] + '.ply')
#pots = test_loader.dataset.f_potentials[s_ind][f_ind]
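# Potentials are disabled for now: the empty array below forces the no-pots write_ply branch.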
pots = np.zeros((0,))
if pots.shape[0] > 0:
write_ply(predpath,
[frame_points[:, :3], frame_labels, frame_preds, pots],
['x', 'y', 'z', 'gt', 'pre', 'pots'])
else:
write_ply(predpath,
[frame_points[:, :3], frame_labels, frame_preds],
['x', 'y', 'z', 'gt', 'pre'])
# Also save label probabilities
probpath = join(test_path, folder, filename[:-4] + '_probs.ply')
lbl_names = [test_loader.dataset.label_to_names[l]
for l in test_loader.dataset.label_values
if l not in test_loader.dataset.ignored_labels]
write_ply(probpath,
[frame_points[:, :3], frame_probs_uint8],
['x', 'y', 'z'] + lbl_names)
# keep frame preds in memory
all_f_preds[s_ind][f_ind] = frame_preds
all_f_labels[s_ind][f_ind] = frame_labels
else:
# Save some of the frame preds
if f_inds[b_i, 1] % 100 == 0:
# Insert false columns for ignored labels
for l_ind, label_value in enumerate(test_loader.dataset.label_values):
if label_value in test_loader.dataset.ignored_labels:
frame_probs_uint8 = np.insert(frame_probs_uint8, l_ind, 0, axis=1)
# Predicted labels
frame_preds = test_loader.dataset.label_values[np.argmax(frame_probs_uint8,
axis=1)].astype(np.int32)
# Load points
seq_path = join(test_loader.dataset.path, 'sequences', test_loader.dataset.sequences[s_ind])
velo_file = join(seq_path, 'velodyne', test_loader.dataset.frames[s_ind][f_ind] + '.bin')
frame_points = np.fromfile(velo_file, dtype=np.float32)
frame_points = frame_points.reshape((-1, 4))
predpath = join(test_path, pred_folder, filename[:-4] + '.ply')
#pots = test_loader.dataset.f_potentials[s_ind][f_ind]
pots = np.zeros((0,))
if pots.shape[0] > 0:
write_ply(predpath,
[frame_points[:, :3], frame_preds, pots],
['x', 'y', 'z', 'pre', 'pots'])
else:
write_ply(predpath,
[frame_points[:, :3], frame_preds],
['x', 'y', 'z', 'pre'])
# Stack all predictions for this epoch
i0 += length
# Average timing
t += [time.time()]
mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))
# Display
if (t[-1] - last_display) > 1.0:
last_display = t[-1]
message = 'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%'
min_pot = int(torch.floor(torch.min(test_loader.dataset.potentials)))
pot_num = torch.sum(test_loader.dataset.potentials > min_pot + 0.5).type(torch.int32).item()
current_num = pot_num + (i + 1 - config.validation_size) * config.val_batch_num
print(message.format(test_epoch, i,
100 * i / config.validation_size,
1000 * (mean_dt[0]),
1000 * (mean_dt[1]),
1000 * (mean_dt[2]),
min_pot,
100.0 * current_num / len(test_loader.dataset.potentials)))
# Update minimum of potentials
new_min = torch.min(test_loader.dataset.potentials)
print('Test epoch {:d}, end. Min potential = {:.1f}'.format(test_epoch, new_min))
if last_min + 1 < new_min:
# Update last_min
last_min += 1
if test_loader.dataset.set == 'validation' and last_min % 1 == 0:
#####################################
# Results on the whole validation set
#####################################
# Confusions for our subparts of validation set
Confs = np.zeros((len(predictions), nc_tot, nc_tot), dtype=np.int32)
for i, (preds, truth) in enumerate(zip(predictions, targets)):
# Confusions
Confs[i, :, :] = fast_confusion(truth, preds, test_loader.dataset.label_values).astype(np.int32)
# Show vote results
print('\nCompute confusion')
val_preds = []
val_labels = []
t1 = time.time()
for i, seq_frames in enumerate(test_loader.dataset.frames):
val_preds += [np.hstack(all_f_preds[i])]
val_labels += [np.hstack(all_f_labels[i])]
val_preds = np.hstack(val_preds)
val_labels = np.hstack(val_labels)
t2 = time.time()
C_tot = fast_confusion(val_labels, val_preds, test_loader.dataset.label_values)
t3 = time.time()
print(' Stacking time : {:.1f}s'.format(t2 - t1))
print('Confusion time : {:.1f}s'.format(t3 - t2))
s1 = '\n'
for cc in C_tot:
for c in cc:
s1 += '{:7.0f} '.format(c)
s1 += '\n'
if debug:
print(s1)
# Remove ignored labels from confusions
for l_ind, label_value in reversed(list(enumerate(test_loader.dataset.label_values))):
if label_value in test_loader.dataset.ignored_labels:
C_tot = np.delete(C_tot, l_ind, axis=0)
C_tot = np.delete(C_tot, l_ind, axis=1)
# Objects IoU
val_IoUs = IoU_from_confusions(C_tot)
# Compute IoUs
mIoU = np.mean(val_IoUs)
s2 = '{:5.2f} | '.format(100 * mIoU)
for IoU in val_IoUs:
s2 += '{:5.2f} '.format(100 * IoU)
print(s2 + '\n')
# Save a report
report_file = join(report_path, 'report_{:04d}.txt'.format(int(np.floor(last_min))))
str = 'Report of the confusion and metrics\n'
str += '***********************************\n\n\n'
str += 'Confusion matrix:\n\n'
str += s1
str += '\nIoU values:\n\n'
str += s2
str += '\n\n'
with open(report_file, 'w') as f:
f.write(str)
test_epoch += 1
# Break when reaching number of desired votes
if last_min > num_votes:
break
return | [
"def",
"slam_segmentation_test",
"(",
"self",
",",
"net",
",",
"test_loader",
",",
"config",
",",
"num_votes",
"=",
"100",
",",
"debug",
"=",
"True",
")",
":",
"############",
"# Initialize",
"############",
"# Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing)",
"test_smooth",
"=",
"0.5",
"last_min",
"=",
"-",
"0.5",
"softmax",
"=",
"torch",
".",
"nn",
".",
"Softmax",
"(",
"1",
")",
"# Number of classes including ignored labels",
"nc_tot",
"=",
"test_loader",
".",
"dataset",
".",
"num_classes",
"nc_model",
"=",
"net",
".",
"C",
"# Test saving path",
"test_path",
"=",
"None",
"report_path",
"=",
"None",
"if",
"config",
".",
"saving",
":",
"test_path",
"=",
"join",
"(",
"'test'",
",",
"config",
".",
"saving_path",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
")",
"if",
"not",
"exists",
"(",
"test_path",
")",
":",
"makedirs",
"(",
"test_path",
")",
"report_path",
"=",
"join",
"(",
"test_path",
",",
"'reports'",
")",
"if",
"not",
"exists",
"(",
"report_path",
")",
":",
"makedirs",
"(",
"report_path",
")",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"for",
"folder",
"in",
"[",
"'val_predictions'",
",",
"'val_probs'",
"]",
":",
"if",
"not",
"exists",
"(",
"join",
"(",
"test_path",
",",
"folder",
")",
")",
":",
"makedirs",
"(",
"join",
"(",
"test_path",
",",
"folder",
")",
")",
"else",
":",
"for",
"folder",
"in",
"[",
"'predictions'",
",",
"'probs'",
"]",
":",
"if",
"not",
"exists",
"(",
"join",
"(",
"test_path",
",",
"folder",
")",
")",
":",
"makedirs",
"(",
"join",
"(",
"test_path",
",",
"folder",
")",
")",
"# Init validation container",
"all_f_preds",
"=",
"[",
"]",
"all_f_labels",
"=",
"[",
"]",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"for",
"i",
",",
"seq_frames",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"frames",
")",
":",
"all_f_preds",
".",
"append",
"(",
"[",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"_",
"in",
"seq_frames",
"]",
")",
"all_f_labels",
".",
"append",
"(",
"[",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"_",
"in",
"seq_frames",
"]",
")",
"#####################",
"# Network predictions",
"#####################",
"predictions",
"=",
"[",
"]",
"targets",
"=",
"[",
"]",
"test_epoch",
"=",
"0",
"t",
"=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"last_display",
"=",
"time",
".",
"time",
"(",
")",
"mean_dt",
"=",
"np",
".",
"zeros",
"(",
"1",
")",
"# Start test loop",
"while",
"True",
":",
"print",
"(",
"'Initialize workers'",
")",
"for",
"i",
",",
"batch",
"in",
"enumerate",
"(",
"test_loader",
")",
":",
"# New time",
"t",
"=",
"t",
"[",
"-",
"1",
":",
"]",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"if",
"i",
"==",
"0",
":",
"print",
"(",
"'Done in {:.1f}s'",
".",
"format",
"(",
"t",
"[",
"1",
"]",
"-",
"t",
"[",
"0",
"]",
")",
")",
"if",
"'cuda'",
"in",
"self",
".",
"device",
".",
"type",
":",
"batch",
".",
"to",
"(",
"self",
".",
"device",
")",
"# Forward pass",
"outputs",
"=",
"net",
"(",
"batch",
",",
"config",
")",
"# Get probs and labels",
"stk_probs",
"=",
"softmax",
"(",
"outputs",
")",
".",
"cpu",
"(",
")",
".",
"detach",
"(",
")",
".",
"numpy",
"(",
")",
"lengths",
"=",
"batch",
".",
"lengths",
"[",
"0",
"]",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"f_inds",
"=",
"batch",
".",
"frame_inds",
".",
"cpu",
"(",
")",
".",
"numpy",
"(",
")",
"r_inds_list",
"=",
"batch",
".",
"reproj_inds",
"r_mask_list",
"=",
"batch",
".",
"reproj_masks",
"labels_list",
"=",
"batch",
".",
"val_labels",
"if",
"'cuda'",
"in",
"self",
".",
"device",
".",
"type",
":",
"torch",
".",
"cuda",
".",
"synchronize",
"(",
"self",
".",
"device",
")",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"# Get predictions and labels per instance",
"# ***************************************",
"i0",
"=",
"0",
"for",
"b_i",
",",
"length",
"in",
"enumerate",
"(",
"lengths",
")",
":",
"# Get prediction",
"probs",
"=",
"stk_probs",
"[",
"i0",
":",
"i0",
"+",
"length",
"]",
"proj_inds",
"=",
"r_inds_list",
"[",
"b_i",
"]",
"proj_mask",
"=",
"r_mask_list",
"[",
"b_i",
"]",
"frame_labels",
"=",
"labels_list",
"[",
"b_i",
"]",
"s_ind",
"=",
"f_inds",
"[",
"b_i",
",",
"0",
"]",
"f_ind",
"=",
"f_inds",
"[",
"b_i",
",",
"1",
"]",
"# Project predictions on the frame points",
"proj_probs",
"=",
"probs",
"[",
"proj_inds",
"]",
"# Safe check if only one point:",
"if",
"proj_probs",
".",
"ndim",
"<",
"2",
":",
"proj_probs",
"=",
"np",
".",
"expand_dims",
"(",
"proj_probs",
",",
"0",
")",
"# Save probs in a binary file (uint8 format for lighter weight)",
"seq_name",
"=",
"test_loader",
".",
"dataset",
".",
"sequences",
"[",
"s_ind",
"]",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"folder",
"=",
"'val_probs'",
"pred_folder",
"=",
"'val_predictions'",
"else",
":",
"folder",
"=",
"'probs'",
"pred_folder",
"=",
"'predictions'",
"filename",
"=",
"'{:s}_{:07d}.npy'",
".",
"format",
"(",
"seq_name",
",",
"f_ind",
")",
"filepath",
"=",
"join",
"(",
"test_path",
",",
"folder",
",",
"filename",
")",
"if",
"exists",
"(",
"filepath",
")",
":",
"frame_probs_uint8",
"=",
"np",
".",
"load",
"(",
"filepath",
")",
"else",
":",
"frame_probs_uint8",
"=",
"np",
".",
"zeros",
"(",
"(",
"proj_mask",
".",
"shape",
"[",
"0",
"]",
",",
"nc_model",
")",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"frame_probs",
"=",
"frame_probs_uint8",
"[",
"proj_mask",
",",
":",
"]",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"/",
"255",
"frame_probs",
"=",
"test_smooth",
"*",
"frame_probs",
"+",
"(",
"1",
"-",
"test_smooth",
")",
"*",
"proj_probs",
"frame_probs_uint8",
"[",
"proj_mask",
",",
":",
"]",
"=",
"(",
"frame_probs",
"*",
"255",
")",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"np",
".",
"save",
"(",
"filepath",
",",
"frame_probs_uint8",
")",
"# Save some prediction in ply format for visual",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
":",
"# Insert false columns for ignored labels",
"frame_probs_uint8_bis",
"=",
"frame_probs_uint8",
".",
"copy",
"(",
")",
"for",
"l_ind",
",",
"label_value",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"frame_probs_uint8_bis",
"=",
"np",
".",
"insert",
"(",
"frame_probs_uint8_bis",
",",
"l_ind",
",",
"0",
",",
"axis",
"=",
"1",
")",
"# Predicted labels",
"frame_preds",
"=",
"test_loader",
".",
"dataset",
".",
"label_values",
"[",
"np",
".",
"argmax",
"(",
"frame_probs_uint8_bis",
",",
"axis",
"=",
"1",
")",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Save some of the frame pots",
"if",
"f_ind",
"%",
"20",
"==",
"0",
":",
"seq_path",
"=",
"join",
"(",
"test_loader",
".",
"dataset",
".",
"path",
",",
"'sequences'",
",",
"test_loader",
".",
"dataset",
".",
"sequences",
"[",
"s_ind",
"]",
")",
"velo_file",
"=",
"join",
"(",
"seq_path",
",",
"'velodyne'",
",",
"test_loader",
".",
"dataset",
".",
"frames",
"[",
"s_ind",
"]",
"[",
"f_ind",
"]",
"+",
"'.bin'",
")",
"frame_points",
"=",
"np",
".",
"fromfile",
"(",
"velo_file",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"frame_points",
"=",
"frame_points",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"4",
")",
")",
"predpath",
"=",
"join",
"(",
"test_path",
",",
"pred_folder",
",",
"filename",
"[",
":",
"-",
"4",
"]",
"+",
"'.ply'",
")",
"#pots = test_loader.dataset.f_potentials[s_ind][f_ind]",
"pots",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"if",
"pots",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"write_ply",
"(",
"predpath",
",",
"[",
"frame_points",
"[",
":",
",",
":",
"3",
"]",
",",
"frame_labels",
",",
"frame_preds",
",",
"pots",
"]",
",",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'gt'",
",",
"'pre'",
",",
"'pots'",
"]",
")",
"else",
":",
"write_ply",
"(",
"predpath",
",",
"[",
"frame_points",
"[",
":",
",",
":",
"3",
"]",
",",
"frame_labels",
",",
"frame_preds",
"]",
",",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'gt'",
",",
"'pre'",
"]",
")",
"# Also Save lbl probabilities",
"probpath",
"=",
"join",
"(",
"test_path",
",",
"folder",
",",
"filename",
"[",
":",
"-",
"4",
"]",
"+",
"'_probs.ply'",
")",
"lbl_names",
"=",
"[",
"test_loader",
".",
"dataset",
".",
"label_to_names",
"[",
"l",
"]",
"for",
"l",
"in",
"test_loader",
".",
"dataset",
".",
"label_values",
"if",
"l",
"not",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
"]",
"write_ply",
"(",
"probpath",
",",
"[",
"frame_points",
"[",
":",
",",
":",
"3",
"]",
",",
"frame_probs_uint8",
"]",
",",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
"]",
"+",
"lbl_names",
")",
"# keep frame preds in memory",
"all_f_preds",
"[",
"s_ind",
"]",
"[",
"f_ind",
"]",
"=",
"frame_preds",
"all_f_labels",
"[",
"s_ind",
"]",
"[",
"f_ind",
"]",
"=",
"frame_labels",
"else",
":",
"# Save some of the frame preds",
"if",
"f_inds",
"[",
"b_i",
",",
"1",
"]",
"%",
"100",
"==",
"0",
":",
"# Insert false columns for ignored labels",
"for",
"l_ind",
",",
"label_value",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"frame_probs_uint8",
"=",
"np",
".",
"insert",
"(",
"frame_probs_uint8",
",",
"l_ind",
",",
"0",
",",
"axis",
"=",
"1",
")",
"# Predicted labels",
"frame_preds",
"=",
"test_loader",
".",
"dataset",
".",
"label_values",
"[",
"np",
".",
"argmax",
"(",
"frame_probs_uint8",
",",
"axis",
"=",
"1",
")",
"]",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Load points",
"seq_path",
"=",
"join",
"(",
"test_loader",
".",
"dataset",
".",
"path",
",",
"'sequences'",
",",
"test_loader",
".",
"dataset",
".",
"sequences",
"[",
"s_ind",
"]",
")",
"velo_file",
"=",
"join",
"(",
"seq_path",
",",
"'velodyne'",
",",
"test_loader",
".",
"dataset",
".",
"frames",
"[",
"s_ind",
"]",
"[",
"f_ind",
"]",
"+",
"'.bin'",
")",
"frame_points",
"=",
"np",
".",
"fromfile",
"(",
"velo_file",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"frame_points",
"=",
"frame_points",
".",
"reshape",
"(",
"(",
"-",
"1",
",",
"4",
")",
")",
"predpath",
"=",
"join",
"(",
"test_path",
",",
"pred_folder",
",",
"filename",
"[",
":",
"-",
"4",
"]",
"+",
"'.ply'",
")",
"#pots = test_loader.dataset.f_potentials[s_ind][f_ind]",
"pots",
"=",
"np",
".",
"zeros",
"(",
"(",
"0",
",",
")",
")",
"if",
"pots",
".",
"shape",
"[",
"0",
"]",
">",
"0",
":",
"write_ply",
"(",
"predpath",
",",
"[",
"frame_points",
"[",
":",
",",
":",
"3",
"]",
",",
"frame_preds",
",",
"pots",
"]",
",",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'pre'",
",",
"'pots'",
"]",
")",
"else",
":",
"write_ply",
"(",
"predpath",
",",
"[",
"frame_points",
"[",
":",
",",
":",
"3",
"]",
",",
"frame_preds",
"]",
",",
"[",
"'x'",
",",
"'y'",
",",
"'z'",
",",
"'pre'",
"]",
")",
"# Stack all prediction for this epoch",
"i0",
"+=",
"length",
"# Average timing",
"t",
"+=",
"[",
"time",
".",
"time",
"(",
")",
"]",
"mean_dt",
"=",
"0.95",
"*",
"mean_dt",
"+",
"0.05",
"*",
"(",
"np",
".",
"array",
"(",
"t",
"[",
"1",
":",
"]",
")",
"-",
"np",
".",
"array",
"(",
"t",
"[",
":",
"-",
"1",
"]",
")",
")",
"# Display",
"if",
"(",
"t",
"[",
"-",
"1",
"]",
"-",
"last_display",
")",
">",
"1.0",
":",
"last_display",
"=",
"t",
"[",
"-",
"1",
"]",
"message",
"=",
"'e{:03d}-i{:04d} => {:.1f}% (timings : {:4.2f} {:4.2f} {:4.2f}) / pots {:d} => {:.1f}%'",
"min_pot",
"=",
"int",
"(",
"torch",
".",
"floor",
"(",
"torch",
".",
"min",
"(",
"test_loader",
".",
"dataset",
".",
"potentials",
")",
")",
")",
"pot_num",
"=",
"torch",
".",
"sum",
"(",
"test_loader",
".",
"dataset",
".",
"potentials",
">",
"min_pot",
"+",
"0.5",
")",
".",
"type",
"(",
"torch",
".",
"int32",
")",
".",
"item",
"(",
")",
"current_num",
"=",
"pot_num",
"+",
"(",
"i",
"+",
"1",
"-",
"config",
".",
"validation_size",
")",
"*",
"config",
".",
"val_batch_num",
"print",
"(",
"message",
".",
"format",
"(",
"test_epoch",
",",
"i",
",",
"100",
"*",
"i",
"/",
"config",
".",
"validation_size",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"0",
"]",
")",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"1",
"]",
")",
",",
"1000",
"*",
"(",
"mean_dt",
"[",
"2",
"]",
")",
",",
"min_pot",
",",
"100.0",
"*",
"current_num",
"/",
"len",
"(",
"test_loader",
".",
"dataset",
".",
"potentials",
")",
")",
")",
"# Update minimum od potentials",
"new_min",
"=",
"torch",
".",
"min",
"(",
"test_loader",
".",
"dataset",
".",
"potentials",
")",
"print",
"(",
"'Test epoch {:d}, end. Min potential = {:.1f}'",
".",
"format",
"(",
"test_epoch",
",",
"new_min",
")",
")",
"if",
"last_min",
"+",
"1",
"<",
"new_min",
":",
"# Update last_min",
"last_min",
"+=",
"1",
"if",
"test_loader",
".",
"dataset",
".",
"set",
"==",
"'validation'",
"and",
"last_min",
"%",
"1",
"==",
"0",
":",
"#####################################",
"# Results on the whole validation set",
"#####################################",
"# Confusions for our subparts of validation set",
"Confs",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"predictions",
")",
",",
"nc_tot",
",",
"nc_tot",
")",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"i",
",",
"(",
"preds",
",",
"truth",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"predictions",
",",
"targets",
")",
")",
":",
"# Confusions",
"Confs",
"[",
"i",
",",
":",
",",
":",
"]",
"=",
"fast_confusion",
"(",
"truth",
",",
"preds",
",",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"# Show vote results",
"print",
"(",
"'\\nCompute confusion'",
")",
"val_preds",
"=",
"[",
"]",
"val_labels",
"=",
"[",
"]",
"t1",
"=",
"time",
".",
"time",
"(",
")",
"for",
"i",
",",
"seq_frames",
"in",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"frames",
")",
":",
"val_preds",
"+=",
"[",
"np",
".",
"hstack",
"(",
"all_f_preds",
"[",
"i",
"]",
")",
"]",
"val_labels",
"+=",
"[",
"np",
".",
"hstack",
"(",
"all_f_labels",
"[",
"i",
"]",
")",
"]",
"val_preds",
"=",
"np",
".",
"hstack",
"(",
"val_preds",
")",
"val_labels",
"=",
"np",
".",
"hstack",
"(",
"val_labels",
")",
"t2",
"=",
"time",
".",
"time",
"(",
")",
"C_tot",
"=",
"fast_confusion",
"(",
"val_labels",
",",
"val_preds",
",",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
"t3",
"=",
"time",
".",
"time",
"(",
")",
"print",
"(",
"' Stacking time : {:.1f}s'",
".",
"format",
"(",
"t2",
"-",
"t1",
")",
")",
"print",
"(",
"'Confusion time : {:.1f}s'",
".",
"format",
"(",
"t3",
"-",
"t2",
")",
")",
"s1",
"=",
"'\\n'",
"for",
"cc",
"in",
"C_tot",
":",
"for",
"c",
"in",
"cc",
":",
"s1",
"+=",
"'{:7.0f} '",
".",
"format",
"(",
"c",
")",
"s1",
"+=",
"'\\n'",
"if",
"debug",
":",
"print",
"(",
"s1",
")",
"# Remove ignored labels from confusions",
"for",
"l_ind",
",",
"label_value",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"test_loader",
".",
"dataset",
".",
"label_values",
")",
")",
")",
":",
"if",
"label_value",
"in",
"test_loader",
".",
"dataset",
".",
"ignored_labels",
":",
"C_tot",
"=",
"np",
".",
"delete",
"(",
"C_tot",
",",
"l_ind",
",",
"axis",
"=",
"0",
")",
"C_tot",
"=",
"np",
".",
"delete",
"(",
"C_tot",
",",
"l_ind",
",",
"axis",
"=",
"1",
")",
"# Objects IoU",
"val_IoUs",
"=",
"IoU_from_confusions",
"(",
"C_tot",
")",
"# Compute IoUs",
"mIoU",
"=",
"np",
".",
"mean",
"(",
"val_IoUs",
")",
"s2",
"=",
"'{:5.2f} | '",
".",
"format",
"(",
"100",
"*",
"mIoU",
")",
"for",
"IoU",
"in",
"val_IoUs",
":",
"s2",
"+=",
"'{:5.2f} '",
".",
"format",
"(",
"100",
"*",
"IoU",
")",
"print",
"(",
"s2",
"+",
"'\\n'",
")",
"# Save a report",
"report_file",
"=",
"join",
"(",
"report_path",
",",
"'report_{:04d}.txt'",
".",
"format",
"(",
"int",
"(",
"np",
".",
"floor",
"(",
"last_min",
")",
")",
")",
")",
"str",
"=",
"'Report of the confusion and metrics\\n'",
"str",
"+=",
"'***********************************\\n\\n\\n'",
"str",
"+=",
"'Confusion matrix:\\n\\n'",
"str",
"+=",
"s1",
"str",
"+=",
"'\\nIoU values:\\n\\n'",
"str",
"+=",
"s2",
"str",
"+=",
"'\\n\\n'",
"with",
"open",
"(",
"report_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
")",
"test_epoch",
"+=",
"1",
"# Break when reaching number of desired votes",
"if",
"last_min",
">",
"num_votes",
":",
"break",
"return"
] | [
491,
4
] | [
799,
14
] | python | en | ['en', 'error', 'th'] | False |
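The core of the per-frame vote accumulation in slam_segmentation_test above stores probabilities on disk as uint8 to keep files small and blends successive votes with an exponential moving average. A minimal sketch of that update, extracted for clarity (variable names mirror the method; the standalone function itself is hypothetical):

import numpy as np
from os.path import exists

def update_frame_probs(filepath, proj_probs, proj_mask, n_classes, smooth=0.5):
    # Load the previous vote if one was saved, otherwise start from zeros.
    if exists(filepath):
        frame_probs_uint8 = np.load(filepath)
    else:
        frame_probs_uint8 = np.zeros((proj_mask.shape[0], n_classes), dtype=np.uint8)
    # Blend in float space: smooth * old vote + (1 - smooth) * current vote.
    old = frame_probs_uint8[proj_mask, :].astype(np.float32) / 255
    blended = smooth * old + (1 - smooth) * proj_probs
    # Quantize back to the 0-255 range before saving, as the method does.
    frame_probs_uint8[proj_mask, :] = (blended * 255).astype(np.uint8)
    np.save(filepath, frame_probs_uint8)
    return frame_probs_uint8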
to_html | (
fig,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
default_width="100%",
default_height="100%",
validate=True,
) |
Convert a figure to an HTML string representation.
Parameters
----------
fig:
Figure object or dict representing a figure
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. HTML files generated with this option are about 3MB
smaller than those generated with include_plotlyjs=True, but they
require an active internet connection in order to load the plotly.js
library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
Returns
-------
str
Representation of figure as an HTML div string
|
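As an illustration of the options documented above, a typical call might look like the following. This is a minimal usage sketch; it assumes to_html is importable from plotly.io and that plotly.graph_objects is available.

import plotly.graph_objects as go
from plotly.io import to_html

fig = go.Figure(data=[go.Scatter(y=[1, 3, 2])])

# Standalone page; plotly.js comes from the CDN (smaller file, needs internet).
page = to_html(fig, include_plotlyjs="cdn", full_html=True)

# Embeddable div only, for a host page that already loads plotly.js.
div = to_html(fig, include_plotlyjs=False, full_html=False)

with open("figure.html", "w") as f:
    f.write(page)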
Convert a figure to an HTML string representation. | def to_html(
fig,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
default_width="100%",
default_height="100%",
validate=True,
):
"""
Convert a figure to an HTML string representation.
Parameters
----------
fig:
Figure object or dict representing a figure
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. HTML files generated with this option are about 3MB
smaller than those generated with include_plotlyjs=True, but they
require an active internet connection in order to load the plotly.js
library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
Returns
-------
str
Representation of figure as an HTML div string
"""
# ## Validate figure ##
fig_dict = validate_coerce_fig_to_dict(fig, validate)
# ## Generate div id ##
plotdivid = str(uuid.uuid4())
# ## Serialize figure ##
jdata = json.dumps(
fig_dict.get("data", []), cls=utils.PlotlyJSONEncoder, sort_keys=True
)
jlayout = json.dumps(
fig_dict.get("layout", {}), cls=utils.PlotlyJSONEncoder, sort_keys=True
)
if fig_dict.get("frames", None):
jframes = json.dumps(fig_dict.get("frames", []), cls=utils.PlotlyJSONEncoder)
else:
jframes = None
# ## Serialize figure config ##
config = _get_jconfig(config)
# Set responsive
config.setdefault("responsive", True)
# Get div width/height
layout_dict = fig_dict.get("layout", {})
template_dict = fig_dict.get("layout", {}).get("template", {}).get("layout", {})
div_width = layout_dict.get("width", template_dict.get("width", default_width))
div_height = layout_dict.get("height", template_dict.get("height", default_height))
# Add 'px' suffix to numeric widths
try:
float(div_width)
except (ValueError, TypeError):
pass
else:
div_width = str(div_width) + "px"
try:
float(div_height)
except (ValueError, TypeError):
pass
else:
div_height = str(div_height) + "px"
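# The try/except/else idiom above appends 'px' only when the value parses as a
# number; css strings such as '500px' or '100%' raise in float() and pass through unchanged.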
# ## Get platform URL ##
if config.get("showLink", False) or config.get("showSendToCloud", False):
# Figure is going to include a Chart Studio link or send-to-cloud button,
# So we need to configure the PLOTLYENV.BASE_URL property
base_url_line = """
window.PLOTLYENV.BASE_URL='{plotly_platform_url}';\
""".format(
plotly_platform_url=config.get("plotlyServerURL", "https://plot.ly")
)
else:
# Figure is not going to include a Chart Studio link or send-to-cloud button,
# In this case we don't want https://plot.ly to show up anywhere in the HTML
# output
config.pop("plotlyServerURL", None)
config.pop("linkText", None)
config.pop("showLink", None)
base_url_line = ""
# ## Build script body ##
# This is the part that actually calls Plotly.js
# build post script snippet(s)
then_post_script = ""
if post_script:
if not isinstance(post_script, (list, tuple)):
post_script = [post_script]
for ps in post_script:
then_post_script += """.then(function(){{
{post_script}
}})""".format(
post_script=ps.replace("{plot_id}", plotdivid)
)
then_addframes = ""
then_animate = ""
if jframes:
then_addframes = """.then(function(){{
Plotly.addFrames('{id}', {frames});
}})""".format(
id=plotdivid, frames=jframes
)
if auto_play:
if animation_opts:
animation_opts_arg = ", " + json.dumps(animation_opts)
else:
animation_opts_arg = ""
then_animate = """.then(function(){{
Plotly.animate('{id}', null{animation_opts});
}})""".format(
id=plotdivid, animation_opts=animation_opts_arg
)
# Serialize config dict to JSON
jconfig = json.dumps(config)
script = """
if (document.getElementById("{id}")) {{
Plotly.newPlot(
'{id}',
{data},
{layout},
{config}
){then_addframes}{then_animate}{then_post_script}
}}""".format(
id=plotdivid,
data=jdata,
layout=jlayout,
config=jconfig,
then_addframes=then_addframes,
then_animate=then_animate,
then_post_script=then_post_script,
)
# ## Handle loading/initializing plotly.js ##
include_plotlyjs_orig = include_plotlyjs
if isinstance(include_plotlyjs, six.string_types):
include_plotlyjs = include_plotlyjs.lower()
# Start/end of requirejs block (if any)
require_start = ""
require_end = ""
# Init and load
load_plotlyjs = ""
# Init plotlyjs. This block needs to run before plotly.js is loaded in
# order for MathJax configuration to work properly
if include_plotlyjs == "require":
require_start = 'require(["plotly"], function(Plotly) {'
require_end = "});"
elif include_plotlyjs == "cdn":
load_plotlyjs = """\
{win_config}
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>\
""".format(
win_config=_window_plotly_config
)
elif include_plotlyjs == "directory":
load_plotlyjs = """\
{win_config}
<script src="plotly.min.js"></script>\
""".format(
win_config=_window_plotly_config
)
elif isinstance(include_plotlyjs, six.string_types) and include_plotlyjs.endswith(
".js"
):
load_plotlyjs = """\
{win_config}
<script src="{url}"></script>\
""".format(
win_config=_window_plotly_config, url=include_plotlyjs_orig
)
elif include_plotlyjs:
load_plotlyjs = """\
{win_config}
<script type="text/javascript">{plotlyjs}</script>\
""".format(
win_config=_window_plotly_config, plotlyjs=get_plotlyjs()
)
# ## Handle loading/initializing MathJax ##
include_mathjax_orig = include_mathjax
if isinstance(include_mathjax, six.string_types):
include_mathjax = include_mathjax.lower()
mathjax_template = """\
<script src="{url}?config=TeX-AMS-MML_SVG"></script>"""
if include_mathjax == "cdn":
mathjax_script = (
mathjax_template.format(
url=(
"https://cdnjs.cloudflare.com" "/ajax/libs/mathjax/2.7.5/MathJax.js"
)
)
+ _mathjax_config
)
elif isinstance(include_mathjax, six.string_types) and include_mathjax.endswith(
".js"
):
mathjax_script = (
mathjax_template.format(url=include_mathjax_orig) + _mathjax_config
)
elif not include_mathjax:
mathjax_script = ""
else:
raise ValueError(
"""\
Invalid value of type {typ} received as the include_mathjax argument
Received value: {val}
include_mathjax may be specified as False, 'cdn', or a string ending with '.js'
""".format(
typ=type(include_mathjax), val=repr(include_mathjax)
)
)
plotly_html_div = """\
<div>
{mathjax_script}
{load_plotlyjs}
<div id="{id}" class="plotly-graph-div" \
style="height:{height}; width:{width};"></div>
<script type="text/javascript">
{require_start}
window.PLOTLYENV=window.PLOTLYENV || {{}};{base_url_line}
{script};
{require_end}
</script>
</div>""".format(
mathjax_script=mathjax_script,
load_plotlyjs=load_plotlyjs,
id=plotdivid,
width=div_width,
height=div_height,
base_url_line=base_url_line,
require_start=require_start,
script=script,
require_end=require_end,
)
if full_html:
return """\
<html>
<head><meta charset="utf-8" /></head>
<body>
{div}
</body>
</html>""".format(
div=plotly_html_div
)
else:
return plotly_html_div | [
"def",
"to_html",
"(",
"fig",
",",
"config",
"=",
"None",
",",
"auto_play",
"=",
"True",
",",
"include_plotlyjs",
"=",
"True",
",",
"include_mathjax",
"=",
"False",
",",
"post_script",
"=",
"None",
",",
"full_html",
"=",
"True",
",",
"animation_opts",
"=",
"None",
",",
"default_width",
"=",
"\"100%\"",
",",
"default_height",
"=",
"\"100%\"",
",",
"validate",
"=",
"True",
",",
")",
":",
"# ## Validate figure ##",
"fig_dict",
"=",
"validate_coerce_fig_to_dict",
"(",
"fig",
",",
"validate",
")",
"# ## Generate div id ##",
"plotdivid",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"# ## Serialize figure ##",
"jdata",
"=",
"json",
".",
"dumps",
"(",
"fig_dict",
".",
"get",
"(",
"\"data\"",
",",
"[",
"]",
")",
",",
"cls",
"=",
"utils",
".",
"PlotlyJSONEncoder",
",",
"sort_keys",
"=",
"True",
")",
"jlayout",
"=",
"json",
".",
"dumps",
"(",
"fig_dict",
".",
"get",
"(",
"\"layout\"",
",",
"{",
"}",
")",
",",
"cls",
"=",
"utils",
".",
"PlotlyJSONEncoder",
",",
"sort_keys",
"=",
"True",
")",
"if",
"fig_dict",
".",
"get",
"(",
"\"frames\"",
",",
"None",
")",
":",
"jframes",
"=",
"json",
".",
"dumps",
"(",
"fig_dict",
".",
"get",
"(",
"\"frames\"",
",",
"[",
"]",
")",
",",
"cls",
"=",
"utils",
".",
"PlotlyJSONEncoder",
")",
"else",
":",
"jframes",
"=",
"None",
"# ## Serialize figure config ##",
"config",
"=",
"_get_jconfig",
"(",
"config",
")",
"# Set responsive",
"config",
".",
"setdefault",
"(",
"\"responsive\"",
",",
"True",
")",
"# Get div width/height",
"layout_dict",
"=",
"fig_dict",
".",
"get",
"(",
"\"layout\"",
",",
"{",
"}",
")",
"template_dict",
"=",
"fig_dict",
".",
"get",
"(",
"\"layout\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"template\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"layout\"",
",",
"{",
"}",
")",
"div_width",
"=",
"layout_dict",
".",
"get",
"(",
"\"width\"",
",",
"template_dict",
".",
"get",
"(",
"\"width\"",
",",
"default_width",
")",
")",
"div_height",
"=",
"layout_dict",
".",
"get",
"(",
"\"height\"",
",",
"template_dict",
".",
"get",
"(",
"\"height\"",
",",
"default_height",
")",
")",
"# Add 'px' suffix to numeric widths",
"try",
":",
"float",
"(",
"div_width",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"else",
":",
"div_width",
"=",
"str",
"(",
"div_width",
")",
"+",
"\"px\"",
"try",
":",
"float",
"(",
"div_height",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"pass",
"else",
":",
"div_height",
"=",
"str",
"(",
"div_height",
")",
"+",
"\"px\"",
"# ## Get platform URL ##",
"if",
"config",
".",
"get",
"(",
"\"showLink\"",
",",
"False",
")",
"or",
"config",
".",
"get",
"(",
"\"showSendToCloud\"",
",",
"False",
")",
":",
"# Figure is going to include a Chart Studio link or send-to-cloud button,",
"# So we need to configure the PLOTLYENV.BASE_URL property",
"base_url_line",
"=",
"\"\"\"\n window.PLOTLYENV.BASE_URL='{plotly_platform_url}';\\\n\"\"\"",
".",
"format",
"(",
"plotly_platform_url",
"=",
"config",
".",
"get",
"(",
"\"plotlyServerURL\"",
",",
"\"https://plot.ly\"",
")",
")",
"else",
":",
"# Figure is not going to include a Chart Studio link or send-to-cloud button,",
"# In this case we don't want https://plot.ly to show up anywhere in the HTML",
"# output",
"config",
".",
"pop",
"(",
"\"plotlyServerURL\"",
",",
"None",
")",
"config",
".",
"pop",
"(",
"\"linkText\"",
",",
"None",
")",
"config",
".",
"pop",
"(",
"\"showLink\"",
",",
"None",
")",
"base_url_line",
"=",
"\"\"",
"# ## Build script body ##",
"# This is the part that actually calls Plotly.js",
"# build post script snippet(s)",
"then_post_script",
"=",
"\"\"",
"if",
"post_script",
":",
"if",
"not",
"isinstance",
"(",
"post_script",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"post_script",
"=",
"[",
"post_script",
"]",
"for",
"ps",
"in",
"post_script",
":",
"then_post_script",
"+=",
"\"\"\".then(function(){{\n {post_script}\n }})\"\"\"",
".",
"format",
"(",
"post_script",
"=",
"ps",
".",
"replace",
"(",
"\"{plot_id}\"",
",",
"plotdivid",
")",
")",
"then_addframes",
"=",
"\"\"",
"then_animate",
"=",
"\"\"",
"if",
"jframes",
":",
"then_addframes",
"=",
"\"\"\".then(function(){{\n Plotly.addFrames('{id}', {frames});\n }})\"\"\"",
".",
"format",
"(",
"id",
"=",
"plotdivid",
",",
"frames",
"=",
"jframes",
")",
"if",
"auto_play",
":",
"if",
"animation_opts",
":",
"animation_opts_arg",
"=",
"\", \"",
"+",
"json",
".",
"dumps",
"(",
"animation_opts",
")",
"else",
":",
"animation_opts_arg",
"=",
"\"\"",
"then_animate",
"=",
"\"\"\".then(function(){{\n Plotly.animate('{id}', null{animation_opts});\n }})\"\"\"",
".",
"format",
"(",
"id",
"=",
"plotdivid",
",",
"animation_opts",
"=",
"animation_opts_arg",
")",
"# Serialize config dict to JSON",
"jconfig",
"=",
"json",
".",
"dumps",
"(",
"config",
")",
"script",
"=",
"\"\"\"\n if (document.getElementById(\"{id}\")) {{\n Plotly.newPlot(\n '{id}',\n {data},\n {layout},\n {config}\n ){then_addframes}{then_animate}{then_post_script}\n }}\"\"\"",
".",
"format",
"(",
"id",
"=",
"plotdivid",
",",
"data",
"=",
"jdata",
",",
"layout",
"=",
"jlayout",
",",
"config",
"=",
"jconfig",
",",
"then_addframes",
"=",
"then_addframes",
",",
"then_animate",
"=",
"then_animate",
",",
"then_post_script",
"=",
"then_post_script",
",",
")",
"# ## Handle loading/initializing plotly.js ##",
"include_plotlyjs_orig",
"=",
"include_plotlyjs",
"if",
"isinstance",
"(",
"include_plotlyjs",
",",
"six",
".",
"string_types",
")",
":",
"include_plotlyjs",
"=",
"include_plotlyjs",
".",
"lower",
"(",
")",
"# Start/end of requirejs block (if any)",
"require_start",
"=",
"\"\"",
"require_end",
"=",
"\"\"",
"# Init and load",
"load_plotlyjs",
"=",
"\"\"",
"# Init plotlyjs. This block needs to run before plotly.js is loaded in",
"# order for MathJax configuration to work properly",
"if",
"include_plotlyjs",
"==",
"\"require\"",
":",
"require_start",
"=",
"'require([\"plotly\"], function(Plotly) {'",
"require_end",
"=",
"\"});\"",
"elif",
"include_plotlyjs",
"==",
"\"cdn\"",
":",
"load_plotlyjs",
"=",
"\"\"\"\\\n {win_config}\n <script src=\"https://cdn.plot.ly/plotly-latest.min.js\"></script>\\\n \"\"\"",
".",
"format",
"(",
"win_config",
"=",
"_window_plotly_config",
")",
"elif",
"include_plotlyjs",
"==",
"\"directory\"",
":",
"load_plotlyjs",
"=",
"\"\"\"\\\n {win_config}\n <script src=\"plotly.min.js\"></script>\\\n \"\"\"",
".",
"format",
"(",
"win_config",
"=",
"_window_plotly_config",
")",
"elif",
"isinstance",
"(",
"include_plotlyjs",
",",
"six",
".",
"string_types",
")",
"and",
"include_plotlyjs",
".",
"endswith",
"(",
"\".js\"",
")",
":",
"load_plotlyjs",
"=",
"\"\"\"\\\n {win_config}\n <script src=\"{url}\"></script>\\\n \"\"\"",
".",
"format",
"(",
"win_config",
"=",
"_window_plotly_config",
",",
"url",
"=",
"include_plotlyjs_orig",
")",
"elif",
"include_plotlyjs",
":",
"load_plotlyjs",
"=",
"\"\"\"\\\n {win_config}\n <script type=\"text/javascript\">{plotlyjs}</script>\\\n \"\"\"",
".",
"format",
"(",
"win_config",
"=",
"_window_plotly_config",
",",
"plotlyjs",
"=",
"get_plotlyjs",
"(",
")",
")",
"# ## Handle loading/initializing MathJax ##",
"include_mathjax_orig",
"=",
"include_mathjax",
"if",
"isinstance",
"(",
"include_mathjax",
",",
"six",
".",
"string_types",
")",
":",
"include_mathjax",
"=",
"include_mathjax",
".",
"lower",
"(",
")",
"mathjax_template",
"=",
"\"\"\"\\\n <script src=\"{url}?config=TeX-AMS-MML_SVG\"></script>\"\"\"",
"if",
"include_mathjax",
"==",
"\"cdn\"",
":",
"mathjax_script",
"=",
"(",
"mathjax_template",
".",
"format",
"(",
"url",
"=",
"(",
"\"https://cdnjs.cloudflare.com\"",
"\"/ajax/libs/mathjax/2.7.5/MathJax.js\"",
")",
")",
"+",
"_mathjax_config",
")",
"elif",
"isinstance",
"(",
"include_mathjax",
",",
"six",
".",
"string_types",
")",
"and",
"include_mathjax",
".",
"endswith",
"(",
"\".js\"",
")",
":",
"mathjax_script",
"=",
"(",
"mathjax_template",
".",
"format",
"(",
"url",
"=",
"include_mathjax_orig",
")",
"+",
"_mathjax_config",
")",
"elif",
"not",
"include_mathjax",
":",
"mathjax_script",
"=",
"\"\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"\"\"\\\nInvalid value of type {typ} received as the include_mathjax argument\n Received value: {val}\n\ninclude_mathjax may be specified as False, 'cdn', or a string ending with '.js' \n \"\"\"",
".",
"format",
"(",
"typ",
"=",
"type",
"(",
"include_mathjax",
")",
",",
"val",
"=",
"repr",
"(",
"include_mathjax",
")",
")",
")",
"plotly_html_div",
"=",
"\"\"\"\\\n<div>\n {mathjax_script}\n {load_plotlyjs}\n <div id=\"{id}\" class=\"plotly-graph-div\" \\\nstyle=\"height:{height}; width:{width};\"></div>\n <script type=\"text/javascript\">\n {require_start}\n window.PLOTLYENV=window.PLOTLYENV || {{}};{base_url_line}\n {script};\n {require_end}\n </script>\n </div>\"\"\"",
".",
"format",
"(",
"mathjax_script",
"=",
"mathjax_script",
",",
"load_plotlyjs",
"=",
"load_plotlyjs",
",",
"id",
"=",
"plotdivid",
",",
"width",
"=",
"div_width",
",",
"height",
"=",
"div_height",
",",
"base_url_line",
"=",
"base_url_line",
",",
"require_start",
"=",
"require_start",
",",
"script",
"=",
"script",
",",
"require_end",
"=",
"require_end",
",",
")",
"if",
"full_html",
":",
"return",
"\"\"\"\\\n<html>\n<head><meta charset=\"utf-8\" /></head>\n<body>\n {div}\n</body>\n</html>\"\"\"",
".",
"format",
"(",
"div",
"=",
"plotly_html_div",
")",
"else",
":",
"return",
"plotly_html_div"
] | [
25,
0
] | [
376,
30
] | python | en | ['en', 'error', 'th'] | False |
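A minimal usage sketch for the `to_html` function recorded above (illustrative only; it assumes a standard plotly installation where this function is exposed as `plotly.io.to_html`, and the figure contents are made up):

# Illustrative sketch: render a figure to an HTML string (assumes plotly is installed).
import plotly.graph_objects as go
import plotly.io as pio

fig = go.Figure(data=[go.Scatter(x=[1, 2, 3], y=[4, 1, 7])])

# 'cdn' keeps the output small but needs internet access to load plotly.js;
# full_html=True wraps the plotly-graph-div in a complete <html> document.
html_str = pio.to_html(fig, include_plotlyjs="cdn", full_html=True)
print(html_str[:60])  # begins with the standalone "<html>" document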
write_html | (
fig,
file,
config=None,
auto_play=True,
include_plotlyjs=True,
include_mathjax=False,
post_script=None,
full_html=True,
animation_opts=None,
validate=True,
default_width="100%",
default_height="100%",
auto_open=False,
) |
Write a figure to an HTML file representation
Parameters
----------
fig:
Figure object or dict representing a figure
file: str or writeable
A string representing a local file path or a writeable object
(e.g. an open file descriptor)
config: dict or None (default None)
Plotly.js figure config options
auto_play: bool (default=True)
Whether to automatically start the animation sequence on page load
if the figure contains frames. Has no effect if the figure does not
contain frames.
include_plotlyjs: bool or string (default True)
Specifies how the plotly.js library is included/loaded in the output
div string.
If True, a script tag containing the plotly.js source code (~3MB)
is included in the output. HTML files generated with this option are
fully self-contained and can be used offline.
If 'cdn', a script tag that references the plotly.js CDN is included
in the output. HTML files generated with this option are about 3MB
smaller than those generated with include_plotlyjs=True, but they
require an active internet connection in order to load the plotly.js
library.
If 'directory', a script tag is included that references an external
plotly.min.js bundle that is assumed to reside in the same
directory as the HTML file. If `file` is a string to a local file
path and `full_html` is True, then the plotly.min.js bundle is copied
into the directory of the resulting HTML file. If a file named
plotly.min.js already exists in the output directory then this file
is left unmodified and no copy is performed. HTML files generated
with this option can be used offline, but they require a copy of
the plotly.min.js bundle in the same directory. This option is
useful when many figures will be saved as HTML files in the same
directory because the plotly.js source code will be included only
once per output directory, rather than once per output file.
If 'require', Plotly.js is loaded using require.js. This option
assumes that require.js is globally available and that it has been
globally configured to know how to find Plotly.js as 'plotly'.
This option is not advised when full_html=True as it will result
in a non-functional html file.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point
the resulting HTML file to an alternative CDN or local bundle.
If False, no script tag referencing plotly.js is included. This is
useful when the resulting div string will be placed inside an HTML
document that already loads plotly.js. This option is not advised
when full_html=True as it will result in a non-functional html file.
include_mathjax: bool or string (default False)
Specifies how the MathJax.js library is included in the output html
div string. MathJax is required in order to display labels
with LaTeX typesetting.
If False, no script tag referencing MathJax.js will be included in the
output.
If 'cdn', a script tag that references a MathJax CDN location will be
included in the output. HTML div strings generated with this option
will be able to display LaTeX typesetting as long as internet access
is available.
If a string that ends in '.js', a script tag is included that
references the specified path. This approach can be used to point the
resulting HTML div string to an alternative CDN.
post_script: str or list or None (default None)
JavaScript snippet(s) to be included in the resulting div just after
plot creation. The string(s) may include '{plot_id}' placeholders
that will then be replaced by the `id` of the div element that the
plotly.js figure is associated with. One application for this script
is to install custom plotly.js event handlers.
full_html: bool (default True)
If True, produce a string containing a complete HTML document
starting with an <html> tag. If False, produce a string containing
a single <div> element.
animation_opts: dict or None (default None)
dict of custom animation parameters to be passed to the function
Plotly.animate in Plotly.js. See
https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
for available options. Has no effect if the figure does not contain
frames, or auto_play is False.
default_width, default_height: number or str (default '100%')
The default figure width/height to use if the provided figure does not
specify its own layout.width/layout.height property. May be
specified in pixels as an integer (e.g. 500), or as a css width style
string (e.g. '500px', '100%').
validate: bool (default True)
True if the figure should be validated before being converted to
JSON, False otherwise.
auto_open: bool (default False)
If True, open the saved file in a web browser after saving.
This argument only applies if `full_html` is True.
Returns
-------
None
|
Write a figure to an HTML file representation | def write_html(
    fig,
    file,
    config=None,
    auto_play=True,
    include_plotlyjs=True,
    include_mathjax=False,
    post_script=None,
    full_html=True,
    animation_opts=None,
    validate=True,
    default_width="100%",
    default_height="100%",
    auto_open=False,
):
    """
    Write a figure to an HTML file representation

    Parameters
    ----------
    fig:
        Figure object or dict representing a figure
    file: str or writeable
        A string representing a local file path or a writeable object
        (e.g. an open file descriptor)
    config: dict or None (default None)
        Plotly.js figure config options
    auto_play: bool (default=True)
        Whether to automatically start the animation sequence on page load
        if the figure contains frames. Has no effect if the figure does not
        contain frames.
    include_plotlyjs: bool or string (default True)
        Specifies how the plotly.js library is included/loaded in the output
        div string.

        If True, a script tag containing the plotly.js source code (~3MB)
        is included in the output. HTML files generated with this option are
        fully self-contained and can be used offline.

        If 'cdn', a script tag that references the plotly.js CDN is included
        in the output. HTML files generated with this option are about 3MB
        smaller than those generated with include_plotlyjs=True, but they
        require an active internet connection in order to load the plotly.js
        library.

        If 'directory', a script tag is included that references an external
        plotly.min.js bundle that is assumed to reside in the same
        directory as the HTML file. If `file` is a string to a local file
        path and `full_html` is True, then the plotly.min.js bundle is copied
        into the directory of the resulting HTML file. If a file named
        plotly.min.js already exists in the output directory then this file
        is left unmodified and no copy is performed. HTML files generated
        with this option can be used offline, but they require a copy of
        the plotly.min.js bundle in the same directory. This option is
        useful when many figures will be saved as HTML files in the same
        directory because the plotly.js source code will be included only
        once per output directory, rather than once per output file.

        If 'require', Plotly.js is loaded using require.js. This option
        assumes that require.js is globally available and that it has been
        globally configured to know how to find Plotly.js as 'plotly'.
        This option is not advised when full_html=True as it will result
        in a non-functional html file.

        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point
        the resulting HTML file to an alternative CDN or local bundle.

        If False, no script tag referencing plotly.js is included. This is
        useful when the resulting div string will be placed inside an HTML
        document that already loads plotly.js. This option is not advised
        when full_html=True as it will result in a non-functional html file.
    include_mathjax: bool or string (default False)
        Specifies how the MathJax.js library is included in the output html
        div string. MathJax is required in order to display labels
        with LaTeX typesetting.

        If False, no script tag referencing MathJax.js will be included in the
        output.

        If 'cdn', a script tag that references a MathJax CDN location will be
        included in the output. HTML div strings generated with this option
        will be able to display LaTeX typesetting as long as internet access
        is available.

        If a string that ends in '.js', a script tag is included that
        references the specified path. This approach can be used to point the
        resulting HTML div string to an alternative CDN.
    post_script: str or list or None (default None)
        JavaScript snippet(s) to be included in the resulting div just after
        plot creation. The string(s) may include '{plot_id}' placeholders
        that will then be replaced by the `id` of the div element that the
        plotly.js figure is associated with. One application for this script
        is to install custom plotly.js event handlers.
    full_html: bool (default True)
        If True, produce a string containing a complete HTML document
        starting with an <html> tag. If False, produce a string containing
        a single <div> element.
    animation_opts: dict or None (default None)
        dict of custom animation parameters to be passed to the function
        Plotly.animate in Plotly.js. See
        https://github.com/plotly/plotly.js/blob/master/src/plots/animation_attributes.js
        for available options. Has no effect if the figure does not contain
        frames, or auto_play is False.
    default_width, default_height: number or str (default '100%')
        The default figure width/height to use if the provided figure does not
        specify its own layout.width/layout.height property. May be
        specified in pixels as an integer (e.g. 500), or as a css width style
        string (e.g. '500px', '100%').
    validate: bool (default True)
        True if the figure should be validated before being converted to
        JSON, False otherwise.
    auto_open: bool (default False)
        If True, open the saved file in a web browser after saving.
        This argument only applies if `full_html` is True.

    Returns
    -------
    None
    """
    # Build HTML string
    html_str = to_html(
        fig,
        config=config,
        auto_play=auto_play,
        include_plotlyjs=include_plotlyjs,
        include_mathjax=include_mathjax,
        post_script=post_script,
        full_html=full_html,
        animation_opts=animation_opts,
        default_width=default_width,
        default_height=default_height,
        validate=validate,
    )

    # Check if file is a string
    file_is_str = isinstance(file, six.string_types)

    # Write HTML string
    if file_is_str:
        with open(file, "w") as f:
            f.write(html_str)
    else:
        file.write(html_str)

    # Check if we should copy plotly.min.js to output directory
    if file_is_str and full_html and include_plotlyjs == "directory":
        bundle_path = os.path.join(os.path.dirname(file), "plotly.min.js")

        if not os.path.exists(bundle_path):
            with open(bundle_path, "w") as f:
                f.write(get_plotlyjs())

    # Handle auto_open
    if file_is_str and full_html and auto_open:
        url = "file://" + os.path.abspath(file)
        webbrowser.open(url) | [
"def",
"write_html",
"(",
"fig",
",",
"file",
",",
"config",
"=",
"None",
",",
"auto_play",
"=",
"True",
",",
"include_plotlyjs",
"=",
"True",
",",
"include_mathjax",
"=",
"False",
",",
"post_script",
"=",
"None",
",",
"full_html",
"=",
"True",
",",
"animation_opts",
"=",
"None",
",",
"validate",
"=",
"True",
",",
"default_width",
"=",
"\"100%\"",
",",
"default_height",
"=",
"\"100%\"",
",",
"auto_open",
"=",
"False",
",",
")",
":",
"# Build HTML string",
"html_str",
"=",
"to_html",
"(",
"fig",
",",
"config",
"=",
"config",
",",
"auto_play",
"=",
"auto_play",
",",
"include_plotlyjs",
"=",
"include_plotlyjs",
",",
"include_mathjax",
"=",
"include_mathjax",
",",
"post_script",
"=",
"post_script",
",",
"full_html",
"=",
"full_html",
",",
"animation_opts",
"=",
"animation_opts",
",",
"default_width",
"=",
"default_width",
",",
"default_height",
"=",
"default_height",
",",
"validate",
"=",
"validate",
",",
")",
"# Check if file is a string",
"file_is_str",
"=",
"isinstance",
"(",
"file",
",",
"six",
".",
"string_types",
")",
"# Write HTML string",
"if",
"file_is_str",
":",
"with",
"open",
"(",
"file",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"html_str",
")",
"else",
":",
"file",
".",
"write",
"(",
"html_str",
")",
"# Check if we should copy plotly.min.js to output directory",
"if",
"file_is_str",
"and",
"full_html",
"and",
"include_plotlyjs",
"==",
"\"directory\"",
":",
"bundle_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"file",
")",
",",
"\"plotly.min.js\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"bundle_path",
")",
":",
"with",
"open",
"(",
"bundle_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"get_plotlyjs",
"(",
")",
")",
"# Handle auto_open",
"if",
"file_is_str",
"and",
"full_html",
"and",
"auto_open",
":",
"url",
"=",
"\"file://\"",
"+",
"os",
".",
"path",
".",
"abspath",
"(",
"file",
")",
"webbrowser",
".",
"open",
"(",
"url",
")"
] | [
379,
0
] | [
542,
28
] | python | en | ['en', 'error', 'th'] | False |
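A companion sketch for the `write_html` record above (again illustrative; `plotly.io.write_html` is assumed to be the public entry point, and the post_script snippet is a hypothetical example):

# Illustrative sketch: write a figure to disk (assumes plotly is installed).
import plotly.graph_objects as go
import plotly.io as pio

fig = go.Figure(data=[go.Bar(x=["a", "b"], y=[2, 5])])

# 'directory' copies plotly.min.js next to the output file on first use,
# so many HTML files written to the same folder share one ~3MB bundle.
# The '{plot_id}' placeholder in post_script is replaced with the div id.
pio.write_html(
    fig,
    "figure.html",
    include_plotlyjs="directory",
    post_script="console.log('rendered {plot_id}');",
    auto_open=False,
)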