Search is not available for this dataset
identifier
stringlengths 1
155
| parameters
stringlengths 2
6.09k
| docstring
stringlengths 11
63.4k
| docstring_summary
stringlengths 0
63.4k
| function
stringlengths 29
99.8k
| function_tokens
sequence | start_point
sequence | end_point
sequence | language
stringclasses 1
value | docstring_language
stringlengths 2
7
| docstring_language_predictions
stringlengths 18
23
| is_langid_reliable
stringclasses 2
values |
---|---|---|---|---|---|---|---|---|---|---|---|
scopedef_t.typedefs | (
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None) | returns a set of typedef declarations, that are matched
defined criteria | returns a set of typedef declarations, that are matched
defined criteria | def typedefs(
self,
name=None,
function=None,
header_dir=None,
header_file=None,
recursive=None,
allow_empty=None):
"""returns a set of typedef declarations, that are matched
defined criteria"""
return (
self._find_multiple(
self._impl_matchers[scopedef_t.typedef],
name=name,
function=function,
decl_type=self._impl_decl_types[
scopedef_t.typedef],
header_dir=header_dir,
header_file=header_file,
recursive=recursive,
allow_empty=allow_empty)
) | [
"def",
"typedefs",
"(",
"self",
",",
"name",
"=",
"None",
",",
"function",
"=",
"None",
",",
"header_dir",
"=",
"None",
",",
"header_file",
"=",
"None",
",",
"recursive",
"=",
"None",
",",
"allow_empty",
"=",
"None",
")",
":",
"return",
"(",
"self",
".",
"_find_multiple",
"(",
"self",
".",
"_impl_matchers",
"[",
"scopedef_t",
".",
"typedef",
"]",
",",
"name",
"=",
"name",
",",
"function",
"=",
"function",
",",
"decl_type",
"=",
"self",
".",
"_impl_decl_types",
"[",
"scopedef_t",
".",
"typedef",
"]",
",",
"header_dir",
"=",
"header_dir",
",",
"header_file",
"=",
"header_file",
",",
"recursive",
"=",
"recursive",
",",
"allow_empty",
"=",
"allow_empty",
")",
")"
] | [
1186,
4
] | [
1207,
9
] | python | en | ['en', 'en', 'en'] | True |
scopedef_t.__getitem__ | (self, name_or_function) |
Allow simple name based find of declarations. Internally just calls
`decls` method.
:param name_or_function: Name of `decl` to lookup or finder function.
|
Allow simple name based find of declarations. Internally just calls
`decls` method.
:param name_or_function: Name of `decl` to lookup or finder function.
| def __getitem__(self, name_or_function):
"""
Allow simple name based find of declarations. Internally just calls
`decls` method.
:param name_or_function: Name of `decl` to lookup or finder function.
"""
return self.decls(name_or_function) | [
"def",
"__getitem__",
"(",
"self",
",",
"name_or_function",
")",
":",
"return",
"self",
".",
"decls",
"(",
"name_or_function",
")"
] | [
1209,
4
] | [
1215,
43
] | python | en | ['en', 'error', 'th'] | False |
colored | (text, color=None, on_color=None, attrs=None) | Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
| Colorize text. | def colored(text, color=None, on_color=None, attrs=None):
"""Colorize text.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
"""
if os.getenv('ANSI_COLORS_DISABLED') is None:
fmt_str = '\033[%dm%s'
if color is not None:
text = fmt_str % (COLORS[color], text)
if on_color is not None:
text = fmt_str % (HIGHLIGHTS[on_color], text)
if attrs is not None:
for attr in attrs:
text = fmt_str % (ATTRIBUTES[attr], text)
text += RESET
return text | [
"def",
"colored",
"(",
"text",
",",
"color",
"=",
"None",
",",
"on_color",
"=",
"None",
",",
"attrs",
"=",
"None",
")",
":",
"if",
"os",
".",
"getenv",
"(",
"'ANSI_COLORS_DISABLED'",
")",
"is",
"None",
":",
"fmt_str",
"=",
"'\\033[%dm%s'",
"if",
"color",
"is",
"not",
"None",
":",
"text",
"=",
"fmt_str",
"%",
"(",
"COLORS",
"[",
"color",
"]",
",",
"text",
")",
"if",
"on_color",
"is",
"not",
"None",
":",
"text",
"=",
"fmt_str",
"%",
"(",
"HIGHLIGHTS",
"[",
"on_color",
"]",
",",
"text",
")",
"if",
"attrs",
"is",
"not",
"None",
":",
"for",
"attr",
"in",
"attrs",
":",
"text",
"=",
"fmt_str",
"%",
"(",
"ATTRIBUTES",
"[",
"attr",
"]",
",",
"text",
")",
"text",
"+=",
"RESET",
"return",
"text"
] | [
85,
0
] | [
114,
15
] | python | en | ['en', 'fr', 'en'] | False |
cprint | (text, color=None, on_color=None, attrs=None, **kwargs) | Print colorize text.
It accepts arguments of print function.
| Print colorize text. | def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
"""Print colorize text.
It accepts arguments of print function.
"""
print((colored(text, color, on_color, attrs)), **kwargs) | [
"def",
"cprint",
"(",
"text",
",",
"color",
"=",
"None",
",",
"on_color",
"=",
"None",
",",
"attrs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"print",
"(",
"(",
"colored",
"(",
"text",
",",
"color",
",",
"on_color",
",",
"attrs",
")",
")",
",",
"*",
"*",
"kwargs",
")"
] | [
117,
0
] | [
123,
60
] | python | en | ['en', 'fr', 'it'] | False |
smart_pointer_traits.is_smart_pointer | (type_) | returns True, if type represents instantiation of
`boost::shared_ptr` or `std::shared_ptr`, False otherwise | returns True, if type represents instantiation of
`boost::shared_ptr` or `std::shared_ptr`, False otherwise | def is_smart_pointer(type_):
"""returns True, if type represents instantiation of
`boost::shared_ptr` or `std::shared_ptr`, False otherwise"""
type_ = type_traits.remove_alias(type_)
type_ = type_traits.remove_cv(type_)
type_ = type_traits.remove_declarated(type_)
if not isinstance(type_,
(class_declaration.class_declaration_t,
class_declaration.class_t)):
return False
if not (
traits_impl_details.impl_details.is_defined_in_xxx(
'boost', type_) or
traits_impl_details.impl_details.is_defined_in_xxx(
'std', type_)):
return False
return type_.decl_string.startswith('::boost::shared_ptr<') or \
type_.decl_string.startswith('::std::shared_ptr<') | [
"def",
"is_smart_pointer",
"(",
"type_",
")",
":",
"type_",
"=",
"type_traits",
".",
"remove_alias",
"(",
"type_",
")",
"type_",
"=",
"type_traits",
".",
"remove_cv",
"(",
"type_",
")",
"type_",
"=",
"type_traits",
".",
"remove_declarated",
"(",
"type_",
")",
"if",
"not",
"isinstance",
"(",
"type_",
",",
"(",
"class_declaration",
".",
"class_declaration_t",
",",
"class_declaration",
".",
"class_t",
")",
")",
":",
"return",
"False",
"if",
"not",
"(",
"traits_impl_details",
".",
"impl_details",
".",
"is_defined_in_xxx",
"(",
"'boost'",
",",
"type_",
")",
"or",
"traits_impl_details",
".",
"impl_details",
".",
"is_defined_in_xxx",
"(",
"'std'",
",",
"type_",
")",
")",
":",
"return",
"False",
"return",
"type_",
".",
"decl_string",
".",
"startswith",
"(",
"'::boost::shared_ptr<'",
")",
"or",
"type_",
".",
"decl_string",
".",
"startswith",
"(",
"'::std::shared_ptr<'",
")"
] | [
46,
4
] | [
63,
62
] | python | en | ['en', 'nl', 'en'] | True |
smart_pointer_traits.value_type | (type_) | returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type | returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type | def value_type(type_):
"""returns reference to `boost::shared_ptr` \
or `std::shared_ptr` value type"""
if not smart_pointer_traits.is_smart_pointer(type_):
raise TypeError(
'Type "%s" is not an instantiation of \
boost::shared_ptr or std::shared_ptr' %
type_.decl_string)
return internal_type_traits.get_by_name(type_, "value_type") | [
"def",
"value_type",
"(",
"type_",
")",
":",
"if",
"not",
"smart_pointer_traits",
".",
"is_smart_pointer",
"(",
"type_",
")",
":",
"raise",
"TypeError",
"(",
"'Type \"%s\" is not an instantiation of \\\n boost::shared_ptr or std::shared_ptr'",
"%",
"type_",
".",
"decl_string",
")",
"return",
"internal_type_traits",
".",
"get_by_name",
"(",
"type_",
",",
"\"value_type\"",
")"
] | [
66,
4
] | [
74,
68
] | python | en | ['en', 'en', 'en'] | True |
auto_ptr_traits.is_smart_pointer | (type_) | returns True, if type represents instantiation of
`boost::shared_ptr`, False otherwise | returns True, if type represents instantiation of
`boost::shared_ptr`, False otherwise | def is_smart_pointer(type_):
"""returns True, if type represents instantiation of
`boost::shared_ptr`, False otherwise"""
type_ = type_traits.remove_alias(type_)
type_ = type_traits.remove_cv(type_)
type_ = type_traits.remove_declarated(type_)
if not isinstance(type_,
(class_declaration.class_declaration_t,
class_declaration.class_t)):
return False
if not traits_impl_details.impl_details.is_defined_in_xxx(
'std', type_):
return False
return type_.decl_string.startswith('::std::auto_ptr<') | [
"def",
"is_smart_pointer",
"(",
"type_",
")",
":",
"type_",
"=",
"type_traits",
".",
"remove_alias",
"(",
"type_",
")",
"type_",
"=",
"type_traits",
".",
"remove_cv",
"(",
"type_",
")",
"type_",
"=",
"type_traits",
".",
"remove_declarated",
"(",
"type_",
")",
"if",
"not",
"isinstance",
"(",
"type_",
",",
"(",
"class_declaration",
".",
"class_declaration_t",
",",
"class_declaration",
".",
"class_t",
")",
")",
":",
"return",
"False",
"if",
"not",
"traits_impl_details",
".",
"impl_details",
".",
"is_defined_in_xxx",
"(",
"'std'",
",",
"type_",
")",
":",
"return",
"False",
"return",
"type_",
".",
"decl_string",
".",
"startswith",
"(",
"'::std::auto_ptr<'",
")"
] | [
83,
4
] | [
96,
63
] | python | en | ['en', 'nl', 'en'] | True |
auto_ptr_traits.value_type | (type_) | returns reference to `boost::shared_ptr` value type | returns reference to `boost::shared_ptr` value type | def value_type(type_):
"""returns reference to `boost::shared_ptr` value type"""
if not auto_ptr_traits.is_smart_pointer(type_):
raise TypeError(
'Type "%s" is not instantiation of std::auto_ptr' %
type_.decl_string)
return internal_type_traits.get_by_name(type_, "element_type") | [
"def",
"value_type",
"(",
"type_",
")",
":",
"if",
"not",
"auto_ptr_traits",
".",
"is_smart_pointer",
"(",
"type_",
")",
":",
"raise",
"TypeError",
"(",
"'Type \"%s\" is not instantiation of std::auto_ptr'",
"%",
"type_",
".",
"decl_string",
")",
"return",
"internal_type_traits",
".",
"get_by_name",
"(",
"type_",
",",
"\"element_type\"",
")"
] | [
99,
4
] | [
105,
70
] | python | en | ['en', 'en', 'en'] | True |
TeamCityReporter.report_message_type | (self, msg) | Issues an `inspectionType` service message to define generic properties of a given PyLint message type.
:param utils.Message msg: a PyLint message
| Issues an `inspectionType` service message to define generic properties of a given PyLint message type.
:param utils.Message msg: a PyLint message
| def report_message_type(self, msg):
"""Issues an `inspectionType` service message to define generic properties of a given PyLint message type.
:param utils.Message msg: a PyLint message
"""
desc = get_message_description(self.linter, msg.msg_id)
self.tc.message('inspectionType', id=msg.msg_id, name=msg.symbol, description=desc if desc else msg.symbol, category=msg.category) | [
"def",
"report_message_type",
"(",
"self",
",",
"msg",
")",
":",
"desc",
"=",
"get_message_description",
"(",
"self",
".",
"linter",
",",
"msg",
".",
"msg_id",
")",
"self",
".",
"tc",
".",
"message",
"(",
"'inspectionType'",
",",
"id",
"=",
"msg",
".",
"msg_id",
",",
"name",
"=",
"msg",
".",
"symbol",
",",
"description",
"=",
"desc",
"if",
"desc",
"else",
"msg",
".",
"symbol",
",",
"category",
"=",
"msg",
".",
"category",
")"
] | [
61,
4
] | [
66,
138
] | python | en | ['en', 'en', 'en'] | True |
TeamCityReporter.handle_message | (self, msg) | Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message
| Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter. | def handle_message(self, msg):
"""Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message
"""
if msg.msg_id not in self.msg_types:
self.report_message_type(msg)
self.msg_types.add(msg.msg_id)
self.tc.message('inspection', typeId=msg.msg_id, message=msg.msg,
file=os.path.relpath(msg.abspath).replace('\\', '/'),
line=str(msg.line),
SEVERITY=TC_SEVERITY.get(msg.category)) | [
"def",
"handle_message",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
".",
"msg_id",
"not",
"in",
"self",
".",
"msg_types",
":",
"self",
".",
"report_message_type",
"(",
"msg",
")",
"self",
".",
"msg_types",
".",
"add",
"(",
"msg",
".",
"msg_id",
")",
"self",
".",
"tc",
".",
"message",
"(",
"'inspection'",
",",
"typeId",
"=",
"msg",
".",
"msg_id",
",",
"message",
"=",
"msg",
".",
"msg",
",",
"file",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"msg",
".",
"abspath",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
",",
"line",
"=",
"str",
"(",
"msg",
".",
"line",
")",
",",
"SEVERITY",
"=",
"TC_SEVERITY",
".",
"get",
"(",
"msg",
".",
"category",
")",
")"
] | [
68,
4
] | [
81,
63
] | python | en | ['en', 'en', 'en'] | True |
TeamCityReporter.display_reports | (self, layout) | Issues the final PyLint score as a TeamCity build statistic value | Issues the final PyLint score as a TeamCity build statistic value | def display_reports(self, layout):
"""Issues the final PyLint score as a TeamCity build statistic value"""
try:
score = self.linter.stats['global_note']
except (AttributeError, KeyError):
pass
else:
self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score)) | [
"def",
"display_reports",
"(",
"self",
",",
"layout",
")",
":",
"try",
":",
"score",
"=",
"self",
".",
"linter",
".",
"stats",
"[",
"'global_note'",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"pass",
"else",
":",
"self",
".",
"tc",
".",
"message",
"(",
"'buildStatisticValue'",
",",
"key",
"=",
"'PyLintScore'",
",",
"value",
"=",
"str",
"(",
"score",
")",
")"
] | [
83,
4
] | [
90,
87
] | python | en | ['en', 'en', 'en'] | True |
unique_proportion | (_metrics) | Computes the proportion of unique non-null values out of all non-null values | Computes the proportion of unique non-null values out of all non-null values | def unique_proportion(_metrics):
"""Computes the proportion of unique non-null values out of all non-null values"""
total_values = _metrics.get("table.row_count")
unique_values = _metrics.get("column.distinct_values.count")
null_count = _metrics.get("column_values.nonnull.unexpected_count")
# Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)
if total_values > 0 and total_values != null_count:
return unique_values / (total_values - null_count)
else:
return 0 | [
"def",
"unique_proportion",
"(",
"_metrics",
")",
":",
"total_values",
"=",
"_metrics",
".",
"get",
"(",
"\"table.row_count\"",
")",
"unique_values",
"=",
"_metrics",
".",
"get",
"(",
"\"column.distinct_values.count\"",
")",
"null_count",
"=",
"_metrics",
".",
"get",
"(",
"\"column_values.nonnull.unexpected_count\"",
")",
"# Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)",
"if",
"total_values",
">",
"0",
"and",
"total_values",
"!=",
"null_count",
":",
"return",
"unique_values",
"/",
"(",
"total_values",
"-",
"null_count",
")",
"else",
":",
"return",
"0"
] | [
22,
0
] | [
32,
16
] | python | en | ['en', 'en', 'en'] | True |
test_query_store_store_backend_id | (basic_sqlalchemy_query_store) |
What does this test and why?
A Store should be able to report it's store_backend_id
which is set when the StoreBackend is instantiated.
|
What does this test and why?
A Store should be able to report it's store_backend_id
which is set when the StoreBackend is instantiated.
| def test_query_store_store_backend_id(basic_sqlalchemy_query_store):
"""
What does this test and why?
A Store should be able to report it's store_backend_id
which is set when the StoreBackend is instantiated.
"""
# Check that store_backend_id exists can be read
assert basic_sqlalchemy_query_store.store_backend_id is not None
# Check that store_backend_id is a valid UUID
assert test_utils.validate_uuid4(basic_sqlalchemy_query_store.store_backend_id) | [
"def",
"test_query_store_store_backend_id",
"(",
"basic_sqlalchemy_query_store",
")",
":",
"# Check that store_backend_id exists can be read",
"assert",
"basic_sqlalchemy_query_store",
".",
"store_backend_id",
"is",
"not",
"None",
"# Check that store_backend_id is a valid UUID",
"assert",
"test_utils",
".",
"validate_uuid4",
"(",
"basic_sqlalchemy_query_store",
".",
"store_backend_id",
")"
] | [
61,
0
] | [
70,
83
] | python | en | ['en', 'error', 'th'] | False |
MetricParameterBuilder.__init__ | (
self,
parameter_name: str,
metric_name: str,
metric_domain_kwargs: Optional[Union[str, dict]] = None,
metric_value_kwargs: Optional[Union[str, dict]] = None,
enforce_numeric_metric: Optional[Union[str, bool]] = False,
replace_nan_with_zero: Optional[Union[str, bool]] = False,
data_context: Optional[DataContext] = None,
batch_request: Optional[Union[dict, str]] = None,
) |
Args:
parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration);
it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric)
metric_domain_kwargs: used in MetricConfiguration
metric_value_kwargs: used in MetricConfiguration
enforce_numeric_metric: used in MetricConfiguration to insure that metric computations return numeric values
replace_nan_with_zero: if False (default), then if the computed metric gives NaN, then exception is raised;
otherwise, if True, then if the computed metric gives NaN, then it is converted to the 0.0 (float) value.
data_context: DataContext
batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
|
Args:
parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration);
it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric)
metric_domain_kwargs: used in MetricConfiguration
metric_value_kwargs: used in MetricConfiguration
enforce_numeric_metric: used in MetricConfiguration to insure that metric computations return numeric values
replace_nan_with_zero: if False (default), then if the computed metric gives NaN, then exception is raised;
otherwise, if True, then if the computed metric gives NaN, then it is converted to the 0.0 (float) value.
data_context: DataContext
batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
| def __init__(
self,
parameter_name: str,
metric_name: str,
metric_domain_kwargs: Optional[Union[str, dict]] = None,
metric_value_kwargs: Optional[Union[str, dict]] = None,
enforce_numeric_metric: Optional[Union[str, bool]] = False,
replace_nan_with_zero: Optional[Union[str, bool]] = False,
data_context: Optional[DataContext] = None,
batch_request: Optional[Union[dict, str]] = None,
):
"""
Args:
parameter_name: the name of this parameter -- this is user-specified parameter name (from configuration);
it is not the fully-qualified parameter name; a fully-qualified parameter name must start with "$parameter."
and may contain one or more subsequent parts (e.g., "$parameter.<my_param_from_config>.<metric_name>").
metric_name: the name of a metric used in MetricConfiguration (must be a supported and registered metric)
metric_domain_kwargs: used in MetricConfiguration
metric_value_kwargs: used in MetricConfiguration
enforce_numeric_metric: used in MetricConfiguration to insure that metric computations return numeric values
replace_nan_with_zero: if False (default), then if the computed metric gives NaN, then exception is raised;
otherwise, if True, then if the computed metric gives NaN, then it is converted to the 0.0 (float) value.
data_context: DataContext
batch_request: specified in ParameterBuilder configuration to get Batch objects for parameter computation.
"""
super().__init__(
parameter_name=parameter_name,
data_context=data_context,
batch_request=batch_request,
)
self._metric_name = metric_name
self._metric_domain_kwargs = metric_domain_kwargs
self._metric_value_kwargs = metric_value_kwargs
self._enforce_numeric_metric = enforce_numeric_metric
self._replace_nan_with_zero = replace_nan_with_zero | [
"def",
"__init__",
"(",
"self",
",",
"parameter_name",
":",
"str",
",",
"metric_name",
":",
"str",
",",
"metric_domain_kwargs",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"metric_value_kwargs",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"dict",
"]",
"]",
"=",
"None",
",",
"enforce_numeric_metric",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"bool",
"]",
"]",
"=",
"False",
",",
"replace_nan_with_zero",
":",
"Optional",
"[",
"Union",
"[",
"str",
",",
"bool",
"]",
"]",
"=",
"False",
",",
"data_context",
":",
"Optional",
"[",
"DataContext",
"]",
"=",
"None",
",",
"batch_request",
":",
"Optional",
"[",
"Union",
"[",
"dict",
",",
"str",
"]",
"]",
"=",
"None",
",",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"parameter_name",
"=",
"parameter_name",
",",
"data_context",
"=",
"data_context",
",",
"batch_request",
"=",
"batch_request",
",",
")",
"self",
".",
"_metric_name",
"=",
"metric_name",
"self",
".",
"_metric_domain_kwargs",
"=",
"metric_domain_kwargs",
"self",
".",
"_metric_value_kwargs",
"=",
"metric_value_kwargs",
"self",
".",
"_enforce_numeric_metric",
"=",
"enforce_numeric_metric",
"self",
".",
"_replace_nan_with_zero",
"=",
"replace_nan_with_zero"
] | [
19,
4
] | [
55,
59
] | python | en | ['en', 'error', 'th'] | False |
MetricParameterBuilder._build_parameters | (
self,
parameter_container: ParameterContainer,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) |
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
Args:
:return: a ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details
|
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
Args:
:return: a ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details
| def _build_parameters(
self,
parameter_container: ParameterContainer,
domain: Domain,
*,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
):
"""
Builds ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details.
Args:
:return: a ParameterContainer object that holds ParameterNode objects with attribute name-value pairs and optional details
"""
validator: Validator = self.get_validator(
domain=domain,
variables=variables,
parameters=parameters,
)
batch_id: str = self.get_batch_id(variables=variables)
metric_computation_result: Dict[
str, Union[Any, Number, Dict[str, Any]]
] = self.get_metric(
batch_id=batch_id,
validator=validator,
metric_name=self._metric_name,
metric_domain_kwargs=self._metric_domain_kwargs,
metric_value_kwargs=self._metric_value_kwargs,
enforce_numeric_metric=self._enforce_numeric_metric,
replace_nan_with_zero=self._replace_nan_with_zero,
domain=domain,
variables=variables,
parameters=parameters,
)
parameter_values: Dict[str, Any] = {
f"$parameter.{self.parameter_name}": metric_computation_result,
}
build_parameter_container(
parameter_container=parameter_container, parameter_values=parameter_values
) | [
"def",
"_build_parameters",
"(",
"self",
",",
"parameter_container",
":",
"ParameterContainer",
",",
"domain",
":",
"Domain",
",",
"*",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
"parameters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"ParameterContainer",
"]",
"]",
"=",
"None",
",",
")",
":",
"validator",
":",
"Validator",
"=",
"self",
".",
"get_validator",
"(",
"domain",
"=",
"domain",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"batch_id",
":",
"str",
"=",
"self",
".",
"get_batch_id",
"(",
"variables",
"=",
"variables",
")",
"metric_computation_result",
":",
"Dict",
"[",
"str",
",",
"Union",
"[",
"Any",
",",
"Number",
",",
"Dict",
"[",
"str",
",",
"Any",
"]",
"]",
"]",
"=",
"self",
".",
"get_metric",
"(",
"batch_id",
"=",
"batch_id",
",",
"validator",
"=",
"validator",
",",
"metric_name",
"=",
"self",
".",
"_metric_name",
",",
"metric_domain_kwargs",
"=",
"self",
".",
"_metric_domain_kwargs",
",",
"metric_value_kwargs",
"=",
"self",
".",
"_metric_value_kwargs",
",",
"enforce_numeric_metric",
"=",
"self",
".",
"_enforce_numeric_metric",
",",
"replace_nan_with_zero",
"=",
"self",
".",
"_replace_nan_with_zero",
",",
"domain",
"=",
"domain",
",",
"variables",
"=",
"variables",
",",
"parameters",
"=",
"parameters",
",",
")",
"parameter_values",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"f\"$parameter.{self.parameter_name}\"",
":",
"metric_computation_result",
",",
"}",
"build_parameter_container",
"(",
"parameter_container",
"=",
"parameter_container",
",",
"parameter_values",
"=",
"parameter_values",
")"
] | [
57,
4
] | [
99,
9
] | python | en | ['en', 'error', 'th'] | False |
ExpectColumnValueZScoresToBeLessThan.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
try:
# Ensuring Z-score Threshold metric has been properly provided
assert (
"threshold" in configuration.kwargs
), "A Z-score threshold must be provided"
assert isinstance(
configuration.kwargs["threshold"], (float, int, dict)
), "Provided threshold must be a number"
if isinstance(configuration.kwargs["threshold"], dict):
assert (
"$PARAMETER" in configuration.kwargs["threshold"]
), 'Evaluation Parameter dict for threshold kwarg must have "$PARAMETER" key.'
assert isinstance(
configuration.kwargs["double_sided"], (bool, dict)
), "Double sided parameter must be a boolean value"
if isinstance(configuration.kwargs["double_sided"], dict):
assert (
"$PARAMETER" in configuration.kwargs["double_sided"]
), 'Evaluation Parameter dict for double_sided kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"if",
"configuration",
"is",
"None",
":",
"configuration",
"=",
"self",
".",
"configuration",
"try",
":",
"# Ensuring Z-score Threshold metric has been properly provided",
"assert",
"(",
"\"threshold\"",
"in",
"configuration",
".",
"kwargs",
")",
",",
"\"A Z-score threshold must be provided\"",
"assert",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"threshold\"",
"]",
",",
"(",
"float",
",",
"int",
",",
"dict",
")",
")",
",",
"\"Provided threshold must be a number\"",
"if",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"threshold\"",
"]",
",",
"dict",
")",
":",
"assert",
"(",
"\"$PARAMETER\"",
"in",
"configuration",
".",
"kwargs",
"[",
"\"threshold\"",
"]",
")",
",",
"'Evaluation Parameter dict for threshold kwarg must have \"$PARAMETER\" key.'",
"assert",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"double_sided\"",
"]",
",",
"(",
"bool",
",",
"dict",
")",
")",
",",
"\"Double sided parameter must be a boolean value\"",
"if",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"double_sided\"",
"]",
",",
"dict",
")",
":",
"assert",
"(",
"\"$PARAMETER\"",
"in",
"configuration",
".",
"kwargs",
"[",
"\"double_sided\"",
"]",
")",
",",
"'Evaluation Parameter dict for double_sided kwarg must have \"$PARAMETER\" key.'",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"True"
] | [
83,
4
] | [
121,
19
] | python | en | ['en', 'error', 'th'] | False |
test_cli_datasource_list | (empty_data_context, empty_sqlite_db, caplog) | Test an empty project and after adding a single datasource. | Test an empty project and after adding a single datasource. | def test_cli_datasource_list(empty_data_context, empty_sqlite_db, caplog):
"""Test an empty project and after adding a single datasource."""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["datasource", "list", "-d", project_root_dir], catch_exceptions=False
)
stdout = result.stdout.strip()
assert "No Datasources found" in stdout
assert context.list_datasources() == []
datasource_name = "wow_a_datasource"
_add_datasource_and_credentials_to_context(
context, datasource_name, empty_sqlite_db
)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli, ["datasource", "list", "-d", project_root_dir], catch_exceptions=False
)
url = str(empty_sqlite_db.engine.url)
expected_output = """\
1 Datasource found:[0m
[0m
- [36mname:[0m wow_a_datasource[0m
[36mmodule_name:[0m great_expectations.datasource[0m
[36mclass_name:[0m SqlAlchemyDatasource[0m
[36mbatch_kwargs_generators:[0m[0m
[36mdefault:[0m[0m
[36mclass_name:[0m TableBatchKwargsGenerator[0m
[36mcredentials:[0m[0m
[36murl:[0m {}[0m
[36mdata_asset_type:[0m[0m
[36mclass_name:[0m SqlAlchemyDataset[0m
[36mmodule_name:[0m None[0m
""".format(
url
).strip()
stdout = result.stdout.strip()
assert stdout == expected_output
assert_no_logging_messages_or_tracebacks(caplog, result) | [
"def",
"test_cli_datasource_list",
"(",
"empty_data_context",
",",
"empty_sqlite_db",
",",
"caplog",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"list\"",
",",
"\"-d\"",
",",
"project_root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
")",
"stdout",
"=",
"result",
".",
"stdout",
".",
"strip",
"(",
")",
"assert",
"\"No Datasources found\"",
"in",
"stdout",
"assert",
"context",
".",
"list_datasources",
"(",
")",
"==",
"[",
"]",
"datasource_name",
"=",
"\"wow_a_datasource\"",
"_add_datasource_and_credentials_to_context",
"(",
"context",
",",
"datasource_name",
",",
"empty_sqlite_db",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"list\"",
",",
"\"-d\"",
",",
"project_root_dir",
"]",
",",
"catch_exceptions",
"=",
"False",
")",
"url",
"=",
"str",
"(",
"empty_sqlite_db",
".",
"engine",
".",
"url",
")",
"expected_output",
"=",
"\"\"\"\\\n1 Datasource found:\u001b[0m\n\u001b[0m\n - \u001b[36mname:\u001b[0m wow_a_datasource\u001b[0m\n \u001b[36mmodule_name:\u001b[0m great_expectations.datasource\u001b[0m\n \u001b[36mclass_name:\u001b[0m SqlAlchemyDatasource\u001b[0m\n \u001b[36mbatch_kwargs_generators:\u001b[0m\u001b[0m\n \u001b[36mdefault:\u001b[0m\u001b[0m\n \u001b[36mclass_name:\u001b[0m TableBatchKwargsGenerator\u001b[0m\n \u001b[36mcredentials:\u001b[0m\u001b[0m\n \u001b[36murl:\u001b[0m {}\u001b[0m\n \u001b[36mdata_asset_type:\u001b[0m\u001b[0m\n \u001b[36mclass_name:\u001b[0m SqlAlchemyDataset\u001b[0m\n \u001b[36mmodule_name:\u001b[0m None\u001b[0m\n\"\"\"",
".",
"format",
"(",
"url",
")",
".",
"strip",
"(",
")",
"stdout",
"=",
"result",
".",
"stdout",
".",
"strip",
"(",
")",
"assert",
"stdout",
"==",
"expected_output",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
")"
] | [
15,
0
] | [
60,
60
] | python | en | ['en', 'en', 'en'] | True |
test_cli_datasource_profile_answering_no | (
empty_data_context, titanic_sqlite_db, caplog
) |
When datasource profile command is called without additional arguments,
the command must prompt the user with a confirm (y/n) before profiling.
We are verifying that it does that and respects user's "no".
|
When datasource profile command is called without additional arguments,
the command must prompt the user with a confirm (y/n) before profiling.
We are verifying that it does that and respects user's "no".
| def test_cli_datasource_profile_answering_no(
empty_data_context, titanic_sqlite_db, caplog
):
"""
When datasource profile command is called without additional arguments,
the command must prompt the user with a confirm (y/n) before profiling.
We are verifying that it does that and respects user's "no".
"""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, titanic_sqlite_db
)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["datasource", "profile", datasource_name, "-d", project_root_dir, "--no-view"],
input="n\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert "Profiling 'wow_a_datasource'" in stdout
assert "Skipping profiling for now." in stdout
assert_no_logging_messages_or_tracebacks(caplog, result) | [
"def",
"test_cli_datasource_profile_answering_no",
"(",
"empty_data_context",
",",
"titanic_sqlite_db",
",",
"caplog",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"datasource_name",
"=",
"\"wow_a_datasource\"",
"context",
"=",
"_add_datasource_and_credentials_to_context",
"(",
"context",
",",
"datasource_name",
",",
"titanic_sqlite_db",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"profile\"",
",",
"datasource_name",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"\"n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Profiling 'wow_a_datasource'\"",
"in",
"stdout",
"assert",
"\"Skipping profiling for now.\"",
"in",
"stdout",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
")"
] | [
195,
0
] | [
223,
60
] | python | en | ['en', 'error', 'th'] | False |
test_cli_datasource_profile_on_empty_database | (
empty_data_context, empty_sqlite_db, caplog
) |
We run the datasource profile command against an empty database (no tables).
This means that no generator can "see" a list of available data assets.
The command must exit with an error message saying that no generator can see
any assets.
|
We run the datasource profile command against an empty database (no tables).
This means that no generator can "see" a list of available data assets.
The command must exit with an error message saying that no generator can see
any assets.
| def test_cli_datasource_profile_on_empty_database(
empty_data_context, empty_sqlite_db, caplog
):
"""
We run the datasource profile command against an empty database (no tables).
This means that no generator can "see" a list of available data assets.
The command must exit with an error message saying that no generator can see
any assets.
"""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, empty_sqlite_db
)
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
["datasource", "profile", datasource_name, "-d", project_root_dir, "--no-view"],
input="n\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 1
assert "Profiling 'wow_a_datasource'" in stdout
assert "No batch kwargs generators can list available data assets" in stdout
assert_no_logging_messages_or_tracebacks(caplog, result) | [
"def",
"test_cli_datasource_profile_on_empty_database",
"(",
"empty_data_context",
",",
"empty_sqlite_db",
",",
"caplog",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"datasource_name",
"=",
"\"wow_a_datasource\"",
"context",
"=",
"_add_datasource_and_credentials_to_context",
"(",
"context",
",",
"datasource_name",
",",
"empty_sqlite_db",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"profile\"",
",",
"datasource_name",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--no-view\"",
"]",
",",
"input",
"=",
"\"n\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"assert",
"\"Profiling 'wow_a_datasource'\"",
"in",
"stdout",
"assert",
"\"No batch kwargs generators can list available data assets\"",
"in",
"stdout",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
")"
] | [
226,
0
] | [
256,
60
] | python | en | ['en', 'error', 'th'] | False |
test_cli_datasource_profile_with_datasource_arg_and_generator_name_arg | (
empty_data_context, titanic_sqlite_db, caplog
) |
Here we are verifying that when generator_name argument is passed to
the methods down the stack.
We use a datasource with two generators. This way we can check that the
name of the expectation suite created by the profiler corresponds to
the name of the data asset listed by the generator that we told the profiler
to use.
The logic of processing this argument is testing in tests/profile.
|
Here we are verifying that when generator_name argument is passed to
the methods down the stack. | def test_cli_datasource_profile_with_datasource_arg_and_generator_name_arg(
empty_data_context, titanic_sqlite_db, caplog
):
"""
Here we are verifying that when generator_name argument is passed to
the methods down the stack.
We use a datasource with two generators. This way we can check that the
name of the expectation suite created by the profiler corresponds to
the name of the data asset listed by the generator that we told the profiler
to use.
The logic of processing this argument is testing in tests/profile.
"""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource__with_two_generators_and_credentials_to_context(
context, datasource_name, titanic_sqlite_db
)
second_generator_name = "second_generator"
runner = CliRunner()
result = runner.invoke(
cli,
[
"datasource",
"profile",
datasource_name,
"--batch-kwargs-generator-name",
second_generator_name,
"-d",
project_root_dir,
"--no-view",
],
input="Y\n",
)
stdout = result.stdout
assert result.exit_code == 0
assert "Profiling '{}'".format(datasource_name) in stdout
context = DataContext(project_root_dir)
assert len(context.list_datasources()) == 1
expectations_store = context.stores["expectations_store"]
suites = expectations_store.list_keys()
assert len(suites) == 1
assert (
suites[0].expectation_suite_name
== "wow_a_datasource.second_generator.asset_one.BasicDatasetProfiler"
)
assert "Preparing column 1 of 7" in caplog.messages[0]
assert len(caplog.messages) == 10
assert_no_tracebacks(result) | [
"def",
"test_cli_datasource_profile_with_datasource_arg_and_generator_name_arg",
"(",
"empty_data_context",
",",
"titanic_sqlite_db",
",",
"caplog",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"datasource_name",
"=",
"\"wow_a_datasource\"",
"context",
"=",
"_add_datasource__with_two_generators_and_credentials_to_context",
"(",
"context",
",",
"datasource_name",
",",
"titanic_sqlite_db",
")",
"second_generator_name",
"=",
"\"second_generator\"",
"runner",
"=",
"CliRunner",
"(",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"profile\"",
",",
"datasource_name",
",",
"\"--batch-kwargs-generator-name\"",
",",
"second_generator_name",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--no-view\"",
",",
"]",
",",
"input",
"=",
"\"Y\\n\"",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Profiling '{}'\"",
".",
"format",
"(",
"datasource_name",
")",
"in",
"stdout",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"assert",
"len",
"(",
"context",
".",
"list_datasources",
"(",
")",
")",
"==",
"1",
"expectations_store",
"=",
"context",
".",
"stores",
"[",
"\"expectations_store\"",
"]",
"suites",
"=",
"expectations_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"suites",
")",
"==",
"1",
"assert",
"(",
"suites",
"[",
"0",
"]",
".",
"expectation_suite_name",
"==",
"\"wow_a_datasource.second_generator.asset_one.BasicDatasetProfiler\"",
")",
"assert",
"\"Preparing column 1 of 7\"",
"in",
"caplog",
".",
"messages",
"[",
"0",
"]",
"assert",
"len",
"(",
"caplog",
".",
"messages",
")",
"==",
"10",
"assert_no_tracebacks",
"(",
"result",
")"
] | [
316,
0
] | [
372,
32
] | python | en | ['en', 'error', 'th'] | False |
test_cli_datasource_profile_with_data_asset_and_additional_batch_kwargs_with_limit | (
empty_data_context, titanic_sqlite_db, caplog
) |
User can pass additional batch kwargs (e.g., limit) to a sql backend.
Here we are verifying that passing "limit" affects the query correctly -
the row count in the batch that the profiler uses to profile the data asset
must match the limit passed by the user.
|
User can pass additional batch kwargs (e.g., limit) to a sql backend.
Here we are verifying that passing "limit" affects the query correctly -
the row count in the batch that the profiler uses to profile the data asset
must match the limit passed by the user.
| def test_cli_datasource_profile_with_data_asset_and_additional_batch_kwargs_with_limit(
empty_data_context, titanic_sqlite_db, caplog
):
"""
User can pass additional batch kwargs (e.g., limit) to a sql backend.
Here we are verifying that passing "limit" affects the query correctly -
the row count in the batch that the profiler uses to profile the data asset
must match the limit passed by the user.
"""
project_root_dir = empty_data_context.root_directory
context = DataContext(project_root_dir)
datasource_name = "wow_a_datasource"
context = _add_datasource_and_credentials_to_context(
context, datasource_name, titanic_sqlite_db
)
res = context.get_available_data_asset_names("wow_a_datasource")
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
cli,
[
"datasource",
"profile",
"-d",
project_root_dir,
"--data-assets",
"main.titanic",
"--additional-batch-kwargs",
'{"limit": 97}',
"--no-view",
],
input="Y\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert "Profiling '{}'".format(datasource_name) in stdout
assert "The following Data Docs sites will be built:\n" in stdout
assert "local_site:" in stdout
context = DataContext(project_root_dir)
assert len(context.list_datasources()) == 1
expectations_store = context.stores["expectations_store"]
suites = expectations_store.list_keys()
assert len(suites) == 1
assert (
suites[0].expectation_suite_name
== "wow_a_datasource.default.main.titanic.BasicDatasetProfiler"
)
validations_store = context.stores["validations_store"]
validation_keys = validations_store.list_keys()
assert len(validation_keys) == 1
validation = validations_store.get(validation_keys[0])
assert (
validation.meta["expectation_suite_name"]
== "wow_a_datasource.default.main.titanic.BasicDatasetProfiler"
)
assert validation.success is False
row_count_validation_results = [
validation_result
for validation_result in validation.results
if validation_result.expectation_config.expectation_type
== "expect_table_row_count_to_be_between"
]
assert len(row_count_validation_results) == 1
assert row_count_validation_results[0].result["observed_value"] == 97
assert "Preparing column 1 of 7" in caplog.messages[0]
assert len(caplog.messages) == 10
assert_no_tracebacks(result) | [
"def",
"test_cli_datasource_profile_with_data_asset_and_additional_batch_kwargs_with_limit",
"(",
"empty_data_context",
",",
"titanic_sqlite_db",
",",
"caplog",
")",
":",
"project_root_dir",
"=",
"empty_data_context",
".",
"root_directory",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"datasource_name",
"=",
"\"wow_a_datasource\"",
"context",
"=",
"_add_datasource_and_credentials_to_context",
"(",
"context",
",",
"datasource_name",
",",
"titanic_sqlite_db",
")",
"res",
"=",
"context",
".",
"get_available_data_asset_names",
"(",
"\"wow_a_datasource\"",
")",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"datasource\"",
",",
"\"profile\"",
",",
"\"-d\"",
",",
"project_root_dir",
",",
"\"--data-assets\"",
",",
"\"main.titanic\"",
",",
"\"--additional-batch-kwargs\"",
",",
"'{\"limit\": 97}'",
",",
"\"--no-view\"",
",",
"]",
",",
"input",
"=",
"\"Y\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"\"Profiling '{}'\"",
".",
"format",
"(",
"datasource_name",
")",
"in",
"stdout",
"assert",
"\"The following Data Docs sites will be built:\\n\"",
"in",
"stdout",
"assert",
"\"local_site:\"",
"in",
"stdout",
"context",
"=",
"DataContext",
"(",
"project_root_dir",
")",
"assert",
"len",
"(",
"context",
".",
"list_datasources",
"(",
")",
")",
"==",
"1",
"expectations_store",
"=",
"context",
".",
"stores",
"[",
"\"expectations_store\"",
"]",
"suites",
"=",
"expectations_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"suites",
")",
"==",
"1",
"assert",
"(",
"suites",
"[",
"0",
"]",
".",
"expectation_suite_name",
"==",
"\"wow_a_datasource.default.main.titanic.BasicDatasetProfiler\"",
")",
"validations_store",
"=",
"context",
".",
"stores",
"[",
"\"validations_store\"",
"]",
"validation_keys",
"=",
"validations_store",
".",
"list_keys",
"(",
")",
"assert",
"len",
"(",
"validation_keys",
")",
"==",
"1",
"validation",
"=",
"validations_store",
".",
"get",
"(",
"validation_keys",
"[",
"0",
"]",
")",
"assert",
"(",
"validation",
".",
"meta",
"[",
"\"expectation_suite_name\"",
"]",
"==",
"\"wow_a_datasource.default.main.titanic.BasicDatasetProfiler\"",
")",
"assert",
"validation",
".",
"success",
"is",
"False",
"row_count_validation_results",
"=",
"[",
"validation_result",
"for",
"validation_result",
"in",
"validation",
".",
"results",
"if",
"validation_result",
".",
"expectation_config",
".",
"expectation_type",
"==",
"\"expect_table_row_count_to_be_between\"",
"]",
"assert",
"len",
"(",
"row_count_validation_results",
")",
"==",
"1",
"assert",
"row_count_validation_results",
"[",
"0",
"]",
".",
"result",
"[",
"\"observed_value\"",
"]",
"==",
"97",
"assert",
"\"Preparing column 1 of 7\"",
"in",
"caplog",
".",
"messages",
"[",
"0",
"]",
"assert",
"len",
"(",
"caplog",
".",
"messages",
")",
"==",
"10",
"assert_no_tracebacks",
"(",
"result",
")"
] | [
426,
0
] | [
499,
32
] | python | en | ['en', 'error', 'th'] | False |
ExpectColumnValuesToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
min_val = None
max_val = None
if "min_value" in configuration.kwargs:
min_val = configuration.kwargs["min_value"]
if "max_value" in configuration.kwargs:
max_val = configuration.kwargs["max_value"]
assert (
min_val is not None or max_val is not None
), "min_value and max_value cannot both be None"
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"min_val",
"=",
"None",
"max_val",
"=",
"None",
"if",
"\"min_value\"",
"in",
"configuration",
".",
"kwargs",
":",
"min_val",
"=",
"configuration",
".",
"kwargs",
"[",
"\"min_value\"",
"]",
"if",
"\"max_value\"",
"in",
"configuration",
".",
"kwargs",
":",
"max_val",
"=",
"configuration",
".",
"kwargs",
"[",
"\"max_value\"",
"]",
"assert",
"(",
"min_val",
"is",
"not",
"None",
"or",
"max_val",
"is",
"not",
"None",
")",
",",
"\"min_value and max_value cannot both be None\"",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
113,
4
] | [
138,
85
] | python | en | ['en', 'error', 'th'] | False |
_add_chrome_proxy_extension | (
chrome_options, proxy_string, proxy_user, proxy_pass) | Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) | Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) | def _add_chrome_proxy_extension(
chrome_options, proxy_string, proxy_user, proxy_pass):
""" Implementation of https://stackoverflow.com/a/35293284 for
https://stackoverflow.com/questions/12848327/
(Run Selenium on a proxy server that requires authentication.) """
arg_join = " ".join(sys.argv)
if not ("-n" in sys.argv or "-n=" in arg_join or arg_join == "-c"):
# Single-threaded
proxy_helper.create_proxy_zip(proxy_string, proxy_user, proxy_pass)
else:
# Pytest multi-threaded test
import threading
lock = threading.Lock()
with lock:
time.sleep(random.uniform(0.02, 0.15))
if not os.path.exists(PROXY_ZIP_PATH):
proxy_helper.create_proxy_zip(
proxy_string, proxy_user, proxy_pass)
time.sleep(random.uniform(0.1, 0.2))
proxy_zip = PROXY_ZIP_PATH
if not os.path.exists(PROXY_ZIP_PATH):
# Handle "Permission denied" on the default proxy.zip path
proxy_zip = PROXY_ZIP_PATH_2
chrome_options.add_extension(proxy_zip)
return chrome_options | [
"def",
"_add_chrome_proxy_extension",
"(",
"chrome_options",
",",
"proxy_string",
",",
"proxy_user",
",",
"proxy_pass",
")",
":",
"arg_join",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"not",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
"or",
"\"-n=\"",
"in",
"arg_join",
"or",
"arg_join",
"==",
"\"-c\"",
")",
":",
"# Single-threaded",
"proxy_helper",
".",
"create_proxy_zip",
"(",
"proxy_string",
",",
"proxy_user",
",",
"proxy_pass",
")",
"else",
":",
"# Pytest multi-threaded test",
"import",
"threading",
"lock",
"=",
"threading",
".",
"Lock",
"(",
")",
"with",
"lock",
":",
"time",
".",
"sleep",
"(",
"random",
".",
"uniform",
"(",
"0.02",
",",
"0.15",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"PROXY_ZIP_PATH",
")",
":",
"proxy_helper",
".",
"create_proxy_zip",
"(",
"proxy_string",
",",
"proxy_user",
",",
"proxy_pass",
")",
"time",
".",
"sleep",
"(",
"random",
".",
"uniform",
"(",
"0.1",
",",
"0.2",
")",
")",
"proxy_zip",
"=",
"PROXY_ZIP_PATH",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"PROXY_ZIP_PATH",
")",
":",
"# Handle \"Permission denied\" on the default proxy.zip path",
"proxy_zip",
"=",
"PROXY_ZIP_PATH_2",
"chrome_options",
".",
"add_extension",
"(",
"proxy_zip",
")",
"return",
"chrome_options"
] | [
95,
0
] | [
119,
25
] | python | en | ['en', 'en', 'en'] | True |
_add_chrome_disable_csp_extension | (chrome_options) | Disable Chrome's Content-Security-Policy with a browser extension.
See https://github.com/PhilGrayson/chrome-csp-disable for details. | Disable Chrome's Content-Security-Policy with a browser extension.
See https://github.com/PhilGrayson/chrome-csp-disable for details. | def _add_chrome_disable_csp_extension(chrome_options):
""" Disable Chrome's Content-Security-Policy with a browser extension.
See https://github.com/PhilGrayson/chrome-csp-disable for details. """
disable_csp_zip = DISABLE_CSP_ZIP_PATH
chrome_options.add_extension(disable_csp_zip)
return chrome_options | [
"def",
"_add_chrome_disable_csp_extension",
"(",
"chrome_options",
")",
":",
"disable_csp_zip",
"=",
"DISABLE_CSP_ZIP_PATH",
"chrome_options",
".",
"add_extension",
"(",
"disable_csp_zip",
")",
"return",
"chrome_options"
] | [
122,
0
] | [
127,
25
] | python | en | ['en', 'en', 'en'] | True |
get_local_driver | (
browser_name, headless, servername,
proxy_string, proxy_auth, proxy_user, proxy_pass, user_agent,
disable_csp, enable_sync, use_auto_ext, no_sandbox, disable_gpu,
incognito, guest_mode, devtools,
user_data_dir, extension_zip, extension_dir,
mobile_emulator, device_width, device_height, device_pixel_ratio) |
Spins up a new web browser and returns the driver.
Can also be used to spin up additional browsers for the same test.
|
Spins up a new web browser and returns the driver.
Can also be used to spin up additional browsers for the same test.
| def get_local_driver(
browser_name, headless, servername,
proxy_string, proxy_auth, proxy_user, proxy_pass, user_agent,
disable_csp, enable_sync, use_auto_ext, no_sandbox, disable_gpu,
incognito, guest_mode, devtools,
user_data_dir, extension_zip, extension_dir,
mobile_emulator, device_width, device_height, device_pixel_ratio):
'''
Spins up a new web browser and returns the driver.
Can also be used to spin up additional browsers for the same test.
'''
downloads_path = download_helper.get_downloads_folder()
download_helper.reset_downloads_folder()
if browser_name == constants.Browser.FIREFOX:
try:
try:
# Use Geckodriver for Firefox if it's on the PATH
profile = _create_firefox_profile(
downloads_path, proxy_string, user_agent, disable_csp)
firefox_capabilities = DesiredCapabilities.FIREFOX.copy()
firefox_capabilities['marionette'] = True
options = webdriver.FirefoxOptions()
if headless:
options.add_argument('-headless')
firefox_capabilities['moz:firefoxOptions'] = (
{'args': ['-headless']})
if LOCAL_GECKODRIVER and os.path.exists(LOCAL_GECKODRIVER):
try:
make_driver_executable_if_not(LOCAL_GECKODRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make geckodriver"
" executable: %s" % e)
elif not is_geckodriver_on_path():
args = " ".join(sys.argv)
if not ("-n" in sys.argv or "-n=" in args or args == "-c"):
# (Not multithreaded)
from seleniumbase.console_scripts import sb_install
sys_args = sys.argv # Save a copy of current sys args
print("\nWarning: geckodriver not found!"
" Installing now:")
try:
sb_install.main(override="geckodriver")
except Exception as e:
print("\nWarning: Could not install geckodriver: "
"%s" % e)
sys.argv = sys_args # Put back the original sys args
if "linux" in PLATFORM or not headless:
firefox_driver = webdriver.Firefox(
firefox_profile=profile,
capabilities=firefox_capabilities)
else:
firefox_driver = webdriver.Firefox(
firefox_profile=profile,
capabilities=firefox_capabilities,
options=options)
except Exception:
profile = _create_firefox_profile(
downloads_path, proxy_string, user_agent, disable_csp)
firefox_capabilities = DesiredCapabilities.FIREFOX.copy()
firefox_driver = webdriver.Firefox(
firefox_profile=profile,
capabilities=firefox_capabilities)
return firefox_driver
except Exception as e:
if headless:
raise Exception(e)
return webdriver.Firefox()
elif browser_name == constants.Browser.INTERNET_EXPLORER:
if not IS_WINDOWS:
raise Exception(
"IE Browser is for Windows-based operating systems only!")
from selenium.webdriver.ie.options import Options
ie_options = Options()
ie_options.ignore_protected_mode_settings = False
ie_options.ignore_zoom_level = True
ie_options.require_window_focus = False
ie_options.native_events = True
ie_options.full_page_screenshot = True
ie_options.persistent_hover = True
ie_capabilities = ie_options.to_capabilities()
if LOCAL_IEDRIVER and os.path.exists(LOCAL_IEDRIVER):
try:
make_driver_executable_if_not(LOCAL_IEDRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make iedriver"
" executable: %s" % e)
return webdriver.Ie(capabilities=ie_capabilities)
elif browser_name == constants.Browser.EDGE:
try:
chrome_options = _set_chrome_options(
downloads_path, headless,
proxy_string, proxy_auth, proxy_user, proxy_pass, user_agent,
disable_csp, enable_sync, use_auto_ext,
no_sandbox, disable_gpu, incognito, guest_mode, devtools,
user_data_dir, extension_zip, extension_dir, servername,
mobile_emulator, device_width, device_height,
device_pixel_ratio)
if LOCAL_EDGEDRIVER and os.path.exists(LOCAL_EDGEDRIVER):
try:
make_driver_executable_if_not(LOCAL_EDGEDRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make edgedriver"
" executable: %s" % e)
elif not is_edgedriver_on_path():
args = " ".join(sys.argv)
if not ("-n" in sys.argv or "-n=" in args or args == "-c"):
# (Not multithreaded)
from seleniumbase.console_scripts import sb_install
sys_args = sys.argv # Save a copy of current sys args
print("\nWarning: msedgedriver not found. Installing now:")
sb_install.main(override="edgedriver")
sys.argv = sys_args # Put back the original sys args
return webdriver.Chrome(executable_path=LOCAL_EDGEDRIVER,
options=chrome_options)
except Exception as e:
if headless:
raise Exception(e)
if LOCAL_EDGEDRIVER and os.path.exists(LOCAL_EDGEDRIVER):
try:
make_driver_executable_if_not(LOCAL_EDGEDRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make edgedriver"
" executable: %s" % e)
return webdriver.Chrome(executable_path=LOCAL_EDGEDRIVER)
elif browser_name == constants.Browser.SAFARI:
arg_join = " ".join(sys.argv)
if ("-n" in sys.argv) or ("-n=" in arg_join) or (arg_join == "-c"):
# Skip if multithreaded
raise Exception("Can't run Safari tests in multi-threaded mode!")
safari_capabilities = _set_safari_capabilities()
return webdriver.Safari(desired_capabilities=safari_capabilities)
elif browser_name == constants.Browser.OPERA:
if LOCAL_OPERADRIVER and os.path.exists(LOCAL_OPERADRIVER):
try:
make_driver_executable_if_not(LOCAL_OPERADRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make operadriver"
" executable: %s" % e)
return webdriver.Opera()
elif browser_name == constants.Browser.PHANTOM_JS:
with warnings.catch_warnings():
# Ignore "PhantomJS has been deprecated" UserWarning
warnings.simplefilter("ignore", category=UserWarning)
return webdriver.PhantomJS()
elif browser_name == constants.Browser.GOOGLE_CHROME:
try:
chrome_options = _set_chrome_options(
downloads_path, headless,
proxy_string, proxy_auth, proxy_user, proxy_pass, user_agent,
disable_csp, enable_sync, use_auto_ext,
no_sandbox, disable_gpu, incognito, guest_mode, devtools,
user_data_dir, extension_zip, extension_dir, servername,
mobile_emulator, device_width, device_height,
device_pixel_ratio)
if LOCAL_CHROMEDRIVER and os.path.exists(LOCAL_CHROMEDRIVER):
try:
make_driver_executable_if_not(LOCAL_CHROMEDRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make chromedriver"
" executable: %s" % e)
elif not is_chromedriver_on_path():
args = " ".join(sys.argv)
if not ("-n" in sys.argv or "-n=" in args or args == "-c"):
# (Not multithreaded)
from seleniumbase.console_scripts import sb_install
sys_args = sys.argv # Save a copy of current sys args
print("\nWarning: chromedriver not found. Installing now:")
sb_install.main(override="chromedriver")
sys.argv = sys_args # Put back the original sys args
if not headless or "linux" not in PLATFORM:
return webdriver.Chrome(options=chrome_options)
else: # Running headless on Linux
try:
return webdriver.Chrome(options=chrome_options)
except Exception:
# Use the virtual display on Linux during headless errors
logging.debug("\nWarning: Chrome failed to launch in"
" headless mode. Attempting to use the"
" SeleniumBase virtual display on Linux...")
chrome_options.headless = False
return webdriver.Chrome(options=chrome_options)
except Exception as e:
if headless:
raise Exception(e)
if LOCAL_CHROMEDRIVER and os.path.exists(LOCAL_CHROMEDRIVER):
try:
make_driver_executable_if_not(LOCAL_CHROMEDRIVER)
except Exception as e:
logging.debug("\nWarning: Could not make chromedriver"
" executable: %s" % e)
return webdriver.Chrome()
else:
raise Exception(
"%s is not a valid browser option for this system!" % browser_name) | [
"def",
"get_local_driver",
"(",
"browser_name",
",",
"headless",
",",
"servername",
",",
"proxy_string",
",",
"proxy_auth",
",",
"proxy_user",
",",
"proxy_pass",
",",
"user_agent",
",",
"disable_csp",
",",
"enable_sync",
",",
"use_auto_ext",
",",
"no_sandbox",
",",
"disable_gpu",
",",
"incognito",
",",
"guest_mode",
",",
"devtools",
",",
"user_data_dir",
",",
"extension_zip",
",",
"extension_dir",
",",
"mobile_emulator",
",",
"device_width",
",",
"device_height",
",",
"device_pixel_ratio",
")",
":",
"downloads_path",
"=",
"download_helper",
".",
"get_downloads_folder",
"(",
")",
"download_helper",
".",
"reset_downloads_folder",
"(",
")",
"if",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"FIREFOX",
":",
"try",
":",
"try",
":",
"# Use Geckodriver for Firefox if it's on the PATH",
"profile",
"=",
"_create_firefox_profile",
"(",
"downloads_path",
",",
"proxy_string",
",",
"user_agent",
",",
"disable_csp",
")",
"firefox_capabilities",
"=",
"DesiredCapabilities",
".",
"FIREFOX",
".",
"copy",
"(",
")",
"firefox_capabilities",
"[",
"'marionette'",
"]",
"=",
"True",
"options",
"=",
"webdriver",
".",
"FirefoxOptions",
"(",
")",
"if",
"headless",
":",
"options",
".",
"add_argument",
"(",
"'-headless'",
")",
"firefox_capabilities",
"[",
"'moz:firefoxOptions'",
"]",
"=",
"(",
"{",
"'args'",
":",
"[",
"'-headless'",
"]",
"}",
")",
"if",
"LOCAL_GECKODRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_GECKODRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_GECKODRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make geckodriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"elif",
"not",
"is_geckodriver_on_path",
"(",
")",
":",
"args",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"not",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
"or",
"\"-n=\"",
"in",
"args",
"or",
"args",
"==",
"\"-c\"",
")",
":",
"# (Not multithreaded)",
"from",
"seleniumbase",
".",
"console_scripts",
"import",
"sb_install",
"sys_args",
"=",
"sys",
".",
"argv",
"# Save a copy of current sys args",
"print",
"(",
"\"\\nWarning: geckodriver not found!\"",
"\" Installing now:\"",
")",
"try",
":",
"sb_install",
".",
"main",
"(",
"override",
"=",
"\"geckodriver\"",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"\\nWarning: Could not install geckodriver: \"",
"\"%s\"",
"%",
"e",
")",
"sys",
".",
"argv",
"=",
"sys_args",
"# Put back the original sys args",
"if",
"\"linux\"",
"in",
"PLATFORM",
"or",
"not",
"headless",
":",
"firefox_driver",
"=",
"webdriver",
".",
"Firefox",
"(",
"firefox_profile",
"=",
"profile",
",",
"capabilities",
"=",
"firefox_capabilities",
")",
"else",
":",
"firefox_driver",
"=",
"webdriver",
".",
"Firefox",
"(",
"firefox_profile",
"=",
"profile",
",",
"capabilities",
"=",
"firefox_capabilities",
",",
"options",
"=",
"options",
")",
"except",
"Exception",
":",
"profile",
"=",
"_create_firefox_profile",
"(",
"downloads_path",
",",
"proxy_string",
",",
"user_agent",
",",
"disable_csp",
")",
"firefox_capabilities",
"=",
"DesiredCapabilities",
".",
"FIREFOX",
".",
"copy",
"(",
")",
"firefox_driver",
"=",
"webdriver",
".",
"Firefox",
"(",
"firefox_profile",
"=",
"profile",
",",
"capabilities",
"=",
"firefox_capabilities",
")",
"return",
"firefox_driver",
"except",
"Exception",
"as",
"e",
":",
"if",
"headless",
":",
"raise",
"Exception",
"(",
"e",
")",
"return",
"webdriver",
".",
"Firefox",
"(",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"INTERNET_EXPLORER",
":",
"if",
"not",
"IS_WINDOWS",
":",
"raise",
"Exception",
"(",
"\"IE Browser is for Windows-based operating systems only!\"",
")",
"from",
"selenium",
".",
"webdriver",
".",
"ie",
".",
"options",
"import",
"Options",
"ie_options",
"=",
"Options",
"(",
")",
"ie_options",
".",
"ignore_protected_mode_settings",
"=",
"False",
"ie_options",
".",
"ignore_zoom_level",
"=",
"True",
"ie_options",
".",
"require_window_focus",
"=",
"False",
"ie_options",
".",
"native_events",
"=",
"True",
"ie_options",
".",
"full_page_screenshot",
"=",
"True",
"ie_options",
".",
"persistent_hover",
"=",
"True",
"ie_capabilities",
"=",
"ie_options",
".",
"to_capabilities",
"(",
")",
"if",
"LOCAL_IEDRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_IEDRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_IEDRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make iedriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"return",
"webdriver",
".",
"Ie",
"(",
"capabilities",
"=",
"ie_capabilities",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"EDGE",
":",
"try",
":",
"chrome_options",
"=",
"_set_chrome_options",
"(",
"downloads_path",
",",
"headless",
",",
"proxy_string",
",",
"proxy_auth",
",",
"proxy_user",
",",
"proxy_pass",
",",
"user_agent",
",",
"disable_csp",
",",
"enable_sync",
",",
"use_auto_ext",
",",
"no_sandbox",
",",
"disable_gpu",
",",
"incognito",
",",
"guest_mode",
",",
"devtools",
",",
"user_data_dir",
",",
"extension_zip",
",",
"extension_dir",
",",
"servername",
",",
"mobile_emulator",
",",
"device_width",
",",
"device_height",
",",
"device_pixel_ratio",
")",
"if",
"LOCAL_EDGEDRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_EDGEDRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_EDGEDRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make edgedriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"elif",
"not",
"is_edgedriver_on_path",
"(",
")",
":",
"args",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"not",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
"or",
"\"-n=\"",
"in",
"args",
"or",
"args",
"==",
"\"-c\"",
")",
":",
"# (Not multithreaded)",
"from",
"seleniumbase",
".",
"console_scripts",
"import",
"sb_install",
"sys_args",
"=",
"sys",
".",
"argv",
"# Save a copy of current sys args",
"print",
"(",
"\"\\nWarning: msedgedriver not found. Installing now:\"",
")",
"sb_install",
".",
"main",
"(",
"override",
"=",
"\"edgedriver\"",
")",
"sys",
".",
"argv",
"=",
"sys_args",
"# Put back the original sys args",
"return",
"webdriver",
".",
"Chrome",
"(",
"executable_path",
"=",
"LOCAL_EDGEDRIVER",
",",
"options",
"=",
"chrome_options",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"headless",
":",
"raise",
"Exception",
"(",
"e",
")",
"if",
"LOCAL_EDGEDRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_EDGEDRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_EDGEDRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make edgedriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"return",
"webdriver",
".",
"Chrome",
"(",
"executable_path",
"=",
"LOCAL_EDGEDRIVER",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"SAFARI",
":",
"arg_join",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
")",
"or",
"(",
"\"-n=\"",
"in",
"arg_join",
")",
"or",
"(",
"arg_join",
"==",
"\"-c\"",
")",
":",
"# Skip if multithreaded",
"raise",
"Exception",
"(",
"\"Can't run Safari tests in multi-threaded mode!\"",
")",
"safari_capabilities",
"=",
"_set_safari_capabilities",
"(",
")",
"return",
"webdriver",
".",
"Safari",
"(",
"desired_capabilities",
"=",
"safari_capabilities",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"OPERA",
":",
"if",
"LOCAL_OPERADRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_OPERADRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_OPERADRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make operadriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"return",
"webdriver",
".",
"Opera",
"(",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"PHANTOM_JS",
":",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Ignore \"PhantomJS has been deprecated\" UserWarning",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"UserWarning",
")",
"return",
"webdriver",
".",
"PhantomJS",
"(",
")",
"elif",
"browser_name",
"==",
"constants",
".",
"Browser",
".",
"GOOGLE_CHROME",
":",
"try",
":",
"chrome_options",
"=",
"_set_chrome_options",
"(",
"downloads_path",
",",
"headless",
",",
"proxy_string",
",",
"proxy_auth",
",",
"proxy_user",
",",
"proxy_pass",
",",
"user_agent",
",",
"disable_csp",
",",
"enable_sync",
",",
"use_auto_ext",
",",
"no_sandbox",
",",
"disable_gpu",
",",
"incognito",
",",
"guest_mode",
",",
"devtools",
",",
"user_data_dir",
",",
"extension_zip",
",",
"extension_dir",
",",
"servername",
",",
"mobile_emulator",
",",
"device_width",
",",
"device_height",
",",
"device_pixel_ratio",
")",
"if",
"LOCAL_CHROMEDRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_CHROMEDRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_CHROMEDRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make chromedriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"elif",
"not",
"is_chromedriver_on_path",
"(",
")",
":",
"args",
"=",
"\" \"",
".",
"join",
"(",
"sys",
".",
"argv",
")",
"if",
"not",
"(",
"\"-n\"",
"in",
"sys",
".",
"argv",
"or",
"\"-n=\"",
"in",
"args",
"or",
"args",
"==",
"\"-c\"",
")",
":",
"# (Not multithreaded)",
"from",
"seleniumbase",
".",
"console_scripts",
"import",
"sb_install",
"sys_args",
"=",
"sys",
".",
"argv",
"# Save a copy of current sys args",
"print",
"(",
"\"\\nWarning: chromedriver not found. Installing now:\"",
")",
"sb_install",
".",
"main",
"(",
"override",
"=",
"\"chromedriver\"",
")",
"sys",
".",
"argv",
"=",
"sys_args",
"# Put back the original sys args",
"if",
"not",
"headless",
"or",
"\"linux\"",
"not",
"in",
"PLATFORM",
":",
"return",
"webdriver",
".",
"Chrome",
"(",
"options",
"=",
"chrome_options",
")",
"else",
":",
"# Running headless on Linux",
"try",
":",
"return",
"webdriver",
".",
"Chrome",
"(",
"options",
"=",
"chrome_options",
")",
"except",
"Exception",
":",
"# Use the virtual display on Linux during headless errors",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Chrome failed to launch in\"",
"\" headless mode. Attempting to use the\"",
"\" SeleniumBase virtual display on Linux...\"",
")",
"chrome_options",
".",
"headless",
"=",
"False",
"return",
"webdriver",
".",
"Chrome",
"(",
"options",
"=",
"chrome_options",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"headless",
":",
"raise",
"Exception",
"(",
"e",
")",
"if",
"LOCAL_CHROMEDRIVER",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"LOCAL_CHROMEDRIVER",
")",
":",
"try",
":",
"make_driver_executable_if_not",
"(",
"LOCAL_CHROMEDRIVER",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"debug",
"(",
"\"\\nWarning: Could not make chromedriver\"",
"\" executable: %s\"",
"%",
"e",
")",
"return",
"webdriver",
".",
"Chrome",
"(",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"%s is not a valid browser option for this system!\"",
"%",
"browser_name",
")"
] | [
565,
0
] | [
759,
79
] | python | en | ['en', 'error', 'th'] | False |
Document.__init__ | (self, text: str,
id: Optional[str] = None,
score: Optional[float] = None,
probability: Optional[float] = None,
question: Optional[str] = None,
meta: Dict[str, Any] = None,
embedding: Optional[np.ndarray] = None) |
Object used to represent documents / passages in a standardized way within Haystack.
For example, this is what the retriever will return from the DocumentStore,
regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore.
Note that there can be multiple Documents originating from one file (e.g. PDF),
if you split the text into smaller passages. We'll have one Document per passage in this case.
:param id: ID used within the DocumentStore
:param text: Text of the document
:param score: Retriever's query score for a retrieved document
:param probability: a pseudo probability by scaling score in the range 0 to 1
:param question: Question text for FAQs.
:param meta: Meta fields for a document like name, url, or author.
:param embedding: Vector encoding of the text
|
Object used to represent documents / passages in a standardized way within Haystack.
For example, this is what the retriever will return from the DocumentStore,
regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore. | def __init__(self, text: str,
id: Optional[str] = None,
score: Optional[float] = None,
probability: Optional[float] = None,
question: Optional[str] = None,
meta: Dict[str, Any] = None,
embedding: Optional[np.ndarray] = None):
"""
Object used to represent documents / passages in a standardized way within Haystack.
For example, this is what the retriever will return from the DocumentStore,
regardless if it's ElasticsearchDocumentStore or InMemoryDocumentStore.
Note that there can be multiple Documents originating from one file (e.g. PDF),
if you split the text into smaller passages. We'll have one Document per passage in this case.
:param id: ID used within the DocumentStore
:param text: Text of the document
:param score: Retriever's query score for a retrieved document
:param probability: a pseudo probability by scaling score in the range 0 to 1
:param question: Question text for FAQs.
:param meta: Meta fields for a document like name, url, or author.
:param embedding: Vector encoding of the text
"""
self.text = text
# Create a unique ID (either new one, or one from user input)
if id:
self.id = str(id)
else:
self.id = str(uuid4())
self.score = score
self.probability = probability
self.question = question
self.meta = meta or {}
self.embedding = embedding | [
"def",
"__init__",
"(",
"self",
",",
"text",
":",
"str",
",",
"id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"score",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
"probability",
":",
"Optional",
"[",
"float",
"]",
"=",
"None",
",",
"question",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"meta",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"None",
",",
"embedding",
":",
"Optional",
"[",
"np",
".",
"ndarray",
"]",
"=",
"None",
")",
":",
"self",
".",
"text",
"=",
"text",
"# Create a unique ID (either new one, or one from user input)",
"if",
"id",
":",
"self",
".",
"id",
"=",
"str",
"(",
"id",
")",
"else",
":",
"self",
".",
"id",
"=",
"str",
"(",
"uuid4",
"(",
")",
")",
"self",
".",
"score",
"=",
"score",
"self",
".",
"probability",
"=",
"probability",
"self",
".",
"question",
"=",
"question",
"self",
".",
"meta",
"=",
"meta",
"or",
"{",
"}",
"self",
".",
"embedding",
"=",
"embedding"
] | [
6,
4
] | [
41,
34
] | python | en | ['en', 'error', 'th'] | False |
Label.__init__ | (self, question: str,
answer: str,
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
id: Optional[str] = None,
document_id: Optional[str] = None,
offset_start_in_doc: Optional[int] = None,
no_answer: Optional[bool] = None,
model_id: Optional[int] = None,
created_at: Optional[str] = None,
updated_at: Optional[str] = None) |
Object used to represent label/feedback in a standardized way within Haystack.
This includes labels from dataset like SQuAD, annotations from labeling tools,
or, user-feedback from the Haystack REST API.
:param question: the question(or query) for finding answers.
:param answer: the answer string.
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param id: Unique ID used within the DocumentStore. If not supplied, a uuid will be generated automatically.
:param document_id: the document_store's ID for the returned answer document.
:param offset_start_in_doc: the answer start offset in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction (in-case of user feedback).
:param created_at: Timestamp of creation with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S").
:param created_at: Timestamp of update with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S")
|
Object used to represent label/feedback in a standardized way within Haystack.
This includes labels from dataset like SQuAD, annotations from labeling tools,
or, user-feedback from the Haystack REST API. | def __init__(self, question: str,
answer: str,
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
id: Optional[str] = None,
document_id: Optional[str] = None,
offset_start_in_doc: Optional[int] = None,
no_answer: Optional[bool] = None,
model_id: Optional[int] = None,
created_at: Optional[str] = None,
updated_at: Optional[str] = None):
"""
Object used to represent label/feedback in a standardized way within Haystack.
This includes labels from dataset like SQuAD, annotations from labeling tools,
or, user-feedback from the Haystack REST API.
:param question: the question(or query) for finding answers.
:param answer: the answer string.
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param id: Unique ID used within the DocumentStore. If not supplied, a uuid will be generated automatically.
:param document_id: the document_store's ID for the returned answer document.
:param offset_start_in_doc: the answer start offset in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction (in-case of user feedback).
:param created_at: Timestamp of creation with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S").
:param created_at: Timestamp of update with format yyyy-MM-dd HH:mm:ss.
Generate in Python via time.strftime("%Y-%m-%d %H:%M:%S")
"""
# Create a unique ID (either new one, or one from user input)
if id:
self.id = str(id)
else:
self.id = str(uuid4())
self.created_at = created_at
self.updated_at = updated_at
self.question = question
self.answer = answer
self.is_correct_answer = is_correct_answer
self.is_correct_document = is_correct_document
self.origin = origin
self.document_id = document_id
self.offset_start_in_doc = offset_start_in_doc
self.no_answer = no_answer
self.model_id = model_id | [
"def",
"__init__",
"(",
"self",
",",
"question",
":",
"str",
",",
"answer",
":",
"str",
",",
"is_correct_answer",
":",
"bool",
",",
"is_correct_document",
":",
"bool",
",",
"origin",
":",
"str",
",",
"id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"document_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"offset_start_in_doc",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"no_answer",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"model_id",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"created_at",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"updated_at",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
":",
"# Create a unique ID (either new one, or one from user input)",
"if",
"id",
":",
"self",
".",
"id",
"=",
"str",
"(",
"id",
")",
"else",
":",
"self",
".",
"id",
"=",
"str",
"(",
"uuid4",
"(",
")",
")",
"self",
".",
"created_at",
"=",
"created_at",
"self",
".",
"updated_at",
"=",
"updated_at",
"self",
".",
"question",
"=",
"question",
"self",
".",
"answer",
"=",
"answer",
"self",
".",
"is_correct_answer",
"=",
"is_correct_answer",
"self",
".",
"is_correct_document",
"=",
"is_correct_document",
"self",
".",
"origin",
"=",
"origin",
"self",
".",
"document_id",
"=",
"document_id",
"self",
".",
"offset_start_in_doc",
"=",
"offset_start_in_doc",
"self",
".",
"no_answer",
"=",
"no_answer",
"self",
".",
"model_id",
"=",
"model_id"
] | [
80,
4
] | [
131,
32
] | python | en | ['en', 'error', 'th'] | False |
MultiLabel.__init__ | (self, question: str,
multiple_answers: List[str],
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
multiple_document_ids: List[Any],
multiple_offset_start_in_docs: List[Any],
no_answer: Optional[bool] = None,
model_id: Optional[int] = None) |
Object used to aggregate multiple possible answers for the same question
:param question: the question(or query) for finding answers.
:param multiple_answers: list of possible answer strings
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param multiple_document_ids: the document_store's IDs for the returned answer documents.
:param multiple_offset_start_in_docs: the answer start offsets in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction (in-case of user feedback).
|
Object used to aggregate multiple possible answers for the same question | def __init__(self, question: str,
multiple_answers: List[str],
is_correct_answer: bool,
is_correct_document: bool,
origin: str,
multiple_document_ids: List[Any],
multiple_offset_start_in_docs: List[Any],
no_answer: Optional[bool] = None,
model_id: Optional[int] = None):
"""
Object used to aggregate multiple possible answers for the same question
:param question: the question(or query) for finding answers.
:param multiple_answers: list of possible answer strings
:param is_correct_answer: whether the sample is positive or negative.
:param is_correct_document: in case of negative sample(is_correct_answer is False), there could be two cases;
incorrect answer but correct document & incorrect document. This flag denotes if
the returned document was correct.
:param origin: the source for the labels. It can be used to later for filtering.
:param multiple_document_ids: the document_store's IDs for the returned answer documents.
:param multiple_offset_start_in_docs: the answer start offsets in the document.
:param no_answer: whether the question in unanswerable.
:param model_id: model_id used for prediction (in-case of user feedback).
"""
self.question = question
self.multiple_answers = multiple_answers
self.is_correct_answer = is_correct_answer
self.is_correct_document = is_correct_document
self.origin = origin
self.multiple_document_ids = multiple_document_ids
self.multiple_offset_start_in_docs = multiple_offset_start_in_docs
self.no_answer = no_answer
self.model_id = model_id | [
"def",
"__init__",
"(",
"self",
",",
"question",
":",
"str",
",",
"multiple_answers",
":",
"List",
"[",
"str",
"]",
",",
"is_correct_answer",
":",
"bool",
",",
"is_correct_document",
":",
"bool",
",",
"origin",
":",
"str",
",",
"multiple_document_ids",
":",
"List",
"[",
"Any",
"]",
",",
"multiple_offset_start_in_docs",
":",
"List",
"[",
"Any",
"]",
",",
"no_answer",
":",
"Optional",
"[",
"bool",
"]",
"=",
"None",
",",
"model_id",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
":",
"self",
".",
"question",
"=",
"question",
"self",
".",
"multiple_answers",
"=",
"multiple_answers",
"self",
".",
"is_correct_answer",
"=",
"is_correct_answer",
"self",
".",
"is_correct_document",
"=",
"is_correct_document",
"self",
".",
"origin",
"=",
"origin",
"self",
".",
"multiple_document_ids",
"=",
"multiple_document_ids",
"self",
".",
"multiple_offset_start_in_docs",
"=",
"multiple_offset_start_in_docs",
"self",
".",
"no_answer",
"=",
"no_answer",
"self",
".",
"model_id",
"=",
"model_id"
] | [
174,
4
] | [
206,
32
] | python | en | ['en', 'error', 'th'] | False |
BaseComponent.__init_subclass__ | (cls, **kwargs) | This automatically keeps track of all available subclasses.
Enables generic load() for all specific component implementations.
| This automatically keeps track of all available subclasses.
Enables generic load() for all specific component implementations.
| def __init_subclass__(cls, **kwargs):
""" This automatically keeps track of all available subclasses.
Enables generic load() for all specific component implementations.
"""
super().__init_subclass__(**kwargs)
cls.subclasses[cls.__name__] = cls | [
"def",
"__init_subclass__",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init_subclass__",
"(",
"*",
"*",
"kwargs",
")",
"cls",
".",
"subclasses",
"[",
"cls",
".",
"__name__",
"]",
"=",
"cls"
] | [
230,
4
] | [
235,
42
] | python | en | ['en', 'en', 'en'] | True |
BaseComponent.load_from_args | (cls, component_type: str, **kwargs) |
Load a component instance of the given type using the kwargs.
:param component_type: name of the component class to load.
:param kwargs: parameters to pass to the __init__() for the component.
|
Load a component instance of the given type using the kwargs.
:param component_type: name of the component class to load.
:param kwargs: parameters to pass to the __init__() for the component.
| def load_from_args(cls, component_type: str, **kwargs):
"""
Load a component instance of the given type using the kwargs.
:param component_type: name of the component class to load.
:param kwargs: parameters to pass to the __init__() for the component.
"""
if component_type not in cls.subclasses.keys():
raise Exception(f"Haystack component with the name '{component_type}' does not exist.")
instance = cls.subclasses[component_type](**kwargs)
return instance | [
"def",
"load_from_args",
"(",
"cls",
",",
"component_type",
":",
"str",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"component_type",
"not",
"in",
"cls",
".",
"subclasses",
".",
"keys",
"(",
")",
":",
"raise",
"Exception",
"(",
"f\"Haystack component with the name '{component_type}' does not exist.\"",
")",
"instance",
"=",
"cls",
".",
"subclasses",
"[",
"component_type",
"]",
"(",
"*",
"*",
"kwargs",
")",
"return",
"instance"
] | [
238,
4
] | [
248,
23
] | python | en | ['en', 'error', 'th'] | False |
Twitter.start_streaming | (self, callback) | Starts streaming tweets and returning data to the callback. | Starts streaming tweets and returning data to the callback. | def start_streaming(self, callback):
"""Starts streaming tweets and returning data to the callback."""
self.twitter_listener = TwitterListener(
callback=callback, logs_to_cloud=self.logs_to_cloud)
twitter_stream = Stream(self.twitter_auth, self.twitter_listener)
self.logs.debug('Starting stream.')
twitter_stream.filter(follow=[TRUMP_USER_ID])
# If we got here because of an API error, raise it.
if self.twitter_listener and self.twitter_listener.get_error_status():
raise Exception('Twitter API error: %s' %
self.twitter_listener.get_error_status()) | [
"def",
"start_streaming",
"(",
"self",
",",
"callback",
")",
":",
"self",
".",
"twitter_listener",
"=",
"TwitterListener",
"(",
"callback",
"=",
"callback",
",",
"logs_to_cloud",
"=",
"self",
".",
"logs_to_cloud",
")",
"twitter_stream",
"=",
"Stream",
"(",
"self",
".",
"twitter_auth",
",",
"self",
".",
"twitter_listener",
")",
"self",
".",
"logs",
".",
"debug",
"(",
"'Starting stream.'",
")",
"twitter_stream",
".",
"filter",
"(",
"follow",
"=",
"[",
"TRUMP_USER_ID",
"]",
")",
"# If we got here because of an API error, raise it.",
"if",
"self",
".",
"twitter_listener",
"and",
"self",
".",
"twitter_listener",
".",
"get_error_status",
"(",
")",
":",
"raise",
"Exception",
"(",
"'Twitter API error: %s'",
"%",
"self",
".",
"twitter_listener",
".",
"get_error_status",
"(",
")",
")"
] | [
77,
4
] | [
90,
69
] | python | en | ['en', 'en', 'en'] | True |
Twitter.stop_streaming | (self) | Stops the current stream. | Stops the current stream. | def stop_streaming(self):
"""Stops the current stream."""
if not self.twitter_listener:
self.logs.warn('No stream to stop.')
return
self.logs.debug('Stopping stream.')
self.twitter_listener.stop_queue()
self.twitter_listener = None | [
"def",
"stop_streaming",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"twitter_listener",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No stream to stop.'",
")",
"return",
"self",
".",
"logs",
".",
"debug",
"(",
"'Stopping stream.'",
")",
"self",
".",
"twitter_listener",
".",
"stop_queue",
"(",
")",
"self",
".",
"twitter_listener",
"=",
"None"
] | [
92,
4
] | [
101,
36
] | python | en | ['en', 'en', 'en'] | True |
Twitter.tweet | (self, companies, tweet) | Posts a tweet listing the companies, their ticker symbols, and a
quote of the original tweet.
| Posts a tweet listing the companies, their ticker symbols, and a
quote of the original tweet.
| def tweet(self, companies, tweet):
"""Posts a tweet listing the companies, their ticker symbols, and a
quote of the original tweet.
"""
link = self.get_tweet_link(tweet)
text = self.make_tweet_text(companies, link)
self.logs.info('Tweeting: %s' % text)
self.twitter_api.update_status(text) | [
"def",
"tweet",
"(",
"self",
",",
"companies",
",",
"tweet",
")",
":",
"link",
"=",
"self",
".",
"get_tweet_link",
"(",
"tweet",
")",
"text",
"=",
"self",
".",
"make_tweet_text",
"(",
"companies",
",",
"link",
")",
"self",
".",
"logs",
".",
"info",
"(",
"'Tweeting: %s'",
"%",
"text",
")",
"self",
".",
"twitter_api",
".",
"update_status",
"(",
"text",
")"
] | [
103,
4
] | [
112,
44
] | python | en | ['en', 'en', 'en'] | True |
Twitter.make_tweet_text | (self, companies, link) | Generates the text for a tweet. | Generates the text for a tweet. | def make_tweet_text(self, companies, link):
"""Generates the text for a tweet."""
# Find all distinct company names.
names = []
for company in companies:
name = company['name']
if name not in names:
names.append(name)
# Collect the ticker symbols and sentiment scores for each name.
tickers = {}
sentiments = {}
for name in names:
tickers[name] = []
for company in companies:
if company['name'] == name:
ticker = company['ticker']
tickers[name].append(ticker)
sentiment = company['sentiment']
# Assuming the same sentiment for each ticker.
sentiments[name] = sentiment
# Create lines for each name with sentiment emoji and ticker symbols.
lines = []
for name in names:
sentiment_str = self.get_sentiment_emoji(sentiments[name])
tickers_str = ' '.join(['$%s' % t for t in tickers[name]])
line = '%s %s %s' % (name, sentiment_str, tickers_str)
lines.append(line)
# Combine the lines and ellipsize if necessary.
lines_str = '\n'.join(lines)
size = len(lines_str) + 1 + len(link)
if size > MAX_TWEET_SIZE:
self.logs.warn('Ellipsizing lines: %s' % lines_str)
lines_size = MAX_TWEET_SIZE - len(link) - 2
lines_str = '%s\u2026' % lines_str[:lines_size]
# Combine the lines with the link.
text = '%s\n%s' % (lines_str, link)
return text | [
"def",
"make_tweet_text",
"(",
"self",
",",
"companies",
",",
"link",
")",
":",
"# Find all distinct company names.",
"names",
"=",
"[",
"]",
"for",
"company",
"in",
"companies",
":",
"name",
"=",
"company",
"[",
"'name'",
"]",
"if",
"name",
"not",
"in",
"names",
":",
"names",
".",
"append",
"(",
"name",
")",
"# Collect the ticker symbols and sentiment scores for each name.",
"tickers",
"=",
"{",
"}",
"sentiments",
"=",
"{",
"}",
"for",
"name",
"in",
"names",
":",
"tickers",
"[",
"name",
"]",
"=",
"[",
"]",
"for",
"company",
"in",
"companies",
":",
"if",
"company",
"[",
"'name'",
"]",
"==",
"name",
":",
"ticker",
"=",
"company",
"[",
"'ticker'",
"]",
"tickers",
"[",
"name",
"]",
".",
"append",
"(",
"ticker",
")",
"sentiment",
"=",
"company",
"[",
"'sentiment'",
"]",
"# Assuming the same sentiment for each ticker.",
"sentiments",
"[",
"name",
"]",
"=",
"sentiment",
"# Create lines for each name with sentiment emoji and ticker symbols.",
"lines",
"=",
"[",
"]",
"for",
"name",
"in",
"names",
":",
"sentiment_str",
"=",
"self",
".",
"get_sentiment_emoji",
"(",
"sentiments",
"[",
"name",
"]",
")",
"tickers_str",
"=",
"' '",
".",
"join",
"(",
"[",
"'$%s'",
"%",
"t",
"for",
"t",
"in",
"tickers",
"[",
"name",
"]",
"]",
")",
"line",
"=",
"'%s %s %s'",
"%",
"(",
"name",
",",
"sentiment_str",
",",
"tickers_str",
")",
"lines",
".",
"append",
"(",
"line",
")",
"# Combine the lines and ellipsize if necessary.",
"lines_str",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
"size",
"=",
"len",
"(",
"lines_str",
")",
"+",
"1",
"+",
"len",
"(",
"link",
")",
"if",
"size",
">",
"MAX_TWEET_SIZE",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Ellipsizing lines: %s'",
"%",
"lines_str",
")",
"lines_size",
"=",
"MAX_TWEET_SIZE",
"-",
"len",
"(",
"link",
")",
"-",
"2",
"lines_str",
"=",
"'%s\\u2026'",
"%",
"lines_str",
"[",
":",
"lines_size",
"]",
"# Combine the lines with the link.",
"text",
"=",
"'%s\\n%s'",
"%",
"(",
"lines_str",
",",
"link",
")",
"return",
"text"
] | [
114,
4
] | [
156,
19
] | python | en | ['en', 'en', 'en'] | True |
Twitter.get_sentiment_emoji | (self, sentiment) | Returns the emoji matching the sentiment. | Returns the emoji matching the sentiment. | def get_sentiment_emoji(self, sentiment):
"""Returns the emoji matching the sentiment."""
if not sentiment:
return EMOJI_SHRUG
if sentiment > 0:
return EMOJI_THUMBS_UP
if sentiment < 0:
return EMOJI_THUMBS_DOWN
self.logs.warn('Unknown sentiment: %s' % sentiment)
return EMOJI_SHRUG | [
"def",
"get_sentiment_emoji",
"(",
"self",
",",
"sentiment",
")",
":",
"if",
"not",
"sentiment",
":",
"return",
"EMOJI_SHRUG",
"if",
"sentiment",
">",
"0",
":",
"return",
"EMOJI_THUMBS_UP",
"if",
"sentiment",
"<",
"0",
":",
"return",
"EMOJI_THUMBS_DOWN",
"self",
".",
"logs",
".",
"warn",
"(",
"'Unknown sentiment: %s'",
"%",
"sentiment",
")",
"return",
"EMOJI_SHRUG"
] | [
158,
4
] | [
171,
26
] | python | en | ['en', 'sn', 'en'] | True |
Twitter.get_tweet | (self, tweet_id) | Looks up metadata for a single tweet. | Looks up metadata for a single tweet. | def get_tweet(self, tweet_id):
"""Looks up metadata for a single tweet."""
try:
# Use tweet_mode=extended so we get the full text.
status = self.twitter_api.get_status(tweet_id,
tweet_mode='extended')
if not status:
self.logs.error('Bad status response: %s' % status)
return None
except TweepError as e:
self.logs.error('Failed to get status for ID: %s (%s)' % (
tweet_id, e))
return None
# Use the raw JSON, just like the streaming API.
return status._json | [
"def",
"get_tweet",
"(",
"self",
",",
"tweet_id",
")",
":",
"try",
":",
"# Use tweet_mode=extended so we get the full text.",
"status",
"=",
"self",
".",
"twitter_api",
".",
"get_status",
"(",
"tweet_id",
",",
"tweet_mode",
"=",
"'extended'",
")",
"if",
"not",
"status",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Bad status response: %s'",
"%",
"status",
")",
"return",
"None",
"except",
"TweepError",
"as",
"e",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Failed to get status for ID: %s (%s)'",
"%",
"(",
"tweet_id",
",",
"e",
")",
")",
"return",
"None",
"# Use the raw JSON, just like the streaming API.",
"return",
"status",
".",
"_json"
] | [
173,
4
] | [
189,
27
] | python | en | ['en', 'en', 'en'] | True |
Twitter.get_all_tweets | (self) | Looks up metadata for the most recent Trump tweets. | Looks up metadata for the most recent Trump tweets. | def get_all_tweets(self):
"""Looks up metadata for the most recent Trump tweets."""
tweets = []
# Only the 3,200 most recent tweets are available through the API. Use
# the @Trump2Cash account to filter down to the relevant ones.
for status in Cursor(self.twitter_api.user_timeline,
user_id=TRUMP2CASH_USER_ID,
exclude_replies=True).items():
# Extract the quoted @realDonaldTrump tweet, if available.
try:
quoted_tweet_id = status.quoted_status_id
except AttributeError:
self.logs.warn('Skipping tweet: %s' % status)
continue
# Get the tweet details and add it to the list.
quoted_tweet = self.get_tweet(quoted_tweet_id)
if quoted_tweet:
tweets.append(quoted_tweet)
self.logs.debug('Got tweets: %s' % tweets)
return tweets | [
"def",
"get_all_tweets",
"(",
"self",
")",
":",
"tweets",
"=",
"[",
"]",
"# Only the 3,200 most recent tweets are available through the API. Use",
"# the @Trump2Cash account to filter down to the relevant ones.",
"for",
"status",
"in",
"Cursor",
"(",
"self",
".",
"twitter_api",
".",
"user_timeline",
",",
"user_id",
"=",
"TRUMP2CASH_USER_ID",
",",
"exclude_replies",
"=",
"True",
")",
".",
"items",
"(",
")",
":",
"# Extract the quoted @realDonaldTrump tweet, if available.",
"try",
":",
"quoted_tweet_id",
"=",
"status",
".",
"quoted_status_id",
"except",
"AttributeError",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'Skipping tweet: %s'",
"%",
"status",
")",
"continue",
"# Get the tweet details and add it to the list.",
"quoted_tweet",
"=",
"self",
".",
"get_tweet",
"(",
"quoted_tweet_id",
")",
"if",
"quoted_tweet",
":",
"tweets",
".",
"append",
"(",
"quoted_tweet",
")",
"self",
".",
"logs",
".",
"debug",
"(",
"'Got tweets: %s'",
"%",
"tweets",
")",
"return",
"tweets"
] | [
191,
4
] | [
216,
21
] | python | en | ['en', 'en', 'en'] | True |
Twitter.get_tweet_text | (self, tweet) | Returns the full text of a tweet. | Returns the full text of a tweet. | def get_tweet_text(self, tweet):
"""Returns the full text of a tweet."""
# The format for getting at the full text is different depending on
# whether the tweet came through the REST API or the Streaming API:
# https://dev.twitter.com/overview/api/upcoming-changes-to-tweets
try:
if 'extended_tweet' in tweet:
self.logs.debug('Decoding extended tweet from Streaming API.')
return tweet['extended_tweet']['full_text']
elif 'full_text' in tweet:
self.logs.debug('Decoding extended tweet from REST API.')
return tweet['full_text']
else:
self.logs.debug('Decoding short tweet.')
return tweet['text']
except KeyError:
self.logs.error('Malformed tweet: %s' % tweet)
return None | [
"def",
"get_tweet_text",
"(",
"self",
",",
"tweet",
")",
":",
"# The format for getting at the full text is different depending on",
"# whether the tweet came through the REST API or the Streaming API:",
"# https://dev.twitter.com/overview/api/upcoming-changes-to-tweets",
"try",
":",
"if",
"'extended_tweet'",
"in",
"tweet",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Decoding extended tweet from Streaming API.'",
")",
"return",
"tweet",
"[",
"'extended_tweet'",
"]",
"[",
"'full_text'",
"]",
"elif",
"'full_text'",
"in",
"tweet",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Decoding extended tweet from REST API.'",
")",
"return",
"tweet",
"[",
"'full_text'",
"]",
"else",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Decoding short tweet.'",
")",
"return",
"tweet",
"[",
"'text'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Malformed tweet: %s'",
"%",
"tweet",
")",
"return",
"None"
] | [
218,
4
] | [
236,
23
] | python | en | ['en', 'en', 'en'] | True |
Twitter.get_tweet_link | (self, tweet) | Creates the link URL to a tweet. | Creates the link URL to a tweet. | def get_tweet_link(self, tweet):
"""Creates the link URL to a tweet."""
if not tweet:
self.logs.error('No tweet to get link.')
return None
try:
screen_name = tweet['user']['screen_name']
id_str = tweet['id_str']
except KeyError:
self.logs.error('Malformed tweet for link: %s' % tweet)
return None
link = TWEET_URL % (screen_name, id_str)
return link | [
"def",
"get_tweet_link",
"(",
"self",
",",
"tweet",
")",
":",
"if",
"not",
"tweet",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'No tweet to get link.'",
")",
"return",
"None",
"try",
":",
"screen_name",
"=",
"tweet",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"id_str",
"=",
"tweet",
"[",
"'id_str'",
"]",
"except",
"KeyError",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Malformed tweet for link: %s'",
"%",
"tweet",
")",
"return",
"None",
"link",
"=",
"TWEET_URL",
"%",
"(",
"screen_name",
",",
"id_str",
")",
"return",
"link"
] | [
238,
4
] | [
253,
19
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.start_queue | (self) | Creates a queue and starts the worker threads. | Creates a queue and starts the worker threads. | def start_queue(self):
"""Creates a queue and starts the worker threads."""
self.queue = Queue()
self.stop_event = Event()
self.logs.debug('Starting %s worker threads.' % NUM_THREADS)
self.workers = []
for worker_id in range(NUM_THREADS):
worker = Thread(target=self.process_queue, args=[worker_id])
worker.daemon = True
worker.start()
self.workers.append(worker) | [
"def",
"start_queue",
"(",
"self",
")",
":",
"self",
".",
"queue",
"=",
"Queue",
"(",
")",
"self",
".",
"stop_event",
"=",
"Event",
"(",
")",
"self",
".",
"logs",
".",
"debug",
"(",
"'Starting %s worker threads.'",
"%",
"NUM_THREADS",
")",
"self",
".",
"workers",
"=",
"[",
"]",
"for",
"worker_id",
"in",
"range",
"(",
"NUM_THREADS",
")",
":",
"worker",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"process_queue",
",",
"args",
"=",
"[",
"worker_id",
"]",
")",
"worker",
".",
"daemon",
"=",
"True",
"worker",
".",
"start",
"(",
")",
"self",
".",
"workers",
".",
"append",
"(",
"worker",
")"
] | [
266,
4
] | [
277,
39
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.stop_queue | (self) | Shuts down the queue and worker threads. | Shuts down the queue and worker threads. | def stop_queue(self):
"""Shuts down the queue and worker threads."""
# First stop the queue.
if self.queue:
self.logs.debug('Stopping queue.')
self.queue.join()
else:
self.logs.warn('No queue to stop.')
# Then stop the worker threads.
if self.workers:
self.logs.debug('Stopping %d worker threads.' % len(self.workers))
self.stop_event.set()
for worker in self.workers:
# Block until the thread terminates.
worker.join()
else:
self.logs.warn('No worker threads to stop.') | [
"def",
"stop_queue",
"(",
"self",
")",
":",
"# First stop the queue.",
"if",
"self",
".",
"queue",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Stopping queue.'",
")",
"self",
".",
"queue",
".",
"join",
"(",
")",
"else",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No queue to stop.'",
")",
"# Then stop the worker threads.",
"if",
"self",
".",
"workers",
":",
"self",
".",
"logs",
".",
"debug",
"(",
"'Stopping %d worker threads.'",
"%",
"len",
"(",
"self",
".",
"workers",
")",
")",
"self",
".",
"stop_event",
".",
"set",
"(",
")",
"for",
"worker",
"in",
"self",
".",
"workers",
":",
"# Block until the thread terminates.",
"worker",
".",
"join",
"(",
")",
"else",
":",
"self",
".",
"logs",
".",
"warn",
"(",
"'No worker threads to stop.'",
")"
] | [
279,
4
] | [
297,
56
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.process_queue | (self, worker_id) | Continuously processes tasks on the queue. | Continuously processes tasks on the queue. | def process_queue(self, worker_id):
"""Continuously processes tasks on the queue."""
# Create a new logs instance (with its own httplib2 instance) so that
# there is a separate one for each thread.
logs = Logs('twitter-listener-worker-%s' % worker_id,
to_cloud=self.logs_to_cloud)
logs.debug('Started worker thread: %s' % worker_id)
while not self.stop_event.is_set():
try:
data = self.queue.get(block=True, timeout=QUEUE_TIMEOUT_S)
start_time = time()
self.handle_data(logs, data)
self.queue.task_done()
end_time = time()
qsize = self.queue.qsize()
logs.debug('Worker %s took %.f ms with %d tasks remaining.' %
(worker_id, end_time - start_time, qsize))
except Empty:
logs.debug('Worker %s timed out on an empty queue.' %
worker_id)
continue
except Exception:
# The main loop doesn't catch and report exceptions from
# background threads, so do that here.
logs.catch()
logs.debug('Stopped worker thread: %s' % worker_id) | [
"def",
"process_queue",
"(",
"self",
",",
"worker_id",
")",
":",
"# Create a new logs instance (with its own httplib2 instance) so that",
"# there is a separate one for each thread.",
"logs",
"=",
"Logs",
"(",
"'twitter-listener-worker-%s'",
"%",
"worker_id",
",",
"to_cloud",
"=",
"self",
".",
"logs_to_cloud",
")",
"logs",
".",
"debug",
"(",
"'Started worker thread: %s'",
"%",
"worker_id",
")",
"while",
"not",
"self",
".",
"stop_event",
".",
"is_set",
"(",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"queue",
".",
"get",
"(",
"block",
"=",
"True",
",",
"timeout",
"=",
"QUEUE_TIMEOUT_S",
")",
"start_time",
"=",
"time",
"(",
")",
"self",
".",
"handle_data",
"(",
"logs",
",",
"data",
")",
"self",
".",
"queue",
".",
"task_done",
"(",
")",
"end_time",
"=",
"time",
"(",
")",
"qsize",
"=",
"self",
".",
"queue",
".",
"qsize",
"(",
")",
"logs",
".",
"debug",
"(",
"'Worker %s took %.f ms with %d tasks remaining.'",
"%",
"(",
"worker_id",
",",
"end_time",
"-",
"start_time",
",",
"qsize",
")",
")",
"except",
"Empty",
":",
"logs",
".",
"debug",
"(",
"'Worker %s timed out on an empty queue.'",
"%",
"worker_id",
")",
"continue",
"except",
"Exception",
":",
"# The main loop doesn't catch and report exceptions from",
"# background threads, so do that here.",
"logs",
".",
"catch",
"(",
")",
"logs",
".",
"debug",
"(",
"'Stopped worker thread: %s'",
"%",
"worker_id",
")"
] | [
299,
4
] | [
326,
59
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.on_error | (self, status) | Handles any API errors. | Handles any API errors. | def on_error(self, status):
"""Handles any API errors."""
self.logs.error('Twitter error: %s' % status)
self.error_status = status
self.stop_queue()
return False | [
"def",
"on_error",
"(",
"self",
",",
"status",
")",
":",
"self",
".",
"logs",
".",
"error",
"(",
"'Twitter error: %s'",
"%",
"status",
")",
"self",
".",
"error_status",
"=",
"status",
"self",
".",
"stop_queue",
"(",
")",
"return",
"False"
] | [
328,
4
] | [
334,
20
] | python | en | ['en', 'mg', 'en'] | True |
TwitterListener.get_error_status | (self) | Returns the API error status, if there was one. | Returns the API error status, if there was one. | def get_error_status(self):
"""Returns the API error status, if there was one."""
return self.error_status | [
"def",
"get_error_status",
"(",
"self",
")",
":",
"return",
"self",
".",
"error_status"
] | [
336,
4
] | [
338,
32
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.on_data | (self, data) | Puts a task to process the new data on the queue. | Puts a task to process the new data on the queue. | def on_data(self, data):
"""Puts a task to process the new data on the queue."""
# Stop streaming if requested.
if self.stop_event.is_set():
return False
# Put the task on the queue and keep streaming.
self.queue.put(data)
return True | [
"def",
"on_data",
"(",
"self",
",",
"data",
")",
":",
"# Stop streaming if requested.",
"if",
"self",
".",
"stop_event",
".",
"is_set",
"(",
")",
":",
"return",
"False",
"# Put the task on the queue and keep streaming.",
"self",
".",
"queue",
".",
"put",
"(",
"data",
")",
"return",
"True"
] | [
340,
4
] | [
349,
19
] | python | en | ['en', 'en', 'en'] | True |
TwitterListener.handle_data | (self, logs, data) | Sanity-checks and extracts the data before sending it to the
callback.
| Sanity-checks and extracts the data before sending it to the
callback.
| def handle_data(self, logs, data):
"""Sanity-checks and extracts the data before sending it to the
callback.
"""
try:
tweet = loads(data)
except ValueError:
logs.error('Failed to decode JSON data: %s' % data)
return
try:
user_id_str = tweet['user']['id_str']
screen_name = tweet['user']['screen_name']
except KeyError:
logs.error('Malformed tweet: %s' % tweet)
return
# We're only interested in tweets from Mr. Trump himself, so skip the
# rest.
if user_id_str != TRUMP_USER_ID:
logs.debug('Skipping tweet from user: %s (%s)' %
(screen_name, user_id_str))
return
logs.info('Examining tweet: %s' % tweet)
# Call the callback.
self.callback(tweet) | [
"def",
"handle_data",
"(",
"self",
",",
"logs",
",",
"data",
")",
":",
"try",
":",
"tweet",
"=",
"loads",
"(",
"data",
")",
"except",
"ValueError",
":",
"logs",
".",
"error",
"(",
"'Failed to decode JSON data: %s'",
"%",
"data",
")",
"return",
"try",
":",
"user_id_str",
"=",
"tweet",
"[",
"'user'",
"]",
"[",
"'id_str'",
"]",
"screen_name",
"=",
"tweet",
"[",
"'user'",
"]",
"[",
"'screen_name'",
"]",
"except",
"KeyError",
":",
"logs",
".",
"error",
"(",
"'Malformed tweet: %s'",
"%",
"tweet",
")",
"return",
"# We're only interested in tweets from Mr. Trump himself, so skip the",
"# rest.",
"if",
"user_id_str",
"!=",
"TRUMP_USER_ID",
":",
"logs",
".",
"debug",
"(",
"'Skipping tweet from user: %s (%s)'",
"%",
"(",
"screen_name",
",",
"user_id_str",
")",
")",
"return",
"logs",
".",
"info",
"(",
"'Examining tweet: %s'",
"%",
"tweet",
")",
"# Call the callback.",
"self",
".",
"callback",
"(",
"tweet",
")"
] | [
351,
4
] | [
379,
28
] | python | en | ['en', 'en', 'en'] | True |
parse_service_messages | (text) |
Parses service messages from the given build log.
:type text: str
:rtype: list[ServiceMessage]
|
Parses service messages from the given build log.
:type text: str
:rtype: list[ServiceMessage]
| def parse_service_messages(text):
"""
Parses service messages from the given build log.
:type text: str
:rtype: list[ServiceMessage]
"""
messages = list()
for line in text.splitlines():
r = line.strip()
index = r.find("##teamcity[")
if index != -1:
m = _parse_one_service_message(r[index:])
messages.append(m)
return messages | [
"def",
"parse_service_messages",
"(",
"text",
")",
":",
"messages",
"=",
"list",
"(",
")",
"for",
"line",
"in",
"text",
".",
"splitlines",
"(",
")",
":",
"r",
"=",
"line",
".",
"strip",
"(",
")",
"index",
"=",
"r",
".",
"find",
"(",
"\"##teamcity[\"",
")",
"if",
"index",
"!=",
"-",
"1",
":",
"m",
"=",
"_parse_one_service_message",
"(",
"r",
"[",
"index",
":",
"]",
")",
"messages",
".",
"append",
"(",
"m",
")",
"return",
"messages"
] | [
47,
0
] | [
60,
19
] | python | en | ['en', 'error', 'th'] | False |
service_messages_to_string | (messages) |
:type messages: list[ServiceMessage]
|
:type messages: list[ServiceMessage]
| def service_messages_to_string(messages):
"""
:type messages: list[ServiceMessage]
"""
return u"\n".join([x.as_unicode() for x in messages]) | [
"def",
"service_messages_to_string",
"(",
"messages",
")",
":",
"return",
"u\"\\n\"",
".",
"join",
"(",
"[",
"x",
".",
"as_unicode",
"(",
")",
"for",
"x",
"in",
"messages",
"]",
")"
] | [
63,
0
] | [
67,
57
] | python | en | ['en', 'error', 'th'] | False |
_parse_one_service_message | (s) |
Parses one service message.
:type s: str
:rtype: service_message
|
Parses one service message.
:type s: str
:rtype: service_message
| def _parse_one_service_message(s):
"""
Parses one service message.
:type s: str
:rtype: service_message
"""
b1 = s.index('[')
b2 = s.rindex(']', b1)
inner = s[b1 + 1:b2].strip()
space1 = inner.find(' ')
if space1 >= 0:
name_len = space1
else:
name_len = inner.__len__()
name = inner[0:name_len]
params = dict()
beg = name_len + 1
while beg < inner.__len__():
if inner[beg] == '_':
beg += 1
continue
eq = inner.find('=', beg)
if eq == -1:
break
q1 = inner.find("'", eq)
if q1 == -1:
break
q2 = inner.find("'", q1 + 1)
while q2 > 0 and inner[q2 - 1] == '|':
q2 = inner.find("'", q2 + 1)
if q2 == -1:
break
param_name = inner[beg:eq].strip()
param_value = inner[q1 + 1:q2]
params[param_name] = param_value
beg = q2 + 1
return ServiceMessage(name, params) | [
"def",
"_parse_one_service_message",
"(",
"s",
")",
":",
"b1",
"=",
"s",
".",
"index",
"(",
"'['",
")",
"b2",
"=",
"s",
".",
"rindex",
"(",
"']'",
",",
"b1",
")",
"inner",
"=",
"s",
"[",
"b1",
"+",
"1",
":",
"b2",
"]",
".",
"strip",
"(",
")",
"space1",
"=",
"inner",
".",
"find",
"(",
"' '",
")",
"if",
"space1",
">=",
"0",
":",
"name_len",
"=",
"space1",
"else",
":",
"name_len",
"=",
"inner",
".",
"__len__",
"(",
")",
"name",
"=",
"inner",
"[",
"0",
":",
"name_len",
"]",
"params",
"=",
"dict",
"(",
")",
"beg",
"=",
"name_len",
"+",
"1",
"while",
"beg",
"<",
"inner",
".",
"__len__",
"(",
")",
":",
"if",
"inner",
"[",
"beg",
"]",
"==",
"'_'",
":",
"beg",
"+=",
"1",
"continue",
"eq",
"=",
"inner",
".",
"find",
"(",
"'='",
",",
"beg",
")",
"if",
"eq",
"==",
"-",
"1",
":",
"break",
"q1",
"=",
"inner",
".",
"find",
"(",
"\"'\"",
",",
"eq",
")",
"if",
"q1",
"==",
"-",
"1",
":",
"break",
"q2",
"=",
"inner",
".",
"find",
"(",
"\"'\"",
",",
"q1",
"+",
"1",
")",
"while",
"q2",
">",
"0",
"and",
"inner",
"[",
"q2",
"-",
"1",
"]",
"==",
"'|'",
":",
"q2",
"=",
"inner",
".",
"find",
"(",
"\"'\"",
",",
"q2",
"+",
"1",
")",
"if",
"q2",
"==",
"-",
"1",
":",
"break",
"param_name",
"=",
"inner",
"[",
"beg",
":",
"eq",
"]",
".",
"strip",
"(",
")",
"param_value",
"=",
"inner",
"[",
"q1",
"+",
"1",
":",
"q2",
"]",
"params",
"[",
"param_name",
"]",
"=",
"param_value",
"beg",
"=",
"q2",
"+",
"1",
"return",
"ServiceMessage",
"(",
"name",
",",
"params",
")"
] | [
70,
0
] | [
110,
39
] | python | en | ['en', 'error', 'th'] | False |
match | (messages, message) |
:type messages: list[ServiceMessage]
:type message: ServiceMessage
|
:type messages: list[ServiceMessage]
:type message: ServiceMessage
| def match(messages, message):
"""
:type messages: list[ServiceMessage]
:type message: ServiceMessage
"""
candidates = [x for x in messages if x >= message]
if len(candidates) == 0:
raise AssertionError("No messages match " + message.as_unicode() + " across " + service_messages_to_string(messages))
if len(candidates) > 1:
raise AssertionError("More than one message match " + message.as_unicode() + " across " + service_messages_to_string(messages) +
": " + service_messages_to_string(candidates))
return candidates[0] | [
"def",
"match",
"(",
"messages",
",",
"message",
")",
":",
"candidates",
"=",
"[",
"x",
"for",
"x",
"in",
"messages",
"if",
"x",
">=",
"message",
"]",
"if",
"len",
"(",
"candidates",
")",
"==",
"0",
":",
"raise",
"AssertionError",
"(",
"\"No messages match \"",
"+",
"message",
".",
"as_unicode",
"(",
")",
"+",
"\" across \"",
"+",
"service_messages_to_string",
"(",
"messages",
")",
")",
"if",
"len",
"(",
"candidates",
")",
">",
"1",
":",
"raise",
"AssertionError",
"(",
"\"More than one message match \"",
"+",
"message",
".",
"as_unicode",
"(",
")",
"+",
"\" across \"",
"+",
"service_messages_to_string",
"(",
"messages",
")",
"+",
"\": \"",
"+",
"service_messages_to_string",
"(",
"candidates",
")",
")",
"return",
"candidates",
"[",
"0",
"]"
] | [
118,
0
] | [
129,
24
] | python | en | ['en', 'error', 'th'] | False |
assert_service_messages | (actual_messages_string, expected_messages, actual_messages_predicate=lambda x: True) |
:type expected_messages: list[ServiceMessage]
|
:type expected_messages: list[ServiceMessage]
| def assert_service_messages(actual_messages_string, expected_messages, actual_messages_predicate=lambda x: True):
"""
:type expected_messages: list[ServiceMessage]
"""
expected_messages = [x for x in expected_messages if x is not None]
actual_messages = [x for x in parse_service_messages(actual_messages_string) if actual_messages_predicate(x)]
try:
if len(actual_messages) != len(expected_messages):
raise AssertionError("Expected %d service messages, but got %d" % (len(expected_messages), len(actual_messages)))
for index, (actual, expected) in enumerate(zip(actual_messages, expected_messages)):
message = u"Expected\n" + expected.as_unicode() + u", but got\n" + actual.as_unicode() + u"\n at index " + str(index)
assert actual >= expected, message
except AssertionError:
print("Actual:\n" + service_messages_to_string(actual_messages) + "\n")
print("Expected:\n" + service_messages_to_string(expected_messages) + "\n")
raise sys.exc_info()[1]
return actual_messages | [
"def",
"assert_service_messages",
"(",
"actual_messages_string",
",",
"expected_messages",
",",
"actual_messages_predicate",
"=",
"lambda",
"x",
":",
"True",
")",
":",
"expected_messages",
"=",
"[",
"x",
"for",
"x",
"in",
"expected_messages",
"if",
"x",
"is",
"not",
"None",
"]",
"actual_messages",
"=",
"[",
"x",
"for",
"x",
"in",
"parse_service_messages",
"(",
"actual_messages_string",
")",
"if",
"actual_messages_predicate",
"(",
"x",
")",
"]",
"try",
":",
"if",
"len",
"(",
"actual_messages",
")",
"!=",
"len",
"(",
"expected_messages",
")",
":",
"raise",
"AssertionError",
"(",
"\"Expected %d service messages, but got %d\"",
"%",
"(",
"len",
"(",
"expected_messages",
")",
",",
"len",
"(",
"actual_messages",
")",
")",
")",
"for",
"index",
",",
"(",
"actual",
",",
"expected",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"actual_messages",
",",
"expected_messages",
")",
")",
":",
"message",
"=",
"u\"Expected\\n\"",
"+",
"expected",
".",
"as_unicode",
"(",
")",
"+",
"u\", but got\\n\"",
"+",
"actual",
".",
"as_unicode",
"(",
")",
"+",
"u\"\\n at index \"",
"+",
"str",
"(",
"index",
")",
"assert",
"actual",
">=",
"expected",
",",
"message",
"except",
"AssertionError",
":",
"print",
"(",
"\"Actual:\\n\"",
"+",
"service_messages_to_string",
"(",
"actual_messages",
")",
"+",
"\"\\n\"",
")",
"print",
"(",
"\"Expected:\\n\"",
"+",
"service_messages_to_string",
"(",
"expected_messages",
")",
"+",
"\"\\n\"",
")",
"raise",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
"return",
"actual_messages"
] | [
132,
0
] | [
151,
26
] | python | en | ['en', 'error', 'th'] | False |
ServiceMessage.__init__ | (self, name, params) |
:type name: string
:type params: dict[string, string]
|
:type name: string
:type params: dict[string, string]
| def __init__(self, name, params):
"""
:type name: string
:type params: dict[string, string]
"""
self.name = name
self.params = params | [
"def",
"__init__",
"(",
"self",
",",
"name",
",",
"params",
")",
":",
"self",
".",
"name",
"=",
"name",
"self",
".",
"params",
"=",
"params"
] | [
6,
4
] | [
12,
28
] | python | en | ['en', 'error', 'th'] | False |
ServiceMessage.__ge__ | (self, other) |
:type self: service_message
:type other: service_message
:rtype: bool
|
:type self: service_message
:type other: service_message
:rtype: bool
| def __ge__(self, other):
"""
:type self: service_message
:type other: service_message
:rtype: bool
"""
if self.name != other.name:
return False
for p in other.params:
if p in self.params:
v1 = self.params[p]
v2 = other.params[p]
if to_unicode(v1).lower() != to_unicode(v2).lower():
return False
else:
return False
return True | [
"def",
"__ge__",
"(",
"self",
",",
"other",
")",
":",
"if",
"self",
".",
"name",
"!=",
"other",
".",
"name",
":",
"return",
"False",
"for",
"p",
"in",
"other",
".",
"params",
":",
"if",
"p",
"in",
"self",
".",
"params",
":",
"v1",
"=",
"self",
".",
"params",
"[",
"p",
"]",
"v2",
"=",
"other",
".",
"params",
"[",
"p",
"]",
"if",
"to_unicode",
"(",
"v1",
")",
".",
"lower",
"(",
")",
"!=",
"to_unicode",
"(",
"v2",
")",
".",
"lower",
"(",
")",
":",
"return",
"False",
"else",
":",
"return",
"False",
"return",
"True"
] | [
14,
4
] | [
31,
19
] | python | en | ['en', 'error', 'th'] | False |
TupleFilesystemStoreBackend.rrmdir | (self, mroot, curpath) |
recursively removes empty dirs between curpath and mroot inclusive
|
recursively removes empty dirs between curpath and mroot inclusive
| def rrmdir(self, mroot, curpath):
"""
recursively removes empty dirs between curpath and mroot inclusive
"""
try:
while (
not os.listdir(curpath) and os.path.exists(curpath) and mroot != curpath
):
f2 = os.path.dirname(curpath)
os.rmdir(curpath)
curpath = f2
except (NotADirectoryError, FileNotFoundError):
pass | [
"def",
"rrmdir",
"(",
"self",
",",
"mroot",
",",
"curpath",
")",
":",
"try",
":",
"while",
"(",
"not",
"os",
".",
"listdir",
"(",
"curpath",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"curpath",
")",
"and",
"mroot",
"!=",
"curpath",
")",
":",
"f2",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"curpath",
")",
"os",
".",
"rmdir",
"(",
"curpath",
")",
"curpath",
"=",
"f2",
"except",
"(",
"NotADirectoryError",
",",
"FileNotFoundError",
")",
":",
"pass"
] | [
373,
4
] | [
385,
16
] | python | en | ['en', 'error', 'th'] | False |
impute_missing_data_1D | (data1D) |
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable.
Parameters
----------
data: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
|
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable.
Parameters
----------
data: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
| def impute_missing_data_1D(data1D):
"""
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many linear plotting functions for 1D data often
(and should) only connect contiguous, non-nan data points. This leaves gaps in the
piecewise linear plot, which are sometimes graphically undesirable.
Parameters
----------
data: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
"""
nan_mask = ~np.isnan(data1D)
x = np.arange(len(data1D))
x_no_nan = x[nan_mask]
data_no_nan = data1D[nan_mask]
if len(x_no_nan) >= 2:
f = interp1d(x_no_nan, data_no_nan)
# Select points for interpolation.
interpolation_x_mask = (x_no_nan[0]<=x) & (x<=x_no_nan[-1])
interpolation_x = x[interpolation_x_mask]
data1D_interp = np.arange(len(data1D), dtype=np.float32)
# The ends of data1D may contain NaNs that must be included.
end_nan_inds = x[(x<=x_no_nan[0]) | (x_no_nan[-1]<=x)]
data1D_interp[end_nan_inds] = np.nan
data1D_interp[interpolation_x_mask] = f(interpolation_x)
return data1D_interp
else: # Cannot interpolate with a single non-nan point.
return data1D | [
"def",
"impute_missing_data_1D",
"(",
"data1D",
")",
":",
"nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"data1D",
")",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data1D",
")",
")",
"x_no_nan",
"=",
"x",
"[",
"nan_mask",
"]",
"data_no_nan",
"=",
"data1D",
"[",
"nan_mask",
"]",
"if",
"len",
"(",
"x_no_nan",
")",
">=",
"2",
":",
"f",
"=",
"interp1d",
"(",
"x_no_nan",
",",
"data_no_nan",
")",
"# Select points for interpolation.",
"interpolation_x_mask",
"=",
"(",
"x_no_nan",
"[",
"0",
"]",
"<=",
"x",
")",
"&",
"(",
"x",
"<=",
"x_no_nan",
"[",
"-",
"1",
"]",
")",
"interpolation_x",
"=",
"x",
"[",
"interpolation_x_mask",
"]",
"data1D_interp",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data1D",
")",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"# The ends of data1D may contain NaNs that must be included.",
"end_nan_inds",
"=",
"x",
"[",
"(",
"x",
"<=",
"x_no_nan",
"[",
"0",
"]",
")",
"|",
"(",
"x_no_nan",
"[",
"-",
"1",
"]",
"<=",
"x",
")",
"]",
"data1D_interp",
"[",
"end_nan_inds",
"]",
"=",
"np",
".",
"nan",
"data1D_interp",
"[",
"interpolation_x_mask",
"]",
"=",
"f",
"(",
"interpolation_x",
")",
"return",
"data1D_interp",
"else",
":",
"# Cannot interpolate with a single non-nan point.",
"return",
"data1D"
] | [
43,
0
] | [
74,
21
] | python | en | ['en', 'error', 'th'] | False |
np_dt64_to_str | (np_datetime, fmt='%Y-%m-%d') | Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime. | Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime. | def np_dt64_to_str(np_datetime, fmt='%Y-%m-%d'):
"""Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime."""
return pd.to_datetime(str(np_datetime)).strftime(fmt) | [
"def",
"np_dt64_to_str",
"(",
"np_datetime",
",",
"fmt",
"=",
"'%Y-%m-%d'",
")",
":",
"return",
"pd",
".",
"to_datetime",
"(",
"str",
"(",
"np_datetime",
")",
")",
".",
"strftime",
"(",
"fmt",
")"
] | [
85,
0
] | [
87,
57
] | python | en | ['en', 'en', 'en'] | True |
xarray_plot_data_vars_over_time | (dataset, colors=['orange', 'blue']) |
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
|
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz (john.c.rattz | def xarray_plot_data_vars_over_time(dataset, colors=['orange', 'blue']):
"""
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
"""
data_var_names = sorted(list(dataset.data_vars))
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr_name in enumerate(data_var_names):
data_arr = dataset[data_arr_name]
nan_mask = nan_mask & data_arr.notnull().values
plt.plot(data_arr[nan_mask], marker='o', c=colors[i])
times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times)))
plt.xticks(np.arange(len(date_strs[nan_mask])), date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.legend(data_var_names, loc='upper right')
plt.show() | [
"def",
"xarray_plot_data_vars_over_time",
"(",
"dataset",
",",
"colors",
"=",
"[",
"'orange'",
",",
"'blue'",
"]",
")",
":",
"data_var_names",
"=",
"sorted",
"(",
"list",
"(",
"dataset",
".",
"data_vars",
")",
")",
"len_dataset",
"=",
"dataset",
".",
"time",
".",
"size",
"nan_mask",
"=",
"np",
".",
"full",
"(",
"len_dataset",
",",
"True",
")",
"for",
"i",
",",
"data_arr_name",
"in",
"enumerate",
"(",
"data_var_names",
")",
":",
"data_arr",
"=",
"dataset",
"[",
"data_arr_name",
"]",
"nan_mask",
"=",
"nan_mask",
"&",
"data_arr",
".",
"notnull",
"(",
")",
".",
"values",
"plt",
".",
"plot",
"(",
"data_arr",
"[",
"nan_mask",
"]",
",",
"marker",
"=",
"'o'",
",",
"c",
"=",
"colors",
"[",
"i",
"]",
")",
"times",
"=",
"dataset",
".",
"time",
".",
"values",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times",
")",
")",
")",
"plt",
".",
"xticks",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"date_strs",
"[",
"nan_mask",
"]",
")",
")",
",",
"date_strs",
"[",
"nan_mask",
"]",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"legend",
"(",
"data_var_names",
",",
"loc",
"=",
"'upper right'",
")",
"plt",
".",
"show",
"(",
")"
] | [
122,
0
] | [
149,
14
] | python | en | ['en', 'error', 'th'] | False |
xarray_scatterplot_data_vars | (dataset, figure_kwargs={'figsize':(12,6)}, colors=['blue', 'orange'], markersize=5) |
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot.
frac_dates: float
The fraction of dates to label on the x-axis.
figure_kwargs: dict
A dictionary of kwargs for matplotlib figure creation.
colors: list
A list of strings denoting abbreviated colors for each data variable's points.
For example, 'r' is red and 'b' is blue.
markersize: float
The size of markers in the scatterplot.
:Authors:
John Rattz ([email protected])
|
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis. | def xarray_scatterplot_data_vars(dataset, figure_kwargs={'figsize':(12,6)}, colors=['blue', 'orange'], markersize=5):
"""
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot.
frac_dates: float
The fraction of dates to label on the x-axis.
figure_kwargs: dict
A dictionary of kwargs for matplotlib figure creation.
colors: list
A list of strings denoting abbreviated colors for each data variable's points.
For example, 'r' is red and 'b' is blue.
markersize: float
The size of markers in the scatterplot.
:Authors:
John Rattz ([email protected])
"""
plt.figure(**figure_kwargs)
data_var_names = list(dataset.data_vars)
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr in enumerate(dataset.data_vars.values()):
if len(list(dataset.dims)) > 1:
dims_to_check_for_nulls = [dim for dim in list(dataset.dims) if dim != 'time']
nan_mask = nan_mask & data_arr.notnull().any(dim=dims_to_check_for_nulls).values
else:
nan_mask = data_arr.notnull().values
times = data_arr.to_dataframe().index.get_level_values('time').values
plt.scatter(stats.rankdata(times, method='dense')-1, data_arr.values.flatten(), c=colors[i], s=markersize)
unique_times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), unique_times)))
plt.xticks(np.arange(len(date_strs))[nan_mask], date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.xlabel('time')
plt.legend(data_var_names, loc='upper right')
plt.show() | [
"def",
"xarray_scatterplot_data_vars",
"(",
"dataset",
",",
"figure_kwargs",
"=",
"{",
"'figsize'",
":",
"(",
"12",
",",
"6",
")",
"}",
",",
"colors",
"=",
"[",
"'blue'",
",",
"'orange'",
"]",
",",
"markersize",
"=",
"5",
")",
":",
"plt",
".",
"figure",
"(",
"*",
"*",
"figure_kwargs",
")",
"data_var_names",
"=",
"list",
"(",
"dataset",
".",
"data_vars",
")",
"len_dataset",
"=",
"dataset",
".",
"time",
".",
"size",
"nan_mask",
"=",
"np",
".",
"full",
"(",
"len_dataset",
",",
"True",
")",
"for",
"i",
",",
"data_arr",
"in",
"enumerate",
"(",
"dataset",
".",
"data_vars",
".",
"values",
"(",
")",
")",
":",
"if",
"len",
"(",
"list",
"(",
"dataset",
".",
"dims",
")",
")",
">",
"1",
":",
"dims_to_check_for_nulls",
"=",
"[",
"dim",
"for",
"dim",
"in",
"list",
"(",
"dataset",
".",
"dims",
")",
"if",
"dim",
"!=",
"'time'",
"]",
"nan_mask",
"=",
"nan_mask",
"&",
"data_arr",
".",
"notnull",
"(",
")",
".",
"any",
"(",
"dim",
"=",
"dims_to_check_for_nulls",
")",
".",
"values",
"else",
":",
"nan_mask",
"=",
"data_arr",
".",
"notnull",
"(",
")",
".",
"values",
"times",
"=",
"data_arr",
".",
"to_dataframe",
"(",
")",
".",
"index",
".",
"get_level_values",
"(",
"'time'",
")",
".",
"values",
"plt",
".",
"scatter",
"(",
"stats",
".",
"rankdata",
"(",
"times",
",",
"method",
"=",
"'dense'",
")",
"-",
"1",
",",
"data_arr",
".",
"values",
".",
"flatten",
"(",
")",
",",
"c",
"=",
"colors",
"[",
"i",
"]",
",",
"s",
"=",
"markersize",
")",
"unique_times",
"=",
"dataset",
".",
"time",
".",
"values",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"unique_times",
")",
")",
")",
"plt",
".",
"xticks",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"date_strs",
")",
")",
"[",
"nan_mask",
"]",
",",
"date_strs",
"[",
"nan_mask",
"]",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"xlabel",
"(",
"'time'",
")",
"plt",
".",
"legend",
"(",
"data_var_names",
",",
"loc",
"=",
"'upper right'",
")",
"plt",
".",
"show",
"(",
")"
] | [
151,
0
] | [
191,
14
] | python | en | ['en', 'error', 'th'] | False |
xarray_plot_ndvi_boxplot_wofs_lineplot_over_time | (dataset, resolution=None, colors=['orange', 'blue']) |
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
|
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz (john.c.rattz | def xarray_plot_ndvi_boxplot_wofs_lineplot_over_time(dataset, resolution=None, colors=['orange', 'blue']):
"""
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
:Authors:
John Rattz ([email protected])
"""
plotting_data = dataset.stack(lat_lon=('latitude', 'longitude'))
time_agg_str = 'weekofyear' if resolution is not None and resolution == 'weekly' else 'time'
if time_agg_str != 'time':
plotting_data = plotting_data.groupby('time.'+time_agg_str).mean(dim='time')
fig, ax = plt.subplots(figsize=(9,6))
ndvi_box_color, wofs_line_color = ('orange', 'blue')
times = plotting_data[time_agg_str].values
# NDVI boxplot boxes
# The data formatted for matplotlib.pyplot.boxplot().
ndvi_formatted_data = xr.DataArray(np.full_like(plotting_data.ndvi.values, np.nan))
for i, time in enumerate(times):
ndvi_formatted_data.loc[i,:] = plotting_data.loc[{time_agg_str:time}].ndvi.values
ndvi_nan_mask = ~np.isnan(ndvi_formatted_data)
filtered_formatted_data = [] # Data formatted for matplotlib.pyplot.boxplot().
acq_inds_to_keep = [] # Indices of acquisitions to keep. Other indicies contain all nan values.
for i, (d, m) in enumerate(zip(ndvi_formatted_data, ndvi_nan_mask)):
if len(d[m] != 0):
filtered_formatted_data.append(d[m])
acq_inds_to_keep.append(i)
times_no_nan = times[acq_inds_to_keep]
epochs = np.array(list(map(n64_to_epoch, times_no_nan))) if time_agg_str == 'time' else None
x_locs = epochs if time_agg_str == 'time' else times_no_nan
box_width = 0.5*np.min(np.diff(x_locs))
bp = ax.boxplot(filtered_formatted_data, widths=[box_width]*len(filtered_formatted_data),
positions=x_locs, patch_artist=True, boxprops=dict(facecolor=ndvi_box_color),
flierprops=dict(marker='o', markersize=0.25),
manage_xticks=False) # `manage_xticks=False` to avoid excessive padding on the x-axis.
# WOFS line
wofs_formatted_data = xr.DataArray(np.full_like(plotting_data.wofs.values, np.nan))
for i, time in enumerate(times):
wofs_formatted_data.loc[i,:] = plotting_data.loc[{time_agg_str:time}].wofs.values
wofs_line_plot_data = np.nanmean(wofs_formatted_data.values, axis=1)
wofs_nan_mask = ~np.isnan(wofs_line_plot_data)
line = ax.plot(x_locs, wofs_line_plot_data[wofs_nan_mask], c=wofs_line_color)
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times_no_nan))) if time_agg_str=='time' else \
naive_months_ticks_by_week(times_no_nan)
x_labels = date_strs
plt.xticks(x_locs, x_labels, rotation=45, ha='right', rotation_mode='anchor')
plt.legend(handles=[bp['boxes'][0],line[0]], labels=list(plotting_data.data_vars), loc='best')
plt.tight_layout()
plt.show() | [
"def",
"xarray_plot_ndvi_boxplot_wofs_lineplot_over_time",
"(",
"dataset",
",",
"resolution",
"=",
"None",
",",
"colors",
"=",
"[",
"'orange'",
",",
"'blue'",
"]",
")",
":",
"plotting_data",
"=",
"dataset",
".",
"stack",
"(",
"lat_lon",
"=",
"(",
"'latitude'",
",",
"'longitude'",
")",
")",
"time_agg_str",
"=",
"'weekofyear'",
"if",
"resolution",
"is",
"not",
"None",
"and",
"resolution",
"==",
"'weekly'",
"else",
"'time'",
"if",
"time_agg_str",
"!=",
"'time'",
":",
"plotting_data",
"=",
"plotting_data",
".",
"groupby",
"(",
"'time.'",
"+",
"time_agg_str",
")",
".",
"mean",
"(",
"dim",
"=",
"'time'",
")",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"(",
"9",
",",
"6",
")",
")",
"ndvi_box_color",
",",
"wofs_line_color",
"=",
"(",
"'orange'",
",",
"'blue'",
")",
"times",
"=",
"plotting_data",
"[",
"time_agg_str",
"]",
".",
"values",
"# NDVI boxplot boxes",
"# The data formatted for matplotlib.pyplot.boxplot().",
"ndvi_formatted_data",
"=",
"xr",
".",
"DataArray",
"(",
"np",
".",
"full_like",
"(",
"plotting_data",
".",
"ndvi",
".",
"values",
",",
"np",
".",
"nan",
")",
")",
"for",
"i",
",",
"time",
"in",
"enumerate",
"(",
"times",
")",
":",
"ndvi_formatted_data",
".",
"loc",
"[",
"i",
",",
":",
"]",
"=",
"plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"time",
"}",
"]",
".",
"ndvi",
".",
"values",
"ndvi_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"ndvi_formatted_data",
")",
"filtered_formatted_data",
"=",
"[",
"]",
"# Data formatted for matplotlib.pyplot.boxplot().",
"acq_inds_to_keep",
"=",
"[",
"]",
"# Indices of acquisitions to keep. Other indicies contain all nan values.",
"for",
"i",
",",
"(",
"d",
",",
"m",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"ndvi_formatted_data",
",",
"ndvi_nan_mask",
")",
")",
":",
"if",
"len",
"(",
"d",
"[",
"m",
"]",
"!=",
"0",
")",
":",
"filtered_formatted_data",
".",
"append",
"(",
"d",
"[",
"m",
"]",
")",
"acq_inds_to_keep",
".",
"append",
"(",
"i",
")",
"times_no_nan",
"=",
"times",
"[",
"acq_inds_to_keep",
"]",
"epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"times_no_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"None",
"x_locs",
"=",
"epochs",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"times_no_nan",
"box_width",
"=",
"0.5",
"*",
"np",
".",
"min",
"(",
"np",
".",
"diff",
"(",
"x_locs",
")",
")",
"bp",
"=",
"ax",
".",
"boxplot",
"(",
"filtered_formatted_data",
",",
"widths",
"=",
"[",
"box_width",
"]",
"*",
"len",
"(",
"filtered_formatted_data",
")",
",",
"positions",
"=",
"x_locs",
",",
"patch_artist",
"=",
"True",
",",
"boxprops",
"=",
"dict",
"(",
"facecolor",
"=",
"ndvi_box_color",
")",
",",
"flierprops",
"=",
"dict",
"(",
"marker",
"=",
"'o'",
",",
"markersize",
"=",
"0.25",
")",
",",
"manage_xticks",
"=",
"False",
")",
"# `manage_xticks=False` to avoid excessive padding on the x-axis.",
"# WOFS line",
"wofs_formatted_data",
"=",
"xr",
".",
"DataArray",
"(",
"np",
".",
"full_like",
"(",
"plotting_data",
".",
"wofs",
".",
"values",
",",
"np",
".",
"nan",
")",
")",
"for",
"i",
",",
"time",
"in",
"enumerate",
"(",
"times",
")",
":",
"wofs_formatted_data",
".",
"loc",
"[",
"i",
",",
":",
"]",
"=",
"plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"time",
"}",
"]",
".",
"wofs",
".",
"values",
"wofs_line_plot_data",
"=",
"np",
".",
"nanmean",
"(",
"wofs_formatted_data",
".",
"values",
",",
"axis",
"=",
"1",
")",
"wofs_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"wofs_line_plot_data",
")",
"line",
"=",
"ax",
".",
"plot",
"(",
"x_locs",
",",
"wofs_line_plot_data",
"[",
"wofs_nan_mask",
"]",
",",
"c",
"=",
"wofs_line_color",
")",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times_no_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"naive_months_ticks_by_week",
"(",
"times_no_nan",
")",
"x_labels",
"=",
"date_strs",
"plt",
".",
"xticks",
"(",
"x_locs",
",",
"x_labels",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"plt",
".",
"legend",
"(",
"handles",
"=",
"[",
"bp",
"[",
"'boxes'",
"]",
"[",
"0",
"]",
",",
"line",
"[",
"0",
"]",
"]",
",",
"labels",
"=",
"list",
"(",
"plotting_data",
".",
"data_vars",
")",
",",
"loc",
"=",
"'best'",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"show",
"(",
")"
] | [
193,
0
] | [
256,
14
] | python | en | ['en', 'error', 'th'] | False |
xarray_time_series_plot | (dataset, plot_descs, x_coord='longitude',
y_coord='latitude', fig_params=None,
scale_params=None, fig=None, ax=None,
max_times_per_plot=None, show_legend=True,
title=None) |
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
The primary coordinate must be 'time'.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation, and some other plot types
require
many-to-one aggregation. Aggregation types can be any of
['mean', 'median', 'none'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'gaussian', 'poly', 'cubic_spline', 'box'].
The plot type 'poly' requires a 'degree' entry mapping to an integer in its
dictionary of keyword arguments.
Here is an example:
{'ndvi':{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},
'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`
to use as tick and axis labels.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
``if fig is None and ax is None``. Note that in the case of multiple plots
being created (see ``max_times_per_plot`` below), figsize will be the size
of each plot - not the entire figure.
scale_params: dict
Currently not used.
Dictionary mapping names of DataArrays to scaling methods
(e.g. {'ndvi': 'std', 'wofs':'norm'}). The options are ['std', 'norm'].
The option 'std' standardizes. The option 'norm' normalizes (min-max scales).
Note that of these options, only normalizing guarantees that the y values will be
in a fixed range - namely [0,1].
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
max_times_per_plot: int
The maximum number of times per plot. If specified, one plot will be generated for
each group of this many times. The plots will be arranged in a row-major grid.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parenthesis
will be postpended whether this is specified or not.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
:Authors:
John Rattz ([email protected])
|
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
The primary coordinate must be 'time'.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation, and some other plot types
require
many-to-one aggregation. Aggregation types can be any of
['mean', 'median', 'none'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'gaussian', 'poly', 'cubic_spline', 'box'].
The plot type 'poly' requires a 'degree' entry mapping to an integer in its
dictionary of keyword arguments.
Here is an example:
{'ndvi':{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},
'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`
to use as tick and axis labels.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
``if fig is None and ax is None``. Note that in the case of multiple plots
being created (see ``max_times_per_plot`` below), figsize will be the size
of each plot - not the entire figure.
scale_params: dict
Currently not used.
Dictionary mapping names of DataArrays to scaling methods
(e.g. {'ndvi': 'std', 'wofs':'norm'}). The options are ['std', 'norm'].
The option 'std' standardizes. The option 'norm' normalizes (min-max scales).
Note that of these options, only normalizing guarantees that the y values will be
in a fixed range - namely [0,1].
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
max_times_per_plot: int
The maximum number of times per plot. If specified, one plot will be generated for
each group of this many times. The plots will be arranged in a row-major grid.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parenthesis
will be postpended whether this is specified or not.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
:Authors:
John Rattz (john.c.rattz | def xarray_time_series_plot(dataset, plot_descs, x_coord='longitude',
y_coord='latitude', fig_params=None,
scale_params=None, fig=None, ax=None,
max_times_per_plot=None, show_legend=True,
title=None):
"""
Plot data variables in an xarray.Dataset together in one figure, with different
plot types for each (e.g. box-and-whisker plot, line plot, scatter plot), and
optional curve fitting to aggregations along time. Handles data binned with
xarray.Dataset methods resample() and groupby(). That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
The primary coordinate must be 'time'.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation, and some other plot types
require
many-to-one aggregation. Aggregation types can be any of
['mean', 'median', 'none'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'gaussian', 'poly', 'cubic_spline', 'box'].
The plot type 'poly' requires a 'degree' entry mapping to an integer in its
dictionary of keyword arguments.
Here is an example:
{'ndvi':{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},
'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`
to use as tick and axis labels.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
``if fig is None and ax is None``. Note that in the case of multiple plots
being created (see ``max_times_per_plot`` below), figsize will be the size
of each plot - not the entire figure.
scale_params: dict
Currently not used.
Dictionary mapping names of DataArrays to scaling methods
(e.g. {'ndvi': 'std', 'wofs':'norm'}). The options are ['std', 'norm'].
The option 'std' standardizes. The option 'norm' normalizes (min-max scales).
Note that of these options, only normalizing guarantees that the y values will be
in a fixed range - namely [0,1].
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
max_times_per_plot: int
The maximum number of times per plot. If specified, one plot will be generated for
each group of this many times. The plots will be arranged in a row-major grid.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parenthesis
will be postpended whether this is specified or not.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
:Authors:
John Rattz ([email protected])
"""
# Set default values for mutable data.
fig_params = {} if fig_params is None else fig_params
fig_params.setdefault('figsize', (18,12))
scale_params = {} if scale_params is None else scale_params
# Lists of plot types that can and cannot accept many-to-one aggregation
# for each time slice.
plot_types_requiring_aggregation = ['line', 'gaussian', 'poly', 'cubic_spline']
plot_types_handling_aggregation = ['scatter'] + plot_types_requiring_aggregation
plot_types_not_handling_aggregation = ['box']
all_plot_types = plot_types_requiring_aggregation + plot_types_handling_aggregation\
+ plot_types_not_handling_aggregation
# Aggregation types that aggregate all values for a given time to one value.
many_to_one_agg_types = ['mean', 'median']
# Aggregation types that aggregate to many values or do not aggregate.
many_to_many_agg_types = ['none']
all_agg_types = many_to_one_agg_types + many_to_many_agg_types
# Determine how the data was aggregated, if at all.
possible_time_agg_strs = ['week', 'weekofyear', 'month']
time_agg_str = 'time'
for possible_time_agg_str in possible_time_agg_strs:
if possible_time_agg_str in list(dataset.coords):
time_agg_str = possible_time_agg_str
break
# Make the data 2D - time and a stack of all other dimensions.
non_time_dims = list(set(dataset.dims)-{time_agg_str})
all_plotting_bands = list(plot_descs.keys())
all_plotting_data = dataset[all_plotting_bands].stack(stacked_data=non_time_dims)
all_times = all_plotting_data[time_agg_str].values
# Mask out times for which no data variable to plot has any non-NaN data.
nan_mask_data_vars = list(all_plotting_data[all_plotting_bands]\
.notnull().data_vars.values())
for i, data_var in enumerate(nan_mask_data_vars):
time_nan_mask = data_var.values if i == 0 else time_nan_mask | data_var.values
time_nan_mask = np.any(time_nan_mask, axis=1)
times_not_all_nan = all_times[time_nan_mask]
all_plotting_data = all_plotting_data.loc[{time_agg_str:times_not_all_nan}]
# Scale
# if scale_params denotes the scaling type for the whole Dataset, scale the Dataset.
if isinstance(scale_params, str):
all_plotting_data = xr_scale(all_plotting_data, scaling=scale_params)
# else, it is a dictionary denoting how to scale each DataArray.
elif len(scale_params) > 0:
for data_arr_name, scaling in scale_params.items():
all_plotting_data[data_arr_name] = \
xr_scale(all_plotting_data[data_arr_name], scaling=scaling)
# Handle the potential for multiple plots.
max_times_per_plot = len(times_not_all_nan) if max_times_per_plot is None else \
max_times_per_plot
num_plots = int(np.ceil(len(times_not_all_nan)/max_times_per_plot))
subset_num_cols = 2
subset_num_rows = int(np.ceil(num_plots / subset_num_cols))
if num_plots > 1:
# figsize = fig_params.pop('figsize')
base_figsize = fig_params.pop('figsize', \
figure_ratio(dataset, x_coord, y_coord,
fixed_width=10))
figsize = [base*num for base,num in
zip(base_figsize, (subset_num_cols,subset_num_rows))]
fig = plt.figure(figsize=figsize, **fig_params)
# Create each plot.
for time_ind, fig_ind in zip(range(0, len(times_not_all_nan), max_times_per_plot),
range(num_plots)):
lower_time_bound_ind, upper_time_bound_ind = \
time_ind, min(time_ind+max_times_per_plot, len(times_not_all_nan))
time_extents = times_not_all_nan[[lower_time_bound_ind, upper_time_bound_ind-1]]
# Retrieve or create the axes if necessary.
if len(times_not_all_nan) <= max_times_per_plot:
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
else:
ax = fig.add_subplot(subset_num_rows, subset_num_cols, fig_ind + 1)
fig_times_not_all_nan =\
times_not_all_nan[lower_time_bound_ind:upper_time_bound_ind]
plotting_data = all_plotting_data.loc[{time_agg_str:fig_times_not_all_nan}]
epochs = np.array(list(map(n64_to_epoch, fig_times_not_all_nan))) \
if time_agg_str == 'time' else None
x_locs = np_scale(epochs if time_agg_str == 'time' else fig_times_not_all_nan)
# Data variable plots within each plot.
data_arr_plots = []
legend_labels = []
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for plot_dict in plot_dicts:
for plot_type, plot_kwargs in plot_dict.items():
assert plot_type in all_plot_types, \
r"For the '{}' DataArray: plot_type '{}' not recognized"\
.format(data_arr_name, plot_type)
full_data_arr_plotting_data = plotting_data[data_arr_name].values
# Any times with all nan data are ignored in any plot type.
data_arr_nan_mask = \
np.any(~np.isnan(full_data_arr_plotting_data), axis=1)
# Skip plotting this data variable if it does not have
# enough data to plot.
if skip_plot(np.sum(data_arr_nan_mask), plot_type, plot_kwargs):
continue
# Remove times with all nan data.
data_arr_plotting_data = \
full_data_arr_plotting_data[data_arr_nan_mask]
# Large scales for x_locs can break the curve fitting
# for some reason.
data_arr_x_locs = x_locs[data_arr_nan_mask]
# Some plot types require aggregation.
if plot_type in plot_types_requiring_aggregation:
if agg_type not in many_to_one_agg_types:
raise ValueError("For the '{}' DataArray: the plot type "
"'{}' requires aggregation (currently using '{}'). "
"Please pass any of {} as the aggregation type "
"or change the plot type.".format(data_arr_name,\
plot_type, agg_type, many_to_one_agg_types))
# Some plot types cannot accept many-to-one aggregation.
if plot_type not in plot_types_handling_aggregation:
if agg_type not in many_to_many_agg_types:
raise ValueError("For the '{}' DataArray: "
"the plot type '{}' doesn't accept aggregation "
"(currently using '{}'). Please pass any of {} as "
"the aggregation type or change the plot type."
.format(data_arr_name, plot_type, agg_type,
many_to_many_agg_types))
if agg_type == 'mean':
y = ignore_warnings(np.nanmean, \
data_arr_plotting_data, axis=1)
elif agg_type == 'median':
y = ignore_warnings(np.nanmedian, \
data_arr_plotting_data, axis=1)
elif agg_type == 'none':
y = data_arr_plotting_data
# Create specified plot types.
plot_type_str = "" # Used to label the legend.
if plot_type == 'scatter':
data_arr_plots.append(ax.scatter(data_arr_x_locs, y,
**plot_kwargs))
plot_type_str += 'scatterplot'
elif plot_type == 'line':
data_arr_plots.append(ax.plot(data_arr_x_locs, y,
**plot_kwargs)[0])
plot_type_str += 'lineplot'
elif plot_type == 'box':
boxplot_nan_mask = ~np.isnan(y)
# Data formatted for matplotlib.pyplot.boxplot().
filtered_formatted_data = []
for i, (d, m) in enumerate(zip(y, boxplot_nan_mask)):
if len(d[m] != 0):
filtered_formatted_data.append(d[m])
box_width = 0.5*np.min(np.diff(data_arr_x_locs)) \
if len(data_arr_x_locs) > 1 else 0.5
# Provide default arguments.
plot_kwargs.setdefault('boxprops', dict(facecolor='orange'))
plot_kwargs.setdefault('flierprops', dict(marker='o',\
markersize=0.5))
plot_kwargs.setdefault('showfliers', False)
# `manage_xticks=False` to avoid excessive padding on x-axis.
bp = ax.boxplot(filtered_formatted_data,
widths=[box_width]*len(filtered_formatted_data),
positions=data_arr_x_locs, patch_artist=True,
manage_xticks=False, **plot_kwargs)
data_arr_plots.append(bp['boxes'][0])
plot_type_str += 'boxplot'
elif plot_type == 'gaussian':
data_arr_plots.append(
plot_curvefit(data_arr_x_locs, y, fit_type=plot_type,
plot_kwargs=plot_kwargs, ax=ax))
plot_type_str += 'gaussian fit'
elif plot_type == 'poly':
assert 'degree' in plot_kwargs, \
r"For the '{}' DataArray: When using 'poly' as "\
"the fit type, the fit kwargs must have 'degree'"\
"specified.".format(data_arr_name)
data_arr_plots.append(
plot_curvefit(data_arr_x_locs, y, fit_type=plot_type,
plot_kwargs=plot_kwargs, ax=ax))
plot_type_str += 'degree {} polynomial fit'\
.format(plot_kwargs['degree'])
elif plot_type == 'cubic_spline':
data_arr_plots.append(
plot_curvefit(data_arr_x_locs, y, fit_type=plot_type,
plot_kwargs=plot_kwargs, ax=ax))
plot_type_str += 'cubic spline fit'
plot_type_str += ' of {}'.format(agg_type) \
if agg_type != 'none' else ''
legend_labels.append('{} of {}'\
.format(plot_type_str, data_arr_name))
# Label the axes and create the legend.
date_strs = \
np.array(list(map(lambda time: np_dt64_to_str(time), fig_times_not_all_nan)))\
if time_agg_str=='time' else\
naive_months_ticks_by_week(fig_times_not_all_nan) \
if time_agg_str in ['week', 'weekofyear'] else\
month_ints_to_month_names(fig_times_not_all_nan)
plt.xticks(x_locs, date_strs, rotation=45, ha='right', rotation_mode='anchor')
if show_legend:
plt.legend(handles=data_arr_plots, labels=legend_labels, loc='best')
title_postpend = " ({} to {})".format(date_strs[0], date_strs[-1])
title_prepend = "Figure {}".format(fig_ind) if title is None else title
plt.title(title_prepend + title_postpend)
plt.tight_layout()
return fig | [
"def",
"xarray_time_series_plot",
"(",
"dataset",
",",
"plot_descs",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"fig_params",
"=",
"None",
",",
"scale_params",
"=",
"None",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"max_times_per_plot",
"=",
"None",
",",
"show_legend",
"=",
"True",
",",
"title",
"=",
"None",
")",
":",
"# Set default values for mutable data.",
"fig_params",
"=",
"{",
"}",
"if",
"fig_params",
"is",
"None",
"else",
"fig_params",
"fig_params",
".",
"setdefault",
"(",
"'figsize'",
",",
"(",
"18",
",",
"12",
")",
")",
"scale_params",
"=",
"{",
"}",
"if",
"scale_params",
"is",
"None",
"else",
"scale_params",
"# Lists of plot types that can and cannot accept many-to-one aggregation ",
"# for each time slice.",
"plot_types_requiring_aggregation",
"=",
"[",
"'line'",
",",
"'gaussian'",
",",
"'poly'",
",",
"'cubic_spline'",
"]",
"plot_types_handling_aggregation",
"=",
"[",
"'scatter'",
"]",
"+",
"plot_types_requiring_aggregation",
"plot_types_not_handling_aggregation",
"=",
"[",
"'box'",
"]",
"all_plot_types",
"=",
"plot_types_requiring_aggregation",
"+",
"plot_types_handling_aggregation",
"+",
"plot_types_not_handling_aggregation",
"# Aggregation types that aggregate all values for a given time to one value.",
"many_to_one_agg_types",
"=",
"[",
"'mean'",
",",
"'median'",
"]",
"# Aggregation types that aggregate to many values or do not aggregate.",
"many_to_many_agg_types",
"=",
"[",
"'none'",
"]",
"all_agg_types",
"=",
"many_to_one_agg_types",
"+",
"many_to_many_agg_types",
"# Determine how the data was aggregated, if at all.",
"possible_time_agg_strs",
"=",
"[",
"'week'",
",",
"'weekofyear'",
",",
"'month'",
"]",
"time_agg_str",
"=",
"'time'",
"for",
"possible_time_agg_str",
"in",
"possible_time_agg_strs",
":",
"if",
"possible_time_agg_str",
"in",
"list",
"(",
"dataset",
".",
"coords",
")",
":",
"time_agg_str",
"=",
"possible_time_agg_str",
"break",
"# Make the data 2D - time and a stack of all other dimensions.",
"non_time_dims",
"=",
"list",
"(",
"set",
"(",
"dataset",
".",
"dims",
")",
"-",
"{",
"time_agg_str",
"}",
")",
"all_plotting_bands",
"=",
"list",
"(",
"plot_descs",
".",
"keys",
"(",
")",
")",
"all_plotting_data",
"=",
"dataset",
"[",
"all_plotting_bands",
"]",
".",
"stack",
"(",
"stacked_data",
"=",
"non_time_dims",
")",
"all_times",
"=",
"all_plotting_data",
"[",
"time_agg_str",
"]",
".",
"values",
"# Mask out times for which no data variable to plot has any non-NaN data.",
"nan_mask_data_vars",
"=",
"list",
"(",
"all_plotting_data",
"[",
"all_plotting_bands",
"]",
".",
"notnull",
"(",
")",
".",
"data_vars",
".",
"values",
"(",
")",
")",
"for",
"i",
",",
"data_var",
"in",
"enumerate",
"(",
"nan_mask_data_vars",
")",
":",
"time_nan_mask",
"=",
"data_var",
".",
"values",
"if",
"i",
"==",
"0",
"else",
"time_nan_mask",
"|",
"data_var",
".",
"values",
"time_nan_mask",
"=",
"np",
".",
"any",
"(",
"time_nan_mask",
",",
"axis",
"=",
"1",
")",
"times_not_all_nan",
"=",
"all_times",
"[",
"time_nan_mask",
"]",
"all_plotting_data",
"=",
"all_plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"times_not_all_nan",
"}",
"]",
"# Scale",
"# if scale_params denotes the scaling type for the whole Dataset, scale the Dataset.",
"if",
"isinstance",
"(",
"scale_params",
",",
"str",
")",
":",
"all_plotting_data",
"=",
"xr_scale",
"(",
"all_plotting_data",
",",
"scaling",
"=",
"scale_params",
")",
"# else, it is a dictionary denoting how to scale each DataArray.",
"elif",
"len",
"(",
"scale_params",
")",
">",
"0",
":",
"for",
"data_arr_name",
",",
"scaling",
"in",
"scale_params",
".",
"items",
"(",
")",
":",
"all_plotting_data",
"[",
"data_arr_name",
"]",
"=",
"xr_scale",
"(",
"all_plotting_data",
"[",
"data_arr_name",
"]",
",",
"scaling",
"=",
"scaling",
")",
"# Handle the potential for multiple plots.",
"max_times_per_plot",
"=",
"len",
"(",
"times_not_all_nan",
")",
"if",
"max_times_per_plot",
"is",
"None",
"else",
"max_times_per_plot",
"num_plots",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"len",
"(",
"times_not_all_nan",
")",
"/",
"max_times_per_plot",
")",
")",
"subset_num_cols",
"=",
"2",
"subset_num_rows",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"num_plots",
"/",
"subset_num_cols",
")",
")",
"if",
"num_plots",
">",
"1",
":",
"# figsize = fig_params.pop('figsize')",
"base_figsize",
"=",
"fig_params",
".",
"pop",
"(",
"'figsize'",
",",
"figure_ratio",
"(",
"dataset",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"10",
")",
")",
"figsize",
"=",
"[",
"base",
"*",
"num",
"for",
"base",
",",
"num",
"in",
"zip",
"(",
"base_figsize",
",",
"(",
"subset_num_cols",
",",
"subset_num_rows",
")",
")",
"]",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
",",
"*",
"*",
"fig_params",
")",
"# Create each plot.",
"for",
"time_ind",
",",
"fig_ind",
"in",
"zip",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"times_not_all_nan",
")",
",",
"max_times_per_plot",
")",
",",
"range",
"(",
"num_plots",
")",
")",
":",
"lower_time_bound_ind",
",",
"upper_time_bound_ind",
"=",
"time_ind",
",",
"min",
"(",
"time_ind",
"+",
"max_times_per_plot",
",",
"len",
"(",
"times_not_all_nan",
")",
")",
"time_extents",
"=",
"times_not_all_nan",
"[",
"[",
"lower_time_bound_ind",
",",
"upper_time_bound_ind",
"-",
"1",
"]",
"]",
"# Retrieve or create the axes if necessary.",
"if",
"len",
"(",
"times_not_all_nan",
")",
"<=",
"max_times_per_plot",
":",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_params",
")",
"else",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"subset_num_rows",
",",
"subset_num_cols",
",",
"fig_ind",
"+",
"1",
")",
"fig_times_not_all_nan",
"=",
"times_not_all_nan",
"[",
"lower_time_bound_ind",
":",
"upper_time_bound_ind",
"]",
"plotting_data",
"=",
"all_plotting_data",
".",
"loc",
"[",
"{",
"time_agg_str",
":",
"fig_times_not_all_nan",
"}",
"]",
"epochs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"fig_times_not_all_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"None",
"x_locs",
"=",
"np_scale",
"(",
"epochs",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"fig_times_not_all_nan",
")",
"# Data variable plots within each plot.",
"data_arr_plots",
"=",
"[",
"]",
"legend_labels",
"=",
"[",
"]",
"# For each data array to plot...",
"for",
"data_arr_name",
",",
"agg_dict",
"in",
"plot_descs",
".",
"items",
"(",
")",
":",
"# For each aggregation type (e.g. 'mean', 'median')...",
"for",
"agg_type",
",",
"plot_dicts",
"in",
"agg_dict",
".",
"items",
"(",
")",
":",
"# For each plot for this aggregation type...",
"for",
"plot_dict",
"in",
"plot_dicts",
":",
"for",
"plot_type",
",",
"plot_kwargs",
"in",
"plot_dict",
".",
"items",
"(",
")",
":",
"assert",
"plot_type",
"in",
"all_plot_types",
",",
"r\"For the '{}' DataArray: plot_type '{}' not recognized\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
")",
"full_data_arr_plotting_data",
"=",
"plotting_data",
"[",
"data_arr_name",
"]",
".",
"values",
"# Any times with all nan data are ignored in any plot type.",
"data_arr_nan_mask",
"=",
"np",
".",
"any",
"(",
"~",
"np",
".",
"isnan",
"(",
"full_data_arr_plotting_data",
")",
",",
"axis",
"=",
"1",
")",
"# Skip plotting this data variable if it does not have ",
"# enough data to plot.",
"if",
"skip_plot",
"(",
"np",
".",
"sum",
"(",
"data_arr_nan_mask",
")",
",",
"plot_type",
",",
"plot_kwargs",
")",
":",
"continue",
"# Remove times with all nan data.",
"data_arr_plotting_data",
"=",
"full_data_arr_plotting_data",
"[",
"data_arr_nan_mask",
"]",
"# Large scales for x_locs can break the curve fitting ",
"# for some reason.",
"data_arr_x_locs",
"=",
"x_locs",
"[",
"data_arr_nan_mask",
"]",
"# Some plot types require aggregation.",
"if",
"plot_type",
"in",
"plot_types_requiring_aggregation",
":",
"if",
"agg_type",
"not",
"in",
"many_to_one_agg_types",
":",
"raise",
"ValueError",
"(",
"\"For the '{}' DataArray: the plot type \"",
"\"'{}' requires aggregation (currently using '{}'). \"",
"\"Please pass any of {} as the aggregation type \"",
"\"or change the plot type.\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
",",
"agg_type",
",",
"many_to_one_agg_types",
")",
")",
"# Some plot types cannot accept many-to-one aggregation.",
"if",
"plot_type",
"not",
"in",
"plot_types_handling_aggregation",
":",
"if",
"agg_type",
"not",
"in",
"many_to_many_agg_types",
":",
"raise",
"ValueError",
"(",
"\"For the '{}' DataArray: \"",
"\"the plot type '{}' doesn't accept aggregation \"",
"\"(currently using '{}'). Please pass any of {} as \"",
"\"the aggregation type or change the plot type.\"",
".",
"format",
"(",
"data_arr_name",
",",
"plot_type",
",",
"agg_type",
",",
"many_to_many_agg_types",
")",
")",
"if",
"agg_type",
"==",
"'mean'",
":",
"y",
"=",
"ignore_warnings",
"(",
"np",
".",
"nanmean",
",",
"data_arr_plotting_data",
",",
"axis",
"=",
"1",
")",
"elif",
"agg_type",
"==",
"'median'",
":",
"y",
"=",
"ignore_warnings",
"(",
"np",
".",
"nanmedian",
",",
"data_arr_plotting_data",
",",
"axis",
"=",
"1",
")",
"elif",
"agg_type",
"==",
"'none'",
":",
"y",
"=",
"data_arr_plotting_data",
"# Create specified plot types.",
"plot_type_str",
"=",
"\"\"",
"# Used to label the legend.",
"if",
"plot_type",
"==",
"'scatter'",
":",
"data_arr_plots",
".",
"append",
"(",
"ax",
".",
"scatter",
"(",
"data_arr_x_locs",
",",
"y",
",",
"*",
"*",
"plot_kwargs",
")",
")",
"plot_type_str",
"+=",
"'scatterplot'",
"elif",
"plot_type",
"==",
"'line'",
":",
"data_arr_plots",
".",
"append",
"(",
"ax",
".",
"plot",
"(",
"data_arr_x_locs",
",",
"y",
",",
"*",
"*",
"plot_kwargs",
")",
"[",
"0",
"]",
")",
"plot_type_str",
"+=",
"'lineplot'",
"elif",
"plot_type",
"==",
"'box'",
":",
"boxplot_nan_mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"y",
")",
"# Data formatted for matplotlib.pyplot.boxplot().",
"filtered_formatted_data",
"=",
"[",
"]",
"for",
"i",
",",
"(",
"d",
",",
"m",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"y",
",",
"boxplot_nan_mask",
")",
")",
":",
"if",
"len",
"(",
"d",
"[",
"m",
"]",
"!=",
"0",
")",
":",
"filtered_formatted_data",
".",
"append",
"(",
"d",
"[",
"m",
"]",
")",
"box_width",
"=",
"0.5",
"*",
"np",
".",
"min",
"(",
"np",
".",
"diff",
"(",
"data_arr_x_locs",
")",
")",
"if",
"len",
"(",
"data_arr_x_locs",
")",
">",
"1",
"else",
"0.5",
"# Provide default arguments.",
"plot_kwargs",
".",
"setdefault",
"(",
"'boxprops'",
",",
"dict",
"(",
"facecolor",
"=",
"'orange'",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'flierprops'",
",",
"dict",
"(",
"marker",
"=",
"'o'",
",",
"markersize",
"=",
"0.5",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'showfliers'",
",",
"False",
")",
"# `manage_xticks=False` to avoid excessive padding on x-axis.",
"bp",
"=",
"ax",
".",
"boxplot",
"(",
"filtered_formatted_data",
",",
"widths",
"=",
"[",
"box_width",
"]",
"*",
"len",
"(",
"filtered_formatted_data",
")",
",",
"positions",
"=",
"data_arr_x_locs",
",",
"patch_artist",
"=",
"True",
",",
"manage_xticks",
"=",
"False",
",",
"*",
"*",
"plot_kwargs",
")",
"data_arr_plots",
".",
"append",
"(",
"bp",
"[",
"'boxes'",
"]",
"[",
"0",
"]",
")",
"plot_type_str",
"+=",
"'boxplot'",
"elif",
"plot_type",
"==",
"'gaussian'",
":",
"data_arr_plots",
".",
"append",
"(",
"plot_curvefit",
"(",
"data_arr_x_locs",
",",
"y",
",",
"fit_type",
"=",
"plot_type",
",",
"plot_kwargs",
"=",
"plot_kwargs",
",",
"ax",
"=",
"ax",
")",
")",
"plot_type_str",
"+=",
"'gaussian fit'",
"elif",
"plot_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"plot_kwargs",
",",
"r\"For the '{}' DataArray: When using 'poly' as \"",
"\"the fit type, the fit kwargs must have 'degree'\"",
"\"specified.\"",
".",
"format",
"(",
"data_arr_name",
")",
"data_arr_plots",
".",
"append",
"(",
"plot_curvefit",
"(",
"data_arr_x_locs",
",",
"y",
",",
"fit_type",
"=",
"plot_type",
",",
"plot_kwargs",
"=",
"plot_kwargs",
",",
"ax",
"=",
"ax",
")",
")",
"plot_type_str",
"+=",
"'degree {} polynomial fit'",
".",
"format",
"(",
"plot_kwargs",
"[",
"'degree'",
"]",
")",
"elif",
"plot_type",
"==",
"'cubic_spline'",
":",
"data_arr_plots",
".",
"append",
"(",
"plot_curvefit",
"(",
"data_arr_x_locs",
",",
"y",
",",
"fit_type",
"=",
"plot_type",
",",
"plot_kwargs",
"=",
"plot_kwargs",
",",
"ax",
"=",
"ax",
")",
")",
"plot_type_str",
"+=",
"'cubic spline fit'",
"plot_type_str",
"+=",
"' of {}'",
".",
"format",
"(",
"agg_type",
")",
"if",
"agg_type",
"!=",
"'none'",
"else",
"''",
"legend_labels",
".",
"append",
"(",
"'{} of {}'",
".",
"format",
"(",
"plot_type_str",
",",
"data_arr_name",
")",
")",
"# Label the axes and create the legend.",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"fig_times_not_all_nan",
")",
")",
")",
"if",
"time_agg_str",
"==",
"'time'",
"else",
"naive_months_ticks_by_week",
"(",
"fig_times_not_all_nan",
")",
"if",
"time_agg_str",
"in",
"[",
"'week'",
",",
"'weekofyear'",
"]",
"else",
"month_ints_to_month_names",
"(",
"fig_times_not_all_nan",
")",
"plt",
".",
"xticks",
"(",
"x_locs",
",",
"date_strs",
",",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
",",
"rotation_mode",
"=",
"'anchor'",
")",
"if",
"show_legend",
":",
"plt",
".",
"legend",
"(",
"handles",
"=",
"data_arr_plots",
",",
"labels",
"=",
"legend_labels",
",",
"loc",
"=",
"'best'",
")",
"title_postpend",
"=",
"\" ({} to {})\"",
".",
"format",
"(",
"date_strs",
"[",
"0",
"]",
",",
"date_strs",
"[",
"-",
"1",
"]",
")",
"title_prepend",
"=",
"\"Figure {}\"",
".",
"format",
"(",
"fig_ind",
")",
"if",
"title",
"is",
"None",
"else",
"title",
"plt",
".",
"title",
"(",
"title_prepend",
"+",
"title_postpend",
")",
"plt",
".",
"tight_layout",
"(",
")",
"return",
"fig"
] | [
258,
0
] | [
552,
14
] | python | en | ['en', 'error', 'th'] | False |
plot_curvefit | (x, y, fit_type, x_smooth=None, n_pts=200, fig_params={}, plot_kwargs={}, fig=None, ax=None) |
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['poly', 'gaussian', 'cubic_spline'].
The option 'poly' plots a polynomial fit. The option 'gaussian' plots a Gaussian fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
:Authors:
John Rattz ([email protected])
|
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['poly', 'gaussian', 'cubic_spline'].
The option 'poly' plots a polynomial fit. The option 'gaussian' plots a Gaussian fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
:Authors:
John Rattz (john.c.rattz | def plot_curvefit(x, y, fit_type, x_smooth=None, n_pts=200, fig_params={}, plot_kwargs={}, fig=None, ax=None):
"""
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['poly', 'gaussian', 'cubic_spline'].
The option 'poly' plots a polynomial fit. The option 'gaussian' plots a Gaussian fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supercedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
:Authors:
John Rattz ([email protected])
"""
# Avoid modifying the original arguments.
fig_params, plot_kwargs = fig_params.copy(), plot_kwargs.copy()
fig_params.setdefault('figsize', (12,6))
plot_kwargs.setdefault('linestyle', '-')
# Retrieve or create the axes if necessary.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
if x_smooth is None:
x_smooth = np.linspace(x.min(), x.max(), n_pts)
if fit_type == 'gaussian':
y_smooth = gaussian_fit(x, y, x_smooth)
elif fit_type == 'poly':
assert 'degree' in plot_kwargs.keys(), "When plotting a polynomal fit, there must be" \
"a 'degree' entry in the plot_kwargs parameter."
degree = plot_kwargs.pop('degree')
y_smooth = poly_fit(x, y, degree, x_smooth)
elif fit_type == 'cubic_spline':
cs = CubicSpline(x,y)
y_smooth = cs(x_smooth)
return ax.plot(x_smooth, y_smooth, **plot_kwargs)[0] | [
"def",
"plot_curvefit",
"(",
"x",
",",
"y",
",",
"fit_type",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"200",
",",
"fig_params",
"=",
"{",
"}",
",",
"plot_kwargs",
"=",
"{",
"}",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"# Avoid modifying the original arguments.",
"fig_params",
",",
"plot_kwargs",
"=",
"fig_params",
".",
"copy",
"(",
")",
",",
"plot_kwargs",
".",
"copy",
"(",
")",
"fig_params",
".",
"setdefault",
"(",
"'figsize'",
",",
"(",
"12",
",",
"6",
")",
")",
"plot_kwargs",
".",
"setdefault",
"(",
"'linestyle'",
",",
"'-'",
")",
"# Retrieve or create the axes if necessary.",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_params",
")",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth",
"=",
"np",
".",
"linspace",
"(",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
",",
"n_pts",
")",
"if",
"fit_type",
"==",
"'gaussian'",
":",
"y_smooth",
"=",
"gaussian_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"plot_kwargs",
".",
"keys",
"(",
")",
",",
"\"When plotting a polynomal fit, there must be\"",
"\"a 'degree' entry in the plot_kwargs parameter.\"",
"degree",
"=",
"plot_kwargs",
".",
"pop",
"(",
"'degree'",
")",
"y_smooth",
"=",
"poly_fit",
"(",
"x",
",",
"y",
",",
"degree",
",",
"x_smooth",
")",
"elif",
"fit_type",
"==",
"'cubic_spline'",
":",
"cs",
"=",
"CubicSpline",
"(",
"x",
",",
"y",
")",
"y_smooth",
"=",
"cs",
"(",
"x_smooth",
")",
"return",
"ax",
".",
"plot",
"(",
"x_smooth",
",",
"y_smooth",
",",
"*",
"*",
"plot_kwargs",
")",
"[",
"0",
"]"
] | [
556,
0
] | [
615,
56
] | python | en | ['en', 'error', 'th'] | False |
plot_band | (dataset, figsize=(20,15), fontsize=24, legend_fontsize=24) |
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
|
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
| def plot_band(dataset, figsize=(20,15), fontsize=24, legend_fontsize=24):
"""
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
"""
# Calculations
times = dataset.time.values
epochs = np.sort(np.array(list(map(n64_to_epoch, times))))
x_locs = (epochs - epochs.min()) / (epochs.max() - epochs.min())
means = dataset.mean(dim=['latitude','longitude'], skipna = True).values
medians = dataset.median(dim=['latitude','longitude'], skipna = True).values
mask = ~np.isnan(means) & ~np.isnan(medians)
plt.figure(figsize=figsize)
ax = plt.gca()
# Shaded Area (percentiles)
with warnings.catch_warnings():
# Ignore warning about encountering an All-NaN slice. Some acquisitions have all-NaN values.
warnings.simplefilter("ignore", category=RuntimeWarning)
quarter = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
25,
axis = 1
)
three_quarters = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
75,
axis = 1
)
np.array(quarter)
np.array(three_quarters)
ax.grid(color='lightgray', linestyle='-', linewidth=1)
fillcolor='gray'
fillalpha=0.4
plt.fill_between(x_locs, quarter, three_quarters, interpolate=False, color=fillcolor, alpha=fillalpha,
label="25th and 75th percentile band")
#Medians
plt.plot(x_locs,medians,color="black",marker="o",linestyle='None', label = "Medians")
#The Actual Plot
plt.plot(x_locs,means,color="blue",label="Mean")
#Linear Regression (on mean)
m, b = np.polyfit(x_locs[mask], means[mask], 1)
plt.plot(x_locs, m*x_locs + b, '-', color="red",label="linear regression of means",linewidth = 3.0)
#Gaussian Curve
plot_curvefit(x_locs[mask], means[mask], fit_type='gaussian', ax=ax,
plot_kwargs=dict(linestyle='-', label="Gaussian smoothed of means",
alpha=1, color='limegreen', linewidth = 3.0))
#Formatting
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times[mask])))
ax.grid(color='k', alpha=0.1, linestyle='-', linewidth=1)
ax.xaxis.set_major_formatter(FuncFormatter(tfmt))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=legend_fontsize)
plt.xticks(x_locs, date_strs, rotation=45, fontsize=fontsize)
plt.yticks(fontsize=fontsize)
ax.set_xlabel('Time', fontsize=fontsize)
ax.set_ylabel('Value', fontsize=fontsize)
plt.show() | [
"def",
"plot_band",
"(",
"dataset",
",",
"figsize",
"=",
"(",
"20",
",",
"15",
")",
",",
"fontsize",
"=",
"24",
",",
"legend_fontsize",
"=",
"24",
")",
":",
"# Calculations",
"times",
"=",
"dataset",
".",
"time",
".",
"values",
"epochs",
"=",
"np",
".",
"sort",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"n64_to_epoch",
",",
"times",
")",
")",
")",
")",
"x_locs",
"=",
"(",
"epochs",
"-",
"epochs",
".",
"min",
"(",
")",
")",
"/",
"(",
"epochs",
".",
"max",
"(",
")",
"-",
"epochs",
".",
"min",
"(",
")",
")",
"means",
"=",
"dataset",
".",
"mean",
"(",
"dim",
"=",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"skipna",
"=",
"True",
")",
".",
"values",
"medians",
"=",
"dataset",
".",
"median",
"(",
"dim",
"=",
"[",
"'latitude'",
",",
"'longitude'",
"]",
",",
"skipna",
"=",
"True",
")",
".",
"values",
"mask",
"=",
"~",
"np",
".",
"isnan",
"(",
"means",
")",
"&",
"~",
"np",
".",
"isnan",
"(",
"medians",
")",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# Shaded Area (percentiles)",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# Ignore warning about encountering an All-NaN slice. Some acquisitions have all-NaN values.",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"quarter",
"=",
"np",
".",
"nanpercentile",
"(",
"dataset",
".",
"values",
".",
"reshape",
"(",
"(",
"len",
"(",
"dataset",
"[",
"'time'",
"]",
")",
",",
"len",
"(",
"dataset",
"[",
"'latitude'",
"]",
")",
"*",
"len",
"(",
"dataset",
"[",
"'longitude'",
"]",
")",
")",
")",
",",
"25",
",",
"axis",
"=",
"1",
")",
"three_quarters",
"=",
"np",
".",
"nanpercentile",
"(",
"dataset",
".",
"values",
".",
"reshape",
"(",
"(",
"len",
"(",
"dataset",
"[",
"'time'",
"]",
")",
",",
"len",
"(",
"dataset",
"[",
"'latitude'",
"]",
")",
"*",
"len",
"(",
"dataset",
"[",
"'longitude'",
"]",
")",
")",
")",
",",
"75",
",",
"axis",
"=",
"1",
")",
"np",
".",
"array",
"(",
"quarter",
")",
"np",
".",
"array",
"(",
"three_quarters",
")",
"ax",
".",
"grid",
"(",
"color",
"=",
"'lightgray'",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"1",
")",
"fillcolor",
"=",
"'gray'",
"fillalpha",
"=",
"0.4",
"plt",
".",
"fill_between",
"(",
"x_locs",
",",
"quarter",
",",
"three_quarters",
",",
"interpolate",
"=",
"False",
",",
"color",
"=",
"fillcolor",
",",
"alpha",
"=",
"fillalpha",
",",
"label",
"=",
"\"25th and 75th percentile band\"",
")",
"#Medians",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"medians",
",",
"color",
"=",
"\"black\"",
",",
"marker",
"=",
"\"o\"",
",",
"linestyle",
"=",
"'None'",
",",
"label",
"=",
"\"Medians\"",
")",
"#The Actual Plot",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"means",
",",
"color",
"=",
"\"blue\"",
",",
"label",
"=",
"\"Mean\"",
")",
"#Linear Regression (on mean)",
"m",
",",
"b",
"=",
"np",
".",
"polyfit",
"(",
"x_locs",
"[",
"mask",
"]",
",",
"means",
"[",
"mask",
"]",
",",
"1",
")",
"plt",
".",
"plot",
"(",
"x_locs",
",",
"m",
"*",
"x_locs",
"+",
"b",
",",
"'-'",
",",
"color",
"=",
"\"red\"",
",",
"label",
"=",
"\"linear regression of means\"",
",",
"linewidth",
"=",
"3.0",
")",
"#Gaussian Curve",
"plot_curvefit",
"(",
"x_locs",
"[",
"mask",
"]",
",",
"means",
"[",
"mask",
"]",
",",
"fit_type",
"=",
"'gaussian'",
",",
"ax",
"=",
"ax",
",",
"plot_kwargs",
"=",
"dict",
"(",
"linestyle",
"=",
"'-'",
",",
"label",
"=",
"\"Gaussian smoothed of means\"",
",",
"alpha",
"=",
"1",
",",
"color",
"=",
"'limegreen'",
",",
"linewidth",
"=",
"3.0",
")",
")",
"#Formatting",
"date_strs",
"=",
"np",
".",
"array",
"(",
"list",
"(",
"map",
"(",
"lambda",
"time",
":",
"np_dt64_to_str",
"(",
"time",
")",
",",
"times",
"[",
"mask",
"]",
")",
")",
")",
"ax",
".",
"grid",
"(",
"color",
"=",
"'k'",
",",
"alpha",
"=",
"0.1",
",",
"linestyle",
"=",
"'-'",
",",
"linewidth",
"=",
"1",
")",
"ax",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"FuncFormatter",
"(",
"tfmt",
")",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'center left'",
",",
"bbox_to_anchor",
"=",
"(",
"1",
",",
"0.5",
")",
",",
"fontsize",
"=",
"legend_fontsize",
")",
"plt",
".",
"xticks",
"(",
"x_locs",
",",
"date_strs",
",",
"rotation",
"=",
"45",
",",
"fontsize",
"=",
"fontsize",
")",
"plt",
".",
"yticks",
"(",
"fontsize",
"=",
"fontsize",
")",
"ax",
".",
"set_xlabel",
"(",
"'Time'",
",",
"fontsize",
"=",
"fontsize",
")",
"ax",
".",
"set_ylabel",
"(",
"'Value'",
",",
"fontsize",
"=",
"fontsize",
")",
"plt",
".",
"show",
"(",
")"
] | [
619,
0
] | [
695,
14
] | python | en | ['en', 'error', 'th'] | False |
convert_name_rgb_255 | (color) |
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str or list (size 3)
The color name to convert or a list of red, green, and blue already in range [0,255].
|
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument. | def convert_name_rgb_255(color):
"""
Converts a name of a matplotlib color to a list of rgb values in the range [0,255].
Else, returns the original argument.
Parameters
----------
color: str or list (size 3)
The color name to convert or a list of red, green, and blue already in range [0,255].
"""
return [255*rgb for rgb in mpl.colors.to_rgb(color)] if isinstance(color,str) else color | [
"def",
"convert_name_rgb_255",
"(",
"color",
")",
":",
"return",
"[",
"255",
"*",
"rgb",
"for",
"rgb",
"in",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"]",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color"
] | [
748,
0
] | [
758,
92
] | python | en | ['en', 'error', 'th'] | False |
norm_color | (color) |
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplolib color or a .
|
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplolib color or a .
| def norm_color(color):
"""
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplolib color or a .
"""
color = convert_name_rgb_255(color)
if len(color) == 3:
color = [rgb/255 for rgb in color]
return color | [
"def",
"norm_color",
"(",
"color",
")",
":",
"color",
"=",
"convert_name_rgb_255",
"(",
"color",
")",
"if",
"len",
"(",
"color",
")",
"==",
"3",
":",
"color",
"=",
"[",
"rgb",
"/",
"255",
"for",
"rgb",
"in",
"color",
"]",
"return",
"color"
] | [
760,
0
] | [
773,
16
] | python | en | ['en', 'error', 'th'] | False |
create_discrete_color_map | (data_range=None, colors=None, cmap=None,
th=None, pts=None, cmap_name='my_cmap',
data_range_fmt=None, pts_fmt=None) |
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
:Authors:
John Rattz ([email protected])
|
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
:Authors:
John Rattz (john.c.rattz | def create_discrete_color_map(data_range=None, colors=None, cmap=None,
th=None, pts=None, cmap_name='my_cmap',
data_range_fmt=None, pts_fmt=None):
"""
Creates a discrete matplotlib LinearSegmentedColormap with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
:Authors:
John Rattz ([email protected])
"""
assert (colors is None) ^ (cmap is None), \
"Exclusively either `colors` or `cmap` must be specified."
assert th is None or pts is None, \
"The parameters `th` or `pts` may be specified, but not both."
cmap = plt.get_cmap(cmap) if isinstance(cmap, str) else cmap
if th is None: # If `th` is not supplied, construct it based on other arguments.
if pts is not None:
if isinstance(pts, int): # Use `pts` as the number of evenly-spaced points.
assert pts > 0, "The number of points specified by `pts` must be positive."
th_spacing = (data_range[1] - data_range[0])/pts
th = np.linspace(data_range[0]+th_spacing, data_range[1]-th_spacing, pts-1)
else: # Use `pts` as a list-like of points to put thresholds between.
assert data_range[0] <= min(pts) and max(pts) <= data_range[1], \
"The values in `pts` must be within `data_range`, inclusive."
# Choose imaginary lower and upper bounds of the data to scale `pts` with
# so that the first and last color regions are sized appropriately.
data_range_fmt = [None]*2 if data_range_fmt is None else data_range_fmt
data_range_fmt[0] = pts[0] - (pts[1] - pts[0])/2
data_range_fmt[1] = pts[-1] + (pts[-1] - pts[-2])/2
pts = np.interp(pts, data_range_fmt, data_range)#(0,1))
# pts = list(map(lambda pt: norm_range(pt, data_range_fmt), pts))
th = [pts[ind-1] + (pts[ind] - pts[ind-1])/2 for ind in range(1, len(pts))]
else:
assert colors is not None, \
"If neither `th` nor `pts` are specified, `colors` must be specified."
th_spacing = (data_range[1] - data_range[0])/len(colors)
th = np.linspace(data_range[0]+th_spacing, data_range[1]-th_spacing, len(colors)-1)
else:
assert len(th) == 0 or (data_range[0] < min(th) and max(th) < data_range[1]), \
"The values in `th` must be within `data_range`, exclusive."
# Normalize threshold values based on the data range.
th = [(val-data_range[0])/(data_range[1]-data_range[0]) for val in th]
th = np.interp(th, data_range, (0,1))
th = [0.0] + list(th) + [1.0]
if pts_fmt is not None:
for ind in range(len(th)-1):
pts_fmt[ind] = th[ind] + (th[ind+1] - th[ind])/2
if colors is None: # If `colors` is not supplied, construct it based on other arguments.
assert cmap is not None, \
"If `colors` is not specified, `cmap` must be specified."
colors = [cmap(th[ind-1] + (th[ind] - th[ind-1])/2) for ind in range(1, len(th))]
else:
colors = list(map(norm_color, colors))
cdict = {}
# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.
primary_colors = ['red', 'green', 'blue']
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color,str) else color) for color in colors]
# For each color entry to go into the color dictionary...
for primary_color_ind, primary_color in enumerate(primary_colors):
cdict_entry = [None]*len(th)
# For each threshold (as well as 0.0 and 1.0), specify the values for this primary color.
for row_ind, th_ind in enumerate(range(len(th))):
# Get the two colors that this threshold corresponds to.
th_color_inds = [0,0] if th_ind==0 else \
[len(colors)-1, len(colors)-1] if th_ind==len(th)-1 else \
[th_ind-1, th_ind]
primary_color_vals = [color_rgbs[th_color_ind][primary_color_ind] for th_color_ind in th_color_inds]
cdict_entry[row_ind] = (th[th_ind],) + tuple(primary_color_vals)
cdict[primary_color] = cdict_entry
cmap = LinearSegmentedColormap(cmap_name, cdict)
return cmap | [
"def",
"create_discrete_color_map",
"(",
"data_range",
"=",
"None",
",",
"colors",
"=",
"None",
",",
"cmap",
"=",
"None",
",",
"th",
"=",
"None",
",",
"pts",
"=",
"None",
",",
"cmap_name",
"=",
"'my_cmap'",
",",
"data_range_fmt",
"=",
"None",
",",
"pts_fmt",
"=",
"None",
")",
":",
"assert",
"(",
"colors",
"is",
"None",
")",
"^",
"(",
"cmap",
"is",
"None",
")",
",",
"\"Exclusively either `colors` or `cmap` must be specified.\"",
"assert",
"th",
"is",
"None",
"or",
"pts",
"is",
"None",
",",
"\"The parameters `th` or `pts` may be specified, but not both.\"",
"cmap",
"=",
"plt",
".",
"get_cmap",
"(",
"cmap",
")",
"if",
"isinstance",
"(",
"cmap",
",",
"str",
")",
"else",
"cmap",
"if",
"th",
"is",
"None",
":",
"# If `th` is not supplied, construct it based on other arguments.",
"if",
"pts",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"pts",
",",
"int",
")",
":",
"# Use `pts` as the number of evenly-spaced points.",
"assert",
"pts",
">",
"0",
",",
"\"The number of points specified by `pts` must be positive.\"",
"th_spacing",
"=",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"pts",
"th",
"=",
"np",
".",
"linspace",
"(",
"data_range",
"[",
"0",
"]",
"+",
"th_spacing",
",",
"data_range",
"[",
"1",
"]",
"-",
"th_spacing",
",",
"pts",
"-",
"1",
")",
"else",
":",
"# Use `pts` as a list-like of points to put thresholds between.",
"assert",
"data_range",
"[",
"0",
"]",
"<=",
"min",
"(",
"pts",
")",
"and",
"max",
"(",
"pts",
")",
"<=",
"data_range",
"[",
"1",
"]",
",",
"\"The values in `pts` must be within `data_range`, inclusive.\"",
"# Choose imaginary lower and upper bounds of the data to scale `pts` with",
"# so that the first and last color regions are sized appropriately.",
"data_range_fmt",
"=",
"[",
"None",
"]",
"*",
"2",
"if",
"data_range_fmt",
"is",
"None",
"else",
"data_range_fmt",
"data_range_fmt",
"[",
"0",
"]",
"=",
"pts",
"[",
"0",
"]",
"-",
"(",
"pts",
"[",
"1",
"]",
"-",
"pts",
"[",
"0",
"]",
")",
"/",
"2",
"data_range_fmt",
"[",
"1",
"]",
"=",
"pts",
"[",
"-",
"1",
"]",
"+",
"(",
"pts",
"[",
"-",
"1",
"]",
"-",
"pts",
"[",
"-",
"2",
"]",
")",
"/",
"2",
"pts",
"=",
"np",
".",
"interp",
"(",
"pts",
",",
"data_range_fmt",
",",
"data_range",
")",
"#(0,1))",
"# pts = list(map(lambda pt: norm_range(pt, data_range_fmt), pts))",
"th",
"=",
"[",
"pts",
"[",
"ind",
"-",
"1",
"]",
"+",
"(",
"pts",
"[",
"ind",
"]",
"-",
"pts",
"[",
"ind",
"-",
"1",
"]",
")",
"/",
"2",
"for",
"ind",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"pts",
")",
")",
"]",
"else",
":",
"assert",
"colors",
"is",
"not",
"None",
",",
"\"If neither `th` nor `pts` are specified, `colors` must be specified.\"",
"th_spacing",
"=",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"len",
"(",
"colors",
")",
"th",
"=",
"np",
".",
"linspace",
"(",
"data_range",
"[",
"0",
"]",
"+",
"th_spacing",
",",
"data_range",
"[",
"1",
"]",
"-",
"th_spacing",
",",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"else",
":",
"assert",
"len",
"(",
"th",
")",
"==",
"0",
"or",
"(",
"data_range",
"[",
"0",
"]",
"<",
"min",
"(",
"th",
")",
"and",
"max",
"(",
"th",
")",
"<",
"data_range",
"[",
"1",
"]",
")",
",",
"\"The values in `th` must be within `data_range`, exclusive.\"",
"# Normalize threshold values based on the data range.",
"th",
"=",
"[",
"(",
"val",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
"for",
"val",
"in",
"th",
"]",
"th",
"=",
"np",
".",
"interp",
"(",
"th",
",",
"data_range",
",",
"(",
"0",
",",
"1",
")",
")",
"th",
"=",
"[",
"0.0",
"]",
"+",
"list",
"(",
"th",
")",
"+",
"[",
"1.0",
"]",
"if",
"pts_fmt",
"is",
"not",
"None",
":",
"for",
"ind",
"in",
"range",
"(",
"len",
"(",
"th",
")",
"-",
"1",
")",
":",
"pts_fmt",
"[",
"ind",
"]",
"=",
"th",
"[",
"ind",
"]",
"+",
"(",
"th",
"[",
"ind",
"+",
"1",
"]",
"-",
"th",
"[",
"ind",
"]",
")",
"/",
"2",
"if",
"colors",
"is",
"None",
":",
"# If `colors` is not supplied, construct it based on other arguments.",
"assert",
"cmap",
"is",
"not",
"None",
",",
"\"If `colors` is not specified, `cmap` must be specified.\"",
"colors",
"=",
"[",
"cmap",
"(",
"th",
"[",
"ind",
"-",
"1",
"]",
"+",
"(",
"th",
"[",
"ind",
"]",
"-",
"th",
"[",
"ind",
"-",
"1",
"]",
")",
"/",
"2",
")",
"for",
"ind",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"th",
")",
")",
"]",
"else",
":",
"colors",
"=",
"list",
"(",
"map",
"(",
"norm_color",
",",
"colors",
")",
")",
"cdict",
"=",
"{",
"}",
"# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.",
"primary_colors",
"=",
"[",
"'red'",
",",
"'green'",
",",
"'blue'",
"]",
"# Get the 3-tuples of rgb values for the colors.",
"color_rgbs",
"=",
"[",
"(",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color",
")",
"for",
"color",
"in",
"colors",
"]",
"# For each color entry to go into the color dictionary...",
"for",
"primary_color_ind",
",",
"primary_color",
"in",
"enumerate",
"(",
"primary_colors",
")",
":",
"cdict_entry",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"th",
")",
"# For each threshold (as well as 0.0 and 1.0), specify the values for this primary color.",
"for",
"row_ind",
",",
"th_ind",
"in",
"enumerate",
"(",
"range",
"(",
"len",
"(",
"th",
")",
")",
")",
":",
"# Get the two colors that this threshold corresponds to.",
"th_color_inds",
"=",
"[",
"0",
",",
"0",
"]",
"if",
"th_ind",
"==",
"0",
"else",
"[",
"len",
"(",
"colors",
")",
"-",
"1",
",",
"len",
"(",
"colors",
")",
"-",
"1",
"]",
"if",
"th_ind",
"==",
"len",
"(",
"th",
")",
"-",
"1",
"else",
"[",
"th_ind",
"-",
"1",
",",
"th_ind",
"]",
"primary_color_vals",
"=",
"[",
"color_rgbs",
"[",
"th_color_ind",
"]",
"[",
"primary_color_ind",
"]",
"for",
"th_color_ind",
"in",
"th_color_inds",
"]",
"cdict_entry",
"[",
"row_ind",
"]",
"=",
"(",
"th",
"[",
"th_ind",
"]",
",",
")",
"+",
"tuple",
"(",
"primary_color_vals",
")",
"cdict",
"[",
"primary_color",
"]",
"=",
"cdict_entry",
"cmap",
"=",
"LinearSegmentedColormap",
"(",
"cmap_name",
",",
"cdict",
")",
"return",
"cmap"
] | [
779,
0
] | [
885,
15
] | python | en | ['en', 'error', 'th'] | False |
create_gradient_color_map | (data_range, colors, positions=None, cmap_name='my_cmap') |
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
|
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
| def create_gradient_color_map(data_range, colors, positions=None, cmap_name='my_cmap'):
"""
Creates a gradient colormap with a LinearSegmentedColormap. Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
"""
# Normalize position values based on the data range.
if positions is None:
range_size = data_range[1] - data_range[0]
spacing = range_size / (len(colors) - 1)
positions = [spacing*i for i in range(1, len(colors)-1)]
else:
positions = list(map(lambda val: (val - data_range[0])/(data_range[1] - data_range[0]), positions))
colors = list(map(norm_color, colors)) # Normalize color values for colormap creation.
positions = [0.0] + positions + [1.0]
cdict = {}
# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.
primary_colors = ['red', 'green', 'blue']
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color,str) else color) for color in colors]
cdict = {'red':[], 'green':[], 'blue':[]}
for pos, color in zip(positions, color_rgbs):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
return LinearSegmentedColormap(cmap_name, cdict) | [
"def",
"create_gradient_color_map",
"(",
"data_range",
",",
"colors",
",",
"positions",
"=",
"None",
",",
"cmap_name",
"=",
"'my_cmap'",
")",
":",
"# Normalize position values based on the data range.",
"if",
"positions",
"is",
"None",
":",
"range_size",
"=",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
"spacing",
"=",
"range_size",
"/",
"(",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"positions",
"=",
"[",
"spacing",
"*",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"colors",
")",
"-",
"1",
")",
"]",
"else",
":",
"positions",
"=",
"list",
"(",
"map",
"(",
"lambda",
"val",
":",
"(",
"val",
"-",
"data_range",
"[",
"0",
"]",
")",
"/",
"(",
"data_range",
"[",
"1",
"]",
"-",
"data_range",
"[",
"0",
"]",
")",
",",
"positions",
")",
")",
"colors",
"=",
"list",
"(",
"map",
"(",
"norm_color",
",",
"colors",
")",
")",
"# Normalize color values for colormap creation.",
"positions",
"=",
"[",
"0.0",
"]",
"+",
"positions",
"+",
"[",
"1.0",
"]",
"cdict",
"=",
"{",
"}",
"# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.",
"primary_colors",
"=",
"[",
"'red'",
",",
"'green'",
",",
"'blue'",
"]",
"# Get the 3-tuples of rgb values for the colors.",
"color_rgbs",
"=",
"[",
"(",
"mpl",
".",
"colors",
".",
"to_rgb",
"(",
"color",
")",
"if",
"isinstance",
"(",
"color",
",",
"str",
")",
"else",
"color",
")",
"for",
"color",
"in",
"colors",
"]",
"cdict",
"=",
"{",
"'red'",
":",
"[",
"]",
",",
"'green'",
":",
"[",
"]",
",",
"'blue'",
":",
"[",
"]",
"}",
"for",
"pos",
",",
"color",
"in",
"zip",
"(",
"positions",
",",
"color_rgbs",
")",
":",
"cdict",
"[",
"'red'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"0",
"]",
",",
"color",
"[",
"0",
"]",
")",
")",
"cdict",
"[",
"'green'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"1",
"]",
",",
"color",
"[",
"1",
"]",
")",
")",
"cdict",
"[",
"'blue'",
"]",
".",
"append",
"(",
"(",
"pos",
",",
"color",
"[",
"2",
"]",
",",
"color",
"[",
"2",
"]",
")",
")",
"return",
"LinearSegmentedColormap",
"(",
"cmap_name",
",",
"cdict",
")"
] | [
887,
0
] | [
934,
52
] | python | en | ['en', 'error', 'th'] | False |
binary_class_change_plot | (dataarrays, mask=None, x_coord='longitude', y_coord='latitude',
colors=None, class_legend_label=None, width=10, fig=None, ax=None,
title=None, fig_kwargs={}, title_kwargs={}, imshow_kwargs={},
x_label_kwargs={}, y_label_kwargs={}, legend_kwargs={}) |
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `color_mask`.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like:
A list-like of list-likes of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color.
If `dataarrays` contains one DataArray, these are the colors for pixels.
Provide 3 color entries - for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, these are the colors for pixels that have zero
or more than zero times in which they are members of the class between the time periods.
Provide 4 color entires - (never,never),(never,some),(some,never),(some,some) class membership.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield legend labels
like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
title: str
The title of the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
Returns
-------
(fig,ax), pcts:
A 2-tuple of the figure and axes followed by a list of either 3 or 4 percents of
pixel membership, depending on whether `dataarray` contains one or two DataArrays.
If `dataarrays` contains one DataArray, there are 3 percents for never, sometimes,
and always class membership.
If `dataarrays` contains two DataArrays, there are 4 percents for
(never,never),(never,some),(some,never),(some,some) class membership.
:Authors:
John Rattz ([email protected])
|
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `color_mask`.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like:
A list-like of list-likes of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color.
If `dataarrays` contains one DataArray, these are the colors for pixels.
Provide 3 color entries - for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, these are the colors for pixels that have zero
or more than zero times in which they are members of the class between the time periods.
Provide 4 color entires - (never,never),(never,some),(some,never),(some,some) class membership.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield legend labels
like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
title: str
The title of the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
Returns
-------
(fig,ax), pcts:
A 2-tuple of the figure and axes followed by a list of either 3 or 4 percents of
pixel membership, depending on whether `dataarray` contains one or two DataArrays. | def binary_class_change_plot(dataarrays, mask=None, x_coord='longitude', y_coord='latitude',
colors=None, class_legend_label=None, width=10, fig=None, ax=None,
title=None, fig_kwargs={}, title_kwargs={}, imshow_kwargs={},
x_label_kwargs={}, y_label_kwargs={}, legend_kwargs={}):
"""
Creates a figure showing one of the following, depending on the format of arguments:
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `color_mask`.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like:
A list-like of list-likes of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color.
If `dataarrays` contains one DataArray, these are the colors for pixels.
Provide 3 color entries - for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, these are the colors for pixels that have zero
or more than zero times in which they are members of the class between the time periods.
Provide 4 color entires - (never,never),(never,some),(some,never),(some,some) class membership.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield legend labels
like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
title: str
The title of the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
Returns
-------
(fig,ax), pcts:
A 2-tuple of the figure and axes followed by a list of either 3 or 4 percents of
pixel membership, depending on whether `dataarray` contains one or two DataArrays.
If `dataarrays` contains one DataArray, there are 3 percents for never, sometimes,
and always class membership.
If `dataarrays` contains two DataArrays, there are 4 percents for
(never,never),(never,some),(some,never),(some,some) class membership.
:Authors:
John Rattz ([email protected])
"""
# Avoid modifying the original arguments.
fig_kwargs, title_kwargs, legend_kwargs = \
fig_kwargs.copy(), title_kwargs.copy(), legend_kwargs.copy()
# Handle conversion of matplotlib color names to lists of rgb values (range [0,255] for plt.imshow()).
colors = list(map(convert_name_rgb_255, colors))
def get_none_chng_perm_masks(dataarray, time_dim='time'):
"""
For a DataArray of binary classifications (0 or 1) with a 'time' dimension,
get a list of masks indicating where the points are, in order, never, sometimes,
or always a member of the class (1 indicates membership), considering only
non-NaN values for those points.
"""
# Get the sum of classifications across time.
sum_cls = dataarray.sum(dim=time_dim)
# The number of acquistions that were not nan for each point.
num_times_not_nan = dataarray.count(dim=time_dim)
# Find where pixels are permanent, changing, or never a member of the class.
none_mask = sum_cls == 0
chng_mask = xr_and(0 < sum_cls, sum_cls < num_times_not_nan)
perm_mask = sum_cls == num_times_not_nan
return [none_mask, chng_mask, perm_mask]
# Assemble the color masks.
masks = []
if len(dataarrays) == 1: # Determine extent change in one time period.
dataarray = dataarrays[0]
masks += get_none_chng_perm_masks(dataarray)
else: # Determine change between two time periods.
baseline_da, analysis_da = dataarrays
baseline_none_mask, baseline_chng_mask, baseline_perm_mask = get_none_chng_perm_masks(baseline_da)
analysis_none_mask, analysis_chng_mask, analysis_perm_mask = get_none_chng_perm_masks(analysis_da)
# Find where points are never a member of the class or are a member at one or more times.
baseline_cls_ever = xr_or(baseline_chng_mask, baseline_perm_mask)
analysis_cls_ever = xr_or(analysis_chng_mask, analysis_perm_mask)
# Find where points change between never being a member of the class
# and being a member at one or more times between the two periods.
no_cls_no_cls_mask = xr_and(baseline_none_mask, analysis_none_mask)
no_cls_cls_mask = xr_and(baseline_none_mask, analysis_cls_ever)
cls_no_cls_mask = xr_and(baseline_cls_ever, analysis_none_mask)
cls_cls_mask = xr_and(baseline_cls_ever, analysis_cls_ever)
masks += [no_cls_no_cls_mask, no_cls_cls_mask, cls_no_cls_mask, cls_cls_mask]
# Determine the overriding mask.
y_x_shape = len(dataarrays[0][y_coord]), len(dataarrays[0][x_coord])
mask = np.zeros(y_x_shape, dtype=np.bool) if mask is None else mask
# Color the image with the masks.
color_array = np.zeros((*y_x_shape, 3)).astype(np.int16)
for i, mask in enumerate(masks):
color_array[mask.values] = colors[i]
fig_kwargs['figsize'] = fig_kwargs.get('figsize', figure_ratio(dataarrays[0], x_coord, y_coord,
fixed_width = width))
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
# Set the tick and axes labels.
xarray_set_axes_labels(dataarrays[0], ax, x_coord, y_coord, x_label_kwargs, y_label_kwargs)
# Title the plot.
if title is None:
title = "Class Extents Change" if len(dataarrays)==1 else \
"Class Extents Change (Baseline/Analysis)"
ax.set_title(title, **title_kwargs)
# Create the legend.
colors = [np.array(color)/255 for color in colors] # Colors must be in range [0,1] for color patches.
if len(dataarrays)==1:
class_legend_label = "a Member of the Class" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label),
['Never {}', 'Sometimes {}', 'Always {}']))
else:
class_legend_label = "Class Membership" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label, class_legend_label),
['No {} to No {}', 'No {} to {}', '{} to No {}', '{} to {}']))
color_patches = list(map(lambda color, label: mpatches.Patch(color=color, label=label), colors, labels))
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = color_patches
ax.legend(**legend_kwargs)
ax.imshow(color_array, **imshow_kwargs)
# Calculate the percentage of pixels that are permanent, changing, or never members.
pcts = [float((mask.sum() / (y_x_shape[0]*y_x_shape[1])).values) for mask in masks]
return [fig,ax], pcts | [
"def",
"binary_class_change_plot",
"(",
"dataarrays",
",",
"mask",
"=",
"None",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"colors",
"=",
"None",
",",
"class_legend_label",
"=",
"None",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"title",
"=",
"None",
",",
"fig_kwargs",
"=",
"{",
"}",
",",
"title_kwargs",
"=",
"{",
"}",
",",
"imshow_kwargs",
"=",
"{",
"}",
",",
"x_label_kwargs",
"=",
"{",
"}",
",",
"y_label_kwargs",
"=",
"{",
"}",
",",
"legend_kwargs",
"=",
"{",
"}",
")",
":",
"# Avoid modifying the original arguments.",
"fig_kwargs",
",",
"title_kwargs",
",",
"legend_kwargs",
"=",
"fig_kwargs",
".",
"copy",
"(",
")",
",",
"title_kwargs",
".",
"copy",
"(",
")",
",",
"legend_kwargs",
".",
"copy",
"(",
")",
"# Handle conversion of matplotlib color names to lists of rgb values (range [0,255] for plt.imshow()).",
"colors",
"=",
"list",
"(",
"map",
"(",
"convert_name_rgb_255",
",",
"colors",
")",
")",
"def",
"get_none_chng_perm_masks",
"(",
"dataarray",
",",
"time_dim",
"=",
"'time'",
")",
":",
"\"\"\"\n For a DataArray of binary classifications (0 or 1) with a 'time' dimension, \n get a list of masks indicating where the points are, in order, never, sometimes, \n or always a member of the class (1 indicates membership), considering only \n non-NaN values for those points.\n \"\"\"",
"# Get the sum of classifications across time.",
"sum_cls",
"=",
"dataarray",
".",
"sum",
"(",
"dim",
"=",
"time_dim",
")",
"# The number of acquistions that were not nan for each point.",
"num_times_not_nan",
"=",
"dataarray",
".",
"count",
"(",
"dim",
"=",
"time_dim",
")",
"# Find where pixels are permanent, changing, or never a member of the class.",
"none_mask",
"=",
"sum_cls",
"==",
"0",
"chng_mask",
"=",
"xr_and",
"(",
"0",
"<",
"sum_cls",
",",
"sum_cls",
"<",
"num_times_not_nan",
")",
"perm_mask",
"=",
"sum_cls",
"==",
"num_times_not_nan",
"return",
"[",
"none_mask",
",",
"chng_mask",
",",
"perm_mask",
"]",
"# Assemble the color masks.",
"masks",
"=",
"[",
"]",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
":",
"# Determine extent change in one time period.",
"dataarray",
"=",
"dataarrays",
"[",
"0",
"]",
"masks",
"+=",
"get_none_chng_perm_masks",
"(",
"dataarray",
")",
"else",
":",
"# Determine change between two time periods.",
"baseline_da",
",",
"analysis_da",
"=",
"dataarrays",
"baseline_none_mask",
",",
"baseline_chng_mask",
",",
"baseline_perm_mask",
"=",
"get_none_chng_perm_masks",
"(",
"baseline_da",
")",
"analysis_none_mask",
",",
"analysis_chng_mask",
",",
"analysis_perm_mask",
"=",
"get_none_chng_perm_masks",
"(",
"analysis_da",
")",
"# Find where points are never a member of the class or are a member at one or more times.",
"baseline_cls_ever",
"=",
"xr_or",
"(",
"baseline_chng_mask",
",",
"baseline_perm_mask",
")",
"analysis_cls_ever",
"=",
"xr_or",
"(",
"analysis_chng_mask",
",",
"analysis_perm_mask",
")",
"# Find where points change between never being a member of the class ",
"# and being a member at one or more times between the two periods.",
"no_cls_no_cls_mask",
"=",
"xr_and",
"(",
"baseline_none_mask",
",",
"analysis_none_mask",
")",
"no_cls_cls_mask",
"=",
"xr_and",
"(",
"baseline_none_mask",
",",
"analysis_cls_ever",
")",
"cls_no_cls_mask",
"=",
"xr_and",
"(",
"baseline_cls_ever",
",",
"analysis_none_mask",
")",
"cls_cls_mask",
"=",
"xr_and",
"(",
"baseline_cls_ever",
",",
"analysis_cls_ever",
")",
"masks",
"+=",
"[",
"no_cls_no_cls_mask",
",",
"no_cls_cls_mask",
",",
"cls_no_cls_mask",
",",
"cls_cls_mask",
"]",
"# Determine the overriding mask.",
"y_x_shape",
"=",
"len",
"(",
"dataarrays",
"[",
"0",
"]",
"[",
"y_coord",
"]",
")",
",",
"len",
"(",
"dataarrays",
"[",
"0",
"]",
"[",
"x_coord",
"]",
")",
"mask",
"=",
"np",
".",
"zeros",
"(",
"y_x_shape",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"if",
"mask",
"is",
"None",
"else",
"mask",
"# Color the image with the masks.",
"color_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"*",
"y_x_shape",
",",
"3",
")",
")",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"for",
"i",
",",
"mask",
"in",
"enumerate",
"(",
"masks",
")",
":",
"color_array",
"[",
"mask",
".",
"values",
"]",
"=",
"colors",
"[",
"i",
"]",
"fig_kwargs",
"[",
"'figsize'",
"]",
"=",
"fig_kwargs",
".",
"get",
"(",
"'figsize'",
",",
"figure_ratio",
"(",
"dataarrays",
"[",
"0",
"]",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"# Set the tick and axes labels.",
"xarray_set_axes_labels",
"(",
"dataarrays",
"[",
"0",
"]",
",",
"ax",
",",
"x_coord",
",",
"y_coord",
",",
"x_label_kwargs",
",",
"y_label_kwargs",
")",
"# Title the plot.",
"if",
"title",
"is",
"None",
":",
"title",
"=",
"\"Class Extents Change\"",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
"else",
"\"Class Extents Change (Baseline/Analysis)\"",
"ax",
".",
"set_title",
"(",
"title",
",",
"*",
"*",
"title_kwargs",
")",
"# Create the legend.",
"colors",
"=",
"[",
"np",
".",
"array",
"(",
"color",
")",
"/",
"255",
"for",
"color",
"in",
"colors",
"]",
"# Colors must be in range [0,1] for color patches.",
"if",
"len",
"(",
"dataarrays",
")",
"==",
"1",
":",
"class_legend_label",
"=",
"\"a Member of the Class\"",
"if",
"class_legend_label",
"is",
"None",
"else",
"class_legend_label",
"labels",
"=",
"list",
"(",
"map",
"(",
"lambda",
"str",
":",
"str",
".",
"format",
"(",
"class_legend_label",
")",
",",
"[",
"'Never {}'",
",",
"'Sometimes {}'",
",",
"'Always {}'",
"]",
")",
")",
"else",
":",
"class_legend_label",
"=",
"\"Class Membership\"",
"if",
"class_legend_label",
"is",
"None",
"else",
"class_legend_label",
"labels",
"=",
"list",
"(",
"map",
"(",
"lambda",
"str",
":",
"str",
".",
"format",
"(",
"class_legend_label",
",",
"class_legend_label",
")",
",",
"[",
"'No {} to No {}'",
",",
"'No {} to {}'",
",",
"'{} to No {}'",
",",
"'{} to {}'",
"]",
")",
")",
"color_patches",
"=",
"list",
"(",
"map",
"(",
"lambda",
"color",
",",
"label",
":",
"mpatches",
".",
"Patch",
"(",
"color",
"=",
"color",
",",
"label",
"=",
"label",
")",
",",
"colors",
",",
"labels",
")",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"'loc'",
",",
"'best'",
")",
"legend_kwargs",
"[",
"'handles'",
"]",
"=",
"color_patches",
"ax",
".",
"legend",
"(",
"*",
"*",
"legend_kwargs",
")",
"ax",
".",
"imshow",
"(",
"color_array",
",",
"*",
"*",
"imshow_kwargs",
")",
"# Calculate the percentage of pixels that are permanent, changing, or never members.",
"pcts",
"=",
"[",
"float",
"(",
"(",
"mask",
".",
"sum",
"(",
")",
"/",
"(",
"y_x_shape",
"[",
"0",
"]",
"*",
"y_x_shape",
"[",
"1",
"]",
")",
")",
".",
"values",
")",
"for",
"mask",
"in",
"masks",
"]",
"return",
"[",
"fig",
",",
"ax",
"]",
",",
"pcts"
] | [
940,
0
] | [
1105,
25
] | python | en | ['en', 'error', 'th'] | False |
intersection_threshold_plot | (first, second, th, mask = None, color_none='black',
color_first='green', color_second='red',
color_both='white', color_mask='gray',
width = 10, fig=None, ax=None, *args, **kwargs) |
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored`color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
|
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored`color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
| def intersection_threshold_plot(first, second, th, mask = None, color_none='black',
color_first='green', color_second='red',
color_both='white', color_mask='gray',
width = 10, fig=None, ax=None, *args, **kwargs):
"""
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored`color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
"""
# Handle conversion of matplotlib color names to lists of rgb values.
color_none, color_first, color_second, color_both, color_mask = \
list(map(convert_name_rgb_255, [color_none, color_first, color_second, color_both, color_mask]))
# Determine the regions.
first_in = np.logical_and(th[0] <= first, first < th[1])
second_in = np.logical_and(th[0] <= second, second < th[1])
both_in = np.logical_and(first_in, second_in)
none_in = np.invert(both_in)
# Determine the overriding mask.
mask = np.zeros(first.shape).astype(bool) if mask is None else mask
# The colors for each pixel.
color_array = np.zeros((*first.shape, 3)).astype(np.int16)
color_array[none_in] = color_none
color_array[first_in] = color_first
color_array[second_in] = color_second
color_array[both_in] = color_both
color_array[mask] = color_mask
fig, ax = retrieve_or_create_fig_ax(fig, ax, figsize=figure_ratio(first, x_coord, y_coord, fixed_width = width))
plt.title("Threshold: {} < x < {}".format(th[0], th[1]))
max_num_ticks = 10 # Max ticks per axis.
lon = first.longitude.values
label_every = int(round(len(lon)/max_num_ticks))
lon_labels = ["{0:.4f}".format(lon_val) for lon_val in lon[::label_every]]
plt.xlabel('Longitude')
plt.xticks(range(len(lon))[::label_every], lon_labels, rotation='vertical')
lat = first.latitude.values
label_every = int(round(len(lat)/max_num_ticks))
lat_labels = ["{0:.4f}".format(lat_val) for lat_val in lat[::label_every]]
plt.ylabel('Latitude')
plt.yticks(range(len(lat))[::label_every], lat_labels)
plt.imshow(color_array, *args, **kwargs)
plt.show() | [
"def",
"intersection_threshold_plot",
"(",
"first",
",",
"second",
",",
"th",
",",
"mask",
"=",
"None",
",",
"color_none",
"=",
"'black'",
",",
"color_first",
"=",
"'green'",
",",
"color_second",
"=",
"'red'",
",",
"color_both",
"=",
"'white'",
",",
"color_mask",
"=",
"'gray'",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Handle conversion of matplotlib color names to lists of rgb values.",
"color_none",
",",
"color_first",
",",
"color_second",
",",
"color_both",
",",
"color_mask",
"=",
"list",
"(",
"map",
"(",
"convert_name_rgb_255",
",",
"[",
"color_none",
",",
"color_first",
",",
"color_second",
",",
"color_both",
",",
"color_mask",
"]",
")",
")",
"# Determine the regions.",
"first_in",
"=",
"np",
".",
"logical_and",
"(",
"th",
"[",
"0",
"]",
"<=",
"first",
",",
"first",
"<",
"th",
"[",
"1",
"]",
")",
"second_in",
"=",
"np",
".",
"logical_and",
"(",
"th",
"[",
"0",
"]",
"<=",
"second",
",",
"second",
"<",
"th",
"[",
"1",
"]",
")",
"both_in",
"=",
"np",
".",
"logical_and",
"(",
"first_in",
",",
"second_in",
")",
"none_in",
"=",
"np",
".",
"invert",
"(",
"both_in",
")",
"# Determine the overriding mask.",
"mask",
"=",
"np",
".",
"zeros",
"(",
"first",
".",
"shape",
")",
".",
"astype",
"(",
"bool",
")",
"if",
"mask",
"is",
"None",
"else",
"mask",
"# The colors for each pixel.",
"color_array",
"=",
"np",
".",
"zeros",
"(",
"(",
"*",
"first",
".",
"shape",
",",
"3",
")",
")",
".",
"astype",
"(",
"np",
".",
"int16",
")",
"color_array",
"[",
"none_in",
"]",
"=",
"color_none",
"color_array",
"[",
"first_in",
"]",
"=",
"color_first",
"color_array",
"[",
"second_in",
"]",
"=",
"color_second",
"color_array",
"[",
"both_in",
"]",
"=",
"color_both",
"color_array",
"[",
"mask",
"]",
"=",
"color_mask",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"figsize",
"=",
"figure_ratio",
"(",
"first",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"plt",
".",
"title",
"(",
"\"Threshold: {} < x < {}\"",
".",
"format",
"(",
"th",
"[",
"0",
"]",
",",
"th",
"[",
"1",
"]",
")",
")",
"max_num_ticks",
"=",
"10",
"# Max ticks per axis.",
"lon",
"=",
"first",
".",
"longitude",
".",
"values",
"label_every",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"lon",
")",
"/",
"max_num_ticks",
")",
")",
"lon_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"lon_val",
")",
"for",
"lon_val",
"in",
"lon",
"[",
":",
":",
"label_every",
"]",
"]",
"plt",
".",
"xlabel",
"(",
"'Longitude'",
")",
"plt",
".",
"xticks",
"(",
"range",
"(",
"len",
"(",
"lon",
")",
")",
"[",
":",
":",
"label_every",
"]",
",",
"lon_labels",
",",
"rotation",
"=",
"'vertical'",
")",
"lat",
"=",
"first",
".",
"latitude",
".",
"values",
"label_every",
"=",
"int",
"(",
"round",
"(",
"len",
"(",
"lat",
")",
"/",
"max_num_ticks",
")",
")",
"lat_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"lat_val",
")",
"for",
"lat_val",
"in",
"lat",
"[",
":",
":",
"label_every",
"]",
"]",
"plt",
".",
"ylabel",
"(",
"'Latitude'",
")",
"plt",
".",
"yticks",
"(",
"range",
"(",
"len",
"(",
"lat",
")",
")",
"[",
":",
":",
"label_every",
"]",
",",
"lat_labels",
")",
"plt",
".",
"imshow",
"(",
"color_array",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"plt",
".",
"show",
"(",
")"
] | [
1109,
0
] | [
1203,
14
] | python | en | ['en', 'error', 'th'] | False |
print_matrix | (cell_value_mtx, cell_label_mtx=None, row_labels=None, col_labels=None,
show_row_labels=True, show_col_labels=True, show_cell_labels=True,
cmap=None, cell_val_fmt='2g', annot_kwargs={}, tick_fontsize=14,
x_axis_tick_kwargs=None, y_axis_tick_kwargs=None,
x_axis_ticks_position='default', y_axis_ticks_position='default',
fig=None, ax=None, heatmap_kwargs={}, fig_kwargs={}) |
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Arguments
---------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list
A list of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
|
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Arguments
---------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list
A list of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
| def print_matrix(cell_value_mtx, cell_label_mtx=None, row_labels=None, col_labels=None,
show_row_labels=True, show_col_labels=True, show_cell_labels=True,
cmap=None, cell_val_fmt='2g', annot_kwargs={}, tick_fontsize=14,
x_axis_tick_kwargs=None, y_axis_tick_kwargs=None,
x_axis_ticks_position='default', y_axis_ticks_position='default',
fig=None, ax=None, heatmap_kwargs={}, fig_kwargs={}):
"""
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Arguments
---------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list
A list of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
"""
cell_label_mtx = cell_value_mtx if cell_label_mtx is None else cell_label_mtx
row_labels = ['']*cell_value_mtx.shape[0] if not show_row_labels else row_labels
col_labels = ['']*cell_value_mtx.shape[1] if not show_col_labels else col_labels
heatmap_kwargs.setdefault('cbar', False)
df = pd.DataFrame(cell_value_mtx, index=row_labels, columns=col_labels)
cell_labels = cell_label_mtx if show_cell_labels else None
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
heatmap = sns.heatmap(df, cmap=cmap, annot=cell_labels, fmt=cell_val_fmt,
annot_kws=annot_kwargs, ax=ax, **heatmap_kwargs)
if not show_row_labels:
heatmap.set_yticks([]) # Ticks must be hidden explicitly.
else:
if y_axis_tick_kwargs is None:
y_axis_tick_kwargs = dict(rotation=0, ha='right')
y_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), **y_axis_tick_kwargs)
heatmap.yaxis.set_ticks_position(y_axis_ticks_position)
heatmap.yaxis.tick_left() # Ticks may also appear on the right side otherwise.
if not show_col_labels:
heatmap.set_xticks([])
else:
if x_axis_tick_kwargs is None:
x_axis_tick_kwargs = dict(rotation=45, ha='right')
x_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), **x_axis_tick_kwargs)
heatmap.xaxis.set_ticks_position(x_axis_ticks_position)
return fig, ax | [
"def",
"print_matrix",
"(",
"cell_value_mtx",
",",
"cell_label_mtx",
"=",
"None",
",",
"row_labels",
"=",
"None",
",",
"col_labels",
"=",
"None",
",",
"show_row_labels",
"=",
"True",
",",
"show_col_labels",
"=",
"True",
",",
"show_cell_labels",
"=",
"True",
",",
"cmap",
"=",
"None",
",",
"cell_val_fmt",
"=",
"'2g'",
",",
"annot_kwargs",
"=",
"{",
"}",
",",
"tick_fontsize",
"=",
"14",
",",
"x_axis_tick_kwargs",
"=",
"None",
",",
"y_axis_tick_kwargs",
"=",
"None",
",",
"x_axis_ticks_position",
"=",
"'default'",
",",
"y_axis_ticks_position",
"=",
"'default'",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"heatmap_kwargs",
"=",
"{",
"}",
",",
"fig_kwargs",
"=",
"{",
"}",
")",
":",
"cell_label_mtx",
"=",
"cell_value_mtx",
"if",
"cell_label_mtx",
"is",
"None",
"else",
"cell_label_mtx",
"row_labels",
"=",
"[",
"''",
"]",
"*",
"cell_value_mtx",
".",
"shape",
"[",
"0",
"]",
"if",
"not",
"show_row_labels",
"else",
"row_labels",
"col_labels",
"=",
"[",
"''",
"]",
"*",
"cell_value_mtx",
".",
"shape",
"[",
"1",
"]",
"if",
"not",
"show_col_labels",
"else",
"col_labels",
"heatmap_kwargs",
".",
"setdefault",
"(",
"'cbar'",
",",
"False",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"cell_value_mtx",
",",
"index",
"=",
"row_labels",
",",
"columns",
"=",
"col_labels",
")",
"cell_labels",
"=",
"cell_label_mtx",
"if",
"show_cell_labels",
"else",
"None",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"heatmap",
"=",
"sns",
".",
"heatmap",
"(",
"df",
",",
"cmap",
"=",
"cmap",
",",
"annot",
"=",
"cell_labels",
",",
"fmt",
"=",
"cell_val_fmt",
",",
"annot_kws",
"=",
"annot_kwargs",
",",
"ax",
"=",
"ax",
",",
"*",
"*",
"heatmap_kwargs",
")",
"if",
"not",
"show_row_labels",
":",
"heatmap",
".",
"set_yticks",
"(",
"[",
"]",
")",
"# Ticks must be hidden explicitly.",
"else",
":",
"if",
"y_axis_tick_kwargs",
"is",
"None",
":",
"y_axis_tick_kwargs",
"=",
"dict",
"(",
"rotation",
"=",
"0",
",",
"ha",
"=",
"'right'",
")",
"y_axis_tick_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"tick_fontsize",
")",
"heatmap",
".",
"yaxis",
".",
"set_ticklabels",
"(",
"heatmap",
".",
"yaxis",
".",
"get_ticklabels",
"(",
")",
",",
"*",
"*",
"y_axis_tick_kwargs",
")",
"heatmap",
".",
"yaxis",
".",
"set_ticks_position",
"(",
"y_axis_ticks_position",
")",
"heatmap",
".",
"yaxis",
".",
"tick_left",
"(",
")",
"# Ticks may also appear on the right side otherwise.",
"if",
"not",
"show_col_labels",
":",
"heatmap",
".",
"set_xticks",
"(",
"[",
"]",
")",
"else",
":",
"if",
"x_axis_tick_kwargs",
"is",
"None",
":",
"x_axis_tick_kwargs",
"=",
"dict",
"(",
"rotation",
"=",
"45",
",",
"ha",
"=",
"'right'",
")",
"x_axis_tick_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"tick_fontsize",
")",
"heatmap",
".",
"xaxis",
".",
"set_ticklabels",
"(",
"heatmap",
".",
"xaxis",
".",
"get_ticklabels",
"(",
")",
",",
"*",
"*",
"x_axis_tick_kwargs",
")",
"heatmap",
".",
"xaxis",
".",
"set_ticks_position",
"(",
"x_axis_ticks_position",
")",
"return",
"fig",
",",
"ax"
] | [
1211,
0
] | [
1294,
18
] | python | en | ['en', 'error', 'th'] | False |
get_ax_size | (fig, ax) |
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
|
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
| def get_ax_size(fig, ax):
"""
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
"""
# Credit goes to https://stackoverflow.com/a/19306776/5449970.
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
return [bbox.width, bbox.height] | [
"def",
"get_ax_size",
"(",
"fig",
",",
"ax",
")",
":",
"# Credit goes to https://stackoverflow.com/a/19306776/5449970.",
"bbox",
"=",
"ax",
".",
"get_window_extent",
"(",
")",
".",
"transformed",
"(",
"fig",
".",
"dpi_scale_trans",
".",
"inverted",
"(",
")",
")",
"return",
"[",
"bbox",
".",
"width",
",",
"bbox",
".",
"height",
"]"
] | [
1296,
0
] | [
1303,
36
] | python | en | ['en', 'error', 'th'] | False |
xarray_imshow | (data, x_coord='longitude', y_coord='latitude', width=10,
fig=None, ax=None, use_colorbar=True, cbar_labels=None,
use_legend=False, legend_labels=None, fig_kwargs=None,
imshow_kwargs=None, x_label_kwargs=None, y_label_kwargs=None,
cbar_kwargs=None, nan_color='white', legend_kwargs=None,
ax_tick_label_kwargs=None, x_tick_label_kwargs=None,
y_tick_label_kwargs=None, title=None, title_kwargs=None,
ax_lbl_font_scaling=(8, np.inf, 2),
ax_tick_lbl_font_scaling=(8, np.inf, 1.5),
title_font_scaling=(8, np.inf, 1.5),
legend_font_scaling=(8, np.inf, 1.5)) |
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike `data.plot.imshow()`, this sets axes ticks and labels - including
labeling "Latitude" and "Longitude". It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
ax_lbl_font_scaling,
ax_tick_lbl_font_scaling,
title_font_scaling,
legend_font_scaling: list-like of float
Some list-likes of the minimum font size, maximum font size, and the
rate at which they scale with the figure dimensions. So each contains
3 numeric values. These variables are for, respectively, axis label
font scaling, axis tick label font scaling, title font scaling, and
legend font scaling. The axis label, tick label, and title font sizes
scale on the average of the width and height of the axes. The legend
font size also scales on the width and height of the axes, but the
number of legend elements and the maximum legend element length are also factored.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
:Authors:
John Rattz ([email protected])
|
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike `data.plot.imshow()`, this sets axes ticks and labels - including
labeling "Latitude" and "Longitude". It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
ax_lbl_font_scaling,
ax_tick_lbl_font_scaling,
title_font_scaling,
legend_font_scaling: list-like of float
Some list-likes of the minimum font size, maximum font size, and the
rate at which they scale with the figure dimensions. So each contains
3 numeric values. These variables are for, respectively, axis label
font scaling, axis tick label font scaling, title font scaling, and
legend font scaling. The axis label, tick label, and title font sizes
scale on the average of the width and height of the axes. The legend
font size also scales on the width and height of the axes, but the
number of legend elements and the maximum legend element length are also factored.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
:Authors:
John Rattz (john.c.rattz | def xarray_imshow(data, x_coord='longitude', y_coord='latitude', width=10,
fig=None, ax=None, use_colorbar=True, cbar_labels=None,
use_legend=False, legend_labels=None, fig_kwargs=None,
imshow_kwargs=None, x_label_kwargs=None, y_label_kwargs=None,
cbar_kwargs=None, nan_color='white', legend_kwargs=None,
ax_tick_label_kwargs=None, x_tick_label_kwargs=None,
y_tick_label_kwargs=None, title=None, title_kwargs=None,
ax_lbl_font_scaling=(8, np.inf, 2),
ax_tick_lbl_font_scaling=(8, np.inf, 1.5),
title_font_scaling=(8, np.inf, 1.5),
legend_font_scaling=(8, np.inf, 1.5)):
"""
Shows a heatmap of an xarray DataArray with only latitude and longitude dimensions.
Unlike `data.plot.imshow()`, this sets axes ticks and labels - including
labeling "Latitude" and "Longitude". It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
ax_lbl_font_scaling,
ax_tick_lbl_font_scaling,
title_font_scaling,
legend_font_scaling: list-like of float
Some list-likes of the minimum font size, maximum font size, and the
rate at which they scale with the figure dimensions. So each contains
3 numeric values. These variables are for, respectively, axis label
font scaling, axis tick label font scaling, title font scaling, and
legend font scaling. The axis label, tick label, and title font sizes
scale on the average of the width and height of the axes. The legend
font size also scales on the width and height of the axes, but the
number of legend elements and the maximum legend element length are also factored.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
:Authors:
John Rattz ([email protected])
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Figure kwargs
# Use `copy()` to avoid modifying the original dictionaries.
fig_kwargs = {} if fig_kwargs is None else fig_kwargs.copy()
figsize = \
fig_kwargs.setdefault('figsize', figure_ratio(data, x_coord, y_coord,
fixed_width = width))
# Imshow kwargs
imshow_kwargs = {} if imshow_kwargs is None else imshow_kwargs.copy()
imshow_kwargs.setdefault('interpolation', 'nearest')
nan_color = norm_color(nan_color) # Normalize color value for matplotlib.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
axsize = get_ax_size(fig,ax) # Scale fonts on axis size, not figure size.
# Axis label kwargs
ax_lbl_fnt_sz = max(ax_lbl_font_scaling[0],
min(ax_lbl_font_scaling[2]*(axsize[0]+axsize[1])/2,#(x_lbl_fnt_sz+y_lbl_fnt_sz)/2,
ax_lbl_font_scaling[1]))
x_label_kwargs = {} if x_label_kwargs is None else x_label_kwargs.copy()
x_label_kwargs.setdefault("fontsize", ax_lbl_fnt_sz)
y_label_kwargs = {} if y_label_kwargs is None else y_label_kwargs.copy()
y_label_kwargs.setdefault("fontsize", ax_lbl_fnt_sz)
# Axis tick label kwargs
ax_tick_label_kwargs = {} if ax_tick_label_kwargs is None else \
ax_tick_label_kwargs.copy()
ax_tick_lbl_fnt_sz = max(ax_tick_lbl_font_scaling[0],
min(ax_tick_lbl_font_scaling[2]*(axsize[0]+axsize[1])/2,
ax_tick_lbl_font_scaling[1]))
x_tick_label_kwargs = {} if x_tick_label_kwargs is None else \
x_tick_label_kwargs
x_tick_label_kwargs.setdefault('fontsize', ax_tick_lbl_fnt_sz)
y_tick_label_kwargs = {} if y_tick_label_kwargs is None else \
y_tick_label_kwargs
y_tick_label_kwargs.setdefault('fontsize', ax_tick_lbl_fnt_sz)
# Handle display of NaN values.
data_arr = data.values
masked_array = np.ma.array(data_arr, mask=np.isnan(data_arr))
cmap = imshow_kwargs.setdefault('cmap', plt.get_cmap('viridis'))
cmap.set_bad(nan_color)
im = ax.imshow(masked_array, **imshow_kwargs)
# Set axis labels and tick labels.
xarray_set_axes_labels(data, ax, x_coord, y_coord,
x_label_kwargs, y_label_kwargs,
ax_tick_label_kwargs,
x_tick_label_kwargs, y_tick_label_kwargs)
# Set the title.
if title is not None:
title_fnt_sz = max(title_font_scaling[0],
min(title_font_scaling[2]*((axsize[0]+axsize[1])/2+3),#-len(title)/12
title_font_scaling[1]))
title_kwargs = {} if title_kwargs is None else title_kwargs.copy()
title_kwargs.setdefault('fontdict', dict(fontsize=title_fnt_sz))
ax.set_title(title, **title_kwargs)
# Create a colorbar.
if use_colorbar:
cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs.copy()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7.5%", pad=0.05)
cbar = fig.colorbar(im, ax=ax, cax=cax, **cbar_kwargs)
if cbar_labels is not None:
cbar.ax.set_yticklabels(cbar_labels)
else:
cbar = None
# Create a legend.
if use_legend:
legend_kwargs = {} if legend_kwargs is None else legend_kwargs.copy()
legend_kwargs.setdefault("framealpha", 0.4)
# Determine the legend labels.
unique_values = np.unique(data.values)
unique_values = unique_values[~np.isnan(unique_values)]
if legend_labels is None:
legend_labels = ["{}".format(value) for value in unique_values]
else:
legend_labels = [legend_labels.get(value,"{}".format(value)) for value in unique_values]
colors = [im.cmap(im.norm(unique_values)) for unique_values in unique_values]
patches = [mpatches.Patch(color=colors[i], label=legend_labels[i])
for i in range(len(legend_labels))]
# Determine the font size of the legend.
legend_num_elems = len(legend_labels)
legend_max_len = len(max(legend_labels, key=len))
legend_hz_sz = legend_font_scaling[2] * (axsize[0] - legend_max_len/9 + 3)
legend_vt_sz = legend_font_scaling[2] * (axsize[1] - legend_num_elems/9 + 3)
legend_fnt_sz = \
max(legend_font_scaling[0],
min(min(legend_hz_sz, legend_vt_sz), legend_font_scaling[1]))
legend_kwargs.setdefault("fontsize", legend_fnt_sz)
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = patches
ax.legend(**legend_kwargs)
return fig, ax, im, cbar | [
"def",
"xarray_imshow",
"(",
"data",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"width",
"=",
"10",
",",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"use_colorbar",
"=",
"True",
",",
"cbar_labels",
"=",
"None",
",",
"use_legend",
"=",
"False",
",",
"legend_labels",
"=",
"None",
",",
"fig_kwargs",
"=",
"None",
",",
"imshow_kwargs",
"=",
"None",
",",
"x_label_kwargs",
"=",
"None",
",",
"y_label_kwargs",
"=",
"None",
",",
"cbar_kwargs",
"=",
"None",
",",
"nan_color",
"=",
"'white'",
",",
"legend_kwargs",
"=",
"None",
",",
"ax_tick_label_kwargs",
"=",
"None",
",",
"x_tick_label_kwargs",
"=",
"None",
",",
"y_tick_label_kwargs",
"=",
"None",
",",
"title",
"=",
"None",
",",
"title_kwargs",
"=",
"None",
",",
"ax_lbl_font_scaling",
"=",
"(",
"8",
",",
"np",
".",
"inf",
",",
"2",
")",
",",
"ax_tick_lbl_font_scaling",
"=",
"(",
"8",
",",
"np",
".",
"inf",
",",
"1.5",
")",
",",
"title_font_scaling",
"=",
"(",
"8",
",",
"np",
".",
"inf",
",",
"1.5",
")",
",",
"legend_font_scaling",
"=",
"(",
"8",
",",
"np",
".",
"inf",
",",
"1.5",
")",
")",
":",
"from",
"mpl_toolkits",
".",
"axes_grid1",
"import",
"make_axes_locatable",
"# Figure kwargs",
"# Use `copy()` to avoid modifying the original dictionaries.",
"fig_kwargs",
"=",
"{",
"}",
"if",
"fig_kwargs",
"is",
"None",
"else",
"fig_kwargs",
".",
"copy",
"(",
")",
"figsize",
"=",
"fig_kwargs",
".",
"setdefault",
"(",
"'figsize'",
",",
"figure_ratio",
"(",
"data",
",",
"x_coord",
",",
"y_coord",
",",
"fixed_width",
"=",
"width",
")",
")",
"# Imshow kwargs",
"imshow_kwargs",
"=",
"{",
"}",
"if",
"imshow_kwargs",
"is",
"None",
"else",
"imshow_kwargs",
".",
"copy",
"(",
")",
"imshow_kwargs",
".",
"setdefault",
"(",
"'interpolation'",
",",
"'nearest'",
")",
"nan_color",
"=",
"norm_color",
"(",
"nan_color",
")",
"# Normalize color value for matplotlib.",
"fig",
",",
"ax",
"=",
"retrieve_or_create_fig_ax",
"(",
"fig",
",",
"ax",
",",
"*",
"*",
"fig_kwargs",
")",
"axsize",
"=",
"get_ax_size",
"(",
"fig",
",",
"ax",
")",
"# Scale fonts on axis size, not figure size.",
"# Axis label kwargs",
"ax_lbl_fnt_sz",
"=",
"max",
"(",
"ax_lbl_font_scaling",
"[",
"0",
"]",
",",
"min",
"(",
"ax_lbl_font_scaling",
"[",
"2",
"]",
"*",
"(",
"axsize",
"[",
"0",
"]",
"+",
"axsize",
"[",
"1",
"]",
")",
"/",
"2",
",",
"#(x_lbl_fnt_sz+y_lbl_fnt_sz)/2, ",
"ax_lbl_font_scaling",
"[",
"1",
"]",
")",
")",
"x_label_kwargs",
"=",
"{",
"}",
"if",
"x_label_kwargs",
"is",
"None",
"else",
"x_label_kwargs",
".",
"copy",
"(",
")",
"x_label_kwargs",
".",
"setdefault",
"(",
"\"fontsize\"",
",",
"ax_lbl_fnt_sz",
")",
"y_label_kwargs",
"=",
"{",
"}",
"if",
"y_label_kwargs",
"is",
"None",
"else",
"y_label_kwargs",
".",
"copy",
"(",
")",
"y_label_kwargs",
".",
"setdefault",
"(",
"\"fontsize\"",
",",
"ax_lbl_fnt_sz",
")",
"# Axis tick label kwargs",
"ax_tick_label_kwargs",
"=",
"{",
"}",
"if",
"ax_tick_label_kwargs",
"is",
"None",
"else",
"ax_tick_label_kwargs",
".",
"copy",
"(",
")",
"ax_tick_lbl_fnt_sz",
"=",
"max",
"(",
"ax_tick_lbl_font_scaling",
"[",
"0",
"]",
",",
"min",
"(",
"ax_tick_lbl_font_scaling",
"[",
"2",
"]",
"*",
"(",
"axsize",
"[",
"0",
"]",
"+",
"axsize",
"[",
"1",
"]",
")",
"/",
"2",
",",
"ax_tick_lbl_font_scaling",
"[",
"1",
"]",
")",
")",
"x_tick_label_kwargs",
"=",
"{",
"}",
"if",
"x_tick_label_kwargs",
"is",
"None",
"else",
"x_tick_label_kwargs",
"x_tick_label_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"ax_tick_lbl_fnt_sz",
")",
"y_tick_label_kwargs",
"=",
"{",
"}",
"if",
"y_tick_label_kwargs",
"is",
"None",
"else",
"y_tick_label_kwargs",
"y_tick_label_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"ax_tick_lbl_fnt_sz",
")",
"# Handle display of NaN values.",
"data_arr",
"=",
"data",
".",
"values",
"masked_array",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"data_arr",
",",
"mask",
"=",
"np",
".",
"isnan",
"(",
"data_arr",
")",
")",
"cmap",
"=",
"imshow_kwargs",
".",
"setdefault",
"(",
"'cmap'",
",",
"plt",
".",
"get_cmap",
"(",
"'viridis'",
")",
")",
"cmap",
".",
"set_bad",
"(",
"nan_color",
")",
"im",
"=",
"ax",
".",
"imshow",
"(",
"masked_array",
",",
"*",
"*",
"imshow_kwargs",
")",
"# Set axis labels and tick labels.",
"xarray_set_axes_labels",
"(",
"data",
",",
"ax",
",",
"x_coord",
",",
"y_coord",
",",
"x_label_kwargs",
",",
"y_label_kwargs",
",",
"ax_tick_label_kwargs",
",",
"x_tick_label_kwargs",
",",
"y_tick_label_kwargs",
")",
"# Set the title.",
"if",
"title",
"is",
"not",
"None",
":",
"title_fnt_sz",
"=",
"max",
"(",
"title_font_scaling",
"[",
"0",
"]",
",",
"min",
"(",
"title_font_scaling",
"[",
"2",
"]",
"*",
"(",
"(",
"axsize",
"[",
"0",
"]",
"+",
"axsize",
"[",
"1",
"]",
")",
"/",
"2",
"+",
"3",
")",
",",
"#-len(title)/12",
"title_font_scaling",
"[",
"1",
"]",
")",
")",
"title_kwargs",
"=",
"{",
"}",
"if",
"title_kwargs",
"is",
"None",
"else",
"title_kwargs",
".",
"copy",
"(",
")",
"title_kwargs",
".",
"setdefault",
"(",
"'fontdict'",
",",
"dict",
"(",
"fontsize",
"=",
"title_fnt_sz",
")",
")",
"ax",
".",
"set_title",
"(",
"title",
",",
"*",
"*",
"title_kwargs",
")",
"# Create a colorbar.",
"if",
"use_colorbar",
":",
"cbar_kwargs",
"=",
"{",
"}",
"if",
"cbar_kwargs",
"is",
"None",
"else",
"cbar_kwargs",
".",
"copy",
"(",
")",
"divider",
"=",
"make_axes_locatable",
"(",
"ax",
")",
"cax",
"=",
"divider",
".",
"append_axes",
"(",
"\"right\"",
",",
"size",
"=",
"\"7.5%\"",
",",
"pad",
"=",
"0.05",
")",
"cbar",
"=",
"fig",
".",
"colorbar",
"(",
"im",
",",
"ax",
"=",
"ax",
",",
"cax",
"=",
"cax",
",",
"*",
"*",
"cbar_kwargs",
")",
"if",
"cbar_labels",
"is",
"not",
"None",
":",
"cbar",
".",
"ax",
".",
"set_yticklabels",
"(",
"cbar_labels",
")",
"else",
":",
"cbar",
"=",
"None",
"# Create a legend.",
"if",
"use_legend",
":",
"legend_kwargs",
"=",
"{",
"}",
"if",
"legend_kwargs",
"is",
"None",
"else",
"legend_kwargs",
".",
"copy",
"(",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"\"framealpha\"",
",",
"0.4",
")",
"# Determine the legend labels.",
"unique_values",
"=",
"np",
".",
"unique",
"(",
"data",
".",
"values",
")",
"unique_values",
"=",
"unique_values",
"[",
"~",
"np",
".",
"isnan",
"(",
"unique_values",
")",
"]",
"if",
"legend_labels",
"is",
"None",
":",
"legend_labels",
"=",
"[",
"\"{}\"",
".",
"format",
"(",
"value",
")",
"for",
"value",
"in",
"unique_values",
"]",
"else",
":",
"legend_labels",
"=",
"[",
"legend_labels",
".",
"get",
"(",
"value",
",",
"\"{}\"",
".",
"format",
"(",
"value",
")",
")",
"for",
"value",
"in",
"unique_values",
"]",
"colors",
"=",
"[",
"im",
".",
"cmap",
"(",
"im",
".",
"norm",
"(",
"unique_values",
")",
")",
"for",
"unique_values",
"in",
"unique_values",
"]",
"patches",
"=",
"[",
"mpatches",
".",
"Patch",
"(",
"color",
"=",
"colors",
"[",
"i",
"]",
",",
"label",
"=",
"legend_labels",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"legend_labels",
")",
")",
"]",
"# Determine the font size of the legend.",
"legend_num_elems",
"=",
"len",
"(",
"legend_labels",
")",
"legend_max_len",
"=",
"len",
"(",
"max",
"(",
"legend_labels",
",",
"key",
"=",
"len",
")",
")",
"legend_hz_sz",
"=",
"legend_font_scaling",
"[",
"2",
"]",
"*",
"(",
"axsize",
"[",
"0",
"]",
"-",
"legend_max_len",
"/",
"9",
"+",
"3",
")",
"legend_vt_sz",
"=",
"legend_font_scaling",
"[",
"2",
"]",
"*",
"(",
"axsize",
"[",
"1",
"]",
"-",
"legend_num_elems",
"/",
"9",
"+",
"3",
")",
"legend_fnt_sz",
"=",
"max",
"(",
"legend_font_scaling",
"[",
"0",
"]",
",",
"min",
"(",
"min",
"(",
"legend_hz_sz",
",",
"legend_vt_sz",
")",
",",
"legend_font_scaling",
"[",
"1",
"]",
")",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"\"fontsize\"",
",",
"legend_fnt_sz",
")",
"legend_kwargs",
".",
"setdefault",
"(",
"'loc'",
",",
"'best'",
")",
"legend_kwargs",
"[",
"'handles'",
"]",
"=",
"patches",
"ax",
".",
"legend",
"(",
"*",
"*",
"legend_kwargs",
")",
"return",
"fig",
",",
"ax",
",",
"im",
",",
"cbar"
] | [
1305,
0
] | [
1496,
28
] | python | en | ['en', 'error', 'th'] | False |
xarray_set_axes_labels | (data, ax, x_coord='longitude', y_coord='latitude',
x_label_kwargs=None, y_label_kwargs=None,
ax_tick_label_kwargs=None,
x_tick_label_kwargs=None, y_tick_label_kwargs=None) |
Sets tick locations and labels for x and y axes on a `matplotlib.axes.Axes`
object such that the tick labels do not overlap. This currently only supports
numeric coordinates.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The xarray Dataset or DataArray containing latitude and longitude coordinates.
ax: matplotlib.axes.Axes
The matplotlib Axes object to set tick locations and labels for.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
Unless 'xlabel' or 'ylabel' are specified, the labels will be
the capitalized versions of `x_coord` and `y_coord`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
:Authors:
John Rattz ([email protected])
|
Sets tick locations and labels for x and y axes on a `matplotlib.axes.Axes`
object such that the tick labels do not overlap. This currently only supports
numeric coordinates.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The xarray Dataset or DataArray containing latitude and longitude coordinates.
ax: matplotlib.axes.Axes
The matplotlib Axes object to set tick locations and labels for.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
Unless 'xlabel' or 'ylabel' are specified, the labels will be
the capitalized versions of `x_coord` and `y_coord`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
:Authors:
John Rattz (john.c.rattz | def xarray_set_axes_labels(data, ax, x_coord='longitude', y_coord='latitude',
x_label_kwargs=None, y_label_kwargs=None,
ax_tick_label_kwargs=None,
x_tick_label_kwargs=None, y_tick_label_kwargs=None):
"""
Sets tick locations and labels for x and y axes on a `matplotlib.axes.Axes`
object such that the tick labels do not overlap. This currently only supports
numeric coordinates.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The xarray Dataset or DataArray containing latitude and longitude coordinates.
ax: matplotlib.axes.Axes
The matplotlib Axes object to set tick locations and labels for.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
Unless 'xlabel' or 'ylabel' are specified, the labels will be
the capitalized versions of `x_coord` and `y_coord`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
:Authors:
John Rattz ([email protected])
"""
import string
# Avoid modifying the original arguments.
x_label_kwargs = {} if x_label_kwargs is None else x_label_kwargs.copy()
y_label_kwargs = {} if y_label_kwargs is None else y_label_kwargs.copy()
ax_tick_label_kwargs = {} if ax_tick_label_kwargs is None else \
ax_tick_label_kwargs.copy()
x_tick_label_kwargs = {} if x_tick_label_kwargs is None else \
x_tick_label_kwargs.copy()
y_tick_label_kwargs = {} if y_tick_label_kwargs is None else \
y_tick_label_kwargs.copy()
# x_label_kwargs, y_label_kwargs = \
# x_label_kwargs.copy(), y_label_kwargs.copy()
# x_tick_label_kwargs, y_tick_label_kwargs = \
# x_tick_label_kwargs.copy(), y_tick_label_kwargs.copy()
width, height = get_ax_size(ax.figure, ax)
# Labels
x_label_kwargs.setdefault('xlabel', string.capwords(x_coord))
ax.set_xlabel(**x_label_kwargs)
y_label_kwargs.setdefault('ylabel', string.capwords(y_coord))
ax.set_ylabel(**y_label_kwargs)
# Tick labels
ax.tick_params(**ax_tick_label_kwargs)
# X ticks
x_vals = data[x_coord].values
x_fontsize = \
x_tick_label_kwargs.setdefault('fontsize', mpl.rcParams['font.size'])
label_every = max(1, int(round(1/10*len(x_vals)*x_fontsize/width)))
x_labels = ["{0:.4f}".format(float(x_val)) for x_val in x_vals[::label_every]]
ax.set_xticks(range(len(x_vals))[::label_every])
x_tick_label_kwargs.setdefault('rotation', 30)
ax.set_xticklabels(x_labels, **x_tick_label_kwargs)
# Y ticks
y_vals = data[y_coord].values
y_fontsize = \
y_tick_label_kwargs.setdefault('fontsize', mpl.rcParams['font.size'])
label_every = max(1, int(round(1/10*len(y_vals)*y_fontsize/height)))
y_labels = ["{0:.4f}".format(float(y_val)) for y_val in y_vals[::label_every]]
ax.set_yticks(range(len(y_vals))[::label_every])
ax.set_yticklabels(y_labels, **y_tick_label_kwargs) | [
"def",
"xarray_set_axes_labels",
"(",
"data",
",",
"ax",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"x_label_kwargs",
"=",
"None",
",",
"y_label_kwargs",
"=",
"None",
",",
"ax_tick_label_kwargs",
"=",
"None",
",",
"x_tick_label_kwargs",
"=",
"None",
",",
"y_tick_label_kwargs",
"=",
"None",
")",
":",
"import",
"string",
"# Avoid modifying the original arguments.",
"x_label_kwargs",
"=",
"{",
"}",
"if",
"x_label_kwargs",
"is",
"None",
"else",
"x_label_kwargs",
".",
"copy",
"(",
")",
"y_label_kwargs",
"=",
"{",
"}",
"if",
"y_label_kwargs",
"is",
"None",
"else",
"y_label_kwargs",
".",
"copy",
"(",
")",
"ax_tick_label_kwargs",
"=",
"{",
"}",
"if",
"ax_tick_label_kwargs",
"is",
"None",
"else",
"ax_tick_label_kwargs",
".",
"copy",
"(",
")",
"x_tick_label_kwargs",
"=",
"{",
"}",
"if",
"x_tick_label_kwargs",
"is",
"None",
"else",
"x_tick_label_kwargs",
".",
"copy",
"(",
")",
"y_tick_label_kwargs",
"=",
"{",
"}",
"if",
"y_tick_label_kwargs",
"is",
"None",
"else",
"y_tick_label_kwargs",
".",
"copy",
"(",
")",
"# x_label_kwargs, y_label_kwargs = \\",
"# x_label_kwargs.copy(), y_label_kwargs.copy()",
"# x_tick_label_kwargs, y_tick_label_kwargs = \\",
"# x_tick_label_kwargs.copy(), y_tick_label_kwargs.copy()",
"width",
",",
"height",
"=",
"get_ax_size",
"(",
"ax",
".",
"figure",
",",
"ax",
")",
"# Labels",
"x_label_kwargs",
".",
"setdefault",
"(",
"'xlabel'",
",",
"string",
".",
"capwords",
"(",
"x_coord",
")",
")",
"ax",
".",
"set_xlabel",
"(",
"*",
"*",
"x_label_kwargs",
")",
"y_label_kwargs",
".",
"setdefault",
"(",
"'ylabel'",
",",
"string",
".",
"capwords",
"(",
"y_coord",
")",
")",
"ax",
".",
"set_ylabel",
"(",
"*",
"*",
"y_label_kwargs",
")",
"# Tick labels",
"ax",
".",
"tick_params",
"(",
"*",
"*",
"ax_tick_label_kwargs",
")",
"# X ticks",
"x_vals",
"=",
"data",
"[",
"x_coord",
"]",
".",
"values",
"x_fontsize",
"=",
"x_tick_label_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"mpl",
".",
"rcParams",
"[",
"'font.size'",
"]",
")",
"label_every",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"round",
"(",
"1",
"/",
"10",
"*",
"len",
"(",
"x_vals",
")",
"*",
"x_fontsize",
"/",
"width",
")",
")",
")",
"x_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"float",
"(",
"x_val",
")",
")",
"for",
"x_val",
"in",
"x_vals",
"[",
":",
":",
"label_every",
"]",
"]",
"ax",
".",
"set_xticks",
"(",
"range",
"(",
"len",
"(",
"x_vals",
")",
")",
"[",
":",
":",
"label_every",
"]",
")",
"x_tick_label_kwargs",
".",
"setdefault",
"(",
"'rotation'",
",",
"30",
")",
"ax",
".",
"set_xticklabels",
"(",
"x_labels",
",",
"*",
"*",
"x_tick_label_kwargs",
")",
"# Y ticks",
"y_vals",
"=",
"data",
"[",
"y_coord",
"]",
".",
"values",
"y_fontsize",
"=",
"y_tick_label_kwargs",
".",
"setdefault",
"(",
"'fontsize'",
",",
"mpl",
".",
"rcParams",
"[",
"'font.size'",
"]",
")",
"label_every",
"=",
"max",
"(",
"1",
",",
"int",
"(",
"round",
"(",
"1",
"/",
"10",
"*",
"len",
"(",
"y_vals",
")",
"*",
"y_fontsize",
"/",
"height",
")",
")",
")",
"y_labels",
"=",
"[",
"\"{0:.4f}\"",
".",
"format",
"(",
"float",
"(",
"y_val",
")",
")",
"for",
"y_val",
"in",
"y_vals",
"[",
":",
":",
"label_every",
"]",
"]",
"ax",
".",
"set_yticks",
"(",
"range",
"(",
"len",
"(",
"y_vals",
")",
")",
"[",
":",
":",
"label_every",
"]",
")",
"ax",
".",
"set_yticklabels",
"(",
"y_labels",
",",
"*",
"*",
"y_tick_label_kwargs",
")"
] | [
1498,
0
] | [
1571,
55
] | python | en | ['en', 'error', 'th'] | False |
figure_ratio | (data, x_coord='longitude', y_coord='latitude',
fixed_width=8, fixed_height=None,
num_cols=1, num_rows=1) |
Returns a list of the width and height that match constraints on height
and width for a figure while maintaining aspect ratio if possible.
Also handles a grid of plots of identically sized cells.
Parameters
----------
data: xarray.Dataset or xarray.DataArray or list-like
Can be either of the following:
1. A list-like of x and y dimension sizes, respectively
2. An xarray Dataset or DataArray containing x and y dimensions
x_coord, y_coord: str
Names of the x and y coordinates in `data`.
fixed_width, fixed_height: float
The desired width or height. If both are specified, the aspect
ratio is maintained and `fixed_width` and `fixed_height` are
treated as maximum values for the size of a single grid element.
num_cols, num_rows: int
The number of columns and rows in the grid the plots will be in.
Zero, one, or both may be specified.
|
Returns a list of the width and height that match constraints on height
and width for a figure while maintaining aspect ratio if possible.
Also handles a grid of plots of identically sized cells.
Parameters
----------
data: xarray.Dataset or xarray.DataArray or list-like
Can be either of the following:
1. A list-like of x and y dimension sizes, respectively
2. An xarray Dataset or DataArray containing x and y dimensions
x_coord, y_coord: str
Names of the x and y coordinates in `data`.
fixed_width, fixed_height: float
The desired width or height. If both are specified, the aspect
ratio is maintained and `fixed_width` and `fixed_height` are
treated as maximum values for the size of a single grid element.
num_cols, num_rows: int
The number of columns and rows in the grid the plots will be in.
Zero, one, or both may be specified.
| def figure_ratio(data, x_coord='longitude', y_coord='latitude',
fixed_width=8, fixed_height=None,
num_cols=1, num_rows=1):
"""
Returns a list of the width and height that match constraints on height
and width for a figure while maintaining aspect ratio if possible.
Also handles a grid of plots of identically sized cells.
Parameters
----------
data: xarray.Dataset or xarray.DataArray or list-like
Can be either of the following:
1. A list-like of x and y dimension sizes, respectively
2. An xarray Dataset or DataArray containing x and y dimensions
x_coord, y_coord: str
Names of the x and y coordinates in `data`.
fixed_width, fixed_height: float
The desired width or height. If both are specified, the aspect
ratio is maintained and `fixed_width` and `fixed_height` are
treated as maximum values for the size of a single grid element.
num_cols, num_rows: int
The number of columns and rows in the grid the plots will be in.
Zero, one, or both may be specified.
"""
assert (fixed_width is not None) or (fixed_height is not None),\
"At least one of `fixed_width` or `fixed_height` must be specified."
# Determine the x and y dimension sizes and the aspect ratio.
if isinstance(data, xr.Dataset) or isinstance(data, xr.DataArray):
x_sz, y_sz = len(data[x_coord]), len(data[y_coord])
else:
x_sz, y_sz = data[0], data[1]
aspect_ratio = y_sz / x_sz
# Determine the figure size.
if fixed_width is not None:
width = fixed_width; height = width*aspect_ratio
elif fixed_height is not None:
height = fixed_height; width = height/aspect_ratio
# If both `fixed_width` and `fixed_height` are specified, treat as maximums.
if (fixed_width is not None) and (fixed_height is not None):
if width > fixed_width:
height *= fixed_width/width
width = fixed_width
if height > fixed_height:
width *= fixed_height/height
height = fixed_height
return [width*num_cols, height*num_rows] | [
"def",
"figure_ratio",
"(",
"data",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
",",
"fixed_width",
"=",
"8",
",",
"fixed_height",
"=",
"None",
",",
"num_cols",
"=",
"1",
",",
"num_rows",
"=",
"1",
")",
":",
"assert",
"(",
"fixed_width",
"is",
"not",
"None",
")",
"or",
"(",
"fixed_height",
"is",
"not",
"None",
")",
",",
"\"At least one of `fixed_width` or `fixed_height` must be specified.\"",
"# Determine the x and y dimension sizes and the aspect ratio.",
"if",
"isinstance",
"(",
"data",
",",
"xr",
".",
"Dataset",
")",
"or",
"isinstance",
"(",
"data",
",",
"xr",
".",
"DataArray",
")",
":",
"x_sz",
",",
"y_sz",
"=",
"len",
"(",
"data",
"[",
"x_coord",
"]",
")",
",",
"len",
"(",
"data",
"[",
"y_coord",
"]",
")",
"else",
":",
"x_sz",
",",
"y_sz",
"=",
"data",
"[",
"0",
"]",
",",
"data",
"[",
"1",
"]",
"aspect_ratio",
"=",
"y_sz",
"/",
"x_sz",
"# Determine the figure size.",
"if",
"fixed_width",
"is",
"not",
"None",
":",
"width",
"=",
"fixed_width",
"height",
"=",
"width",
"*",
"aspect_ratio",
"elif",
"fixed_height",
"is",
"not",
"None",
":",
"height",
"=",
"fixed_height",
"width",
"=",
"height",
"/",
"aspect_ratio",
"# If both `fixed_width` and `fixed_height` are specified, treat as maximums.",
"if",
"(",
"fixed_width",
"is",
"not",
"None",
")",
"and",
"(",
"fixed_height",
"is",
"not",
"None",
")",
":",
"if",
"width",
">",
"fixed_width",
":",
"height",
"*=",
"fixed_width",
"/",
"width",
"width",
"=",
"fixed_width",
"if",
"height",
">",
"fixed_height",
":",
"width",
"*=",
"fixed_height",
"/",
"height",
"height",
"=",
"fixed_height",
"return",
"[",
"width",
"*",
"num_cols",
",",
"height",
"*",
"num_rows",
"]"
] | [
1573,
0
] | [
1618,
44
] | python | en | ['en', 'error', 'th'] | False |
retrieve_or_create_fig_ax | (fig=None, ax=None, **fig_params) |
Returns appropriate matplotlib Figure and Axes objects given Figure and/or Axes objects.
If neither is supplied, a new figure will be created with associated axes.
If only `fig` is supplied, `(fig,fig.axes[0])` is returned. That is, the first Axes object will be used (and created if necessary).
If `ax` is supplied, `(fig, ax)` is returned.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and the axes of that figure.
|
Returns appropriate matplotlib Figure and Axes objects given Figure and/or Axes objects.
If neither is supplied, a new figure will be created with associated axes.
If only `fig` is supplied, `(fig,fig.axes[0])` is returned. That is, the first Axes object will be used (and created if necessary).
If `ax` is supplied, `(fig, ax)` is returned.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and the axes of that figure.
| def retrieve_or_create_fig_ax(fig=None, ax=None, **fig_params):
"""
Returns appropriate matplotlib Figure and Axes objects given Figure and/or Axes objects.
If neither is supplied, a new figure will be created with associated axes.
If only `fig` is supplied, `(fig,fig.axes[0])` is returned. That is, the first Axes object will be used (and created if necessary).
If `ax` is supplied, `(fig, ax)` is returned.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and the axes of that figure.
"""
if ax is None:
if fig is None:
fig, ax = plt.subplots(**fig_params)
else:
if len(fig.axes) == 0:
fig.add_axes([1,1,1,1])
ax = fig.axes[0]
return fig, ax | [
"def",
"retrieve_or_create_fig_ax",
"(",
"fig",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"fig_params",
")",
":",
"if",
"ax",
"is",
"None",
":",
"if",
"fig",
"is",
"None",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"*",
"*",
"fig_params",
")",
"else",
":",
"if",
"len",
"(",
"fig",
".",
"axes",
")",
"==",
"0",
":",
"fig",
".",
"add_axes",
"(",
"[",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"ax",
"=",
"fig",
".",
"axes",
"[",
"0",
"]",
"return",
"fig",
",",
"ax"
] | [
1620,
0
] | [
1639,
18
] | python | en | ['en', 'error', 'th'] | False |
skip_plot | (n_pts, plot_type, kwargs={}) | Returns a boolean denoting whether to skip plotting data given the number of points it contains. | Returns a boolean denoting whether to skip plotting data given the number of points it contains. | def skip_plot(n_pts, plot_type, kwargs={}):
"""Returns a boolean denoting whether to skip plotting data given the number of points it contains."""
min_pts_dict = {'scatter': 1, 'box': 1, 'gaussian': 3, 'poly': 1, 'cubic_spline': 3, 'line':2}
min_pts = min_pts_dict[plot_type]
if plot_type == 'poly':
assert 'degree' in kwargs.keys(), "When plotting a polynomal fit, there must be" \
"a 'degree' entry in the fit_kwargs parameter."
degree = kwargs['degree']
min_pts = min_pts + degree
return n_pts < min_pts | [
"def",
"skip_plot",
"(",
"n_pts",
",",
"plot_type",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"min_pts_dict",
"=",
"{",
"'scatter'",
":",
"1",
",",
"'box'",
":",
"1",
",",
"'gaussian'",
":",
"3",
",",
"'poly'",
":",
"1",
",",
"'cubic_spline'",
":",
"3",
",",
"'line'",
":",
"2",
"}",
"min_pts",
"=",
"min_pts_dict",
"[",
"plot_type",
"]",
"if",
"plot_type",
"==",
"'poly'",
":",
"assert",
"'degree'",
"in",
"kwargs",
".",
"keys",
"(",
")",
",",
"\"When plotting a polynomal fit, there must be\"",
"\"a 'degree' entry in the fit_kwargs parameter.\"",
"degree",
"=",
"kwargs",
"[",
"'degree'",
"]",
"min_pts",
"=",
"min_pts",
"+",
"degree",
"return",
"n_pts",
"<",
"min_pts"
] | [
1641,
0
] | [
1650,
26
] | python | en | ['en', 'en', 'en'] | True |
remove_non_unique_ordered_list_str | (ordered_list) |
Sets all occurrences of a value in an ordered list after its first occurence to ''.
For example, ['a', 'a', 'b', 'b', 'c'] would become ['a', '', 'b', '', 'c'].
|
Sets all occurrences of a value in an ordered list after its first occurence to ''.
For example, ['a', 'a', 'b', 'b', 'c'] would become ['a', '', 'b', '', 'c'].
| def remove_non_unique_ordered_list_str(ordered_list):
"""
Sets all occurrences of a value in an ordered list after its first occurence to ''.
For example, ['a', 'a', 'b', 'b', 'c'] would become ['a', '', 'b', '', 'c'].
"""
prev_unique_str = ""
for i in range(len(ordered_list)):
current_str = ordered_list[i]
if current_str != prev_unique_str:
prev_unique_str = current_str
else:
ordered_list[i] = ""
return ordered_list | [
"def",
"remove_non_unique_ordered_list_str",
"(",
"ordered_list",
")",
":",
"prev_unique_str",
"=",
"\"\"",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"ordered_list",
")",
")",
":",
"current_str",
"=",
"ordered_list",
"[",
"i",
"]",
"if",
"current_str",
"!=",
"prev_unique_str",
":",
"prev_unique_str",
"=",
"current_str",
"else",
":",
"ordered_list",
"[",
"i",
"]",
"=",
"\"\"",
"return",
"ordered_list"
] | [
1652,
0
] | [
1664,
23
] | python | en | ['en', 'error', 'th'] | False |
get_weeks_per_month | (num_weeks) |
Including January, give 5 weeks to every third month - accounting for
variation between 52 and 54 weeks in a year by adding weeks to the last 3 months.
|
Including January, give 5 weeks to every third month - accounting for
variation between 52 and 54 weeks in a year by adding weeks to the last 3 months.
| def get_weeks_per_month(num_weeks):
"""
Including January, give 5 weeks to every third month - accounting for
variation between 52 and 54 weeks in a year by adding weeks to the last 3 months.
"""
last_months_num_weeks = None
if num_weeks <= 52:
last_months_num_weeks = [5,4,4]
elif num_weeks == 53:
last_months_num_weeks = [5,4,5]
elif num_weeks == 54:
last_months_num_weeks = [5,5,5]
return {month_int:num_weeks for (month_int,num_weeks) in zip(days_per_month.keys(), [5,4,4]*3+last_months_num_weeks)} | [
"def",
"get_weeks_per_month",
"(",
"num_weeks",
")",
":",
"last_months_num_weeks",
"=",
"None",
"if",
"num_weeks",
"<=",
"52",
":",
"last_months_num_weeks",
"=",
"[",
"5",
",",
"4",
",",
"4",
"]",
"elif",
"num_weeks",
"==",
"53",
":",
"last_months_num_weeks",
"=",
"[",
"5",
",",
"4",
",",
"5",
"]",
"elif",
"num_weeks",
"==",
"54",
":",
"last_months_num_weeks",
"=",
"[",
"5",
",",
"5",
",",
"5",
"]",
"return",
"{",
"month_int",
":",
"num_weeks",
"for",
"(",
"month_int",
",",
"num_weeks",
")",
"in",
"zip",
"(",
"days_per_month",
".",
"keys",
"(",
")",
",",
"[",
"5",
",",
"4",
",",
"4",
"]",
"*",
"3",
"+",
"last_months_num_weeks",
")",
"}"
] | [
1670,
0
] | [
1682,
121
] | python | en | ['en', 'error', 'th'] | False |
month_ints_to_month_names | (month_ints) |
Converts ordinal numbers for months (in range [1,12]) to their 3-letter names.
|
Converts ordinal numbers for months (in range [1,12]) to their 3-letter names.
| def month_ints_to_month_names(month_ints):
"""
Converts ordinal numbers for months (in range [1,12]) to their 3-letter names.
"""
return [month_names[i-1] for i in month_ints] | [
"def",
"month_ints_to_month_names",
"(",
"month_ints",
")",
":",
"return",
"[",
"month_names",
"[",
"i",
"-",
"1",
"]",
"for",
"i",
"in",
"month_ints",
"]"
] | [
1687,
0
] | [
1691,
49
] | python | en | ['en', 'error', 'th'] | False |
week_ints_to_month_names | (week_ints) |
Converts ordinal numbers for weeks (in range [1,54]) to their months' 3-letter names.
|
Converts ordinal numbers for weeks (in range [1,54]) to their months' 3-letter names.
| def week_ints_to_month_names(week_ints):
"""
Converts ordinal numbers for weeks (in range [1,54]) to their months' 3-letter names.
"""
weeks_per_month = get_weeks_per_month(max(week_ints))
week_month_strs = []
for week_int in week_ints:
month_int = -1
for current_month_int, current_month_weeks in weeks_per_month.items():
week_int -= current_month_weeks
if week_int <= 0:
month_int = current_month_int
break
week_month_strs.append(month_names[month_int-1])
return week_month_strs | [
"def",
"week_ints_to_month_names",
"(",
"week_ints",
")",
":",
"weeks_per_month",
"=",
"get_weeks_per_month",
"(",
"max",
"(",
"week_ints",
")",
")",
"week_month_strs",
"=",
"[",
"]",
"for",
"week_int",
"in",
"week_ints",
":",
"month_int",
"=",
"-",
"1",
"for",
"current_month_int",
",",
"current_month_weeks",
"in",
"weeks_per_month",
".",
"items",
"(",
")",
":",
"week_int",
"-=",
"current_month_weeks",
"if",
"week_int",
"<=",
"0",
":",
"month_int",
"=",
"current_month_int",
"break",
"week_month_strs",
".",
"append",
"(",
"month_names",
"[",
"month_int",
"-",
"1",
"]",
")",
"return",
"week_month_strs"
] | [
1693,
0
] | [
1707,
26
] | python | en | ['en', 'error', 'th'] | False |
naive_months_ticks_by_week | (week_ints=None) |
Given a list of week numbers (in range [1,54]), returns a list of month strings separated by spaces.
Covers 54 weeks if no list-like of week numbers is given.
This is only intended to be used for labeling axes in plotting.
|
Given a list of week numbers (in range [1,54]), returns a list of month strings separated by spaces.
Covers 54 weeks if no list-like of week numbers is given.
This is only intended to be used for labeling axes in plotting.
| def naive_months_ticks_by_week(week_ints=None):
"""
Given a list of week numbers (in range [1,54]), returns a list of month strings separated by spaces.
Covers 54 weeks if no list-like of week numbers is given.
This is only intended to be used for labeling axes in plotting.
"""
month_ticks_by_week = []
if week_ints is None: # Give month ticks for all weeks.
month_ticks_by_week = week_ints_to_month_names(list(range(54)))
else:
month_ticks_by_week = remove_non_unique_ordered_list_str(week_ints_to_month_names(week_ints))
return month_ticks_by_week | [
"def",
"naive_months_ticks_by_week",
"(",
"week_ints",
"=",
"None",
")",
":",
"month_ticks_by_week",
"=",
"[",
"]",
"if",
"week_ints",
"is",
"None",
":",
"# Give month ticks for all weeks.",
"month_ticks_by_week",
"=",
"week_ints_to_month_names",
"(",
"list",
"(",
"range",
"(",
"54",
")",
")",
")",
"else",
":",
"month_ticks_by_week",
"=",
"remove_non_unique_ordered_list_str",
"(",
"week_ints_to_month_names",
"(",
"week_ints",
")",
")",
"return",
"month_ticks_by_week"
] | [
1709,
0
] | [
1720,
30
] | python | en | ['en', 'error', 'th'] | False |
HtmlSiteStore.get_url_for_resource | (self, resource_identifier=None, only_if_exists=True) |
Return the URL of the HTML document that renders a resource
(e.g., an expectation suite or a validation result).
:param resource_identifier: ExpectationSuiteIdentifier, ValidationResultIdentifier
or any other type's identifier. The argument is optional - when
not supplied, the method returns the URL of the index page.
:return: URL (string)
|
Return the URL of the HTML document that renders a resource
(e.g., an expectation suite or a validation result). | def get_url_for_resource(self, resource_identifier=None, only_if_exists=True):
"""
Return the URL of the HTML document that renders a resource
(e.g., an expectation suite or a validation result).
:param resource_identifier: ExpectationSuiteIdentifier, ValidationResultIdentifier
or any other type's identifier. The argument is optional - when
not supplied, the method returns the URL of the index page.
:return: URL (string)
"""
if resource_identifier is None:
store_backend = self.store_backends["index_page"]
key = ()
elif isinstance(resource_identifier, ExpectationSuiteIdentifier):
store_backend = self.store_backends[ExpectationSuiteIdentifier]
key = resource_identifier.to_tuple()
elif isinstance(resource_identifier, ValidationResultIdentifier):
store_backend = self.store_backends[ValidationResultIdentifier]
key = resource_identifier.to_tuple()
else:
# this method does not support getting the URL of static assets
raise ValueError(
"Cannot get URL for resource {:s}".format(str(resource_identifier))
)
# <WILL> : this is a hack for Taylor. Change this back. 20200924
# if only_if_exists:
# return (
# store_backend.get_url_for_key(key)
# if store_backend.has_key(key)
# else None
# )
# return store_backend.get_url_for_key(key)
if store_backend.base_public_path:
if only_if_exists:
return (
store_backend.get_public_url_for_key(key)
if store_backend.has_key(key)
else None
)
else:
return store_backend.get_public_url_for_key(key)
else:
if only_if_exists:
return (
store_backend.get_url_for_key(key)
if store_backend.has_key(key)
else None
)
else:
return store_backend.get_url_for_key(key) | [
"def",
"get_url_for_resource",
"(",
"self",
",",
"resource_identifier",
"=",
"None",
",",
"only_if_exists",
"=",
"True",
")",
":",
"if",
"resource_identifier",
"is",
"None",
":",
"store_backend",
"=",
"self",
".",
"store_backends",
"[",
"\"index_page\"",
"]",
"key",
"=",
"(",
")",
"elif",
"isinstance",
"(",
"resource_identifier",
",",
"ExpectationSuiteIdentifier",
")",
":",
"store_backend",
"=",
"self",
".",
"store_backends",
"[",
"ExpectationSuiteIdentifier",
"]",
"key",
"=",
"resource_identifier",
".",
"to_tuple",
"(",
")",
"elif",
"isinstance",
"(",
"resource_identifier",
",",
"ValidationResultIdentifier",
")",
":",
"store_backend",
"=",
"self",
".",
"store_backends",
"[",
"ValidationResultIdentifier",
"]",
"key",
"=",
"resource_identifier",
".",
"to_tuple",
"(",
")",
"else",
":",
"# this method does not support getting the URL of static assets",
"raise",
"ValueError",
"(",
"\"Cannot get URL for resource {:s}\"",
".",
"format",
"(",
"str",
"(",
"resource_identifier",
")",
")",
")",
"# <WILL> : this is a hack for Taylor. Change this back. 20200924",
"# if only_if_exists:",
"# return (",
"# store_backend.get_url_for_key(key)",
"# if store_backend.has_key(key)",
"# else None",
"# )",
"# return store_backend.get_url_for_key(key)",
"if",
"store_backend",
".",
"base_public_path",
":",
"if",
"only_if_exists",
":",
"return",
"(",
"store_backend",
".",
"get_public_url_for_key",
"(",
"key",
")",
"if",
"store_backend",
".",
"has_key",
"(",
"key",
")",
"else",
"None",
")",
"else",
":",
"return",
"store_backend",
".",
"get_public_url_for_key",
"(",
"key",
")",
"else",
":",
"if",
"only_if_exists",
":",
"return",
"(",
"store_backend",
".",
"get_url_for_key",
"(",
"key",
")",
"if",
"store_backend",
".",
"has_key",
"(",
"key",
")",
"else",
"None",
")",
"else",
":",
"return",
"store_backend",
".",
"get_url_for_key",
"(",
"key",
")"
] | [
238,
4
] | [
289,
57
] | python | en | ['en', 'error', 'th'] | False |
HtmlSiteStore.write_index_page | (self, page) | This third param_store has a special method, which uses a zero-length tuple as a key. | This third param_store has a special method, which uses a zero-length tuple as a key. | def write_index_page(self, page):
"""This third param_store has a special method, which uses a zero-length tuple as a key."""
return self.store_backends["index_page"].set(
(),
page,
content_encoding="utf-8",
content_type="text/html; " "charset=utf-8",
) | [
"def",
"write_index_page",
"(",
"self",
",",
"page",
")",
":",
"return",
"self",
".",
"store_backends",
"[",
"\"index_page\"",
"]",
".",
"set",
"(",
"(",
")",
",",
"page",
",",
"content_encoding",
"=",
"\"utf-8\"",
",",
"content_type",
"=",
"\"text/html; \"",
"\"charset=utf-8\"",
",",
")"
] | [
333,
4
] | [
340,
9
] | python | en | ['en', 'en', 'en'] | True |
HtmlSiteStore.copy_static_assets | (self, static_assets_source_dir=None) |
Copies static assets, using a special "static_assets" backend store that accepts variable-length tuples as
keys, with no filepath_template.
|
Copies static assets, using a special "static_assets" backend store that accepts variable-length tuples as
keys, with no filepath_template.
| def copy_static_assets(self, static_assets_source_dir=None):
"""
Copies static assets, using a special "static_assets" backend store that accepts variable-length tuples as
keys, with no filepath_template.
"""
file_exclusions = [".DS_Store"]
dir_exclusions = []
if not static_assets_source_dir:
static_assets_source_dir = file_relative_path(
__file__, os.path.join("..", "..", "render", "view", "static")
)
for item in os.listdir(static_assets_source_dir):
# Directory
if os.path.isdir(os.path.join(static_assets_source_dir, item)):
if item in dir_exclusions:
continue
# Recurse
new_source_dir = os.path.join(static_assets_source_dir, item)
self.copy_static_assets(new_source_dir)
# File
else:
# Copy file over using static assets store backend
if item in file_exclusions:
continue
source_name = os.path.join(static_assets_source_dir, item)
with open(source_name, "rb") as f:
# Only use path elements starting from static/ for key
store_key = tuple(os.path.normpath(source_name).split(os.sep))
store_key = store_key[store_key.index("static") :]
content_type, content_encoding = guess_type(item, strict=False)
if content_type is None:
# Use GE-known content-type if possible
if source_name.endswith(".otf"):
content_type = "font/opentype"
else:
# fallback
logger.warning(
"Unable to automatically determine content_type for {}".format(
source_name
)
)
content_type = "text/html; charset=utf8"
self.store_backends["static_assets"].set(
store_key,
f.read(),
content_encoding=content_encoding,
content_type=content_type,
) | [
"def",
"copy_static_assets",
"(",
"self",
",",
"static_assets_source_dir",
"=",
"None",
")",
":",
"file_exclusions",
"=",
"[",
"\".DS_Store\"",
"]",
"dir_exclusions",
"=",
"[",
"]",
"if",
"not",
"static_assets_source_dir",
":",
"static_assets_source_dir",
"=",
"file_relative_path",
"(",
"__file__",
",",
"os",
".",
"path",
".",
"join",
"(",
"\"..\"",
",",
"\"..\"",
",",
"\"render\"",
",",
"\"view\"",
",",
"\"static\"",
")",
")",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"static_assets_source_dir",
")",
":",
"# Directory",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"static_assets_source_dir",
",",
"item",
")",
")",
":",
"if",
"item",
"in",
"dir_exclusions",
":",
"continue",
"# Recurse",
"new_source_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"static_assets_source_dir",
",",
"item",
")",
"self",
".",
"copy_static_assets",
"(",
"new_source_dir",
")",
"# File",
"else",
":",
"# Copy file over using static assets store backend",
"if",
"item",
"in",
"file_exclusions",
":",
"continue",
"source_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"static_assets_source_dir",
",",
"item",
")",
"with",
"open",
"(",
"source_name",
",",
"\"rb\"",
")",
"as",
"f",
":",
"# Only use path elements starting from static/ for key",
"store_key",
"=",
"tuple",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"source_name",
")",
".",
"split",
"(",
"os",
".",
"sep",
")",
")",
"store_key",
"=",
"store_key",
"[",
"store_key",
".",
"index",
"(",
"\"static\"",
")",
":",
"]",
"content_type",
",",
"content_encoding",
"=",
"guess_type",
"(",
"item",
",",
"strict",
"=",
"False",
")",
"if",
"content_type",
"is",
"None",
":",
"# Use GE-known content-type if possible",
"if",
"source_name",
".",
"endswith",
"(",
"\".otf\"",
")",
":",
"content_type",
"=",
"\"font/opentype\"",
"else",
":",
"# fallback",
"logger",
".",
"warning",
"(",
"\"Unable to automatically determine content_type for {}\"",
".",
"format",
"(",
"source_name",
")",
")",
"content_type",
"=",
"\"text/html; charset=utf8\"",
"self",
".",
"store_backends",
"[",
"\"static_assets\"",
"]",
".",
"set",
"(",
"store_key",
",",
"f",
".",
"read",
"(",
")",
",",
"content_encoding",
"=",
"content_encoding",
",",
"content_type",
"=",
"content_type",
",",
")"
] | [
348,
4
] | [
399,
21
] | python | en | ['en', 'error', 'th'] | False |
test_checkpoint_new_raises_error_on_existing_checkpoint | (
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
) |
What does this test and why?
The `checkpoint new` CLI flow should raise an error if the Checkpoint name being created already exists in your checkpoint store.
|
What does this test and why?
The `checkpoint new` CLI flow should raise an error if the Checkpoint name being created already exists in your checkpoint store.
| def test_checkpoint_new_raises_error_on_existing_checkpoint(
mock_emit,
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates,
):
"""
What does this test and why?
The `checkpoint new` CLI flow should raise an error if the Checkpoint name being created already exists in your checkpoint store.
"""
context: DataContext = titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates
monkeypatch.chdir(os.path.dirname(context.root_directory))
runner: CliRunner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint new my_minimal_simple_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 1
stdout: str = result.stdout
assert (
"A Checkpoint named `my_minimal_simple_checkpoint` already exists. Please choose a new name."
in stdout
)
assert mock_emit.call_count == 3
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"success": False,
}
),
]
assert_no_logging_messages_or_tracebacks(
caplog,
result,
) | [
"def",
"test_checkpoint_new_raises_error_on_existing_checkpoint",
"(",
"mock_emit",
",",
"caplog",
",",
"monkeypatch",
",",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
",",
")",
":",
"context",
":",
"DataContext",
"=",
"titanic_pandas_data_context_with_v013_datasource_stats_enabled_with_checkpoints_v1_with_templates",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"runner",
":",
"CliRunner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
":",
"Result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"--v3-api checkpoint new my_minimal_simple_checkpoint\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"1",
"stdout",
":",
"str",
"=",
"result",
".",
"stdout",
"assert",
"(",
"\"A Checkpoint named `my_minimal_simple_checkpoint` already exists. Please choose a new name.\"",
"in",
"stdout",
")",
"assert",
"mock_emit",
".",
"call_count",
"==",
"3",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.checkpoint.new.begin\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v3\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.checkpoint.new.end\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v3\"",
"}",
",",
"\"success\"",
":",
"False",
",",
"}",
")",
",",
"]",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
",",
")"
] | [
600,
0
] | [
652,
5
] | python | en | ['en', 'error', 'th'] | False |
test_checkpoint_new_happy_path_generates_a_notebook_and_checkpoint | (
mock_webbroser,
mock_subprocess,
mock_emit,
caplog,
monkeypatch,
deterministic_asset_dataconnector_context,
titanic_expectation_suite,
) |
What does this test and why?
The v3 (Batch Request) API `checkpoint new` CLI flow includes creating a notebook to configure the Checkpoint.
This test builds that notebook and runs it to generate a Checkpoint and then tests the resulting configuration in the Checkpoint file.
The notebook that is generated does create a sample configuration using one of the available Data Assets, this is what is used to generate the Checkpoint configuration.
|
What does this test and why?
The v3 (Batch Request) API `checkpoint new` CLI flow includes creating a notebook to configure the Checkpoint.
This test builds that notebook and runs it to generate a Checkpoint and then tests the resulting configuration in the Checkpoint file.
The notebook that is generated does create a sample configuration using one of the available Data Assets, this is what is used to generate the Checkpoint configuration.
| def test_checkpoint_new_happy_path_generates_a_notebook_and_checkpoint(
mock_webbroser,
mock_subprocess,
mock_emit,
caplog,
monkeypatch,
deterministic_asset_dataconnector_context,
titanic_expectation_suite,
):
"""
What does this test and why?
The v3 (Batch Request) API `checkpoint new` CLI flow includes creating a notebook to configure the Checkpoint.
This test builds that notebook and runs it to generate a Checkpoint and then tests the resulting configuration in the Checkpoint file.
The notebook that is generated does create a sample configuration using one of the available Data Assets, this is what is used to generate the Checkpoint configuration.
"""
context: DataContext = deterministic_asset_dataconnector_context
root_dir: str = context.root_directory
monkeypatch.chdir(os.path.dirname(root_dir))
assert context.list_checkpoints() == []
context.save_expectation_suite(titanic_expectation_suite)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
# Clear the "data_context.save_expectation_suite" call
mock_emit.reset_mock()
runner: CliRunner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint new passengers",
input="1\n1\n",
catch_exceptions=False,
)
assert result.exit_code == 0
stdout: str = result.stdout
assert "open a notebook for you now" in stdout
assert mock_emit.call_count == 3
assert mock_emit.call_args_list == [
mock.call(
{"event_payload": {}, "event": "data_context.__init__", "success": True}
),
mock.call(
{
"event": "cli.checkpoint.new.begin",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
mock.call(
{
"event": "cli.checkpoint.new.end",
"event_payload": {"api_version": "v3"},
"success": True,
}
),
]
assert mock_subprocess.call_count == 1
assert mock_webbroser.call_count == 0
expected_notebook_path: str = os.path.join(
root_dir, "uncommitted", "edit_checkpoint_passengers.ipynb"
)
assert os.path.isfile(expected_notebook_path)
with open(expected_notebook_path) as f:
nb: NotebookNode = nbformat.read(f, as_version=4)
uncommitted_dir: str = os.path.join(root_dir, "uncommitted")
# Run notebook
# TODO: <ANTHONY>We should mock the datadocs call or skip running that cell within the notebook (rather than commenting it out in the notebook)</ANTHONY>
ep: ExecutePreprocessor = ExecutePreprocessor(timeout=600, kernel_name="python3")
ep.preprocess(nb, {"metadata": {"path": uncommitted_dir}})
# Ensure the checkpoint file was created
expected_checkpoint_path: str = os.path.join(
root_dir, "checkpoints", "passengers.yml"
)
assert os.path.isfile(expected_checkpoint_path)
# Ensure the Checkpoint configuration in the file is as expected
with open(expected_checkpoint_path) as f:
checkpoint_config: str = f.read()
expected_checkpoint_config: str = """name: passengers
config_version: 1.0
template_name:
module_name: great_expectations.checkpoint
class_name: Checkpoint
run_name_template: '%Y%m%d-%H%M%S-my-run-name-template'
expectation_suite_name:
batch_request:
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
site_names: []
evaluation_parameters: {}
runtime_configuration: {}
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_other_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
profilers: []
ge_cloud_id:
"""
assert checkpoint_config == expected_checkpoint_config
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
) | [
"def",
"test_checkpoint_new_happy_path_generates_a_notebook_and_checkpoint",
"(",
"mock_webbroser",
",",
"mock_subprocess",
",",
"mock_emit",
",",
"caplog",
",",
"monkeypatch",
",",
"deterministic_asset_dataconnector_context",
",",
"titanic_expectation_suite",
",",
")",
":",
"context",
":",
"DataContext",
"=",
"deterministic_asset_dataconnector_context",
"root_dir",
":",
"str",
"=",
"context",
".",
"root_directory",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"root_dir",
")",
")",
"assert",
"context",
".",
"list_checkpoints",
"(",
")",
"==",
"[",
"]",
"context",
".",
"save_expectation_suite",
"(",
"titanic_expectation_suite",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"Titanic.warning\"",
"]",
"# Clear the \"data_context.save_expectation_suite\" call",
"mock_emit",
".",
"reset_mock",
"(",
")",
"runner",
":",
"CliRunner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
":",
"Result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"--v3-api checkpoint new passengers\"",
",",
"input",
"=",
"\"1\\n1\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"stdout",
":",
"str",
"=",
"result",
".",
"stdout",
"assert",
"\"open a notebook for you now\"",
"in",
"stdout",
"assert",
"mock_emit",
".",
"call_count",
"==",
"3",
"assert",
"mock_emit",
".",
"call_args_list",
"==",
"[",
"mock",
".",
"call",
"(",
"{",
"\"event_payload\"",
":",
"{",
"}",
",",
"\"event\"",
":",
"\"data_context.__init__\"",
",",
"\"success\"",
":",
"True",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.checkpoint.new.begin\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v3\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"mock",
".",
"call",
"(",
"{",
"\"event\"",
":",
"\"cli.checkpoint.new.end\"",
",",
"\"event_payload\"",
":",
"{",
"\"api_version\"",
":",
"\"v3\"",
"}",
",",
"\"success\"",
":",
"True",
",",
"}",
")",
",",
"]",
"assert",
"mock_subprocess",
".",
"call_count",
"==",
"1",
"assert",
"mock_webbroser",
".",
"call_count",
"==",
"0",
"expected_notebook_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
",",
"\"edit_checkpoint_passengers.ipynb\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_notebook_path",
")",
"with",
"open",
"(",
"expected_notebook_path",
")",
"as",
"f",
":",
"nb",
":",
"NotebookNode",
"=",
"nbformat",
".",
"read",
"(",
"f",
",",
"as_version",
"=",
"4",
")",
"uncommitted_dir",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"uncommitted\"",
")",
"# Run notebook",
"# TODO: <ANTHONY>We should mock the datadocs call or skip running that cell within the notebook (rather than commenting it out in the notebook)</ANTHONY>",
"ep",
":",
"ExecutePreprocessor",
"=",
"ExecutePreprocessor",
"(",
"timeout",
"=",
"600",
",",
"kernel_name",
"=",
"\"python3\"",
")",
"ep",
".",
"preprocess",
"(",
"nb",
",",
"{",
"\"metadata\"",
":",
"{",
"\"path\"",
":",
"uncommitted_dir",
"}",
"}",
")",
"# Ensure the checkpoint file was created",
"expected_checkpoint_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"checkpoints\"",
",",
"\"passengers.yml\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"expected_checkpoint_path",
")",
"# Ensure the Checkpoint configuration in the file is as expected",
"with",
"open",
"(",
"expected_checkpoint_path",
")",
"as",
"f",
":",
"checkpoint_config",
":",
"str",
"=",
"f",
".",
"read",
"(",
")",
"expected_checkpoint_config",
":",
"str",
"=",
"\"\"\"name: passengers\nconfig_version: 1.0\ntemplate_name:\nmodule_name: great_expectations.checkpoint\nclass_name: Checkpoint\nrun_name_template: '%Y%m%d-%H%M%S-my-run-name-template'\nexpectation_suite_name:\nbatch_request:\naction_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n site_names: []\nevaluation_parameters: {}\nruntime_configuration: {}\nvalidations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_other_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n expectation_suite_name: Titanic.warning\nprofilers: []\nge_cloud_id:\n\"\"\"",
"assert",
"checkpoint_config",
"==",
"expected_checkpoint_config",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")"
] | [
660,
0
] | [
783,
5
] | python | en | ['en', 'error', 'th'] | False |
test_checkpoint_script_happy_path_executable_successful_validation_pandas | (
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
) |
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 0 status code
- print a success message
|
We call the "checkpoint script" command on a project with a Checkpoint. | def test_checkpoint_script_happy_path_executable_successful_validation_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 0 status code
- print a success message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
suite: ExpectationSuite = context.create_expectation_suite(
expectation_suite_name="users.delivery"
)
context.save_expectation_suite(expectation_suite=suite)
assert context.list_expectation_suite_names() == ["users.delivery"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: users.delivery
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 0
assert "Validation succeeded!" in output | [
"def",
"test_checkpoint_script_happy_path_executable_successful_validation_pandas",
"(",
"caplog",
",",
"monkeypatch",
",",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
",",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"\"VAR\"",
",",
"\"test\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"MY_PARAM\"",
",",
"\"1\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"OLD_PARAM\"",
",",
"\"2\"",
")",
"context",
":",
"DataContext",
"=",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
"suite",
":",
"ExpectationSuite",
"=",
"context",
".",
"create_expectation_suite",
"(",
"expectation_suite_name",
"=",
"\"users.delivery\"",
")",
"context",
".",
"save_expectation_suite",
"(",
"expectation_suite",
"=",
"suite",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"users.delivery\"",
"]",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"checkpoint_file_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"DataContextConfigDefaults",
".",
"CHECKPOINTS_BASE_DIRECTORY",
".",
"value",
",",
"\"my_fancy_checkpoint.yml\"",
",",
")",
"checkpoint_yaml_config",
":",
"str",
"=",
"f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n expectation_suite_name: users.delivery\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"",
"config",
":",
"dict",
"=",
"dict",
"(",
"yaml",
".",
"load",
"(",
"checkpoint_yaml_config",
")",
")",
"_write_checkpoint_dict_to_file",
"(",
"config",
"=",
"config",
",",
"checkpoint_file_path",
"=",
"checkpoint_file_path",
")",
"runner",
":",
"CliRunner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
":",
"Result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"--v3-api checkpoint script my_fancy_checkpoint\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")",
"script_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"\"run_my_fancy_checkpoint.py\"",
",",
")",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"script_path",
")",
"# In travis on osx, python may not execute from the build dir",
"cmdstring",
":",
"str",
"=",
"f\"python {script_path}\"",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_OS_NAME\"",
")",
"==",
"\"osx\"",
":",
"build_dir",
":",
"str",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_BUILD_DIR\"",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"build_dir",
")",
")",
"cmdstring",
"=",
"f\"python3 {script_path}\"",
"print",
"(",
"\"about to run: \"",
"+",
"cmdstring",
")",
"print",
"(",
"os",
".",
"curdir",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"curdir",
")",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"..\"",
")",
")",
")",
")",
"status",
":",
"int",
"output",
":",
"str",
"status",
",",
"output",
"=",
"subprocess",
".",
"getstatusoutput",
"(",
"cmdstring",
")",
"print",
"(",
"f\"\\n\\nScript exited with code: {status} and output:\\n{output}\"",
")",
"assert",
"status",
"==",
"0",
"assert",
"\"Validation succeeded!\"",
"in",
"output"
] | [
2654,
0
] | [
2764,
44
] | python | en | ['en', 'error', 'th'] | False |
test_checkpoint_script_happy_path_executable_failed_validation_pandas | (
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
) |
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
|
We call the "checkpoint script" command on a project with a Checkpoint. | def test_checkpoint_script_happy_path_executable_failed_validation_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
# To fail an expectation, make number of rows less than 1313 (the original number of rows in the "Titanic" dataset).
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
df: pd.DataFrame = pd.read_csv(filepath_or_buffer=csv_path)
df = df.sample(frac=0.5, replace=True, random_state=1)
df.to_csv(path_or_buf=csv_path)
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 1
assert "Validation failed!" in output | [
"def",
"test_checkpoint_script_happy_path_executable_failed_validation_pandas",
"(",
"caplog",
",",
"monkeypatch",
",",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
",",
"titanic_expectation_suite",
",",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"\"VAR\"",
",",
"\"test\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"MY_PARAM\"",
",",
"\"1\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"OLD_PARAM\"",
",",
"\"2\"",
")",
"context",
":",
"DataContext",
"=",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
"context",
".",
"save_expectation_suite",
"(",
"expectation_suite",
"=",
"titanic_expectation_suite",
",",
"expectation_suite_name",
"=",
"\"Titanic.warning\"",
",",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"Titanic.warning\"",
"]",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"# To fail an expectation, make number of rows less than 1313 (the original number of rows in the \"Titanic\" dataset).",
"csv_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"..\"",
",",
"\"data\"",
",",
"\"titanic\"",
",",
"\"Titanic_19120414_1313.csv\"",
")",
"df",
":",
"pd",
".",
"DataFrame",
"=",
"pd",
".",
"read_csv",
"(",
"filepath_or_buffer",
"=",
"csv_path",
")",
"df",
"=",
"df",
".",
"sample",
"(",
"frac",
"=",
"0.5",
",",
"replace",
"=",
"True",
",",
"random_state",
"=",
"1",
")",
"df",
".",
"to_csv",
"(",
"path_or_buf",
"=",
"csv_path",
")",
"checkpoint_file_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"DataContextConfigDefaults",
".",
"CHECKPOINTS_BASE_DIRECTORY",
".",
"value",
",",
"\"my_fancy_checkpoint.yml\"",
",",
")",
"checkpoint_yaml_config",
":",
"str",
"=",
"f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n expectation_suite_name: Titanic.warning\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"",
"config",
":",
"dict",
"=",
"dict",
"(",
"yaml",
".",
"load",
"(",
"checkpoint_yaml_config",
")",
")",
"_write_checkpoint_dict_to_file",
"(",
"config",
"=",
"config",
",",
"checkpoint_file_path",
"=",
"checkpoint_file_path",
")",
"runner",
":",
"CliRunner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
":",
"Result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"--v3-api checkpoint script my_fancy_checkpoint\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")",
"script_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"\"run_my_fancy_checkpoint.py\"",
",",
")",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"script_path",
")",
"# In travis on osx, python may not execute from the build dir",
"cmdstring",
":",
"str",
"=",
"f\"python {script_path}\"",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_OS_NAME\"",
")",
"==",
"\"osx\"",
":",
"build_dir",
":",
"str",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_BUILD_DIR\"",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"build_dir",
")",
")",
"cmdstring",
"=",
"f\"python3 {script_path}\"",
"print",
"(",
"\"about to run: \"",
"+",
"cmdstring",
")",
"print",
"(",
"os",
".",
"curdir",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"curdir",
")",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"..\"",
")",
")",
")",
")",
"status",
":",
"int",
"output",
":",
"str",
"status",
",",
"output",
"=",
"subprocess",
".",
"getstatusoutput",
"(",
"cmdstring",
")",
"print",
"(",
"f\"\\n\\nScript exited with code: {status} and output:\\n{output}\"",
")",
"assert",
"status",
"==",
"1",
"assert",
"\"Validation failed!\"",
"in",
"output"
] | [
2767,
0
] | [
2885,
41
] | python | en | ['en', 'error', 'th'] | False |
test_checkpoint_script_happy_path_executable_failed_validation_due_to_bad_data_pandas | (
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
) |
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
|
We call the "checkpoint script" command on a project with a Checkpoint. | def test_checkpoint_script_happy_path_executable_failed_validation_due_to_bad_data_pandas(
caplog,
monkeypatch,
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
titanic_expectation_suite,
):
"""
We call the "checkpoint script" command on a project with a Checkpoint.
The command should:
- create the script (note output is tested in other tests)
When run the script should:
- execute
- return a 1 status code
- print a failure message
"""
monkeypatch.setenv("VAR", "test")
monkeypatch.setenv("MY_PARAM", "1")
monkeypatch.setenv("OLD_PARAM", "2")
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
context.save_expectation_suite(
expectation_suite=titanic_expectation_suite,
expectation_suite_name="Titanic.warning",
)
assert context.list_expectation_suite_names() == ["Titanic.warning"]
monkeypatch.chdir(os.path.dirname(context.root_directory))
csv_path: str = os.path.join(
context.root_directory, "..", "data", "titanic", "Titanic_19120414_1313.csv"
)
# mangle the csv
with open(csv_path, "w") as f:
f.write("foo,bar\n1,2\n")
checkpoint_file_path: str = os.path.join(
context.root_directory,
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
"my_fancy_checkpoint.yml",
)
checkpoint_yaml_config: str = f"""
name: my_fancy_checkpoint
config_version: 1
class_name: Checkpoint
run_name_template: "%Y-%M-foo-bar-template-$VAR"
validations:
- batch_request:
datasource_name: my_datasource
data_connector_name: my_special_data_connector
data_asset_name: users
data_connector_query:
index: -1
expectation_suite_name: Titanic.warning
action_list:
- name: store_validation_result
action:
class_name: StoreValidationResultAction
- name: store_evaluation_params
action:
class_name: StoreEvaluationParametersAction
- name: update_data_docs
action:
class_name: UpdateDataDocsAction
evaluation_parameters:
param1: "$MY_PARAM"
param2: 1 + "$OLD_PARAM"
runtime_configuration:
result_format:
result_format: BASIC
partial_unexpected_count: 20
"""
config: dict = dict(yaml.load(checkpoint_yaml_config))
_write_checkpoint_dict_to_file(
config=config, checkpoint_file_path=checkpoint_file_path
)
runner: CliRunner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(
cli,
f"--v3-api checkpoint script my_fancy_checkpoint",
catch_exceptions=False,
)
assert result.exit_code == 0
assert_no_logging_messages_or_tracebacks(
my_caplog=caplog,
click_result=result,
)
script_path: str = os.path.abspath(
os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"run_my_fancy_checkpoint.py",
)
)
assert os.path.isfile(script_path)
# In travis on osx, python may not execute from the build dir
cmdstring: str = f"python {script_path}"
if os.environ.get("TRAVIS_OS_NAME") == "osx":
build_dir: str = os.environ.get("TRAVIS_BUILD_DIR")
print(os.listdir(build_dir))
cmdstring = f"python3 {script_path}"
print("about to run: " + cmdstring)
print(os.curdir)
print(os.listdir(os.curdir))
print(os.listdir(os.path.abspath(os.path.join(context.root_directory, ".."))))
status: int
output: str
status, output = subprocess.getstatusoutput(cmdstring)
print(f"\n\nScript exited with code: {status} and output:\n{output}")
assert status == 1
assert (
'ExecutionEngineError: Error: The column "Name" in BatchData does not exist.'
in output
) | [
"def",
"test_checkpoint_script_happy_path_executable_failed_validation_due_to_bad_data_pandas",
"(",
"caplog",
",",
"monkeypatch",
",",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
",",
"titanic_expectation_suite",
",",
")",
":",
"monkeypatch",
".",
"setenv",
"(",
"\"VAR\"",
",",
"\"test\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"MY_PARAM\"",
",",
"\"1\"",
")",
"monkeypatch",
".",
"setenv",
"(",
"\"OLD_PARAM\"",
",",
"\"2\"",
")",
"context",
":",
"DataContext",
"=",
"titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled",
"context",
".",
"save_expectation_suite",
"(",
"expectation_suite",
"=",
"titanic_expectation_suite",
",",
"expectation_suite_name",
"=",
"\"Titanic.warning\"",
",",
")",
"assert",
"context",
".",
"list_expectation_suite_names",
"(",
")",
"==",
"[",
"\"Titanic.warning\"",
"]",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"csv_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"..\"",
",",
"\"data\"",
",",
"\"titanic\"",
",",
"\"Titanic_19120414_1313.csv\"",
")",
"# mangle the csv",
"with",
"open",
"(",
"csv_path",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"foo,bar\\n1,2\\n\"",
")",
"checkpoint_file_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"DataContextConfigDefaults",
".",
"CHECKPOINTS_BASE_DIRECTORY",
".",
"value",
",",
"\"my_fancy_checkpoint.yml\"",
",",
")",
"checkpoint_yaml_config",
":",
"str",
"=",
"f\"\"\"\n name: my_fancy_checkpoint\n config_version: 1\n class_name: Checkpoint\n run_name_template: \"%Y-%M-foo-bar-template-$VAR\"\n validations:\n - batch_request:\n datasource_name: my_datasource\n data_connector_name: my_special_data_connector\n data_asset_name: users\n data_connector_query:\n index: -1\n expectation_suite_name: Titanic.warning\n action_list:\n - name: store_validation_result\n action:\n class_name: StoreValidationResultAction\n - name: store_evaluation_params\n action:\n class_name: StoreEvaluationParametersAction\n - name: update_data_docs\n action:\n class_name: UpdateDataDocsAction\n evaluation_parameters:\n param1: \"$MY_PARAM\"\n param2: 1 + \"$OLD_PARAM\"\n runtime_configuration:\n result_format:\n result_format: BASIC\n partial_unexpected_count: 20\n \"\"\"",
"config",
":",
"dict",
"=",
"dict",
"(",
"yaml",
".",
"load",
"(",
"checkpoint_yaml_config",
")",
")",
"_write_checkpoint_dict_to_file",
"(",
"config",
"=",
"config",
",",
"checkpoint_file_path",
"=",
"checkpoint_file_path",
")",
"runner",
":",
"CliRunner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"result",
":",
"Result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"f\"--v3-api checkpoint script my_fancy_checkpoint\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert_no_logging_messages_or_tracebacks",
"(",
"my_caplog",
"=",
"caplog",
",",
"click_result",
"=",
"result",
",",
")",
"script_path",
":",
"str",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"\"run_my_fancy_checkpoint.py\"",
",",
")",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"script_path",
")",
"# In travis on osx, python may not execute from the build dir",
"cmdstring",
":",
"str",
"=",
"f\"python {script_path}\"",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_OS_NAME\"",
")",
"==",
"\"osx\"",
":",
"build_dir",
":",
"str",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"TRAVIS_BUILD_DIR\"",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"build_dir",
")",
")",
"cmdstring",
"=",
"f\"python3 {script_path}\"",
"print",
"(",
"\"about to run: \"",
"+",
"cmdstring",
")",
"print",
"(",
"os",
".",
"curdir",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"curdir",
")",
")",
"print",
"(",
"os",
".",
"listdir",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"..\"",
")",
")",
")",
")",
"status",
":",
"int",
"output",
":",
"str",
"status",
",",
"output",
"=",
"subprocess",
".",
"getstatusoutput",
"(",
"cmdstring",
")",
"print",
"(",
"f\"\\n\\nScript exited with code: {status} and output:\\n{output}\"",
")",
"assert",
"status",
"==",
"1",
"assert",
"(",
"'ExecutionEngineError: Error: The column \"Name\" in BatchData does not exist.'",
"in",
"output",
")"
] | [
2888,
0
] | [
3008,
5
] | python | en | ['en', 'error', 'th'] | False |
retry_on_exception | (tries=6, delay=1, backoff=2, max_delay=32) |
Decorator for implementing exponential backoff for retrying on failures.
tries: Max number of tries to execute the wrapped function before failing.
delay: Delay time in seconds before the FIRST retry.
backoff: Multiplier to extend the initial delay by for each retry.
max_delay: Max time in seconds to wait between retries.
|
Decorator for implementing exponential backoff for retrying on failures. | def retry_on_exception(tries=6, delay=1, backoff=2, max_delay=32):
'''
Decorator for implementing exponential backoff for retrying on failures.
tries: Max number of tries to execute the wrapped function before failing.
delay: Delay time in seconds before the FIRST retry.
backoff: Multiplier to extend the initial delay by for each retry.
max_delay: Max time in seconds to wait between retries.
'''
tries = math.floor(tries)
if tries < 1:
raise ValueError('"tries" must be greater than or equal to 1.')
if delay < 0:
raise ValueError('"delay" must be greater than or equal to 0.')
if backoff < 1:
raise ValueError('"backoff" must be greater than or equal to 1.')
if max_delay < delay:
raise ValueError('"max_delay" must be greater than or equal to delay.')
def decorated_function_with_retry(func):
@wraps(func)
def function_to_retry(*args, **kwargs):
local_tries, local_delay = tries, delay
while local_tries > 1:
try:
return func(*args, **kwargs)
except Exception as e:
if local_delay > max_delay:
local_delay = max_delay
logging.exception('%s: Retrying in %d seconds...'
% (str(e), local_delay))
time.sleep(local_delay)
local_tries -= 1
local_delay *= backoff
return func(*args, **kwargs)
return function_to_retry
return decorated_function_with_retry | [
"def",
"retry_on_exception",
"(",
"tries",
"=",
"6",
",",
"delay",
"=",
"1",
",",
"backoff",
"=",
"2",
",",
"max_delay",
"=",
"32",
")",
":",
"tries",
"=",
"math",
".",
"floor",
"(",
"tries",
")",
"if",
"tries",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'\"tries\" must be greater than or equal to 1.'",
")",
"if",
"delay",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'\"delay\" must be greater than or equal to 0.'",
")",
"if",
"backoff",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'\"backoff\" must be greater than or equal to 1.'",
")",
"if",
"max_delay",
"<",
"delay",
":",
"raise",
"ValueError",
"(",
"'\"max_delay\" must be greater than or equal to delay.'",
")",
"def",
"decorated_function_with_retry",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"function_to_retry",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"local_tries",
",",
"local_delay",
"=",
"tries",
",",
"delay",
"while",
"local_tries",
">",
"1",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"e",
":",
"if",
"local_delay",
">",
"max_delay",
":",
"local_delay",
"=",
"max_delay",
"logging",
".",
"exception",
"(",
"'%s: Retrying in %d seconds...'",
"%",
"(",
"str",
"(",
"e",
")",
",",
"local_delay",
")",
")",
"time",
".",
"sleep",
"(",
"local_delay",
")",
"local_tries",
"-=",
"1",
"local_delay",
"*=",
"backoff",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"function_to_retry",
"return",
"decorated_function_with_retry"
] | [
10,
0
] | [
46,
40
] | python | en | ['en', 'error', 'th'] | False |
rate_limited | (max_per_second) | This decorator limits how often a method can get called in a second.
If the limit is exceeded, the call will be held in a queue until
enough time has passed.
Useful when trying to avoid overloading a system with rapid calls. | This decorator limits how often a method can get called in a second.
If the limit is exceeded, the call will be held in a queue until
enough time has passed.
Useful when trying to avoid overloading a system with rapid calls. | def rate_limited(max_per_second):
""" This decorator limits how often a method can get called in a second.
If the limit is exceeded, the call will be held in a queue until
enough time has passed.
Useful when trying to avoid overloading a system with rapid calls. """
min_interval = 1.0 / float(max_per_second)
def decorate(func):
last_time_called = [0.0]
rate_lock = threading.Lock() # To support multi-threading
def rate_limited_function(*args, **kargs):
try:
rate_lock.acquire(True)
elapsed = None
if sys.version_info[0] >= 3:
elapsed = time.process_time() - last_time_called[0]
else:
elapsed = time.clock() - last_time_called[0]
wait_time_remaining = min_interval - elapsed
if wait_time_remaining > 0:
time.sleep(wait_time_remaining)
if sys.version_info[0] >= 3:
last_time_called[0] = time.process_time()
else:
last_time_called[0] = time.clock()
finally:
rate_lock.release()
return func(*args, **kargs)
return rate_limited_function
return decorate | [
"def",
"rate_limited",
"(",
"max_per_second",
")",
":",
"min_interval",
"=",
"1.0",
"/",
"float",
"(",
"max_per_second",
")",
"def",
"decorate",
"(",
"func",
")",
":",
"last_time_called",
"=",
"[",
"0.0",
"]",
"rate_lock",
"=",
"threading",
".",
"Lock",
"(",
")",
"# To support multi-threading",
"def",
"rate_limited_function",
"(",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"try",
":",
"rate_lock",
".",
"acquire",
"(",
"True",
")",
"elapsed",
"=",
"None",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"elapsed",
"=",
"time",
".",
"process_time",
"(",
")",
"-",
"last_time_called",
"[",
"0",
"]",
"else",
":",
"elapsed",
"=",
"time",
".",
"clock",
"(",
")",
"-",
"last_time_called",
"[",
"0",
"]",
"wait_time_remaining",
"=",
"min_interval",
"-",
"elapsed",
"if",
"wait_time_remaining",
">",
"0",
":",
"time",
".",
"sleep",
"(",
"wait_time_remaining",
")",
"if",
"sys",
".",
"version_info",
"[",
"0",
"]",
">=",
"3",
":",
"last_time_called",
"[",
"0",
"]",
"=",
"time",
".",
"process_time",
"(",
")",
"else",
":",
"last_time_called",
"[",
"0",
"]",
"=",
"time",
".",
"clock",
"(",
")",
"finally",
":",
"rate_lock",
".",
"release",
"(",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
"return",
"rate_limited_function",
"return",
"decorate"
] | [
49,
0
] | [
79,
19
] | python | en | ['en', 'en', 'en'] | True |
deprecated | (message=None) | This decorator marks methods as deprecated.
A warning is displayed if the method is called. | This decorator marks methods as deprecated.
A warning is displayed if the method is called. | def deprecated(message=None):
""" This decorator marks methods as deprecated.
A warning is displayed if the method is called. """
def decorated_method_to_deprecate(func):
if inspect.isclass(func):
# Handle a deprecated class differently from a deprecated method
msg = "Class {}() is DEPRECATED! *** ".format(func.__name__)
if message:
msg += "<> %s <>" % message
warnings.simplefilter('always', DeprecationWarning) # See Warnings
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # Set Default
return func
@wraps(func)
def new_func(*args, **kwargs):
msg = "Method {}() is DEPRECATED! *** ".format(func.__name__)
if message:
msg += "<> %s <>" % message
warnings.simplefilter('always', DeprecationWarning) # See Warnings
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # Set Default
return func(*args, **kwargs)
return new_func
return decorated_method_to_deprecate | [
"def",
"deprecated",
"(",
"message",
"=",
"None",
")",
":",
"def",
"decorated_method_to_deprecate",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"isclass",
"(",
"func",
")",
":",
"# Handle a deprecated class differently from a deprecated method",
"msg",
"=",
"\"Class {}() is DEPRECATED! *** \"",
".",
"format",
"(",
"func",
".",
"__name__",
")",
"if",
"message",
":",
"msg",
"+=",
"\"<> %s <>\"",
"%",
"message",
"warnings",
".",
"simplefilter",
"(",
"'always'",
",",
"DeprecationWarning",
")",
"# See Warnings",
"warnings",
".",
"warn",
"(",
"msg",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"warnings",
".",
"simplefilter",
"(",
"'default'",
",",
"DeprecationWarning",
")",
"# Set Default",
"return",
"func",
"@",
"wraps",
"(",
"func",
")",
"def",
"new_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"msg",
"=",
"\"Method {}() is DEPRECATED! *** \"",
".",
"format",
"(",
"func",
".",
"__name__",
")",
"if",
"message",
":",
"msg",
"+=",
"\"<> %s <>\"",
"%",
"message",
"warnings",
".",
"simplefilter",
"(",
"'always'",
",",
"DeprecationWarning",
")",
"# See Warnings",
"warnings",
".",
"warn",
"(",
"msg",
",",
"category",
"=",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"warnings",
".",
"simplefilter",
"(",
"'default'",
",",
"DeprecationWarning",
")",
"# Set Default",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"new_func",
"return",
"decorated_method_to_deprecate"
] | [
82,
0
] | [
107,
40
] | python | en | ['en', 'en', 'en'] | True |
benchmark_querying | (n_docs_options,
retriever_doc_stores,
data_dir,
data_s3_url,
filename_gold,
filename_negative,
n_queries,
embeddings_filenames,
embeddings_dir,
update_json,
save_markdown,
**kwargs) | Benchmark the time it takes to perform querying. Doc embeddings are loaded from file. | Benchmark the time it takes to perform querying. Doc embeddings are loaded from file. | def benchmark_querying(n_docs_options,
retriever_doc_stores,
data_dir,
data_s3_url,
filename_gold,
filename_negative,
n_queries,
embeddings_filenames,
embeddings_dir,
update_json,
save_markdown,
**kwargs):
""" Benchmark the time it takes to perform querying. Doc embeddings are loaded from file."""
retriever_results = []
for n_docs in n_docs_options:
for retriever_name, doc_store_name in retriever_doc_stores:
try:
logger.info(f"##### Start querying run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### ")
if retriever_name == "elastic":
similarity = "cosine"
else:
similarity = "dot_product"
doc_store = get_document_store(doc_store_name, similarity=similarity)
retriever = get_retriever(retriever_name, doc_store)
add_precomputed = retriever_name in ["dpr"]
# For DPR, precomputed embeddings are loaded from file
docs, labels = prepare_data(data_dir=data_dir,
filename_gold=filename_gold,
filename_negative=filename_negative,
data_s3_url=data_s3_url,
embeddings_filenames=embeddings_filenames,
embeddings_dir=embeddings_dir,
n_docs=n_docs,
n_queries=n_queries,
add_precomputed=add_precomputed)
logger.info("Start indexing...")
index_to_doc_store(doc_store, docs, retriever, labels)
logger.info("Start queries...")
raw_results = retriever.eval()
results = {
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"n_queries": raw_results["n_questions"],
"retrieve_time": raw_results["retrieve_time"],
"queries_per_second": raw_results["n_questions"] / raw_results["retrieve_time"],
"seconds_per_query": raw_results["retrieve_time"]/ raw_results["n_questions"],
"recall": raw_results["recall"] * 100,
"map": raw_results["map"] * 100,
"top_k": raw_results["top_k"],
"date_time": datetime.datetime.now(),
"error": None
}
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
time.sleep(5)
del doc_store
del retriever
except Exception:
tb = traceback.format_exc()
logging.error(f"##### The following Error was raised while running querying run: {retriever_name}, {doc_store_name}, {n_docs} docs #####")
logging.error(tb)
results = {
"retriever": retriever_name,
"doc_store": doc_store_name,
"n_docs": n_docs,
"n_queries": 0,
"retrieve_time": 0.,
"queries_per_second": 0.,
"seconds_per_query": 0.,
"recall": 0.,
"map": 0.,
"top_k": 0,
"date_time": datetime.datetime.now(),
"error": str(tb)
}
logger.info("Deleting all docs from this run ...")
if isinstance(doc_store, FAISSDocumentStore):
doc_store.session.close()
else:
doc_store.delete_all_documents(index=doc_index)
doc_store.delete_all_documents(index=label_index)
time.sleep(5)
del doc_store
del retriever
logger.info(results)
retriever_results.append(results)
retriever_df = pd.DataFrame.from_records(retriever_results)
retriever_df = retriever_df.sort_values(by="retriever").sort_values(by="doc_store")
retriever_df.to_csv(query_results_file)
if save_markdown:
md_file = query_results_file.replace(".csv", ".md")
with open(md_file, "w") as f:
f.write(str(retriever_df.to_markdown()))
if update_json:
populate_retriever_json() | [
"def",
"benchmark_querying",
"(",
"n_docs_options",
",",
"retriever_doc_stores",
",",
"data_dir",
",",
"data_s3_url",
",",
"filename_gold",
",",
"filename_negative",
",",
"n_queries",
",",
"embeddings_filenames",
",",
"embeddings_dir",
",",
"update_json",
",",
"save_markdown",
",",
"*",
"*",
"kwargs",
")",
":",
"retriever_results",
"=",
"[",
"]",
"for",
"n_docs",
"in",
"n_docs_options",
":",
"for",
"retriever_name",
",",
"doc_store_name",
"in",
"retriever_doc_stores",
":",
"try",
":",
"logger",
".",
"info",
"(",
"f\"##### Start querying run: {retriever_name}, {doc_store_name}, {n_docs} docs ##### \"",
")",
"if",
"retriever_name",
"==",
"\"elastic\"",
":",
"similarity",
"=",
"\"cosine\"",
"else",
":",
"similarity",
"=",
"\"dot_product\"",
"doc_store",
"=",
"get_document_store",
"(",
"doc_store_name",
",",
"similarity",
"=",
"similarity",
")",
"retriever",
"=",
"get_retriever",
"(",
"retriever_name",
",",
"doc_store",
")",
"add_precomputed",
"=",
"retriever_name",
"in",
"[",
"\"dpr\"",
"]",
"# For DPR, precomputed embeddings are loaded from file",
"docs",
",",
"labels",
"=",
"prepare_data",
"(",
"data_dir",
"=",
"data_dir",
",",
"filename_gold",
"=",
"filename_gold",
",",
"filename_negative",
"=",
"filename_negative",
",",
"data_s3_url",
"=",
"data_s3_url",
",",
"embeddings_filenames",
"=",
"embeddings_filenames",
",",
"embeddings_dir",
"=",
"embeddings_dir",
",",
"n_docs",
"=",
"n_docs",
",",
"n_queries",
"=",
"n_queries",
",",
"add_precomputed",
"=",
"add_precomputed",
")",
"logger",
".",
"info",
"(",
"\"Start indexing...\"",
")",
"index_to_doc_store",
"(",
"doc_store",
",",
"docs",
",",
"retriever",
",",
"labels",
")",
"logger",
".",
"info",
"(",
"\"Start queries...\"",
")",
"raw_results",
"=",
"retriever",
".",
"eval",
"(",
")",
"results",
"=",
"{",
"\"retriever\"",
":",
"retriever_name",
",",
"\"doc_store\"",
":",
"doc_store_name",
",",
"\"n_docs\"",
":",
"n_docs",
",",
"\"n_queries\"",
":",
"raw_results",
"[",
"\"n_questions\"",
"]",
",",
"\"retrieve_time\"",
":",
"raw_results",
"[",
"\"retrieve_time\"",
"]",
",",
"\"queries_per_second\"",
":",
"raw_results",
"[",
"\"n_questions\"",
"]",
"/",
"raw_results",
"[",
"\"retrieve_time\"",
"]",
",",
"\"seconds_per_query\"",
":",
"raw_results",
"[",
"\"retrieve_time\"",
"]",
"/",
"raw_results",
"[",
"\"n_questions\"",
"]",
",",
"\"recall\"",
":",
"raw_results",
"[",
"\"recall\"",
"]",
"*",
"100",
",",
"\"map\"",
":",
"raw_results",
"[",
"\"map\"",
"]",
"*",
"100",
",",
"\"top_k\"",
":",
"raw_results",
"[",
"\"top_k\"",
"]",
",",
"\"date_time\"",
":",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"\"error\"",
":",
"None",
"}",
"logger",
".",
"info",
"(",
"\"Deleting all docs from this run ...\"",
")",
"if",
"isinstance",
"(",
"doc_store",
",",
"FAISSDocumentStore",
")",
":",
"doc_store",
".",
"session",
".",
"close",
"(",
")",
"else",
":",
"doc_store",
".",
"delete_all_documents",
"(",
"index",
"=",
"doc_index",
")",
"doc_store",
".",
"delete_all_documents",
"(",
"index",
"=",
"label_index",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"del",
"doc_store",
"del",
"retriever",
"except",
"Exception",
":",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"logging",
".",
"error",
"(",
"f\"##### The following Error was raised while running querying run: {retriever_name}, {doc_store_name}, {n_docs} docs #####\"",
")",
"logging",
".",
"error",
"(",
"tb",
")",
"results",
"=",
"{",
"\"retriever\"",
":",
"retriever_name",
",",
"\"doc_store\"",
":",
"doc_store_name",
",",
"\"n_docs\"",
":",
"n_docs",
",",
"\"n_queries\"",
":",
"0",
",",
"\"retrieve_time\"",
":",
"0.",
",",
"\"queries_per_second\"",
":",
"0.",
",",
"\"seconds_per_query\"",
":",
"0.",
",",
"\"recall\"",
":",
"0.",
",",
"\"map\"",
":",
"0.",
",",
"\"top_k\"",
":",
"0",
",",
"\"date_time\"",
":",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"\"error\"",
":",
"str",
"(",
"tb",
")",
"}",
"logger",
".",
"info",
"(",
"\"Deleting all docs from this run ...\"",
")",
"if",
"isinstance",
"(",
"doc_store",
",",
"FAISSDocumentStore",
")",
":",
"doc_store",
".",
"session",
".",
"close",
"(",
")",
"else",
":",
"doc_store",
".",
"delete_all_documents",
"(",
"index",
"=",
"doc_index",
")",
"doc_store",
".",
"delete_all_documents",
"(",
"index",
"=",
"label_index",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"del",
"doc_store",
"del",
"retriever",
"logger",
".",
"info",
"(",
"results",
")",
"retriever_results",
".",
"append",
"(",
"results",
")",
"retriever_df",
"=",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"retriever_results",
")",
"retriever_df",
"=",
"retriever_df",
".",
"sort_values",
"(",
"by",
"=",
"\"retriever\"",
")",
".",
"sort_values",
"(",
"by",
"=",
"\"doc_store\"",
")",
"retriever_df",
".",
"to_csv",
"(",
"query_results_file",
")",
"if",
"save_markdown",
":",
"md_file",
"=",
"query_results_file",
".",
"replace",
"(",
"\".csv\"",
",",
"\".md\"",
")",
"with",
"open",
"(",
"md_file",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"str",
"(",
"retriever_df",
".",
"to_markdown",
"(",
")",
")",
")",
"if",
"update_json",
":",
"populate_retriever_json",
"(",
")"
] | [
117,
0
] | [
220,
33
] | python | en | ['en', 'en', 'en'] | True |
prepare_data | (data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, n_docs=None, n_queries=None, add_precomputed=False) |
filename_gold points to a squad format file.
filename_negative points to a csv file where the first column is doc_id and second is document text.
If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document
|
filename_gold points to a squad format file.
filename_negative points to a csv file where the first column is doc_id and second is document text.
If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document
| def prepare_data(data_dir, filename_gold, filename_negative, data_s3_url, embeddings_filenames, embeddings_dir, n_docs=None, n_queries=None, add_precomputed=False):
"""
filename_gold points to a squad format file.
filename_negative points to a csv file where the first column is doc_id and second is document text.
If add_precomputed is True, this fn will look in the embeddings files for precomputed embeddings to add to each Document
"""
logging.getLogger("farm").setLevel(logging.INFO)
download_from_s3(data_s3_url + filename_gold, cache_dir=data_dir)
download_from_s3(data_s3_url + filename_negative, cache_dir=data_dir)
if add_precomputed:
for embedding_filename in embeddings_filenames:
download_from_s3(data_s3_url + str(embeddings_dir) + embedding_filename, cache_dir=data_dir)
logging.getLogger("farm").setLevel(logging.WARN)
gold_docs, labels = eval_data_from_json(data_dir + filename_gold)
# Reduce number of docs
gold_docs = gold_docs[:n_docs]
# Remove labels whose gold docs have been removed
doc_ids = [x.id for x in gold_docs]
labels = [x for x in labels if x.document_id in doc_ids]
# Filter labels down to n_queries
selected_queries = list(set(f"{x.document_id} | {x.question}" for x in labels))
selected_queries = selected_queries[:n_queries]
labels = [x for x in labels if f"{x.document_id} | {x.question}" in selected_queries]
n_neg_docs = max(0, n_docs - len(gold_docs))
neg_docs = prepare_negative_passages(data_dir, filename_negative, n_neg_docs)
docs = gold_docs + neg_docs
if add_precomputed:
docs = add_precomputed_embeddings(data_dir + embeddings_dir, embeddings_filenames, docs)
return docs, labels | [
"def",
"prepare_data",
"(",
"data_dir",
",",
"filename_gold",
",",
"filename_negative",
",",
"data_s3_url",
",",
"embeddings_filenames",
",",
"embeddings_dir",
",",
"n_docs",
"=",
"None",
",",
"n_queries",
"=",
"None",
",",
"add_precomputed",
"=",
"False",
")",
":",
"logging",
".",
"getLogger",
"(",
"\"farm\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"download_from_s3",
"(",
"data_s3_url",
"+",
"filename_gold",
",",
"cache_dir",
"=",
"data_dir",
")",
"download_from_s3",
"(",
"data_s3_url",
"+",
"filename_negative",
",",
"cache_dir",
"=",
"data_dir",
")",
"if",
"add_precomputed",
":",
"for",
"embedding_filename",
"in",
"embeddings_filenames",
":",
"download_from_s3",
"(",
"data_s3_url",
"+",
"str",
"(",
"embeddings_dir",
")",
"+",
"embedding_filename",
",",
"cache_dir",
"=",
"data_dir",
")",
"logging",
".",
"getLogger",
"(",
"\"farm\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"WARN",
")",
"gold_docs",
",",
"labels",
"=",
"eval_data_from_json",
"(",
"data_dir",
"+",
"filename_gold",
")",
"# Reduce number of docs",
"gold_docs",
"=",
"gold_docs",
"[",
":",
"n_docs",
"]",
"# Remove labels whose gold docs have been removed",
"doc_ids",
"=",
"[",
"x",
".",
"id",
"for",
"x",
"in",
"gold_docs",
"]",
"labels",
"=",
"[",
"x",
"for",
"x",
"in",
"labels",
"if",
"x",
".",
"document_id",
"in",
"doc_ids",
"]",
"# Filter labels down to n_queries",
"selected_queries",
"=",
"list",
"(",
"set",
"(",
"f\"{x.document_id} | {x.question}\"",
"for",
"x",
"in",
"labels",
")",
")",
"selected_queries",
"=",
"selected_queries",
"[",
":",
"n_queries",
"]",
"labels",
"=",
"[",
"x",
"for",
"x",
"in",
"labels",
"if",
"f\"{x.document_id} | {x.question}\"",
"in",
"selected_queries",
"]",
"n_neg_docs",
"=",
"max",
"(",
"0",
",",
"n_docs",
"-",
"len",
"(",
"gold_docs",
")",
")",
"neg_docs",
"=",
"prepare_negative_passages",
"(",
"data_dir",
",",
"filename_negative",
",",
"n_neg_docs",
")",
"docs",
"=",
"gold_docs",
"+",
"neg_docs",
"if",
"add_precomputed",
":",
"docs",
"=",
"add_precomputed_embeddings",
"(",
"data_dir",
"+",
"embeddings_dir",
",",
"embeddings_filenames",
",",
"docs",
")",
"return",
"docs",
",",
"labels"
] | [
256,
0
] | [
292,
23
] | python | en | ['en', 'error', 'th'] | False |
recipe_image_file_path | (instance, filename) | Generate file path for new recipe image | Generate file path for new recipe image | def recipe_image_file_path(instance, filename):
"""Generate file path for new recipe image"""
ext = filename.split('.')[-1]
filename = f'{uuid.uuid4()}.{ext}'
return os.path.join('upload/recipe/', filename) | [
"def",
"recipe_image_file_path",
"(",
"instance",
",",
"filename",
")",
":",
"ext",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"filename",
"=",
"f'{uuid.uuid4()}.{ext}'",
"return",
"os",
".",
"path",
".",
"join",
"(",
"'upload/recipe/'",
",",
"filename",
")"
] | [
9,
0
] | [
14,
51
] | python | en | ['en', 'en', 'en'] | True |
UserManager.create_user | (self, email, password=None, **extra_fields) | Creates and saves a new user | Creates and saves a new user | def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user | [
"def",
"create_user",
"(",
"self",
",",
"email",
",",
"password",
"=",
"None",
",",
"*",
"*",
"extra_fields",
")",
":",
"if",
"not",
"email",
":",
"raise",
"ValueError",
"(",
"'Users must have an email address'",
")",
"user",
"=",
"self",
".",
"model",
"(",
"email",
"=",
"self",
".",
"normalize_email",
"(",
"email",
")",
",",
"*",
"*",
"extra_fields",
")",
"user",
".",
"set_password",
"(",
"password",
")",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"return",
"user"
] | [
19,
4
] | [
27,
19
] | python | en | ['en', 'en', 'en'] | True |
UserManager.create_superuser | (self, email, password) | Creates and saves a new super user | Creates and saves a new super user | def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user | [
"def",
"create_superuser",
"(",
"self",
",",
"email",
",",
"password",
")",
":",
"user",
"=",
"self",
".",
"create_user",
"(",
"email",
",",
"password",
")",
"user",
".",
"is_staff",
"=",
"True",
"user",
".",
"is_superuser",
"=",
"True",
"user",
".",
"save",
"(",
"using",
"=",
"self",
".",
"_db",
")",
"return",
"user"
] | [
29,
4
] | [
36,
19
] | python | en | ['en', 'en', 'en'] | True |
ExpectTableColumnsToMatchSet.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
# Setting up a configuration
super().validate_configuration(configuration)
# Ensuring that a proper value has been provided
try:
assert "column_set" in configuration.kwargs, "column_set is required"
assert (
isinstance(configuration.kwargs["column_set"], (list, set, dict))
or configuration.kwargs["column_set"] is None
), "column_set must be a list, set, or None"
if isinstance(configuration.kwargs["column_set"], dict):
assert (
"$PARAMETER" in configuration.kwargs["column_set"]
), 'Evaluation Parameter dict for column_set kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"# Setting up a configuration",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"# Ensuring that a proper value has been provided",
"try",
":",
"assert",
"\"column_set\"",
"in",
"configuration",
".",
"kwargs",
",",
"\"column_set is required\"",
"assert",
"(",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"column_set\"",
"]",
",",
"(",
"list",
",",
"set",
",",
"dict",
")",
")",
"or",
"configuration",
".",
"kwargs",
"[",
"\"column_set\"",
"]",
"is",
"None",
")",
",",
"\"column_set must be a list, set, or None\"",
"if",
"isinstance",
"(",
"configuration",
".",
"kwargs",
"[",
"\"column_set\"",
"]",
",",
"dict",
")",
":",
"assert",
"(",
"\"$PARAMETER\"",
"in",
"configuration",
".",
"kwargs",
"[",
"\"column_set\"",
"]",
")",
",",
"'Evaluation Parameter dict for column_set kwarg must have \"$PARAMETER\" key.'",
"except",
"AssertionError",
"as",
"e",
":",
"raise",
"InvalidExpectationConfigurationError",
"(",
"str",
"(",
"e",
")",
")",
"return",
"True"
] | [
70,
4
] | [
98,
19
] | python | en | ['en', 'error', 'th'] | False |
UnboundedQueue.qsize | (self) | Returns the number of items currently in the queue. | Returns the number of items currently in the queue. | def qsize(self):
"""Returns the number of items currently in the queue."""
return len(self._data) | [
"def",
"qsize",
"(",
"self",
")",
":",
"return",
"len",
"(",
"self",
".",
"_data",
")"
] | [
58,
4
] | [
60,
30
] | python | en | ['en', 'en', 'en'] | True |
UnboundedQueue.empty | (self) | Returns True if the queue is empty, False otherwise.
There is some subtlety to interpreting this method's return value: see
`issue #63 <https://github.com/python-trio/trio/issues/63>`__.
| Returns True if the queue is empty, False otherwise. | def empty(self):
"""Returns True if the queue is empty, False otherwise.
There is some subtlety to interpreting this method's return value: see
`issue #63 <https://github.com/python-trio/trio/issues/63>`__.
"""
return not self._data | [
"def",
"empty",
"(",
"self",
")",
":",
"return",
"not",
"self",
".",
"_data"
] | [
62,
4
] | [
69,
29
] | python | en | ['en', 'en', 'en'] | True |
UnboundedQueue.put_nowait | (self, obj) | Put an object into the queue, without blocking.
This always succeeds, because the queue is unbounded. We don't provide
a blocking ``put`` method, because it would never need to block.
Args:
obj (object): The object to enqueue.
| Put an object into the queue, without blocking. | def put_nowait(self, obj):
"""Put an object into the queue, without blocking.
This always succeeds, because the queue is unbounded. We don't provide
a blocking ``put`` method, because it would never need to block.
Args:
obj (object): The object to enqueue.
"""
if not self._data:
assert not self._can_get
if self._lot:
self._lot.unpark(count=1)
else:
self._can_get = True
self._data.append(obj) | [
"def",
"put_nowait",
"(",
"self",
",",
"obj",
")",
":",
"if",
"not",
"self",
".",
"_data",
":",
"assert",
"not",
"self",
".",
"_can_get",
"if",
"self",
".",
"_lot",
":",
"self",
".",
"_lot",
".",
"unpark",
"(",
"count",
"=",
"1",
")",
"else",
":",
"self",
".",
"_can_get",
"=",
"True",
"self",
".",
"_data",
".",
"append",
"(",
"obj",
")"
] | [
72,
4
] | [
88,
30
] | python | en | ['en', 'en', 'en'] | True |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.