identifier (string, 1-155) | parameters (string, 2-6.09k) | docstring (string, 11-63.4k) | docstring_summary (string, 0-63.4k) | function (string, 29-99.8k) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (1 class) | docstring_language (string, 2-7) | docstring_language_predictions (string, 18-23) | is_langid_reliable (2 classes)
---|---|---|---|---|---|---|---|---|---|---|---|
EasyProcess.is_alive | (self) |
poll process using :meth:`subprocess.Popen.poll`
:rtype: bool
|
poll process using :meth:`subprocess.Popen.poll` | def is_alive(self):
    '''
    poll process using :meth:`subprocess.Popen.poll`
    :rtype: bool
    '''
    if self.popen:
        return self.popen.poll() is None
    else:
        return False | [
"def",
"is_alive",
"(",
"self",
")",
":",
"if",
"self",
".",
"popen",
":",
"return",
"self",
".",
"popen",
".",
"poll",
"(",
")",
"is",
"None",
"else",
":",
"return",
"False"
] | [
235,
4
] | [
244,
24
] | python | en | ['en', 'error', 'th'] | False |
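The row above documents `EasyProcess.is_alive`, which reports liveness via `subprocess.Popen.poll()`. A minimal usage sketch follows, assuming the `easyprocess` package is installed and a `sleep` binary is on PATH (both assumptions, not part of the row):

```python
# Hedged usage sketch for EasyProcess.is_alive (assumes `pip install easyprocess`
# and a POSIX `sleep` command; neither is guaranteed by the dataset row itself).
from easyprocess import EasyProcess

proc = EasyProcess(["sleep", "2"]).start()
print(proc.is_alive())   # True: popen.poll() is None while the child runs
proc.stop()              # sendstop() + wait()
print(proc.is_alive())   # False: poll() now returns the exit code
```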
EasyProcess.wait | (self, timeout=None) | Wait for command to complete.
Timeout:
- discussion:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout
- implementation: threading
:rtype: self
| Wait for command to complete. | def wait(self, timeout=None):
    """Wait for command to complete.
    Timeout:
    - discussion:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout
    - implementation: threading
    :rtype: self
    """
    if timeout is not None:
        if not self._thread:
            self._thread = threading.Thread(target=self._wait4process)
            self._thread.daemon = 1
            self._thread.start()
    if self._thread:
        self._thread.join(timeout=timeout)
        self.timeout_happened = (
            self.timeout_happened or self._thread.isAlive())
    else:
        # no timeout and no existing thread
        self._wait4process()
    return self | [
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"not",
"None",
":",
"if",
"not",
"self",
".",
"_thread",
":",
"self",
".",
"_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_wait4process",
")",
"self",
".",
"_thread",
".",
"daemon",
"=",
"1",
"self",
".",
"_thread",
".",
"start",
"(",
")",
"if",
"self",
".",
"_thread",
":",
"self",
".",
"_thread",
".",
"join",
"(",
"timeout",
"=",
"timeout",
")",
"self",
".",
"timeout_happened",
"=",
"(",
"self",
".",
"timeout_happened",
"or",
"self",
".",
"_thread",
".",
"isAlive",
"(",
")",
")",
"else",
":",
"# no timeout and no existing thread",
"self",
".",
"_wait4process",
"(",
")",
"return",
"self"
] | [
246,
4
] | [
271,
19
] | python | en | ['en', 'en', 'en'] | True |
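`wait(timeout=...)` above monitors the child from a daemon thread and records expiry in `timeout_happened`. A hedged sketch of the timeout path (same `easyprocess`/`sleep` assumptions as before):

```python
# Sketch of EasyProcess.wait with a timeout: join() returns after the deadline
# and timeout_happened flips to True because the monitor thread is still alive.
from easyprocess import EasyProcess

proc = EasyProcess(["sleep", "10"]).start()
proc.wait(timeout=0.5)        # returns after ~0.5 s; the child keeps running
print(proc.timeout_happened)  # True
proc.stop()                   # clean up the still-running child
```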
EasyProcess.stop | (self) | Kill process and wait for command to complete.
same as:
1. :meth:`sendstop`
2. :meth:`wait`
:rtype: self
| Kill process and wait for command to complete. | def stop(self):
    """Kill process and wait for command to complete.
    same as:
    1. :meth:`sendstop`
    2. :meth:`wait`
    :rtype: self
    """
    return self.sendstop().wait() | [
"def",
"stop",
"(",
"self",
")",
":",
"return",
"self",
".",
"sendstop",
"(",
")",
".",
"wait",
"(",
")"
] | [
325,
4
] | [
335,
37
] | python | en | ['en', 'en', 'en'] | True |
EasyProcess.sendstop | (self) |
Kill process (:meth:`subprocess.Popen.terminate`).
Do not wait for command to complete.
:rtype: self
|
Kill process (:meth:`subprocess.Popen.terminate`).
Do not wait for command to complete. | def sendstop(self):
    '''
    Kill process (:meth:`subprocess.Popen.terminate`).
    Do not wait for command to complete.
    :rtype: self
    '''
    if not self.is_started:
        raise EasyProcessError(self, 'process was not started!')
    log.debug('stopping process (pid=%s cmd="%s")', self.pid, self.cmd)
    if self.popen:
        if self.is_alive():
            log.debug('process is active -> sending SIGTERM')
            try:
                try:
                    self.popen.terminate()
                except AttributeError:
                    os.kill(self.popen.pid, signal.SIGKILL)
            except OSError as oserror:
                log.debug('exception in terminate:%s', oserror)
        else:
            log.debug('process was already stopped')
    else:
        log.debug('process was not started')
    return self | [
"def",
"sendstop",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_started",
":",
"raise",
"EasyProcessError",
"(",
"self",
",",
"'process was not started!'",
")",
"log",
".",
"debug",
"(",
"'stopping process (pid=%s cmd=\"%s\")'",
",",
"self",
".",
"pid",
",",
"self",
".",
"cmd",
")",
"if",
"self",
".",
"popen",
":",
"if",
"self",
".",
"is_alive",
"(",
")",
":",
"log",
".",
"debug",
"(",
"'process is active -> sending SIGTERM'",
")",
"try",
":",
"try",
":",
"self",
".",
"popen",
".",
"terminate",
"(",
")",
"except",
"AttributeError",
":",
"os",
".",
"kill",
"(",
"self",
".",
"popen",
".",
"pid",
",",
"signal",
".",
"SIGKILL",
")",
"except",
"OSError",
"as",
"oserror",
":",
"log",
".",
"debug",
"(",
"'exception in terminate:%s'",
",",
"oserror",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'process was already stopped'",
")",
"else",
":",
"log",
".",
"debug",
"(",
"'process was not started'",
")",
"return",
"self"
] | [
337,
4
] | [
365,
19
] | python | en | ['en', 'error', 'th'] | False |
EasyProcess.sleep | (self, sec) |
sleeping (same as :func:`time.sleep`)
:rtype: self
|
sleeping (same as :func:`time.sleep`) | def sleep(self, sec):
    '''
    sleeping (same as :func:`time.sleep`)
    :rtype: self
    '''
    time.sleep(sec)
    return self | [
"def",
"sleep",
"(",
"self",
",",
"sec",
")",
":",
"time",
".",
"sleep",
"(",
"sec",
")",
"return",
"self"
] | [
367,
4
] | [
375,
19
] | python | en | ['en', 'error', 'th'] | False |
EasyProcess.wrap | (self, func, delay=0) |
returns a function which:
1. start process
2. call func, save result
3. stop process
4. returns result
similar to :keyword:`with` statement
:rtype:
|
returns a function which:
1. start process
2. call func, save result
3. stop process
4. returns result | def wrap(self, func, delay=0):
    '''
    returns a function which:
    1. start process
    2. call func, save result
    3. stop process
    4. returns result
    similar to :keyword:`with` statement
    :rtype:
    '''
    def wrapped():
        self.start()
        if delay:
            self.sleep(delay)
        x = None
        try:
            x = func()
        except OSError as oserror:
            log.debug('OSError exception:%s', oserror)
            self.oserror = oserror
            raise EasyProcessError(self, 'wrap error!')
        finally:
            self.stop()
        return x
    return wrapped | [
"def",
"wrap",
"(",
"self",
",",
"func",
",",
"delay",
"=",
"0",
")",
":",
"def",
"wrapped",
"(",
")",
":",
"self",
".",
"start",
"(",
")",
"if",
"delay",
":",
"self",
".",
"sleep",
"(",
"delay",
")",
"x",
"=",
"None",
"try",
":",
"x",
"=",
"func",
"(",
")",
"except",
"OSError",
"as",
"oserror",
":",
"log",
".",
"debug",
"(",
"'OSError exception:%s'",
",",
"oserror",
")",
"self",
".",
"oserror",
"=",
"oserror",
"raise",
"EasyProcessError",
"(",
"self",
",",
"'wrap error!'",
")",
"finally",
":",
"self",
".",
"stop",
"(",
")",
"return",
"x",
"return",
"wrapped"
] | [
377,
4
] | [
403,
22
] | python | en | ['en', 'error', 'th'] | False |
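`wrap` above builds a start/call/stop wrapper around an arbitrary callable, stopping the process in `finally` even when `func` raises. A hedged sketch; the wrapped command and the callable are illustrative only:

```python
# Sketch of EasyProcess.wrap: start -> optional sleep(delay) -> func() -> stop.
from easyprocess import EasyProcess

def probe():
    return "backend responded"

wrapped = EasyProcess(["sleep", "5"]).wrap(probe, delay=0.1)
print(wrapped())  # "backend responded"; the process is stopped afterwards
```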
EasyProcess.__enter__ | (self) | used by the :keyword:`with` statement | used by the :keyword:`with` statement | def __enter__(self):
    '''used by the :keyword:`with` statement'''
    self.start()
    return self | [
"def",
"__enter__",
"(",
"self",
")",
":",
"self",
".",
"start",
"(",
")",
"return",
"self"
] | [
405,
4
] | [
408,
19
] | python | en | ['en', 'en', 'en'] | True |
EasyProcess.__exit__ | (self, *exc_info) | used by the :keyword:`with` statement | used by the :keyword:`with` statement | def __exit__(self, *exc_info):
    '''used by the :keyword:`with` statement'''
    self.stop() | [
"def",
"__exit__",
"(",
"self",
",",
"*",
"exc_info",
")",
":",
"self",
".",
"stop",
"(",
")"
] | [
410,
4
] | [
412,
19
] | python | en | ['en', 'en', 'en'] | True |
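Together, `__enter__`/`__exit__` above make `EasyProcess` a context manager: `start()` on entry, `stop()` on exit, even if the body raises. A hedged sketch under the same package assumption:

```python
from easyprocess import EasyProcess

with EasyProcess(["sleep", "5"]) as proc:
    print(proc.is_alive())  # True inside the block
print(proc.is_alive())      # False: __exit__ called stop()
```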
get_tagname_or_hash | () | return tagname if exists else hash | return tagname if exists else hash | def get_tagname_or_hash():
    """return tagname if exists else hash"""
    # get hash
    hash_cmd = ['git', 'rev-parse', '--short', 'HEAD']
    hash_ = check_output(hash_cmd).decode('utf-8').strip()
    # get tagname
    tags_cmd = ['git', 'for-each-ref', '--points-at=HEAD', '--count=2', '--sort=-version:refname', '--format=%(refname:short)', 'refs/tags']
    tags = check_output(tags_cmd).decode('utf-8').split()
    if tags:
        return tags[0] + ('+' if len(tags) > 1 else '')
    elif hash_:
        return hash_
    return None | [
"def",
"get_tagname_or_hash",
"(",
")",
":",
"# get hash",
"hash_cmd",
"=",
"[",
"'git'",
",",
"'rev-parse'",
",",
"'--short'",
",",
"'HEAD'",
"]",
"hash_",
"=",
"check_output",
"(",
"hash_cmd",
")",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"strip",
"(",
")",
"# get tagname",
"tags_cmd",
"=",
"[",
"'git'",
",",
"'for-each-ref'",
",",
"'--points-at=HEAD'",
",",
"'--count=2'",
",",
"'--sort=-version:refname'",
",",
"'--format=%(refname:short)'",
",",
"'refs/tags'",
"]",
"tags",
"=",
"check_output",
"(",
"tags_cmd",
")",
".",
"decode",
"(",
"'utf-8'",
")",
".",
"split",
"(",
")",
"if",
"tags",
":",
"return",
"tags",
"[",
"0",
"]",
"+",
"(",
"'+'",
"if",
"len",
"(",
"tags",
")",
">",
"1",
"else",
"''",
")",
"elif",
"hash_",
":",
"return",
"hash_",
"return",
"None"
] | [
9,
0
] | [
23,
15
] | python | en | ['en', 'en', 'en'] | True |
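`get_tagname_or_hash` above shells out to git twice; the trailing `+` marks a HEAD with more than one tag. A hedged sketch of the same two calls run inside a git checkout (all output values are illustrative):

```python
from subprocess import check_output

short_hash = check_output(
    ["git", "rev-parse", "--short", "HEAD"]
).decode("utf-8").strip()                 # e.g. "1a2b3c4"
tags = check_output(
    ["git", "for-each-ref", "--points-at=HEAD", "--count=2",
     "--sort=-version:refname", "--format=%(refname:short)", "refs/tags"]
).decode("utf-8").split()                 # e.g. ["v1.2.0", "v1.1.9"]
# The "+" suffix signals that more than one tag points at HEAD:
print(tags[0] + ("+" if len(tags) > 1 else "") if tags else short_hash)
```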
SparkDFExecutionEngine.dataframe | (self) | If a batch has been loaded, returns a Spark Dataframe containing the data within the loaded batch | If a batch has been loaded, returns a Spark Dataframe containing the data within the loaded batch | def dataframe(self):
    """If a batch has been loaded, returns a Spark Dataframe containing the data within the loaded batch"""
    if not self.active_batch_data:
        raise ValueError(
            "Batch has not been loaded - please run load_batch() to load a batch."
        )
    return self.active_batch_data.dataframe | [
"def",
"dataframe",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"active_batch_data",
":",
"raise",
"ValueError",
"(",
"\"Batch has not been loaded - please run load_batch() to load a batch.\"",
")",
"return",
"self",
".",
"active_batch_data",
".",
"dataframe"
] | [
180,
4
] | [
187,
47
] | python | en | ['en', 'en', 'en'] | True |
SparkDFExecutionEngine.guess_reader_method_from_path | (path) | Based on a given filepath, decides a reader method. Currently supports tsv, csv, and parquet. If none of these
file extensions are used, returns BatchKwargsError stating that it is unable to determine the current path.
Args:
path - A given file path
Returns:
A dictionary entry of format {'reader_method': reader_method}
| Based on a given filepath, decides a reader method. Currently supports tsv, csv, and parquet. If none of these
file extensions are used, returns BatchKwargsError stating that it is unable to determine the current path. | def guess_reader_method_from_path(path):
    """Based on a given filepath, decides a reader method. Currently supports tsv, csv, and parquet. If none of these
    file extensions are used, returns BatchKwargsError stating that it is unable to determine the current path.
    Args:
        path - A given file path
    Returns:
        A dictionary entry of format {'reader_method': reader_method}
    """
    if path.endswith(".csv") or path.endswith(".tsv"):
        return "csv"
    elif path.endswith(".parquet"):
        return "parquet"
    raise BatchKwargsError(
        "Unable to determine reader method from path: %s" % path, {"path": path}
    ) | [
"def",
"guess_reader_method_from_path",
"(",
"path",
")",
":",
"if",
"path",
".",
"endswith",
"(",
"\".csv\"",
")",
"or",
"path",
".",
"endswith",
"(",
"\".tsv\"",
")",
":",
"return",
"\"csv\"",
"elif",
"path",
".",
"endswith",
"(",
"\".parquet\"",
")",
":",
"return",
"\"parquet\"",
"raise",
"BatchKwargsError",
"(",
"\"Unable to determine reader method from path: %s\"",
"%",
"path",
",",
"{",
"\"path\"",
":",
"path",
"}",
")"
] | [
265,
4
] | [
283,
9
] | python | en | ['en', 'en', 'en'] | True |
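`guess_reader_method_from_path` above is pure suffix dispatch, so it can be exercised without a Spark session. A hedged sketch (assumes `great_expectations` is installed and that the import path below is correct for your version):

```python
from great_expectations.execution_engine import SparkDFExecutionEngine

print(SparkDFExecutionEngine.guess_reader_method_from_path("data/taxi.csv"))      # "csv"
print(SparkDFExecutionEngine.guess_reader_method_from_path("data/taxi.tsv"))      # "csv" (TSV goes through the csv reader)
print(SparkDFExecutionEngine.guess_reader_method_from_path("data/taxi.parquet"))  # "parquet"
# any other suffix raises BatchKwargsError
```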
SparkDFExecutionEngine._get_reader_fn | (self, reader, reader_method=None, path=None) | Static helper for providing reader_fn
Args:
reader: the base spark reader to use; this should have had reader_options applied already
reader_method: the name of the reader_method to use, if specified
path (str): the path to use to guess reader_method if it was not specified
Returns:
ReaderMethod to use for the filepath
| Static helper for providing reader_fn | def _get_reader_fn(self, reader, reader_method=None, path=None):
    """Static helper for providing reader_fn
    Args:
        reader: the base spark reader to use; this should have had reader_options applied already
        reader_method: the name of the reader_method to use, if specified
        path (str): the path to use to guess reader_method if it was not specified
    Returns:
        ReaderMethod to use for the filepath
    """
    if reader_method is None and path is None:
        raise BatchKwargsError(
            "Unable to determine spark reader function without reader_method or path.",
            {"reader_method": reader_method},
        )
    if reader_method is None:
        reader_method = self.guess_reader_method_from_path(path=path)
    reader_method_op: str = reader_method.lower()
    try:
        if reader_method_op == "delta":
            return reader.format(reader_method_op).load
        return getattr(reader, reader_method_op)
    except AttributeError:
        raise BatchKwargsError(
            "Unable to find reader_method %s in spark." % reader_method,
            {"reader_method": reader_method},
        ) | [
"def",
"_get_reader_fn",
"(",
"self",
",",
"reader",
",",
"reader_method",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"if",
"reader_method",
"is",
"None",
"and",
"path",
"is",
"None",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to determine spark reader function without reader_method or path.\"",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")",
"if",
"reader_method",
"is",
"None",
":",
"reader_method",
"=",
"self",
".",
"guess_reader_method_from_path",
"(",
"path",
"=",
"path",
")",
"reader_method_op",
":",
"str",
"=",
"reader_method",
".",
"lower",
"(",
")",
"try",
":",
"if",
"reader_method_op",
"==",
"\"delta\"",
":",
"return",
"reader",
".",
"format",
"(",
"reader_method_op",
")",
".",
"load",
"return",
"getattr",
"(",
"reader",
",",
"reader_method_op",
")",
"except",
"AttributeError",
":",
"raise",
"BatchKwargsError",
"(",
"\"Unable to find reader_method %s in spark.\"",
"%",
"reader_method",
",",
"{",
"\"reader_method\"",
":",
"reader_method",
"}",
",",
")"
] | [
285,
4
] | [
315,
13
] | python | en | ['en', 'no', 'en'] | True |
SparkDFExecutionEngine.get_compute_domain | (
self,
domain_kwargs: dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) | Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser)
to obtain and/or query a batch. Returns in the format of a Pandas Series if only a single column is desired,
or otherwise a Data Frame.
Args:
domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
like to be using, or a corresponding string value representing it. String types include "identity",
"column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
class MetricDomainTypes.
accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
A tuple including:
- a DataFrame (the data on which to compute)
- a dictionary of compute_domain_kwargs, describing the DataFrame
- a dictionary of accessor_domain_kwargs, describing any accessors needed to
identify the domain within the compute domain
| Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser)
to obtain and/or query a batch. Returns in the format of a Pandas Series if only a single column is desired,
or otherwise a Data Frame. | def get_compute_domain(
    self,
    domain_kwargs: dict,
    domain_type: Union[str, MetricDomainTypes],
    accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple["pyspark.sql.DataFrame", dict, dict]:
    """Uses a given batch dictionary and domain kwargs (which include a row condition and a condition parser)
    to obtain and/or query a batch. Returns in the format of a Pandas Series if only a single column is desired,
    or otherwise a Data Frame.
    Args:
        domain_kwargs (dict) - A dictionary consisting of the domain kwargs specifying which data to obtain
        domain_type (str or MetricDomainTypes) - an Enum value indicating which metric domain the user would
            like to be using, or a corresponding string value representing it. String types include "identity",
            "column", "column_pair", "table" and "other". Enum types include capitalized versions of these from the
            class MetricDomainTypes.
        accessor_keys (str iterable) - keys that are part of the compute domain but should be ignored when
            describing the domain and simply transferred with their associated values into accessor_domain_kwargs.
    Returns:
        A tuple including:
          - a DataFrame (the data on which to compute)
          - a dictionary of compute_domain_kwargs, describing the DataFrame
          - a dictionary of accessor_domain_kwargs, describing any accessors needed to
            identify the domain within the compute domain
    """
    # Extracting value from enum if it is given for future computation
    domain_type = MetricDomainTypes(domain_type)
    batch_id = domain_kwargs.get("batch_id")
    if batch_id is None:
        # We allow no batch id specified if there is only one batch
        if self.active_batch_data:
            data = self.active_batch_data.dataframe
        else:
            raise ValidationError(
                "No batch is specified, but could not identify a loaded batch."
            )
    else:
        if batch_id in self.loaded_batch_data_dict:
            data = self.loaded_batch_data_dict[batch_id].dataframe
        else:
            raise ValidationError(f"Unable to find batch with batch_id {batch_id}")
    compute_domain_kwargs = copy.deepcopy(domain_kwargs)
    accessor_domain_kwargs = dict()
    table = domain_kwargs.get("table", None)
    if table:
        raise ValueError(
            "SparkDFExecutionEngine does not currently support multiple named tables."
        )
    row_condition = domain_kwargs.get("row_condition", None)
    if row_condition:
        condition_parser = domain_kwargs.get("condition_parser", None)
        if condition_parser == "spark":
            data = data.filter(row_condition)
        elif condition_parser == "great_expectations__experimental__":
            parsed_condition = parse_condition_to_spark(row_condition)
            data = data.filter(parsed_condition)
        else:
            raise GreatExpectationsError(
                f"unrecognized condition_parser {str(condition_parser)}for Spark execution engine"
            )
    # Warning user if accessor keys are in any domain that is not of type table, will be ignored
    if (
        domain_type != MetricDomainTypes.TABLE
        and accessor_keys is not None
        and len(list(accessor_keys)) > 0
    ):
        logger.warning(
            "Accessor keys ignored since Metric Domain Type is not 'table"
        )
    if domain_type == MetricDomainTypes.TABLE:
        if accessor_keys is not None and len(list(accessor_keys)) > 0:
            for key in accessor_keys:
                accessor_domain_kwargs[key] = compute_domain_kwargs.pop(key)
        if len(compute_domain_kwargs.keys()) > 0:
            # Warn user if kwarg not "normal".
            unexpected_keys: set = set(compute_domain_kwargs.keys()).difference(
                {
                    "batch_id",
                    "table",
                    "row_condition",
                    "condition_parser",
                }
            )
            if len(unexpected_keys) > 0:
                unexpected_keys_str: str = ", ".join(
                    map(lambda element: f'"{element}"', unexpected_keys)
                )
                logger.warning(
                    f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type "{domain_type.value}".'
                )
        return data, compute_domain_kwargs, accessor_domain_kwargs
    # If user has stated they want a column, checking if one is provided, and
    elif domain_type == MetricDomainTypes.COLUMN:
        if "column" in compute_domain_kwargs:
            accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
        else:
            # If column not given
            raise GreatExpectationsError(
                "Column not provided in compute_domain_kwargs"
            )
    # Else, if column pair values requested
    elif domain_type == MetricDomainTypes.COLUMN_PAIR:
        # Ensuring column_A and column_B parameters provided
        if (
            "column_A" in compute_domain_kwargs
            and "column_B" in compute_domain_kwargs
        ):
            accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop(
                "column_A"
            )
            accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop(
                "column_B"
            )
        else:
            raise GreatExpectationsError(
                "column_A or column_B not found within compute_domain_kwargs"
            )
    # Checking if table or identity or other provided, column is not specified. If it is, warning the user
    elif domain_type == MetricDomainTypes.MULTICOLUMN:
        if "column_list" in compute_domain_kwargs:
            # If column_list exists
            accessor_domain_kwargs["column_list"] = compute_domain_kwargs.pop(
                "column_list"
            )
    # Filtering if identity
    elif domain_type == MetricDomainTypes.IDENTITY:
        # If we would like our data to become a single column
        if "column" in compute_domain_kwargs:
            data = data.select(compute_domain_kwargs["column"])
        # If we would like our data to now become a column pair
        elif ("column_A" in compute_domain_kwargs) and (
            "column_B" in compute_domain_kwargs
        ):
            data = data.select(
                compute_domain_kwargs["column_A"], compute_domain_kwargs["column_B"]
            )
        else:
            # If we would like our data to become a multicolumn
            if "column_list" in compute_domain_kwargs:
                data = data.select(compute_domain_kwargs["column_list"])
    return data, compute_domain_kwargs, accessor_domain_kwargs | [
"def",
"get_compute_domain",
"(",
"self",
",",
"domain_kwargs",
":",
"dict",
",",
"domain_type",
":",
"Union",
"[",
"str",
",",
"MetricDomainTypes",
"]",
",",
"accessor_keys",
":",
"Optional",
"[",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Tuple",
"[",
"\"pyspark.sql.DataFrame\"",
",",
"dict",
",",
"dict",
"]",
":",
"# Extracting value from enum if it is given for future computation",
"domain_type",
"=",
"MetricDomainTypes",
"(",
"domain_type",
")",
"batch_id",
"=",
"domain_kwargs",
".",
"get",
"(",
"\"batch_id\"",
")",
"if",
"batch_id",
"is",
"None",
":",
"# We allow no batch id specified if there is only one batch",
"if",
"self",
".",
"active_batch_data",
":",
"data",
"=",
"self",
".",
"active_batch_data",
".",
"dataframe",
"else",
":",
"raise",
"ValidationError",
"(",
"\"No batch is specified, but could not identify a loaded batch.\"",
")",
"else",
":",
"if",
"batch_id",
"in",
"self",
".",
"loaded_batch_data_dict",
":",
"data",
"=",
"self",
".",
"loaded_batch_data_dict",
"[",
"batch_id",
"]",
".",
"dataframe",
"else",
":",
"raise",
"ValidationError",
"(",
"f\"Unable to find batch with batch_id {batch_id}\"",
")",
"compute_domain_kwargs",
"=",
"copy",
".",
"deepcopy",
"(",
"domain_kwargs",
")",
"accessor_domain_kwargs",
"=",
"dict",
"(",
")",
"table",
"=",
"domain_kwargs",
".",
"get",
"(",
"\"table\"",
",",
"None",
")",
"if",
"table",
":",
"raise",
"ValueError",
"(",
"\"SparkDFExecutionEngine does not currently support multiple named tables.\"",
")",
"row_condition",
"=",
"domain_kwargs",
".",
"get",
"(",
"\"row_condition\"",
",",
"None",
")",
"if",
"row_condition",
":",
"condition_parser",
"=",
"domain_kwargs",
".",
"get",
"(",
"\"condition_parser\"",
",",
"None",
")",
"if",
"condition_parser",
"==",
"\"spark\"",
":",
"data",
"=",
"data",
".",
"filter",
"(",
"row_condition",
")",
"elif",
"condition_parser",
"==",
"\"great_expectations__experimental__\"",
":",
"parsed_condition",
"=",
"parse_condition_to_spark",
"(",
"row_condition",
")",
"data",
"=",
"data",
".",
"filter",
"(",
"parsed_condition",
")",
"else",
":",
"raise",
"GreatExpectationsError",
"(",
"f\"unrecognized condition_parser {str(condition_parser)}for Spark execution engine\"",
")",
"# Warning user if accessor keys are in any domain that is not of type table, will be ignored",
"if",
"(",
"domain_type",
"!=",
"MetricDomainTypes",
".",
"TABLE",
"and",
"accessor_keys",
"is",
"not",
"None",
"and",
"len",
"(",
"list",
"(",
"accessor_keys",
")",
")",
">",
"0",
")",
":",
"logger",
".",
"warning",
"(",
"\"Accessor keys ignored since Metric Domain Type is not 'table\"",
")",
"if",
"domain_type",
"==",
"MetricDomainTypes",
".",
"TABLE",
":",
"if",
"accessor_keys",
"is",
"not",
"None",
"and",
"len",
"(",
"list",
"(",
"accessor_keys",
")",
")",
">",
"0",
":",
"for",
"key",
"in",
"accessor_keys",
":",
"accessor_domain_kwargs",
"[",
"key",
"]",
"=",
"compute_domain_kwargs",
".",
"pop",
"(",
"key",
")",
"if",
"len",
"(",
"compute_domain_kwargs",
".",
"keys",
"(",
")",
")",
">",
"0",
":",
"# Warn user if kwarg not \"normal\".",
"unexpected_keys",
":",
"set",
"=",
"set",
"(",
"compute_domain_kwargs",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"{",
"\"batch_id\"",
",",
"\"table\"",
",",
"\"row_condition\"",
",",
"\"condition_parser\"",
",",
"}",
")",
"if",
"len",
"(",
"unexpected_keys",
")",
">",
"0",
":",
"unexpected_keys_str",
":",
"str",
"=",
"\", \"",
".",
"join",
"(",
"map",
"(",
"lambda",
"element",
":",
"f'\"{element}\"'",
",",
"unexpected_keys",
")",
")",
"logger",
".",
"warning",
"(",
"f'Unexpected key(s) {unexpected_keys_str} found in domain_kwargs for domain type \"{domain_type.value}\".'",
")",
"return",
"data",
",",
"compute_domain_kwargs",
",",
"accessor_domain_kwargs",
"# If user has stated they want a column, checking if one is provided, and",
"elif",
"domain_type",
"==",
"MetricDomainTypes",
".",
"COLUMN",
":",
"if",
"\"column\"",
"in",
"compute_domain_kwargs",
":",
"accessor_domain_kwargs",
"[",
"\"column\"",
"]",
"=",
"compute_domain_kwargs",
".",
"pop",
"(",
"\"column\"",
")",
"else",
":",
"# If column not given",
"raise",
"GreatExpectationsError",
"(",
"\"Column not provided in compute_domain_kwargs\"",
")",
"# Else, if column pair values requested",
"elif",
"domain_type",
"==",
"MetricDomainTypes",
".",
"COLUMN_PAIR",
":",
"# Ensuring column_A and column_B parameters provided",
"if",
"(",
"\"column_A\"",
"in",
"compute_domain_kwargs",
"and",
"\"column_B\"",
"in",
"compute_domain_kwargs",
")",
":",
"accessor_domain_kwargs",
"[",
"\"column_A\"",
"]",
"=",
"compute_domain_kwargs",
".",
"pop",
"(",
"\"column_A\"",
")",
"accessor_domain_kwargs",
"[",
"\"column_B\"",
"]",
"=",
"compute_domain_kwargs",
".",
"pop",
"(",
"\"column_B\"",
")",
"else",
":",
"raise",
"GreatExpectationsError",
"(",
"\"column_A or column_B not found within compute_domain_kwargs\"",
")",
"# Checking if table or identity or other provided, column is not specified. If it is, warning the user",
"elif",
"domain_type",
"==",
"MetricDomainTypes",
".",
"MULTICOLUMN",
":",
"if",
"\"column_list\"",
"in",
"compute_domain_kwargs",
":",
"# If column_list exists",
"accessor_domain_kwargs",
"[",
"\"column_list\"",
"]",
"=",
"compute_domain_kwargs",
".",
"pop",
"(",
"\"column_list\"",
")",
"# Filtering if identity",
"elif",
"domain_type",
"==",
"MetricDomainTypes",
".",
"IDENTITY",
":",
"# If we would like our data to become a single column",
"if",
"\"column\"",
"in",
"compute_domain_kwargs",
":",
"data",
"=",
"data",
".",
"select",
"(",
"compute_domain_kwargs",
"[",
"\"column\"",
"]",
")",
"# If we would like our data to now become a column pair",
"elif",
"(",
"\"column_A\"",
"in",
"compute_domain_kwargs",
")",
"and",
"(",
"\"column_B\"",
"in",
"compute_domain_kwargs",
")",
":",
"data",
"=",
"data",
".",
"select",
"(",
"compute_domain_kwargs",
"[",
"\"column_A\"",
"]",
",",
"compute_domain_kwargs",
"[",
"\"column_B\"",
"]",
")",
"else",
":",
"# If we would like our data to become a multicolumn",
"if",
"\"column_list\"",
"in",
"compute_domain_kwargs",
":",
"data",
"=",
"data",
".",
"select",
"(",
"compute_domain_kwargs",
"[",
"\"column_list\"",
"]",
")",
"return",
"data",
",",
"compute_domain_kwargs",
",",
"accessor_domain_kwargs"
] | [
317,
4
] | [
471,
66
] | python | en | ['en', 'en', 'en'] | True |
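For `get_compute_domain` above, a column-typed call moves the column name into `accessor_domain_kwargs` while row conditions filter the returned DataFrame. A hedged sketch; `engine` is an assumed `SparkDFExecutionEngine` with one loaded batch containing `fare` and `passenger_count` columns:

```python
data, compute_kwargs, accessor_kwargs = engine.get_compute_domain(
    domain_kwargs={
        "column": "fare",
        "row_condition": "passenger_count > 0",
        "condition_parser": "spark",
    },
    domain_type="column",  # MetricDomainTypes accepts its string values
)
print(accessor_kwargs)  # {'column': 'fare'} -- popped out of the compute kwargs
```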
SparkDFExecutionEngine.resolve_metric_bundle | (
self,
metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Callable, dict]],
) | For each metric name in the given metric_fn_bundle, finds the domain of the metric and calculates it using a
metric function from the given provider class.
Args:
metric_fn_bundle - A batch containing MetricEdgeKeys and their corresponding functions
metrics (dict) - A dictionary containing metrics and corresponding parameters
Returns:
A dictionary of the collected metrics over their respective domains
| For each metric name in the given metric_fn_bundle, finds the domain of the metric and calculates it using a
metric function from the given provider class. | def resolve_metric_bundle(
    self,
    metric_fn_bundle: Iterable[Tuple[MetricConfiguration, Callable, dict]],
) -> dict:
    """For each metric name in the given metric_fn_bundle, finds the domain of the metric and calculates it using a
    metric function from the given provider class.
    Args:
        metric_fn_bundle - A batch containing MetricEdgeKeys and their corresponding functions
        metrics (dict) - A dictionary containing metrics and corresponding parameters
    Returns:
        A dictionary of the collected metrics over their respective domains
    """
    resolved_metrics = dict()
    aggregates: Dict[Tuple, dict] = dict()
    for (
        metric_to_resolve,
        engine_fn,
        compute_domain_kwargs,
        accessor_domain_kwargs,
        metric_provider_kwargs,
    ) in metric_fn_bundle:
        if not isinstance(compute_domain_kwargs, IDDict):
            compute_domain_kwargs = IDDict(compute_domain_kwargs)
        domain_id = compute_domain_kwargs.to_id()
        if domain_id not in aggregates:
            aggregates[domain_id] = {
                "column_aggregates": [],
                "ids": [],
                "domain_kwargs": compute_domain_kwargs,
            }
        aggregates[domain_id]["column_aggregates"].append(engine_fn)
        aggregates[domain_id]["ids"].append(metric_to_resolve.id)
    for aggregate in aggregates.values():
        compute_domain_kwargs = aggregate["domain_kwargs"]
        df, _, _ = self.get_compute_domain(
            compute_domain_kwargs, domain_type=MetricDomainTypes.IDENTITY.value
        )
        assert len(aggregate["column_aggregates"]) == len(aggregate["ids"])
        condition_ids = []
        aggregate_cols = []
        for idx in range(len(aggregate["column_aggregates"])):
            column_aggregate = aggregate["column_aggregates"][idx]
            aggregate_id = str(uuid.uuid4())
            condition_ids.append(aggregate_id)
            aggregate_cols.append(column_aggregate)
        res = df.agg(*aggregate_cols).collect()
        assert (
            len(res) == 1
        ), "all bundle-computed metrics must be single-value statistics"
        assert len(aggregate["ids"]) == len(
            res[0]
        ), "unexpected number of metrics returned"
        logger.debug(
            f"SparkDFExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}"
        )
        for idx, id in enumerate(aggregate["ids"]):
            resolved_metrics[id] = res[0][idx]
    return resolved_metrics | [
"def",
"resolve_metric_bundle",
"(",
"self",
",",
"metric_fn_bundle",
":",
"Iterable",
"[",
"Tuple",
"[",
"MetricConfiguration",
",",
"Callable",
",",
"dict",
"]",
"]",
",",
")",
"->",
"dict",
":",
"resolved_metrics",
"=",
"dict",
"(",
")",
"aggregates",
":",
"Dict",
"[",
"Tuple",
",",
"dict",
"]",
"=",
"dict",
"(",
")",
"for",
"(",
"metric_to_resolve",
",",
"engine_fn",
",",
"compute_domain_kwargs",
",",
"accessor_domain_kwargs",
",",
"metric_provider_kwargs",
",",
")",
"in",
"metric_fn_bundle",
":",
"if",
"not",
"isinstance",
"(",
"compute_domain_kwargs",
",",
"IDDict",
")",
":",
"compute_domain_kwargs",
"=",
"IDDict",
"(",
"compute_domain_kwargs",
")",
"domain_id",
"=",
"compute_domain_kwargs",
".",
"to_id",
"(",
")",
"if",
"domain_id",
"not",
"in",
"aggregates",
":",
"aggregates",
"[",
"domain_id",
"]",
"=",
"{",
"\"column_aggregates\"",
":",
"[",
"]",
",",
"\"ids\"",
":",
"[",
"]",
",",
"\"domain_kwargs\"",
":",
"compute_domain_kwargs",
",",
"}",
"aggregates",
"[",
"domain_id",
"]",
"[",
"\"column_aggregates\"",
"]",
".",
"append",
"(",
"engine_fn",
")",
"aggregates",
"[",
"domain_id",
"]",
"[",
"\"ids\"",
"]",
".",
"append",
"(",
"metric_to_resolve",
".",
"id",
")",
"for",
"aggregate",
"in",
"aggregates",
".",
"values",
"(",
")",
":",
"compute_domain_kwargs",
"=",
"aggregate",
"[",
"\"domain_kwargs\"",
"]",
"df",
",",
"_",
",",
"_",
"=",
"self",
".",
"get_compute_domain",
"(",
"compute_domain_kwargs",
",",
"domain_type",
"=",
"MetricDomainTypes",
".",
"IDENTITY",
".",
"value",
")",
"assert",
"len",
"(",
"aggregate",
"[",
"\"column_aggregates\"",
"]",
")",
"==",
"len",
"(",
"aggregate",
"[",
"\"ids\"",
"]",
")",
"condition_ids",
"=",
"[",
"]",
"aggregate_cols",
"=",
"[",
"]",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"aggregate",
"[",
"\"column_aggregates\"",
"]",
")",
")",
":",
"column_aggregate",
"=",
"aggregate",
"[",
"\"column_aggregates\"",
"]",
"[",
"idx",
"]",
"aggregate_id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"condition_ids",
".",
"append",
"(",
"aggregate_id",
")",
"aggregate_cols",
".",
"append",
"(",
"column_aggregate",
")",
"res",
"=",
"df",
".",
"agg",
"(",
"*",
"aggregate_cols",
")",
".",
"collect",
"(",
")",
"assert",
"(",
"len",
"(",
"res",
")",
"==",
"1",
")",
",",
"\"all bundle-computed metrics must be single-value statistics\"",
"assert",
"len",
"(",
"aggregate",
"[",
"\"ids\"",
"]",
")",
"==",
"len",
"(",
"res",
"[",
"0",
"]",
")",
",",
"\"unexpected number of metrics returned\"",
"logger",
".",
"debug",
"(",
"f\"SparkDFExecutionEngine computed {len(res[0])} metrics on domain_id {IDDict(compute_domain_kwargs).to_id()}\"",
")",
"for",
"idx",
",",
"id",
"in",
"enumerate",
"(",
"aggregate",
"[",
"\"ids\"",
"]",
")",
":",
"resolved_metrics",
"[",
"id",
"]",
"=",
"res",
"[",
"0",
"]",
"[",
"idx",
"]",
"return",
"resolved_metrics"
] | [
512,
4
] | [
572,
31
] | python | en | ['en', 'en', 'en'] | True |
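The bundling in `resolve_metric_bundle` above boils down to computing many single-value aggregates over one domain in a single Spark job. A minimal standalone sketch of that core trick (assumes a live `SparkSession` named `spark`):

```python
import pyspark.sql.functions as F

df = spark.createDataFrame([(1,), (2,), (3,)], ["x"])
row = df.agg(F.min("x"), F.max("x"), F.count("x")).collect()[0]
print(list(row))  # [1, 3, 3] -- one pass, three metrics, matched by position
```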
SparkDFExecutionEngine.head | (self, n=5) | Returns dataframe head. Default is 5 | Returns dataframe head. Default is 5 | def head(self, n=5):
    """Returns dataframe head. Default is 5"""
    return self.dataframe.limit(n).toPandas() | [
"def",
"head",
"(",
"self",
",",
"n",
"=",
"5",
")",
":",
"return",
"self",
".",
"dataframe",
".",
"limit",
"(",
"n",
")",
".",
"toPandas",
"(",
")"
] | [
574,
4
] | [
576,
49
] | python | en | ['en', 'et', 'en'] | True |
SparkDFExecutionEngine._split_on_divided_integer | (
df, column_name: str, divisor: int, batch_identifiers: dict
) | Divide the values in the named column by `divisor`, and split on that | Divide the values in the named column by `divisor`, and split on that | def _split_on_divided_integer(
    df, column_name: str, divisor: int, batch_identifiers: dict
):
    """Divide the values in the named column by `divisor`, and split on that"""
    matching_divisor = batch_identifiers[column_name]
    res = (
        df.withColumn(
            "div_temp", (F.col(column_name) / divisor).cast(IntegerType())
        )
        .filter(F.col("div_temp") == matching_divisor)
        .drop("div_temp")
    )
    return res | [
"def",
"_split_on_divided_integer",
"(",
"df",
",",
"column_name",
":",
"str",
",",
"divisor",
":",
"int",
",",
"batch_identifiers",
":",
"dict",
")",
":",
"matching_divisor",
"=",
"batch_identifiers",
"[",
"column_name",
"]",
"res",
"=",
"(",
"df",
".",
"withColumn",
"(",
"\"div_temp\"",
",",
"(",
"F",
".",
"col",
"(",
"column_name",
")",
"/",
"divisor",
")",
".",
"cast",
"(",
"IntegerType",
"(",
")",
")",
")",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"\"div_temp\"",
")",
"==",
"matching_divisor",
")",
".",
"drop",
"(",
"\"div_temp\"",
")",
")",
"return",
"res"
] | [
606,
4
] | [
618,
18
] | python | en | ['en', 'en', 'en'] | True |
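The divided-integer splitter above keeps rows whose value falls in the bucket `[divisor*k, divisor*(k+1))` for the requested `k`. A hedged standalone sketch of the same expression (assumes a `SparkSession` named `spark`):

```python
import pyspark.sql.functions as F
from pyspark.sql.types import IntegerType

df = spark.createDataFrame([(3,), (12,), (17,), (25,)], ["id"])
batch = (
    df.withColumn("div_temp", (F.col("id") / 10).cast(IntegerType()))
    .filter(F.col("div_temp") == 1)
    .drop("div_temp")
)
batch.show()  # keeps 12 and 17: both land in the [10, 20) bucket
```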
SparkDFExecutionEngine._split_on_mod_integer | (df, column_name: str, mod: int, batch_identifiers: dict) | Divide the values in the named column by `divisor`, and split on that | Divide the values in the named column by `divisor`, and split on that | def _split_on_mod_integer(df, column_name: str, mod: int, batch_identifiers: dict):
    """Divide the values in the named column by `divisor`, and split on that"""
    matching_mod_value = batch_identifiers[column_name]
    res = (
        df.withColumn("mod_temp", (F.col(column_name) % mod).cast(IntegerType()))
        .filter(F.col("mod_temp") == matching_mod_value)
        .drop("mod_temp")
    )
    return res | [
"def",
"_split_on_mod_integer",
"(",
"df",
",",
"column_name",
":",
"str",
",",
"mod",
":",
"int",
",",
"batch_identifiers",
":",
"dict",
")",
":",
"matching_mod_value",
"=",
"batch_identifiers",
"[",
"column_name",
"]",
"res",
"=",
"(",
"df",
".",
"withColumn",
"(",
"\"mod_temp\"",
",",
"(",
"F",
".",
"col",
"(",
"column_name",
")",
"%",
"mod",
")",
".",
"cast",
"(",
"IntegerType",
"(",
")",
")",
")",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"\"mod_temp\"",
")",
"==",
"matching_mod_value",
")",
".",
"drop",
"(",
"\"mod_temp\"",
")",
")",
"return",
"res"
] | [
621,
4
] | [
629,
18
] | python | en | ['en', 'en', 'en'] | True |
SparkDFExecutionEngine._split_on_multi_column_values | (df, column_names: list, batch_identifiers: dict) | Split on the joint values in the named columns | Split on the joint values in the named columns | def _split_on_multi_column_values(df, column_names: list, batch_identifiers: dict):
    """Split on the joint values in the named columns"""
    for column_name in column_names:
        value = batch_identifiers.get(column_name)
        if not value:
            raise ValueError(
                f"In order for SparkDFExecutionEngine to `_split_on_multi_column_values`, "
                f"all values in column_names must also exist in batch_identifiers. "
                f"{column_name} was not found in batch_identifiers."
            )
        df = df.filter(F.col(column_name) == value)
    return df | [
"def",
"_split_on_multi_column_values",
"(",
"df",
",",
"column_names",
":",
"list",
",",
"batch_identifiers",
":",
"dict",
")",
":",
"for",
"column_name",
"in",
"column_names",
":",
"value",
"=",
"batch_identifiers",
".",
"get",
"(",
"column_name",
")",
"if",
"not",
"value",
":",
"raise",
"ValueError",
"(",
"f\"In order for SparkDFExecutionEngine to `_split_on_multi_column_values`, \"",
"f\"all values in column_names must also exist in batch_identifiers. \"",
"f\"{column_name} was not found in batch_identifiers.\"",
")",
"df",
"=",
"df",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"column_name",
")",
"==",
"value",
")",
"return",
"df"
] | [
632,
4
] | [
643,
17
] | python | en | ['en', 'en', 'en'] | True |
SparkDFExecutionEngine._split_on_hashed_column | (
df,
column_name: str,
hash_digits: int,
batch_identifiers: dict,
hash_function_name: str = "sha256",
) | Split on the hashed value of the named column | Split on the hashed value of the named column | def _split_on_hashed_column(
    df,
    column_name: str,
    hash_digits: int,
    batch_identifiers: dict,
    hash_function_name: str = "sha256",
):
    """Split on the hashed value of the named column"""
    try:
        getattr(hashlib, hash_function_name)
    except (TypeError, AttributeError) as e:
        raise (
            ge_exceptions.ExecutionEngineError(
                f"""The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.
                Reference to {hash_function_name} cannot be found."""
            )
        )
    def _encrypt_value(to_encode):
        hash_func = getattr(hashlib, hash_function_name)
        hashed_value = hash_func(to_encode.encode()).hexdigest()[-1 * hash_digits :]
        return hashed_value
    encrypt_udf = F.udf(_encrypt_value, StringType())
    res = (
        df.withColumn("encrypted_value", encrypt_udf(column_name))
        .filter(F.col("encrypted_value") == batch_identifiers["hash_value"])
        .drop("encrypted_value")
    )
    return res | [
"def",
"_split_on_hashed_column",
"(",
"df",
",",
"column_name",
":",
"str",
",",
"hash_digits",
":",
"int",
",",
"batch_identifiers",
":",
"dict",
",",
"hash_function_name",
":",
"str",
"=",
"\"sha256\"",
",",
")",
":",
"try",
":",
"getattr",
"(",
"hashlib",
",",
"hash_function_name",
")",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
"as",
"e",
":",
"raise",
"(",
"ge_exceptions",
".",
"ExecutionEngineError",
"(",
"f\"\"\"The splitting method used with SparkDFExecutionEngine has a reference to an invalid hash_function_name.\n Reference to {hash_function_name} cannot be found.\"\"\"",
")",
")",
"def",
"_encrypt_value",
"(",
"to_encode",
")",
":",
"hash_func",
"=",
"getattr",
"(",
"hashlib",
",",
"hash_function_name",
")",
"hashed_value",
"=",
"hash_func",
"(",
"to_encode",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")",
"[",
"-",
"1",
"*",
"hash_digits",
":",
"]",
"return",
"hashed_value",
"encrypt_udf",
"=",
"F",
".",
"udf",
"(",
"_encrypt_value",
",",
"StringType",
"(",
")",
")",
"res",
"=",
"(",
"df",
".",
"withColumn",
"(",
"\"encrypted_value\"",
",",
"encrypt_udf",
"(",
"column_name",
")",
")",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"\"encrypted_value\"",
")",
"==",
"batch_identifiers",
"[",
"\"hash_value\"",
"]",
")",
".",
"drop",
"(",
"\"encrypted_value\"",
")",
")",
"return",
"res"
] | [
646,
4
] | [
675,
18
] | python | en | ['en', 'en', 'en'] | True |
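The hashing in `_split_on_hashed_column` above can be checked without Spark: hash the value and keep the last `hash_digits` hex characters. A pure-Python sketch of that bucket function:

```python
import hashlib

def bucket(value: str, hash_digits: int = 2, name: str = "sha256") -> str:
    hash_func = getattr(hashlib, name)
    return hash_func(value.encode()).hexdigest()[-1 * hash_digits:]

print(bucket("alice"))  # a 2-hex-char bucket id; rows whose bucket equals
                        # batch_identifiers["hash_value"] are retained
```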
SparkDFExecutionEngine._sample_using_random | (df, p: float = 0.1, seed: int = 1) | Take a random sample of rows, retaining proportion p | Take a random sample of rows, retaining proportion p | def _sample_using_random(df, p: float = 0.1, seed: int = 1):
    """Take a random sample of rows, retaining proportion p"""
    res = (
        df.withColumn("rand", F.rand(seed=seed))
        .filter(F.col("rand") < p)
        .drop("rand")
    )
    return res | [
"def",
"_sample_using_random",
"(",
"df",
",",
"p",
":",
"float",
"=",
"0.1",
",",
"seed",
":",
"int",
"=",
"1",
")",
":",
"res",
"=",
"(",
"df",
".",
"withColumn",
"(",
"\"rand\"",
",",
"F",
".",
"rand",
"(",
"seed",
"=",
"seed",
")",
")",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"\"rand\"",
")",
"<",
"p",
")",
".",
"drop",
"(",
"\"rand\"",
")",
")",
"return",
"res"
] | [
679,
4
] | [
686,
18
] | python | en | ['en', 'en', 'en'] | True |
SparkDFExecutionEngine._sample_using_mod | (
df,
column_name: str,
mod: int,
value: int,
) | Take the mod of named column, and only keep rows that match the given value | Take the mod of named column, and only keep rows that match the given value | def _sample_using_mod(
    df,
    column_name: str,
    mod: int,
    value: int,
):
    """Take the mod of named column, and only keep rows that match the given value"""
    res = (
        df.withColumn("mod_temp", (F.col(column_name) % mod).cast(IntegerType()))
        .filter(F.col("mod_temp") == value)
        .drop("mod_temp")
    )
    return res | [
"def",
"_sample_using_mod",
"(",
"df",
",",
"column_name",
":",
"str",
",",
"mod",
":",
"int",
",",
"value",
":",
"int",
",",
")",
":",
"res",
"=",
"(",
"df",
".",
"withColumn",
"(",
"\"mod_temp\"",
",",
"(",
"F",
".",
"col",
"(",
"column_name",
")",
"%",
"mod",
")",
".",
"cast",
"(",
"IntegerType",
"(",
")",
")",
")",
".",
"filter",
"(",
"F",
".",
"col",
"(",
"\"mod_temp\"",
")",
"==",
"value",
")",
".",
"drop",
"(",
"\"mod_temp\"",
")",
")",
"return",
"res"
] | [
689,
4
] | [
701,
18
] | python | en | ['en', 'en', 'en'] | True |
SparkDFExecutionEngine._sample_using_a_list | (
df,
column_name: str,
value_list: list,
) | Match the values in the named column against value_list, and only keep the matches | Match the values in the named column against value_list, and only keep the matches | def _sample_using_a_list(
    df,
    column_name: str,
    value_list: list,
):
    """Match the values in the named column against value_list, and only keep the matches"""
    return df.where(F.col(column_name).isin(value_list)) | [
"def",
"_sample_using_a_list",
"(",
"df",
",",
"column_name",
":",
"str",
",",
"value_list",
":",
"list",
",",
")",
":",
"return",
"df",
".",
"where",
"(",
"F",
".",
"col",
"(",
"column_name",
")",
".",
"isin",
"(",
"value_list",
")",
")"
] | [
704,
4
] | [
710,
60
] | python | en | ['en', 'en', 'en'] | True |
regex_opt_inner | (strings, open_paren) | Return a regex that matches any string in the sorted list of strings. | Return a regex that matches any string in the sorted list of strings. | def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return make_charset(oneletter)
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren | [
"def",
"regex_opt_inner",
"(",
"strings",
",",
"open_paren",
")",
":",
"close_paren",
"=",
"open_paren",
"and",
"')'",
"or",
"''",
"# print strings, repr(open_paren)",
"if",
"not",
"strings",
":",
"# print '-> nothing left'",
"return",
"''",
"first",
"=",
"strings",
"[",
"0",
"]",
"if",
"len",
"(",
"strings",
")",
"==",
"1",
":",
"# print '-> only 1 string'",
"return",
"open_paren",
"+",
"escape",
"(",
"first",
")",
"+",
"close_paren",
"if",
"not",
"first",
":",
"# print '-> first string empty'",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"strings",
"[",
"1",
":",
"]",
",",
"'(?:'",
")",
"+",
"'?'",
"+",
"close_paren",
"if",
"len",
"(",
"first",
")",
"==",
"1",
":",
"# multiple one-char strings? make a charset",
"oneletter",
"=",
"[",
"]",
"rest",
"=",
"[",
"]",
"for",
"s",
"in",
"strings",
":",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"oneletter",
".",
"append",
"(",
"s",
")",
"else",
":",
"rest",
".",
"append",
"(",
"s",
")",
"if",
"len",
"(",
"oneletter",
")",
">",
"1",
":",
"# do we have more than one oneletter string?",
"if",
"rest",
":",
"# print '-> 1-character + rest'",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"rest",
",",
"''",
")",
"+",
"'|'",
"+",
"make_charset",
"(",
"oneletter",
")",
"+",
"close_paren",
"# print '-> only 1-character'",
"return",
"make_charset",
"(",
"oneletter",
")",
"prefix",
"=",
"commonprefix",
"(",
"strings",
")",
"if",
"prefix",
":",
"plen",
"=",
"len",
"(",
"prefix",
")",
"# we have a prefix for all strings",
"# print '-> prefix:', prefix",
"return",
"open_paren",
"+",
"escape",
"(",
"prefix",
")",
"+",
"regex_opt_inner",
"(",
"[",
"s",
"[",
"plen",
":",
"]",
"for",
"s",
"in",
"strings",
"]",
",",
"'(?:'",
")",
"+",
"close_paren",
"# is there a suffix?",
"strings_rev",
"=",
"[",
"s",
"[",
":",
":",
"-",
"1",
"]",
"for",
"s",
"in",
"strings",
"]",
"suffix",
"=",
"commonprefix",
"(",
"strings_rev",
")",
"if",
"suffix",
":",
"slen",
"=",
"len",
"(",
"suffix",
")",
"# print '-> suffix:', suffix[::-1]",
"return",
"open_paren",
"+",
"regex_opt_inner",
"(",
"sorted",
"(",
"s",
"[",
":",
"-",
"slen",
"]",
"for",
"s",
"in",
"strings",
")",
",",
"'(?:'",
")",
"+",
"escape",
"(",
"suffix",
"[",
":",
":",
"-",
"1",
"]",
")",
"+",
"close_paren",
"# recurse on common 1-string prefixes",
"# print '-> last resort'",
"return",
"open_paren",
"+",
"'|'",
".",
"join",
"(",
"regex_opt_inner",
"(",
"list",
"(",
"group",
"[",
"1",
"]",
")",
",",
"''",
")",
"for",
"group",
"in",
"groupby",
"(",
"strings",
",",
"lambda",
"s",
":",
"s",
"[",
"0",
"]",
"==",
"first",
"[",
"0",
"]",
")",
")",
"+",
"close_paren"
] | [
26,
0
] | [
79,
21
] | python | en | ['en', 'en', 'en'] | True |
regex_opt | (strings, prefix='', suffix='') | Return a compiled regex that matches any string in the given list.
The strings to match must be literal strings, not regexes. They will be
regex-escaped.
*prefix* and *suffix* are pre- and appended to the final regex.
| Return a compiled regex that matches any string in the given list. | def regex_opt(strings, prefix='', suffix=''):
    """Return a compiled regex that matches any string in the given list.
    The strings to match must be literal strings, not regexes. They will be
    regex-escaped.
    *prefix* and *suffix* are pre- and appended to the final regex.
    """
    strings = sorted(strings)
    return prefix + regex_opt_inner(strings, '(') + suffix | [
"def",
"regex_opt",
"(",
"strings",
",",
"prefix",
"=",
"''",
",",
"suffix",
"=",
"''",
")",
":",
"strings",
"=",
"sorted",
"(",
"strings",
")",
"return",
"prefix",
"+",
"regex_opt_inner",
"(",
"strings",
",",
"'('",
")",
"+",
"suffix"
] | [
82,
0
] | [
91,
58
] | python | en | ['en', 'en', 'en'] | True |
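These two regex rows appear to match the `pygments.regexopt` module. Since the exact grouping `regex_opt` emits is an implementation detail, a hedged check asserts only that the optimized pattern accepts exactly the listed words (assumes Pygments is installed and that the import path below is correct):

```python
import re
from pygments.regexopt import regex_opt  # assumed import path

pattern = re.compile(regex_opt(["foo", "for", "fob"], prefix=r"\b", suffix=r"\b"))
assert all(pattern.match(w) for w in ("foo", "for", "fob"))
assert not pattern.match("fox")
```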
datasource | (ctx) | Datasource operations | Datasource operations | def datasource(ctx):
    """Datasource operations"""
    directory: str = toolkit.parse_cli_config_file_location(
        config_file_location=ctx.obj.config_file_location
    ).get("directory")
    context: DataContext = toolkit.load_data_context_with_error_handling(
        directory=directory,
        from_cli_upgrade_command=False,
    )
    # TODO consider moving this all the way up in to the CLIState constructor
    ctx.obj.data_context = context
    usage_stats_prefix = f"cli.datasource.{ctx.invoked_subcommand}"
    toolkit.send_usage_message(
        data_context=context,
        event=f"{usage_stats_prefix}.begin",
        success=True,
    )
    ctx.obj.usage_event_end = f"{usage_stats_prefix}.end" | [
"def",
"datasource",
"(",
"ctx",
")",
":",
"directory",
":",
"str",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"context",
":",
"DataContext",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
",",
")",
"# TODO consider moving this all the way up in to the CLIState constructor",
"ctx",
".",
"obj",
".",
"data_context",
"=",
"context",
"usage_stats_prefix",
"=",
"f\"cli.datasource.{ctx.invoked_subcommand}\"",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"f\"{usage_stats_prefix}.begin\"",
",",
"success",
"=",
"True",
",",
")",
"ctx",
".",
"obj",
".",
"usage_event_end",
"=",
"f\"{usage_stats_prefix}.end\""
] | [
45,
0
] | [
62,
57
] | python | en | ['en', 'en', 'en'] | False |
datasource_new | (ctx, name, jupyter) | Add a new Datasource to the data context. | Add a new Datasource to the data context. | def datasource_new(ctx, name, jupyter):
    """Add a new Datasource to the data context."""
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    try:
        _datasource_new_flow(
            context,
            usage_event_end=usage_event_end,
            datasource_name=name,
            jupyter=jupyter,
        )
    except Exception as e:
        toolkit.exit_with_failure_message_and_stats(
            context=context,
            usage_event=usage_event_end,
            message=f"<red>{e}</red>",
        )
        return | [
"def",
"datasource_new",
"(",
"ctx",
",",
"name",
",",
"jupyter",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"_datasource_new_flow",
"(",
"context",
",",
"usage_event_end",
"=",
"usage_event_end",
",",
"datasource_name",
"=",
"name",
",",
"jupyter",
"=",
"jupyter",
",",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return"
] | [
74,
0
] | [
92,
14
] | python | en | ['en', 'en', 'en'] | True |
delete_datasource | (ctx, datasource) | Delete the datasource specified as an argument | Delete the datasource specified as an argument | def delete_datasource(ctx, datasource):
    """Delete the datasource specified as an argument"""
    context: DataContext = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    if not ctx.obj.assume_yes:
        toolkit.confirm_proceed_or_exit(
            confirm_prompt=f"""\nAre you sure you want to delete the Datasource "{datasource}" (this action is irreversible)?" """,
            continuation_message=f"Datasource `{datasource}` was not deleted.",
            exit_on_no=True,
            data_context=context,
            usage_stats_event=usage_event_end,
        )
    try:
        context.delete_datasource(datasource)
    except ValueError:
        cli_message(f"<red>Datasource {datasource} could not be found.</red>")
        toolkit.send_usage_message(context, event=usage_event_end, success=False)
        sys.exit(1)
    try:
        context.get_datasource(datasource)
    except ValueError:
        cli_message("<green>{}</green>".format("Datasource deleted successfully."))
        toolkit.send_usage_message(context, event=usage_event_end, success=True)
        sys.exit(0) | [
"def",
"delete_datasource",
"(",
"ctx",
",",
"datasource",
")",
":",
"context",
":",
"DataContext",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"if",
"not",
"ctx",
".",
"obj",
".",
"assume_yes",
":",
"toolkit",
".",
"confirm_proceed_or_exit",
"(",
"confirm_prompt",
"=",
"f\"\"\"\\nAre you sure you want to delete the Datasource \"{datasource}\" (this action is irreversible)?\" \"\"\"",
",",
"continuation_message",
"=",
"f\"Datasource `{datasource}` was not deleted.\"",
",",
"exit_on_no",
"=",
"True",
",",
"data_context",
"=",
"context",
",",
"usage_stats_event",
"=",
"usage_event_end",
",",
")",
"try",
":",
"context",
".",
"delete_datasource",
"(",
"datasource",
")",
"except",
"ValueError",
":",
"cli_message",
"(",
"f\"<red>Datasource {datasource} could not be found.</red>\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"context",
".",
"get_datasource",
"(",
"datasource",
")",
"except",
"ValueError",
":",
"cli_message",
"(",
"\"<green>{}</green>\"",
".",
"format",
"(",
"\"Datasource deleted successfully.\"",
")",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
98,
0
] | [
123,
19
] | python | en | ['en', 'en', 'en'] | True |
datasource_list | (ctx) | List known Datasources. | List known Datasources. | def datasource_list(ctx):
    """List known Datasources."""
    context = ctx.obj.data_context
    usage_event_end: str = ctx.obj.usage_event_end
    try:
        datasources = context.list_datasources()
        cli_message(_build_datasource_intro_string(datasources))
        for datasource in datasources:
            cli_message("")
            cli_message_dict(
                {
                    "name": datasource["name"],
                    "class_name": datasource["class_name"],
                }
            )
        toolkit.send_usage_message(
            data_context=context, event=usage_event_end, success=True
        )
    except Exception as e:
        toolkit.exit_with_failure_message_and_stats(
            context=context,
            usage_event=usage_event_end,
            message=f"<red>{e}</red>",
        )
        return | [
"def",
"datasource_list",
"(",
"ctx",
")",
":",
"context",
"=",
"ctx",
".",
"obj",
".",
"data_context",
"usage_event_end",
":",
"str",
"=",
"ctx",
".",
"obj",
".",
"usage_event_end",
"try",
":",
"datasources",
"=",
"context",
".",
"list_datasources",
"(",
")",
"cli_message",
"(",
"_build_datasource_intro_string",
"(",
"datasources",
")",
")",
"for",
"datasource",
"in",
"datasources",
":",
"cli_message",
"(",
"\"\"",
")",
"cli_message_dict",
"(",
"{",
"\"name\"",
":",
"datasource",
"[",
"\"name\"",
"]",
",",
"\"class_name\"",
":",
"datasource",
"[",
"\"class_name\"",
"]",
",",
"}",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"usage_event_end",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event_end",
",",
"message",
"=",
"f\"<red>{e}</red>\"",
",",
")",
"return"
] | [
128,
0
] | [
153,
14
] | python | en | ['en', 'en', 'en'] | True |
sanitize_yaml_and_save_datasource | (
context: DataContext, datasource_yaml: str, overwrite_existing: bool = False
) | A convenience function used in notebooks to help users save secrets. | def sanitize_yaml_and_save_datasource(
    context: DataContext, datasource_yaml: str, overwrite_existing: bool = False
) -> None:
    """A convenience function used in notebooks to help users save secrets."""
    if not datasource_yaml:
        raise ValueError("Please verify the yaml and try again.")
    if not isinstance(datasource_yaml, str):
        raise TypeError("Please pass in a valid yaml string.")
    config = yaml.load(datasource_yaml)
    try:
        datasource_name = config.pop("name")
    except KeyError:
        raise ValueError("The datasource yaml is missing a `name` attribute.")
    if not overwrite_existing and check_if_datasource_name_exists(
        context=context, datasource_name=datasource_name
    ):
        print(
            f'**WARNING** A Datasource named "{datasource_name}" already exists in this Data Context. The Datasource has *not* been saved. Please use a different name or set overwrite_existing=True if you want to overwrite!'
        )
        return
    if "credentials" in config.keys():
        credentials = config["credentials"]
        config["credentials"] = "${" + datasource_name + "}"
        context.save_config_variable(datasource_name, credentials)
    context.add_datasource(name=datasource_name, **config) | [
"def",
"sanitize_yaml_and_save_datasource",
"(",
"context",
":",
"DataContext",
",",
"datasource_yaml",
":",
"str",
",",
"overwrite_existing",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"if",
"not",
"datasource_yaml",
":",
"raise",
"ValueError",
"(",
"\"Please verify the yaml and try again.\"",
")",
"if",
"not",
"isinstance",
"(",
"datasource_yaml",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Please pass in a valid yaml string.\"",
")",
"config",
"=",
"yaml",
".",
"load",
"(",
"datasource_yaml",
")",
"try",
":",
"datasource_name",
"=",
"config",
".",
"pop",
"(",
"\"name\"",
")",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"The datasource yaml is missing a `name` attribute.\"",
")",
"if",
"not",
"overwrite_existing",
"and",
"check_if_datasource_name_exists",
"(",
"context",
"=",
"context",
",",
"datasource_name",
"=",
"datasource_name",
")",
":",
"print",
"(",
"f'**WARNING** A Datasource named \"{datasource_name}\" already exists in this Data Context. The Datasource has *not* been saved. Please use a different name or set overwrite_existing=True if you want to overwrite!'",
")",
"return",
"if",
"\"credentials\"",
"in",
"config",
".",
"keys",
"(",
")",
":",
"credentials",
"=",
"config",
"[",
"\"credentials\"",
"]",
"config",
"[",
"\"credentials\"",
"]",
"=",
"\"${\"",
"+",
"datasource_name",
"+",
"\"}\"",
"context",
".",
"save_config_variable",
"(",
"datasource_name",
",",
"credentials",
")",
"context",
".",
"add_datasource",
"(",
"name",
"=",
"datasource_name",
",",
"*",
"*",
"config",
")"
] | [
763,
0
] | [
787,
58
] | python | en | ['en', 'en', 'en'] | True |
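For context, a minimal sketch of calling this helper from a notebook. It assumes an on-disk Great Expectations project; the datasource name and yaml body are invented for illustration, and the import path follows the CLI module these rows are drawn from.

import great_expectations as ge
from great_expectations.cli.datasource import sanitize_yaml_and_save_datasource

# Load the project's great_expectations.yml from the surrounding directory tree.
context = ge.data_context.DataContext()

# Hypothetical Pandas datasource config; the helper pops the required `name` key.
datasource_yaml = """
name: my_pandas_datasource
class_name: Datasource
execution_engine:
  class_name: PandasExecutionEngine
data_connectors:
  default_runtime_data_connector_name:
    class_name: RuntimeDataConnector
    batch_identifiers:
      - default_identifier_name
"""

sanitize_yaml_and_save_datasource(context, datasource_yaml, overwrite_existing=False)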
check_if_datasource_name_exists | (context: DataContext, datasource_name: str) |
Check if a Datasource name already exists in the on-disk version of the given DataContext
Args:
context: DataContext to check for existing Datasource
datasource_name: name of the proposed Datasource
Returns:
boolean True if datasource name exists in on-disk config, else False
|
Check if a Datasource name already exists in the on-disk version of the given DataContext
Args:
context: DataContext to check for existing Datasource
datasource_name: name of the proposed Datasource
Returns:
boolean True if datasource name exists in on-disk config, else False
| def check_if_datasource_name_exists(context: DataContext, datasource_name: str) -> bool:
"""
Check if a Datasource name already exists in the on-disk version of the given DataContext
Args:
context: DataContext to check for existing Datasource
datasource_name: name of the proposed Datasource
Returns:
boolean True if datasource name exists in on-disk config, else False
"""
# TODO: 20210324 Anthony: Note reading the context from disk is a temporary fix to allow use in a notebook
# after test_yaml_config(). test_yaml_config() should update a copy of the in-memory data context rather than
# making changes directly to the in-memory context.
context_on_disk: DataContext = DataContext(context.root_directory)
return datasource_name in [d["name"] for d in context_on_disk.list_datasources()] | [
"def",
"check_if_datasource_name_exists",
"(",
"context",
":",
"DataContext",
",",
"datasource_name",
":",
"str",
")",
"->",
"bool",
":",
"# TODO: 20210324 Anthony: Note reading the context from disk is a temporary fix to allow use in a notebook",
"# after test_yaml_config(). test_yaml_config() should update a copy of the in-memory data context rather than",
"# making changes directly to the in-memory context.",
"context_on_disk",
":",
"DataContext",
"=",
"DataContext",
"(",
"context",
".",
"root_directory",
")",
"return",
"datasource_name",
"in",
"[",
"d",
"[",
"\"name\"",
"]",
"for",
"d",
"in",
"context_on_disk",
".",
"list_datasources",
"(",
")",
"]"
] | [
804,
0
] | [
819,
85
] | python | en | ['en', 'error', 'th'] | False |
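The heart of this check is a plain membership test over datasource configs; a self-contained sketch with made-up names, minus the disk re-read:

def name_exists(datasources, datasource_name):
    # Same membership test as the function above: compare against each config's "name".
    return datasource_name in [d["name"] for d in datasources]

# Stand-in for context_on_disk.list_datasources().
existing = [{"name": "warehouse"}, {"name": "staging"}]
assert name_exists(existing, "warehouse")
assert not name_exists(existing, "prod")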
BaseDatasourceNewYamlHelper.verify_libraries_installed | (self) | Used in the interactive CLI to help users install dependencies. | Used in the interactive CLI to help users install dependencies. | def verify_libraries_installed(self) -> bool:
"""Used in the interactive CLI to help users install dependencies."""
raise NotImplementedError | [
"def",
"verify_libraries_installed",
"(",
"self",
")",
"->",
"bool",
":",
"raise",
"NotImplementedError"
] | [
229,
4
] | [
231,
33
] | python | en | ['en', 'en', 'en'] | True |
BaseDatasourceNewYamlHelper.create_notebook | (self, context: DataContext) | Create a datasource_new notebook and save it to disk. | Create a datasource_new notebook and save it to disk. | def create_notebook(self, context: DataContext) -> str:
"""Create a datasource_new notebook and save it to disk."""
renderer = self.get_notebook_renderer(context)
notebook_path = os.path.join(
context.root_directory,
context.GE_UNCOMMITTED_DIR,
"datasource_new.ipynb",
)
renderer.render_to_disk(notebook_path)
return notebook_path | [
"def",
"create_notebook",
"(",
"self",
",",
"context",
":",
"DataContext",
")",
"->",
"str",
":",
"renderer",
"=",
"self",
".",
"get_notebook_renderer",
"(",
"context",
")",
"notebook_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"\"datasource_new.ipynb\"",
",",
")",
"renderer",
".",
"render_to_disk",
"(",
"notebook_path",
")",
"return",
"notebook_path"
] | [
233,
4
] | [
242,
28
] | python | en | ['en', 'en', 'en'] | True |
BaseDatasourceNewYamlHelper.get_notebook_renderer | (self, context) | Get a renderer specifically constructed for the datasource type. | Get a renderer specifically constructed for the datasource type. | def get_notebook_renderer(self, context) -> DatasourceNewNotebookRenderer:
"""Get a renderer specifically constructed for the datasource type."""
raise NotImplementedError | [
"def",
"get_notebook_renderer",
"(",
"self",
",",
"context",
")",
"->",
"DatasourceNewNotebookRenderer",
":",
"raise",
"NotImplementedError"
] | [
244,
4
] | [
246,
33
] | python | en | ['en', 'en', 'en'] | True |
BaseDatasourceNewYamlHelper.prompt | (self) | Optional prompt if more information is needed before making a notebook. | Optional prompt if more information is needed before making a notebook. | def prompt(self) -> None:
"""Optional prompt if more information is needed before making a notebook."""
pass | [
"def",
"prompt",
"(",
"self",
")",
"->",
"None",
":",
"pass"
] | [
259,
4
] | [
261,
12
] | python | en | ['en', 'en', 'en'] | True |
BaseDatasourceNewYamlHelper.yaml_snippet | (self) | Override to create the yaml for the notebook. | Override to create the yaml for the notebook. | def yaml_snippet(self) -> str:
"""Override to create the yaml for the notebook."""
raise NotImplementedError | [
"def",
"yaml_snippet",
"(",
"self",
")",
"->",
"str",
":",
"raise",
"NotImplementedError"
] | [
263,
4
] | [
265,
33
] | python | en | ['en', 'en', 'en'] | True |
FilesYamlHelper.yaml_snippet | (self) |
Note the InferredAssetFilesystemDataConnector was selected to get users
to data assets with minimal configuration. Other DataConnectors are
available.
|
Note the InferredAssetFilesystemDataConnector was selected to get users
to data assets with minimal configuration. Other DataConnectors are
available.
| def yaml_snippet(self) -> str:
"""
Note the InferredAssetFilesystemDataConnector was selected to get users
to data assets with minimal configuration. Other DataConnectors are
available.
"""
return f'''f"""
name: {{datasource_name}}
class_name: Datasource
execution_engine:
class_name: {self.class_name}
data_connectors:
default_inferred_data_connector_name:
class_name: InferredAssetFilesystemDataConnector
base_directory: {self.base_path}
default_regex:
group_names:
- data_asset_name
pattern: (.*)
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
"""''' | [
"def",
"yaml_snippet",
"(",
"self",
")",
"->",
"str",
":",
"return",
"f'''f\"\"\"\nname: {{datasource_name}}\nclass_name: Datasource\nexecution_engine:\n class_name: {self.class_name}\ndata_connectors:\n default_inferred_data_connector_name:\n class_name: InferredAssetFilesystemDataConnector\n base_directory: {self.base_path}\n default_regex:\n group_names: \n - data_asset_name\n pattern: (.*)\n default_runtime_data_connector_name:\n class_name: RuntimeDataConnector\n batch_identifiers:\n - default_identifier_name\n\"\"\"'''"
] | [
292,
4
] | [
315,
6
] | python | en | ['en', 'error', 'th'] | False |
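To make the default_regex block concrete: with pattern (.*) and group_names [data_asset_name], every file under base_directory becomes its own data asset, named after the full file name. A small sketch with hypothetical file names:

import re

# Mirrors the connector's default_regex: one capture group, one group name.
pattern = re.compile(r"(.*)")
group_names = ["data_asset_name"]

for filename in ["yellow_tripdata_2019-01.csv", "users.parquet"]:
    match = pattern.match(filename)
    print(dict(zip(group_names, match.groups())))
# -> {'data_asset_name': 'yellow_tripdata_2019-01.csv'}
# -> {'data_asset_name': 'users.parquet'}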
SQLCredentialYamlHelper._yaml_innards | (self) | Override if needed. | Override if needed. | def _yaml_innards(self) -> str:
"""Override if needed."""
return """
credentials:
host: {host}
port: '{port}'
username: {username}
password: {password}
database: {database}""" | [
"def",
"_yaml_innards",
"(",
"self",
")",
"->",
"str",
":",
"return",
"\"\"\"\n credentials:\n host: {host}\n port: '{port}'\n username: {username}\n password: {password}\n database: {database}\"\"\""
] | [
426,
4
] | [
434,
27
] | python | en | ['en', 'nl', 'en'] | True |
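The returned string is a plain str.format template. Rendering it with invented credentials shows the one quoting subtlety: port is wrapped in single quotes so YAML reads it as a string even when an integer is passed in.

template = """
  credentials:
    host: {host}
    port: '{port}'
    username: {username}
    password: {password}
    database: {database}"""

# All values below are placeholders for illustration only.
print(template.format(
    host="db.internal",
    port=5432,
    username="ge_user",
    password="***",
    database="analytics",
))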
check_one_way_stream | (stream_maker, clogged_stream_maker) | Perform a number of generic tests on a custom one-way stream
implementation.
Args:
stream_maker: An async (!) function which returns a connected
(:class:`~trio.abc.SendStream`, :class:`~trio.abc.ReceiveStream`)
pair.
clogged_stream_maker: Either None, or an async function similar to
stream_maker, but with the extra property that the returned stream
is in a state where ``send_all`` and
``wait_send_all_might_not_block`` will block until ``receive_some``
has been called. This allows for more thorough testing of some edge
cases, especially around ``wait_send_all_might_not_block``.
Raises:
AssertionError: if a test fails.
| Perform a number of generic tests on a custom one-way stream
implementation. | async def check_one_way_stream(stream_maker, clogged_stream_maker):
"""Perform a number of generic tests on a custom one-way stream
implementation.
Args:
stream_maker: An async (!) function which returns a connected
(:class:`~trio.abc.SendStream`, :class:`~trio.abc.ReceiveStream`)
pair.
clogged_stream_maker: Either None, or an async function similar to
stream_maker, but with the extra property that the returned stream
is in a state where ``send_all`` and
``wait_send_all_might_not_block`` will block until ``receive_some``
has been called. This allows for more thorough testing of some edge
cases, especially around ``wait_send_all_might_not_block``.
Raises:
AssertionError: if a test fails.
"""
async with _ForceCloseBoth(await stream_maker()) as (s, r):
assert isinstance(s, SendStream)
assert isinstance(r, ReceiveStream)
async def do_send_all(data):
with assert_checkpoints():
assert await s.send_all(data) is None
async def do_receive_some(*args):
with assert_checkpoints():
return await r.receive_some(*args)
async def checked_receive_1(expected):
assert await do_receive_some(1) == expected
async def do_aclose(resource):
with assert_checkpoints():
await resource.aclose()
# Simple sending/receiving
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all, b"x")
nursery.start_soon(checked_receive_1, b"x")
async def send_empty_then_y():
# Streams should tolerate sending b"" without giving it any
# special meaning.
await do_send_all(b"")
await do_send_all(b"y")
async with _core.open_nursery() as nursery:
nursery.start_soon(send_empty_then_y)
nursery.start_soon(checked_receive_1, b"y")
# ---- Checking various argument types ----
# send_all accepts bytearray and memoryview
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all, bytearray(b"1"))
nursery.start_soon(checked_receive_1, b"1")
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all, memoryview(b"2"))
nursery.start_soon(checked_receive_1, b"2")
# max_bytes must be a positive integer
with _assert_raises(ValueError):
await r.receive_some(-1)
with _assert_raises(ValueError):
await r.receive_some(0)
with _assert_raises(TypeError):
await r.receive_some(1.5)
# it can also be missing or None
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all, b"x")
assert await do_receive_some() == b"x"
async with _core.open_nursery() as nursery:
nursery.start_soon(do_send_all, b"x")
assert await do_receive_some(None) == b"x"
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(do_receive_some, 1)
nursery.start_soon(do_receive_some, 1)
# Method always has to exist, and an empty stream with a blocked
# receive_some should *always* allow send_all. (Technically it's legal
# for send_all to wait until receive_some is called to run, though; a
# stream doesn't *have* to have any internal buffering. That's why we
# start a concurrent receive_some call, then cancel it.)
async def simple_check_wait_send_all_might_not_block(scope):
with assert_checkpoints():
await s.wait_send_all_might_not_block()
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(
simple_check_wait_send_all_might_not_block, nursery.cancel_scope
)
nursery.start_soon(do_receive_some, 1)
# closing the r side leads to BrokenResourceError on the s side
# (eventually)
async def expect_broken_stream_on_send():
with _assert_raises(_core.BrokenResourceError):
while True:
await do_send_all(b"x" * 100)
async with _core.open_nursery() as nursery:
nursery.start_soon(expect_broken_stream_on_send)
nursery.start_soon(do_aclose, r)
# once detected, the stream stays broken
with _assert_raises(_core.BrokenResourceError):
await do_send_all(b"x" * 100)
# r closed -> ClosedResourceError on the receive side
with _assert_raises(_core.ClosedResourceError):
await do_receive_some(4096)
# we can close the same stream repeatedly, it's fine
await do_aclose(r)
await do_aclose(r)
# closing the sender side
await do_aclose(s)
# now trying to send raises ClosedResourceError
with _assert_raises(_core.ClosedResourceError):
await do_send_all(b"x" * 100)
# even if it's an empty send
with _assert_raises(_core.ClosedResourceError):
await do_send_all(b"")
# ditto for wait_send_all_might_not_block
with _assert_raises(_core.ClosedResourceError):
with assert_checkpoints():
await s.wait_send_all_might_not_block()
# and again, repeated closing is fine
await do_aclose(s)
await do_aclose(s)
async with _ForceCloseBoth(await stream_maker()) as (s, r):
# if send-then-graceful-close, receiver gets data then b""
async def send_then_close():
await do_send_all(b"y")
await do_aclose(s)
async def receive_send_then_close():
# We want to make sure that if the sender closes the stream before
# we read anything, then we still get all the data. But some
# streams might block on the do_send_all call. So we let the
# sender get as far as it can, then we receive.
await _core.wait_all_tasks_blocked()
await checked_receive_1(b"y")
await checked_receive_1(b"")
await do_aclose(r)
async with _core.open_nursery() as nursery:
nursery.start_soon(send_then_close)
nursery.start_soon(receive_send_then_close)
async with _ForceCloseBoth(await stream_maker()) as (s, r):
await aclose_forcefully(r)
with _assert_raises(_core.BrokenResourceError):
while True:
await do_send_all(b"x" * 100)
with _assert_raises(_core.ClosedResourceError):
await do_receive_some(4096)
async with _ForceCloseBoth(await stream_maker()) as (s, r):
await aclose_forcefully(s)
with _assert_raises(_core.ClosedResourceError):
await do_send_all(b"123")
# after the sender does a forceful close, the receiver might either
# get BrokenResourceError or a clean b""; either is OK. Not OK would be
# if it freezes, or returns data.
try:
await checked_receive_1(b"")
except _core.BrokenResourceError:
pass
# cancelled aclose still closes
async with _ForceCloseBoth(await stream_maker()) as (s, r):
with _core.CancelScope() as scope:
scope.cancel()
await r.aclose()
with _core.CancelScope() as scope:
scope.cancel()
await s.aclose()
with _assert_raises(_core.ClosedResourceError):
await do_send_all(b"123")
with _assert_raises(_core.ClosedResourceError):
await do_receive_some(4096)
# Check that we can still gracefully close a stream after an operation has
# been cancelled. This can be challenging if cancellation can leave the
# stream internals in an inconsistent state, e.g. for
# SSLStream. Unfortunately this test isn't very thorough; the really
# challenging case for something like SSLStream is it gets cancelled
# *while* it's sending data on the underlying, not before. But testing
# that requires some special-case handling of the particular stream setup;
# we can't do it here. Maybe we could do a bit better with
# https://github.com/python-trio/trio/issues/77
async with _ForceCloseBoth(await stream_maker()) as (s, r):
async def expect_cancelled(afn, *args):
with _assert_raises(_core.Cancelled):
await afn(*args)
with _core.CancelScope() as scope:
scope.cancel()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect_cancelled, do_send_all, b"x")
nursery.start_soon(expect_cancelled, do_receive_some, 1)
async with _core.open_nursery() as nursery:
nursery.start_soon(do_aclose, s)
nursery.start_soon(do_aclose, r)
# Check that if a task is blocked in receive_some, then closing the
# receive stream causes it to wake up.
async with _ForceCloseBoth(await stream_maker()) as (s, r):
async def receive_expecting_closed():
with _assert_raises(_core.ClosedResourceError):
await r.receive_some(10)
async with _core.open_nursery() as nursery:
nursery.start_soon(receive_expecting_closed)
await _core.wait_all_tasks_blocked()
await aclose_forcefully(r)
# check wait_send_all_might_not_block, if we can
if clogged_stream_maker is not None:
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
record = []
async def waiter(cancel_scope):
record.append("waiter sleeping")
with assert_checkpoints():
await s.wait_send_all_might_not_block()
record.append("waiter wokeup")
cancel_scope.cancel()
async def receiver():
# give wait_send_all_might_not_block a chance to block
await _core.wait_all_tasks_blocked()
record.append("receiver starting")
while True:
await r.receive_some(16834)
async with _core.open_nursery() as nursery:
nursery.start_soon(waiter, nursery.cancel_scope)
await _core.wait_all_tasks_blocked()
nursery.start_soon(receiver)
assert record == [
"waiter sleeping",
"receiver starting",
"waiter wokeup",
]
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
# simultaneous wait_send_all_might_not_block fails
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(s.wait_send_all_might_not_block)
nursery.start_soon(s.wait_send_all_might_not_block)
# and simultaneous send_all and wait_send_all_might_not_block (NB
# this test might destroy the stream b/c we end up cancelling
# send_all and e.g. SSLStream can't handle that, so we have to
# recreate afterwards)
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(s.wait_send_all_might_not_block)
nursery.start_soon(s.send_all, b"123")
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
# send_all and send_all blocked simultaneously should also raise
# (but again this might destroy the stream)
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(s.send_all, b"123")
nursery.start_soon(s.send_all, b"123")
# closing the receiver causes wait_send_all_might_not_block to return
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
async def sender():
try:
with assert_checkpoints():
await s.wait_send_all_might_not_block()
except _core.BrokenResourceError:
pass
async def receiver():
await _core.wait_all_tasks_blocked()
await aclose_forcefully(r)
async with _core.open_nursery() as nursery:
nursery.start_soon(sender)
nursery.start_soon(receiver)
# and again with the call starting after the close
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
await aclose_forcefully(r)
try:
with assert_checkpoints():
await s.wait_send_all_might_not_block()
except _core.BrokenResourceError:
pass
# Check that if a task is blocked in a send-side method, then closing
# the send stream causes it to wake up.
async def close_soon(s):
await _core.wait_all_tasks_blocked()
await aclose_forcefully(s)
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
async with _core.open_nursery() as nursery:
nursery.start_soon(close_soon, s)
with _assert_raises(_core.ClosedResourceError):
await s.send_all(b"xyzzy")
async with _ForceCloseBoth(await clogged_stream_maker()) as (s, r):
async with _core.open_nursery() as nursery:
nursery.start_soon(close_soon, s)
with _assert_raises(_core.ClosedResourceError):
await s.wait_send_all_might_not_block() | [
"async",
"def",
"check_one_way_stream",
"(",
"stream_maker",
",",
"clogged_stream_maker",
")",
":",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"assert",
"isinstance",
"(",
"s",
",",
"SendStream",
")",
"assert",
"isinstance",
"(",
"r",
",",
"ReceiveStream",
")",
"async",
"def",
"do_send_all",
"(",
"data",
")",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"assert",
"await",
"s",
".",
"send_all",
"(",
"data",
")",
"is",
"None",
"async",
"def",
"do_receive_some",
"(",
"*",
"args",
")",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"return",
"await",
"r",
".",
"receive_some",
"(",
"*",
"args",
")",
"async",
"def",
"checked_receive_1",
"(",
"expected",
")",
":",
"assert",
"await",
"do_receive_some",
"(",
"1",
")",
"==",
"expected",
"async",
"def",
"do_aclose",
"(",
"resource",
")",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"resource",
".",
"aclose",
"(",
")",
"# Simple sending/receiving",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_send_all",
",",
"b\"x\"",
")",
"nursery",
".",
"start_soon",
"(",
"checked_receive_1",
",",
"b\"x\"",
")",
"async",
"def",
"send_empty_then_y",
"(",
")",
":",
"# Streams should tolerate sending b\"\" without giving it any",
"# special meaning.",
"await",
"do_send_all",
"(",
"b\"\"",
")",
"await",
"do_send_all",
"(",
"b\"y\"",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"send_empty_then_y",
")",
"nursery",
".",
"start_soon",
"(",
"checked_receive_1",
",",
"b\"y\"",
")",
"# ---- Checking various argument types ----",
"# send_all accepts bytearray and memoryview",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_send_all",
",",
"bytearray",
"(",
"b\"1\"",
")",
")",
"nursery",
".",
"start_soon",
"(",
"checked_receive_1",
",",
"b\"1\"",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_send_all",
",",
"memoryview",
"(",
"b\"2\"",
")",
")",
"nursery",
".",
"start_soon",
"(",
"checked_receive_1",
",",
"b\"2\"",
")",
"# max_bytes must be a positive integer",
"with",
"_assert_raises",
"(",
"ValueError",
")",
":",
"await",
"r",
".",
"receive_some",
"(",
"-",
"1",
")",
"with",
"_assert_raises",
"(",
"ValueError",
")",
":",
"await",
"r",
".",
"receive_some",
"(",
"0",
")",
"with",
"_assert_raises",
"(",
"TypeError",
")",
":",
"await",
"r",
".",
"receive_some",
"(",
"1.5",
")",
"# it can also be missing or None",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_send_all",
",",
"b\"x\"",
")",
"assert",
"await",
"do_receive_some",
"(",
")",
"==",
"b\"x\"",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_send_all",
",",
"b\"x\"",
")",
"assert",
"await",
"do_receive_some",
"(",
"None",
")",
"==",
"b\"x\"",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_receive_some",
",",
"1",
")",
"nursery",
".",
"start_soon",
"(",
"do_receive_some",
",",
"1",
")",
"# Method always has to exist, and an empty stream with a blocked",
"# receive_some should *always* allow send_all. (Technically it's legal",
"# for send_all to wait until receive_some is called to run, though; a",
"# stream doesn't *have* to have any internal buffering. That's why we",
"# start a concurrent receive_some call, then cancel it.)",
"async",
"def",
"simple_check_wait_send_all_might_not_block",
"(",
"scope",
")",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")",
"scope",
".",
"cancel",
"(",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"simple_check_wait_send_all_might_not_block",
",",
"nursery",
".",
"cancel_scope",
")",
"nursery",
".",
"start_soon",
"(",
"do_receive_some",
",",
"1",
")",
"# closing the r side leads to BrokenResourceError on the s side",
"# (eventually)",
"async",
"def",
"expect_broken_stream_on_send",
"(",
")",
":",
"with",
"_assert_raises",
"(",
"_core",
".",
"BrokenResourceError",
")",
":",
"while",
"True",
":",
"await",
"do_send_all",
"(",
"b\"x\"",
"*",
"100",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"expect_broken_stream_on_send",
")",
"nursery",
".",
"start_soon",
"(",
"do_aclose",
",",
"r",
")",
"# once detected, the stream stays broken",
"with",
"_assert_raises",
"(",
"_core",
".",
"BrokenResourceError",
")",
":",
"await",
"do_send_all",
"(",
"b\"x\"",
"*",
"100",
")",
"# r closed -> ClosedResourceError on the receive side",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_receive_some",
"(",
"4096",
")",
"# we can close the same stream repeatedly, it's fine",
"await",
"do_aclose",
"(",
"r",
")",
"await",
"do_aclose",
"(",
"r",
")",
"# closing the sender side",
"await",
"do_aclose",
"(",
"s",
")",
"# now trying to send raises ClosedResourceError",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_send_all",
"(",
"b\"x\"",
"*",
"100",
")",
"# even if it's an empty send",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_send_all",
"(",
"b\"\"",
")",
"# ditto for wait_send_all_might_not_block",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")",
"# and again, repeated closing is fine",
"await",
"do_aclose",
"(",
"s",
")",
"await",
"do_aclose",
"(",
"s",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"# if send-then-graceful-close, receiver gets data then b\"\"",
"async",
"def",
"send_then_close",
"(",
")",
":",
"await",
"do_send_all",
"(",
"b\"y\"",
")",
"await",
"do_aclose",
"(",
"s",
")",
"async",
"def",
"receive_send_then_close",
"(",
")",
":",
"# We want to make sure that if the sender closes the stream before",
"# we read anything, then we still get all the data. But some",
"# streams might block on the do_send_all call. So we let the",
"# sender get as far as it can, then we receive.",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"await",
"checked_receive_1",
"(",
"b\"y\"",
")",
"await",
"checked_receive_1",
"(",
"b\"\"",
")",
"await",
"do_aclose",
"(",
"r",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"send_then_close",
")",
"nursery",
".",
"start_soon",
"(",
"receive_send_then_close",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"await",
"aclose_forcefully",
"(",
"r",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"BrokenResourceError",
")",
":",
"while",
"True",
":",
"await",
"do_send_all",
"(",
"b\"x\"",
"*",
"100",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_receive_some",
"(",
"4096",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"await",
"aclose_forcefully",
"(",
"s",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_send_all",
"(",
"b\"123\"",
")",
"# after the sender does a forceful close, the receiver might either",
"# get BrokenResourceError or a clean b\"\"; either is OK. Not OK would be",
"# if it freezes, or returns data.",
"try",
":",
"await",
"checked_receive_1",
"(",
"b\"\"",
")",
"except",
"_core",
".",
"BrokenResourceError",
":",
"pass",
"# cancelled aclose still closes",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"with",
"_core",
".",
"CancelScope",
"(",
")",
"as",
"scope",
":",
"scope",
".",
"cancel",
"(",
")",
"await",
"r",
".",
"aclose",
"(",
")",
"with",
"_core",
".",
"CancelScope",
"(",
")",
"as",
"scope",
":",
"scope",
".",
"cancel",
"(",
")",
"await",
"s",
".",
"aclose",
"(",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_send_all",
"(",
"b\"123\"",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"do_receive_some",
"(",
"4096",
")",
"# Check that we can still gracefully close a stream after an operation has",
"# been cancelled. This can be challenging if cancellation can leave the",
"# stream internals in an inconsistent state, e.g. for",
"# SSLStream. Unfortunately this test isn't very thorough; the really",
"# challenging case for something like SSLStream is it gets cancelled",
"# *while* it's sending data on the underlying, not before. But testing",
"# that requires some special-case handling of the particular stream setup;",
"# we can't do it here. Maybe we could do a bit better with",
"# https://github.com/python-trio/trio/issues/77",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"async",
"def",
"expect_cancelled",
"(",
"afn",
",",
"*",
"args",
")",
":",
"with",
"_assert_raises",
"(",
"_core",
".",
"Cancelled",
")",
":",
"await",
"afn",
"(",
"*",
"args",
")",
"with",
"_core",
".",
"CancelScope",
"(",
")",
"as",
"scope",
":",
"scope",
".",
"cancel",
"(",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"expect_cancelled",
",",
"do_send_all",
",",
"b\"x\"",
")",
"nursery",
".",
"start_soon",
"(",
"expect_cancelled",
",",
"do_receive_some",
",",
"1",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"do_aclose",
",",
"s",
")",
"nursery",
".",
"start_soon",
"(",
"do_aclose",
",",
"r",
")",
"# Check that if a task is blocked in receive_some, then closing the",
"# receive stream causes it to wake up.",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"async",
"def",
"receive_expecting_closed",
"(",
")",
":",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"r",
".",
"receive_some",
"(",
"10",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"receive_expecting_closed",
")",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"await",
"aclose_forcefully",
"(",
"r",
")",
"# check wait_send_all_might_not_block, if we can",
"if",
"clogged_stream_maker",
"is",
"not",
"None",
":",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"record",
"=",
"[",
"]",
"async",
"def",
"waiter",
"(",
"cancel_scope",
")",
":",
"record",
".",
"append",
"(",
"\"waiter sleeping\"",
")",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")",
"record",
".",
"append",
"(",
"\"waiter wokeup\"",
")",
"cancel_scope",
".",
"cancel",
"(",
")",
"async",
"def",
"receiver",
"(",
")",
":",
"# give wait_send_all_might_not_block a chance to block",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"record",
".",
"append",
"(",
"\"receiver starting\"",
")",
"while",
"True",
":",
"await",
"r",
".",
"receive_some",
"(",
"16834",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"waiter",
",",
"nursery",
".",
"cancel_scope",
")",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"nursery",
".",
"start_soon",
"(",
"receiver",
")",
"assert",
"record",
"==",
"[",
"\"waiter sleeping\"",
",",
"\"receiver starting\"",
",",
"\"waiter wokeup\"",
",",
"]",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"# simultaneous wait_send_all_might_not_block fails",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"wait_send_all_might_not_block",
")",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"wait_send_all_might_not_block",
")",
"# and simultaneous send_all and wait_send_all_might_not_block (NB",
"# this test might destroy the stream b/c we end up cancelling",
"# send_all and e.g. SSLStream can't handle that, so we have to",
"# recreate afterwards)",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"wait_send_all_might_not_block",
")",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"send_all",
",",
"b\"123\"",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"# send_all and send_all blocked simultaneously should also raise",
"# (but again this might destroy the stream)",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"send_all",
",",
"b\"123\"",
")",
"nursery",
".",
"start_soon",
"(",
"s",
".",
"send_all",
",",
"b\"123\"",
")",
"# closing the receiver causes wait_send_all_might_not_block to return",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"async",
"def",
"sender",
"(",
")",
":",
"try",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")",
"except",
"_core",
".",
"BrokenResourceError",
":",
"pass",
"async",
"def",
"receiver",
"(",
")",
":",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"await",
"aclose_forcefully",
"(",
"r",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"sender",
")",
"nursery",
".",
"start_soon",
"(",
"receiver",
")",
"# and again with the call starting after the close",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"await",
"aclose_forcefully",
"(",
"r",
")",
"try",
":",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")",
"except",
"_core",
".",
"BrokenResourceError",
":",
"pass",
"# Check that if a task is blocked in a send-side method, then closing",
"# the send stream causes it to wake up.",
"async",
"def",
"close_soon",
"(",
"s",
")",
":",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"await",
"aclose_forcefully",
"(",
"s",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"close_soon",
",",
"s",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"s",
".",
"send_all",
"(",
"b\"xyzzy\"",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s",
",",
"r",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"close_soon",
",",
"s",
")",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"s",
".",
"wait_send_all_might_not_block",
"(",
")"
] | [
36,
0
] | [
374,
59
] | python | en | ['en', 'en', 'en'] | True |
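A sketch of driving this checker with trio's built-in memory streams. It assumes memory_stream_one_way_pair is available from trio.testing (it returns a connected send/receive pair); passing None as the second argument skips the clogged-stream cases, which the signature explicitly allows.

import trio
from trio.testing import check_one_way_stream, memory_stream_one_way_pair

async def maker():
    # Wrap the synchronous constructor in the async maker the checker expects.
    return memory_stream_one_way_pair()

trio.run(check_one_way_stream, maker, None)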
check_two_way_stream | (stream_maker, clogged_stream_maker) | Perform a number of generic tests on a custom two-way stream
implementation.
This is similar to :func:`check_one_way_stream`, except that the maker
functions are expected to return objects implementing the
:class:`~trio.abc.Stream` interface.
This function tests a *superset* of what :func:`check_one_way_stream`
checks – if you call this, then you don't need to also call
:func:`check_one_way_stream`.
| Perform a number of generic tests on a custom two-way stream
implementation. | async def check_two_way_stream(stream_maker, clogged_stream_maker):
"""Perform a number of generic tests on a custom two-way stream
implementation.
This is similar to :func:`check_one_way_stream`, except that the maker
functions are expected to return objects implementing the
:class:`~trio.abc.Stream` interface.
This function tests a *superset* of what :func:`check_one_way_stream`
checks – if you call this, then you don't need to also call
:func:`check_one_way_stream`.
"""
await check_one_way_stream(stream_maker, clogged_stream_maker)
async def flipped_stream_maker():
return reversed(await stream_maker())
if clogged_stream_maker is not None:
async def flipped_clogged_stream_maker():
return reversed(await clogged_stream_maker())
else:
flipped_clogged_stream_maker = None
await check_one_way_stream(flipped_stream_maker, flipped_clogged_stream_maker)
async with _ForceCloseBoth(await stream_maker()) as (s1, s2):
assert isinstance(s1, Stream)
assert isinstance(s2, Stream)
# Duplex can be a bit tricky, might as well check it as well
DUPLEX_TEST_SIZE = 2 ** 20
CHUNK_SIZE_MAX = 2 ** 14
r = random.Random(0)
i = r.getrandbits(8 * DUPLEX_TEST_SIZE)
test_data = i.to_bytes(DUPLEX_TEST_SIZE, "little")
async def sender(s, data, seed):
r = random.Random(seed)
m = memoryview(data)
while m:
chunk_size = r.randint(1, CHUNK_SIZE_MAX)
await s.send_all(m[:chunk_size])
m = m[chunk_size:]
async def receiver(s, data, seed):
r = random.Random(seed)
got = bytearray()
while len(got) < len(data):
chunk = await s.receive_some(r.randint(1, CHUNK_SIZE_MAX))
assert chunk
got += chunk
assert got == data
async with _core.open_nursery() as nursery:
nursery.start_soon(sender, s1, test_data, 0)
nursery.start_soon(sender, s2, test_data[::-1], 1)
nursery.start_soon(receiver, s1, test_data[::-1], 2)
nursery.start_soon(receiver, s2, test_data, 3)
async def expect_receive_some_empty():
assert await s2.receive_some(10) == b""
await s2.aclose()
async with _core.open_nursery() as nursery:
nursery.start_soon(expect_receive_some_empty)
nursery.start_soon(s1.aclose) | [
"async",
"def",
"check_two_way_stream",
"(",
"stream_maker",
",",
"clogged_stream_maker",
")",
":",
"await",
"check_one_way_stream",
"(",
"stream_maker",
",",
"clogged_stream_maker",
")",
"async",
"def",
"flipped_stream_maker",
"(",
")",
":",
"return",
"reversed",
"(",
"await",
"stream_maker",
"(",
")",
")",
"if",
"clogged_stream_maker",
"is",
"not",
"None",
":",
"async",
"def",
"flipped_clogged_stream_maker",
"(",
")",
":",
"return",
"reversed",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"else",
":",
"flipped_clogged_stream_maker",
"=",
"None",
"await",
"check_one_way_stream",
"(",
"flipped_stream_maker",
",",
"flipped_clogged_stream_maker",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s1",
",",
"s2",
")",
":",
"assert",
"isinstance",
"(",
"s1",
",",
"Stream",
")",
"assert",
"isinstance",
"(",
"s2",
",",
"Stream",
")",
"# Duplex can be a bit tricky, might as well check it as well",
"DUPLEX_TEST_SIZE",
"=",
"2",
"**",
"20",
"CHUNK_SIZE_MAX",
"=",
"2",
"**",
"14",
"r",
"=",
"random",
".",
"Random",
"(",
"0",
")",
"i",
"=",
"r",
".",
"getrandbits",
"(",
"8",
"*",
"DUPLEX_TEST_SIZE",
")",
"test_data",
"=",
"i",
".",
"to_bytes",
"(",
"DUPLEX_TEST_SIZE",
",",
"\"little\"",
")",
"async",
"def",
"sender",
"(",
"s",
",",
"data",
",",
"seed",
")",
":",
"r",
"=",
"random",
".",
"Random",
"(",
"seed",
")",
"m",
"=",
"memoryview",
"(",
"data",
")",
"while",
"m",
":",
"chunk_size",
"=",
"r",
".",
"randint",
"(",
"1",
",",
"CHUNK_SIZE_MAX",
")",
"await",
"s",
".",
"send_all",
"(",
"m",
"[",
":",
"chunk_size",
"]",
")",
"m",
"=",
"m",
"[",
"chunk_size",
":",
"]",
"async",
"def",
"receiver",
"(",
"s",
",",
"data",
",",
"seed",
")",
":",
"r",
"=",
"random",
".",
"Random",
"(",
"seed",
")",
"got",
"=",
"bytearray",
"(",
")",
"while",
"len",
"(",
"got",
")",
"<",
"len",
"(",
"data",
")",
":",
"chunk",
"=",
"await",
"s",
".",
"receive_some",
"(",
"r",
".",
"randint",
"(",
"1",
",",
"CHUNK_SIZE_MAX",
")",
")",
"assert",
"chunk",
"got",
"+=",
"chunk",
"assert",
"got",
"==",
"data",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"sender",
",",
"s1",
",",
"test_data",
",",
"0",
")",
"nursery",
".",
"start_soon",
"(",
"sender",
",",
"s2",
",",
"test_data",
"[",
":",
":",
"-",
"1",
"]",
",",
"1",
")",
"nursery",
".",
"start_soon",
"(",
"receiver",
",",
"s1",
",",
"test_data",
"[",
":",
":",
"-",
"1",
"]",
",",
"2",
")",
"nursery",
".",
"start_soon",
"(",
"receiver",
",",
"s2",
",",
"test_data",
",",
"3",
")",
"async",
"def",
"expect_receive_some_empty",
"(",
")",
":",
"assert",
"await",
"s2",
".",
"receive_some",
"(",
"10",
")",
"==",
"b\"\"",
"await",
"s2",
".",
"aclose",
"(",
")",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"expect_receive_some_empty",
")",
"nursery",
".",
"start_soon",
"(",
"s1",
".",
"aclose",
")"
] | [
377,
0
] | [
445,
41
] | python | en | ['en', 'en', 'en'] | True |
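For the clogged maker, trio's lockstep streams are a natural fit, assuming lockstep_stream_pair is available from trio.testing: they have no internal buffer, so send_all blocks until the peer receives, which is exactly the property a clogged maker must provide.

import trio
from trio.testing import check_two_way_stream, lockstep_stream_pair, memory_stream_pair

async def maker():
    return memory_stream_pair()

async def clogged_maker():
    # No buffering: send_all and wait_send_all_might_not_block block until
    # the other side calls receive_some.
    return lockstep_stream_pair()

trio.run(check_two_way_stream, maker, clogged_maker)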
check_half_closeable_stream | (stream_maker, clogged_stream_maker) | Perform a number of generic tests on a custom half-closeable stream
implementation.
This is similar to :func:`check_two_way_stream`, except that the maker
functions are expected to return objects that implement the
:class:`~trio.abc.HalfCloseableStream` interface.
This function tests a *superset* of what :func:`check_two_way_stream`
checks – if you call this, then you don't need to also call
:func:`check_two_way_stream`.
| Perform a number of generic tests on a custom half-closeable stream
implementation. | async def check_half_closeable_stream(stream_maker, clogged_stream_maker):
"""Perform a number of generic tests on a custom half-closeable stream
implementation.
This is similar to :func:`check_two_way_stream`, except that the maker
functions are expected to return objects that implement the
:class:`~trio.abc.HalfCloseableStream` interface.
This function tests a *superset* of what :func:`check_two_way_stream`
checks – if you call this, then you don't need to also call
:func:`check_two_way_stream`.
"""
await check_two_way_stream(stream_maker, clogged_stream_maker)
async with _ForceCloseBoth(await stream_maker()) as (s1, s2):
assert isinstance(s1, HalfCloseableStream)
assert isinstance(s2, HalfCloseableStream)
async def send_x_then_eof(s):
await s.send_all(b"x")
with assert_checkpoints():
await s.send_eof()
async def expect_x_then_eof(r):
await _core.wait_all_tasks_blocked()
assert await r.receive_some(10) == b"x"
assert await r.receive_some(10) == b""
async with _core.open_nursery() as nursery:
nursery.start_soon(send_x_then_eof, s1)
nursery.start_soon(expect_x_then_eof, s2)
# now sending is disallowed
with _assert_raises(_core.ClosedResourceError):
await s1.send_all(b"y")
# but we can do send_eof again
with assert_checkpoints():
await s1.send_eof()
# and we can still send stuff back the other way
async with _core.open_nursery() as nursery:
nursery.start_soon(send_x_then_eof, s2)
nursery.start_soon(expect_x_then_eof, s1)
if clogged_stream_maker is not None:
async with _ForceCloseBoth(await clogged_stream_maker()) as (s1, s2):
# send_all and send_eof simultaneously is not ok
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(s1.send_all, b"x")
await _core.wait_all_tasks_blocked()
nursery.start_soon(s1.send_eof)
async with _ForceCloseBoth(await clogged_stream_maker()) as (s1, s2):
# wait_send_all_might_not_block and send_eof simultaneously is not
# ok either
with _assert_raises(_core.BusyResourceError):
async with _core.open_nursery() as nursery:
nursery.start_soon(s1.wait_send_all_might_not_block)
await _core.wait_all_tasks_blocked()
nursery.start_soon(s1.send_eof) | [
"async",
"def",
"check_half_closeable_stream",
"(",
"stream_maker",
",",
"clogged_stream_maker",
")",
":",
"await",
"check_two_way_stream",
"(",
"stream_maker",
",",
"clogged_stream_maker",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"stream_maker",
"(",
")",
")",
"as",
"(",
"s1",
",",
"s2",
")",
":",
"assert",
"isinstance",
"(",
"s1",
",",
"HalfCloseableStream",
")",
"assert",
"isinstance",
"(",
"s2",
",",
"HalfCloseableStream",
")",
"async",
"def",
"send_x_then_eof",
"(",
"s",
")",
":",
"await",
"s",
".",
"send_all",
"(",
"b\"x\"",
")",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s",
".",
"send_eof",
"(",
")",
"async",
"def",
"expect_x_then_eof",
"(",
"r",
")",
":",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"assert",
"await",
"r",
".",
"receive_some",
"(",
"10",
")",
"==",
"b\"x\"",
"assert",
"await",
"r",
".",
"receive_some",
"(",
"10",
")",
"==",
"b\"\"",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"send_x_then_eof",
",",
"s1",
")",
"nursery",
".",
"start_soon",
"(",
"expect_x_then_eof",
",",
"s2",
")",
"# now sending is disallowed",
"with",
"_assert_raises",
"(",
"_core",
".",
"ClosedResourceError",
")",
":",
"await",
"s1",
".",
"send_all",
"(",
"b\"y\"",
")",
"# but we can do send_eof again",
"with",
"assert_checkpoints",
"(",
")",
":",
"await",
"s1",
".",
"send_eof",
"(",
")",
"# and we can still send stuff back the other way",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"send_x_then_eof",
",",
"s2",
")",
"nursery",
".",
"start_soon",
"(",
"expect_x_then_eof",
",",
"s1",
")",
"if",
"clogged_stream_maker",
"is",
"not",
"None",
":",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s1",
",",
"s2",
")",
":",
"# send_all and send_eof simultaneously is not ok",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"s1",
".",
"send_all",
",",
"b\"x\"",
")",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"nursery",
".",
"start_soon",
"(",
"s1",
".",
"send_eof",
")",
"async",
"with",
"_ForceCloseBoth",
"(",
"await",
"clogged_stream_maker",
"(",
")",
")",
"as",
"(",
"s1",
",",
"s2",
")",
":",
"# wait_send_all_might_not_block and send_eof simultaneously is not",
"# ok either",
"with",
"_assert_raises",
"(",
"_core",
".",
"BusyResourceError",
")",
":",
"async",
"with",
"_core",
".",
"open_nursery",
"(",
")",
"as",
"nursery",
":",
"nursery",
".",
"start_soon",
"(",
"s1",
".",
"wait_send_all_might_not_block",
")",
"await",
"_core",
".",
"wait_all_tasks_blocked",
"(",
")",
"nursery",
".",
"start_soon",
"(",
"s1",
".",
"send_eof",
")"
] | [
448,
0
] | [
510,
51
] | python | en | ['en', 'en', 'en'] | True |
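The half-close handshake exercised above, shown standalone. This assumes the stapled streams returned by memory_stream_pair implement send_eof and propagate EOF to the peer, which is how trio documents them; for a stream type without that behavior the second receive would not return b"".

import trio
from trio.testing import memory_stream_pair

async def main():
    s1, s2 = memory_stream_pair()
    await s1.send_all(b"x")
    await s1.send_eof()                        # half-close the s1 -> s2 direction
    assert await s2.receive_some(10) == b"x"
    assert await s2.receive_some(10) == b""    # EOF is observed as b""
    await s2.send_all(b"reply")                # the other direction still works
    assert await s1.receive_some(10) == b"reply"

trio.run(main)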
free_calldef_t._get__cmp__call_items | (self) | implementation details | implementation details | def _get__cmp__call_items(self):
"""implementation details"""
return [] | [
"def",
"_get__cmp__call_items",
"(",
"self",
")",
":",
"return",
"[",
"]"
] | [
43,
4
] | [
45,
17
] | python | da | ['eo', 'da', 'en'] | False |
free_calldef_t.function_type | (self) | returns function type. See :class:`type_t` hierarchy | returns function type. See :class:`type_t` hierarchy | def function_type(self):
"""returns function type. See :class:`type_t` hierarchy"""
return cpptypes.free_function_type_t(
return_type=self.return_type,
arguments_types=[
arg.decl_type for arg in self.arguments]) | [
"def",
"function_type",
"(",
"self",
")",
":",
"return",
"cpptypes",
".",
"free_function_type_t",
"(",
"return_type",
"=",
"self",
".",
"return_type",
",",
"arguments_types",
"=",
"[",
"arg",
".",
"decl_type",
"for",
"arg",
"in",
"self",
".",
"arguments",
"]",
")"
] | [
47,
4
] | [
52,
57
] | python | en | ['en', 'en', 'en'] | True |
free_calldef_t.guess_calling_convention | (self) | This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention | This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention | def guess_calling_convention(self):
"""This function should be overridden in the derived classes and return
a more-or-less successful guess about the calling convention"""
return calldef_types.CALLING_CONVENTION_TYPES.UNKNOWN | [
"def",
"guess_calling_convention",
"(",
"self",
")",
":",
"return",
"calldef_types",
".",
"CALLING_CONVENTION_TYPES",
".",
"UNKNOWN"
] | [
61,
4
] | [
64,
61
] | python | en | ['en', 'en', 'en'] | True |
free_operator_t.class_types | (self) | list of class/class declaration types, extracted from the
operator arguments | list of class/class declaration types, extracted from the
operator arguments | def class_types(self):
"""list of class/class declaration types, extracted from the
operator arguments"""
if None is self.__class_types:
self.__class_types = []
for type_ in self.argument_types:
decl = None
type_ = type_traits.remove_reference(type_)
if type_traits_classes.is_class(type_):
decl = type_traits_classes.class_traits.get_declaration(
type_)
elif type_traits_classes.is_class_declaration(type_):
tt = type_traits_classes.class_declaration_traits
decl = tt.get_declaration(type_)
else:
pass
if decl:
self.__class_types.append(decl)
return self.__class_types | [
"def",
"class_types",
"(",
"self",
")",
":",
"if",
"None",
"is",
"self",
".",
"__class_types",
":",
"self",
".",
"__class_types",
"=",
"[",
"]",
"for",
"type_",
"in",
"self",
".",
"argument_types",
":",
"decl",
"=",
"None",
"type_",
"=",
"type_traits",
".",
"remove_reference",
"(",
"type_",
")",
"if",
"type_traits_classes",
".",
"is_class",
"(",
"type_",
")",
":",
"decl",
"=",
"type_traits_classes",
".",
"class_traits",
".",
"get_declaration",
"(",
"type_",
")",
"elif",
"type_traits_classes",
".",
"is_class_declaration",
"(",
"type_",
")",
":",
"tt",
"=",
"type_traits_classes",
".",
"class_declaration_traits",
"decl",
"=",
"tt",
".",
"get_declaration",
"(",
"type_",
")",
"else",
":",
"pass",
"if",
"decl",
":",
"self",
".",
"__class_types",
".",
"append",
"(",
"decl",
")",
"return",
"self",
".",
"__class_types"
] | [
94,
4
] | [
113,
33
] | python | en | ['en', 'en', 'en'] | True |
test_profiler_init_no_config | (
cardinality_dataset,
) |
What does this test do and why?
Confirms that the profiler can initialize with no config.
|
What does this test do and why?
Confirms that the profiler can initialize with no config.
| def test_profiler_init_no_config(
cardinality_dataset,
):
"""
What does this test do and why?
Confirms that the profiler can initialize with no config.
"""
profiler = UserConfigurableProfiler(cardinality_dataset)
assert profiler.primary_or_compound_key == []
assert profiler.ignored_columns == []
assert profiler.value_set_threshold == "MANY"
assert not profiler.table_expectations_only
assert profiler.excluded_expectations == [] | [
"def",
"test_profiler_init_no_config",
"(",
"cardinality_dataset",
",",
")",
":",
"profiler",
"=",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
")",
"assert",
"profiler",
".",
"primary_or_compound_key",
"==",
"[",
"]",
"assert",
"profiler",
".",
"ignored_columns",
"==",
"[",
"]",
"assert",
"profiler",
".",
"value_set_threshold",
"==",
"\"MANY\"",
"assert",
"not",
"profiler",
".",
"table_expectations_only",
"assert",
"profiler",
".",
"excluded_expectations",
"==",
"[",
"]"
] | [
77,
0
] | [
89,
47
] | python | en | ['en', 'error', 'th'] | False |
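A runnable sketch of the all-default construction these assertions describe, using a tiny PandasDataset; the column names and values are invented for illustration.

import great_expectations as ge
from great_expectations.profile.user_configurable_profiler import (
    UserConfigurableProfiler,
)

df = ge.dataset.PandasDataset({"col_one": [1, 2, 2], "col_unique": [1, 2, 3]})
profiler = UserConfigurableProfiler(df)  # no config: defaults as asserted above
print(profiler.value_set_threshold)      # "MANY"
suite = profiler.build_suite()           # emits an ExpectationSuite for df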
test_profiler_init_full_config_no_semantic_types | (cardinality_dataset) |
What does this test do and why?
Confirms that the profiler initializes properly with a full config, without a semantic_types dict
|
What does this test do and why?
Confirms that the profiler initializes properly with a full config, without a semantic_types dict
| def test_profiler_init_full_config_no_semantic_types(cardinality_dataset):
"""
What does this test do and why?
Confirms that the profiler initializes properly with a full config, without a semantic_types dict
"""
profiler = UserConfigurableProfiler(
cardinality_dataset,
primary_or_compound_key=["col_unique"],
ignored_columns=["col_one"],
value_set_threshold="UNIQUE",
table_expectations_only=False,
excluded_expectations=["expect_column_values_to_not_be_null"],
)
assert profiler.primary_or_compound_key == ["col_unique"]
assert profiler.ignored_columns == [
"col_one",
]
assert profiler.value_set_threshold == "UNIQUE"
assert not profiler.table_expectations_only
assert profiler.excluded_expectations == ["expect_column_values_to_not_be_null"]
assert "col_one" not in profiler.column_info | [
"def",
"test_profiler_init_full_config_no_semantic_types",
"(",
"cardinality_dataset",
")",
":",
"profiler",
"=",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
",",
"primary_or_compound_key",
"=",
"[",
"\"col_unique\"",
"]",
",",
"ignored_columns",
"=",
"[",
"\"col_one\"",
"]",
",",
"value_set_threshold",
"=",
"\"UNIQUE\"",
",",
"table_expectations_only",
"=",
"False",
",",
"excluded_expectations",
"=",
"[",
"\"expect_column_values_to_not_be_null\"",
"]",
",",
")",
"assert",
"profiler",
".",
"primary_or_compound_key",
"==",
"[",
"\"col_unique\"",
"]",
"assert",
"profiler",
".",
"ignored_columns",
"==",
"[",
"\"col_one\"",
",",
"]",
"assert",
"profiler",
".",
"value_set_threshold",
"==",
"\"UNIQUE\"",
"assert",
"not",
"profiler",
".",
"table_expectations_only",
"assert",
"profiler",
".",
"excluded_expectations",
"==",
"[",
"\"expect_column_values_to_not_be_null\"",
"]",
"assert",
"\"col_one\"",
"not",
"in",
"profiler",
".",
"column_info"
] | [
92,
0
] | [
114,
48
] | python | en | ['en', 'error', 'th'] | False |
test_init_with_semantic_types | (cardinality_dataset) |
What does this test do and why?
Confirms that the profiler initializes properly with a full config and a semantic_types dict
|
What does this test do and why?
Confirms that the profiler initializes properly with a full config and a semantic_types dict
| def test_init_with_semantic_types(cardinality_dataset):
"""
What does this test do and why?
Confirms that the profiler initializes properly with a full config and a semantic_types dict
"""
semantic_types = {
"numeric": ["col_few", "col_many", "col_very_many"],
"value_set": ["col_two", "col_very_few"],
}
profiler = UserConfigurableProfiler(
cardinality_dataset,
semantic_types_dict=semantic_types,
primary_or_compound_key=["col_unique"],
ignored_columns=["col_one"],
value_set_threshold="unique",
table_expectations_only=False,
excluded_expectations=["expect_column_values_to_not_be_null"],
)
assert "col_one" not in profiler.column_info
assert profiler.column_info.get("col_none") == {
"cardinality": "NONE",
"type": "NUMERIC",
"semantic_types": [],
}
assert profiler.column_info.get("col_two") == {
"cardinality": "TWO",
"type": "INT",
"semantic_types": ["VALUE_SET"],
}
assert profiler.column_info.get("col_very_few") == {
"cardinality": "VERY_FEW",
"type": "INT",
"semantic_types": ["VALUE_SET"],
}
assert profiler.column_info.get("col_few") == {
"cardinality": "FEW",
"type": "INT",
"semantic_types": ["NUMERIC"],
}
assert profiler.column_info.get("col_many") == {
"cardinality": "MANY",
"type": "INT",
"semantic_types": ["NUMERIC"],
}
assert profiler.column_info.get("col_very_many") == {
"cardinality": "VERY_MANY",
"type": "INT",
"semantic_types": ["NUMERIC"],
}
assert profiler.column_info.get("col_unique") == {
"cardinality": "UNIQUE",
"type": "INT",
"semantic_types": [],
    } | […] | [117, 0] | [173, 5] | python | en | ['en', 'error', 'th'] | False
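The test above exercises the typical call pattern for this profiler: wrap a DataFrame, hand the profiler a semantic_types dict, and build a suite. A minimal sketch of that pattern, assuming the import path used by great_expectations around the time these tests were written; the DataFrame and column names are illustrative, not fixtures from the test suite:

import great_expectations as ge
import pandas as pd
from great_expectations.profile.user_configurable_profiler import (
    UserConfigurableProfiler,
)

# Illustrative data; the column names here are assumptions.
df = pd.DataFrame({"user_id": range(100), "country": ["US", "DE"] * 50})
batch = ge.dataset.PandasDataset(df)

profiler = UserConfigurableProfiler(
    batch,
    semantic_types_dict={"value_set": ["country"]},
    primary_or_compound_key=["user_id"],
)
suite = profiler.build_suite()  # returns an ExpectationSuite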
test__validate_config | (cardinality_dataset) |
What does this test do and why?
Tests the validate config function on the profiler
|
What does this test do and why?
Tests the validate config function on the profiler
| def test__validate_config(cardinality_dataset):
    """
    What does this test do and why?
    Tests the validate config function on the profiler
    """
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_dataset, ignored_columns="col_name")
    assert e.typename == "AssertionError"
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_dataset, table_expectations_only="True")
assert e.typename == "AssertionError" | [
"def",
"test__validate_config",
"(",
"cardinality_dataset",
")",
":",
"with",
"pytest",
".",
"raises",
"(",
"AssertionError",
")",
"as",
"e",
":",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
",",
"ignored_columns",
"=",
"\"col_name\"",
")",
"assert",
"e",
".",
"typename",
"==",
"\"AssertionError\"",
"with",
"pytest",
".",
"raises",
"(",
"AssertionError",
")",
"as",
"e",
":",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
",",
"table_expectations_only",
"=",
"\"True\"",
")",
"assert",
"e",
".",
"typename",
"==",
"\"AssertionError\""
] | [
176,
0
] | [
188,
41
] | python | en | ['en', 'error', 'th'] | False |
test_value_set_threshold | (cardinality_dataset) |
What does this test do and why?
Tests that the value_set_threshold logic on the profiler works as expected.
|
What does this test do and why?
Tests that the value_set_threshold logic on the profiler works as expected.
| def test_value_set_threshold(cardinality_dataset):
    """
    What does this test do and why?
    Tests that the value_set_threshold logic on the profiler works as expected.
    """
    # Test that when value_set_threshold is unset, it will default to "MANY"
    profiler = UserConfigurableProfiler(cardinality_dataset)
    assert profiler.value_set_threshold == "MANY"
    # Test that when value_set_threshold is set to an appropriate enum value, the value_set_threshold will be correct
    profiler = UserConfigurableProfiler(cardinality_dataset, value_set_threshold="FEW")
    assert profiler.value_set_threshold == "FEW"
    # Test that when value_set_threshold is set to a non-string, it will error
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_dataset, value_set_threshold=None)
    assert e.typename == "AssertionError"
    # Test that when value_set_threshold is set to a string that is not in the cardinality enum, it will error
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(cardinality_dataset, value_set_threshold="wrong_value")
    assert e.typename == "AssertionError"
    assert (
        e.value.args[0]
        == "value_set_threshold must be one of ['NONE', 'ONE', 'TWO', 'VERY_FEW', 'FEW', 'MANY', 'VERY_MANY', 'UNIQUE']"
    ) | […] | [191, 0] | [216, 5] | python | en | ['en', 'error', 'th'] | False
test__validate_semantic_types_dict | (cardinality_dataset) |
What does this test do and why?
Tests that the _validate_semantic_types_dict function raises an error when the dict is not formatted correctly
|
What does this test do and why?
Tests that the _validate_semantic_types_dict function raises an error when the dict is not formatted correctly
| def test__validate_semantic_types_dict(cardinality_dataset):
    """
    What does this test do and why?
    Tests that the _validate_semantic_types_dict function raises an error when the dict is not formatted correctly
    """
    bad_semantic_types_dict_type = {"value_set": "col_few"}
    with pytest.raises(AssertionError) as e:
        UserConfigurableProfiler(
            cardinality_dataset, semantic_types_dict=bad_semantic_types_dict_type
        )
    assert e.value.args[0] == (
        "Entries in semantic type dict must be lists of column names e.g. "
        "{'semantic_types': {'numeric': ['number_of_transactions']}}"
    )
    bad_semantic_types_incorrect_type = {"incorrect_type": ["col_few"]}
    with pytest.raises(ValueError) as e:
        UserConfigurableProfiler(
            cardinality_dataset, semantic_types_dict=bad_semantic_types_incorrect_type
        )
    assert e.value.args[0] == (
        f"incorrect_type is not a recognized semantic_type. Please only include one of "
        f"{profiler_semantic_types}"
    )
    # Error if column is specified for both semantic_types and ignored
    working_semantic_type = {"numeric": ["col_few"]}
    with pytest.raises(ValueError) as e:
        UserConfigurableProfiler(
            cardinality_dataset,
            semantic_types_dict=working_semantic_type,
            ignored_columns=["col_few"],
        )
    assert e.value.args[0] == (
        f"Column col_few is specified in both the semantic_types_dict and the list of ignored columns. Please remove "
        f"one of these entries to proceed."
    ) | […] | [219, 0] | [256, 5] | python | en | ['en', 'error', 'th'] | False
test_build_suite_no_config | (titanic_dataset, possible_expectations_set) |
What does this test do and why?
Tests that the build_suite function works as expected with no config
|
What does this test do and why?
Tests that the build_suite function works as expected with no config
| def test_build_suite_no_config(titanic_dataset, possible_expectations_set):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with no config
    """
    profiler = UserConfigurableProfiler(titanic_dataset)
    suite = profiler.build_suite()
    expectations_from_suite = {i.expectation_type for i in suite.expectations}
    assert expectations_from_suite.issubset(possible_expectations_set)
    assert len(suite.expectations) == 48 | […] | [259, 0] | [269, 40] | python | en | ['en', 'error', 'th'] | False
test_build_suite_with_config_and_no_semantic_types_dict | (titanic_dataset, possible_expectations_set) |
What does this test do and why?
Tests that the build_suite function works as expected with a config and without a semantic_types dict
|
What does this test do and why?
Tests that the build_suite function works as expected with a config and without a semantic_types dict
| def test_build_suite_with_config_and_no_semantic_types_dict(
    titanic_dataset, possible_expectations_set
):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with a config and without a semantic_types dict
    """
    profiler = UserConfigurableProfiler(
        titanic_dataset,
        ignored_columns=["Survived", "Unnamed: 0"],
        excluded_expectations=["expect_column_mean_to_be_between"],
        primary_or_compound_key=["Name"],
        table_expectations_only=False,
        value_set_threshold="very_few",
    )
    suite = profiler.build_suite()
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)
    columns_expected_in_suite = {"Name", "PClass", "Age", "Sex", "SexCode"}
    assert columns_with_expectations == columns_expected_in_suite
    assert expectations_from_suite.issubset(possible_expectations_set)
    assert "expect_column_mean_to_be_between" not in expectations_from_suite
    assert len(suite.expectations) == 29 | […] | [272, 0] | [297, 40] | python | en | ['en', 'error', 'th'] | False
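This test and several that follow lean on a small helper, get_set_of_columns_and_expectations_from_suite, whose body is not included in this extract. A plausible reconstruction — an assumption based on how the tests use it, not the project's actual code — would be:

def get_set_of_columns_and_expectations_from_suite(suite):
    """Return (columns touched by the suite, expectation types in the suite)."""
    columns = {
        expectation.kwargs.get("column")
        for expectation in suite.expectations
        if expectation.kwargs.get("column") is not None
    }
    expectation_types = {e.expectation_type for e in suite.expectations}
    return columns, expectation_types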
test_build_suite_with_semantic_types_dict | (cardinality_dataset, possible_expectations_set) |
What does this test do and why?
Tests that the build_suite function works as expected with a semantic_types dict
|
What does this test do and why?
Tests that the build_suite function works as expected with a semantic_types dict
| def test_build_suite_with_semantic_types_dict(
    cardinality_dataset,
    possible_expectations_set,
):
    """
    What does this test do and why?
    Tests that the build_suite function works as expected with a semantic_types dict
    """
    semantic_types = {
        "numeric": ["col_few", "col_many", "col_very_many"],
        "value_set": ["col_two", "col_very_few"],
    }
    profiler = UserConfigurableProfiler(
        cardinality_dataset,
        semantic_types_dict=semantic_types,
        primary_or_compound_key=["col_unique"],
        ignored_columns=["col_one"],
        value_set_threshold="unique",
        table_expectations_only=False,
        excluded_expectations=["expect_column_values_to_not_be_null"],
    )
    suite = profiler.build_suite()
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)
    assert "column_one" not in columns_with_expectations
    assert "expect_column_values_to_not_be_null" not in expectations_from_suite
    assert expectations_from_suite.issubset(possible_expectations_set)
    assert len(suite.expectations) == 33
    value_set_expectations = [
        i
        for i in suite.expectations
        if i.expectation_type == "expect_column_values_to_be_in_set"
    ]
    value_set_columns = {i.kwargs.get("column") for i in value_set_expectations}
    assert len(value_set_columns) == 2
    assert value_set_columns == {"col_two", "col_very_few"} | […] | [300, 0] | [342, 59] | python | en | ['en', 'error', 'th'] | False
test_build_suite_when_suite_already_exists | (cardinality_dataset) |
What does this test do and why?
Confirms that creating a new suite on an existing profiler wipes the previous suite
|
What does this test do and why?
Confirms that creating a new suite on an existing profiler wipes the previous suite
| def test_build_suite_when_suite_already_exists(cardinality_dataset):
    """
    What does this test do and why?
    Confirms that creating a new suite on an existing profiler wipes the previous suite
    """
    profiler = UserConfigurableProfiler(
        cardinality_dataset,
        table_expectations_only=True,
        excluded_expectations=["expect_table_row_count_to_be_between"],
    )
    suite = profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(suite)
    assert len(suite.expectations) == 1
    assert "expect_table_columns_to_match_ordered_list" in expectations
    profiler.excluded_expectations = ["expect_table_columns_to_match_ordered_list"]
    suite = profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(suite)
    assert len(suite.expectations) == 1
assert "expect_table_row_count_to_be_between" in expectations | [
"def",
"test_build_suite_when_suite_already_exists",
"(",
"cardinality_dataset",
")",
":",
"profiler",
"=",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
",",
"table_expectations_only",
"=",
"True",
",",
"excluded_expectations",
"=",
"[",
"\"expect_table_row_count_to_be_between\"",
"]",
",",
")",
"suite",
"=",
"profiler",
".",
"build_suite",
"(",
")",
"_",
",",
"expectations",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"suite",
")",
"assert",
"len",
"(",
"suite",
".",
"expectations",
")",
"==",
"1",
"assert",
"\"expect_table_columns_to_match_ordered_list\"",
"in",
"expectations",
"profiler",
".",
"excluded_expectations",
"=",
"[",
"\"expect_table_columns_to_match_ordered_list\"",
"]",
"suite",
"=",
"profiler",
".",
"build_suite",
"(",
")",
"_",
",",
"expectations",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"suite",
")",
"assert",
"len",
"(",
"suite",
".",
"expectations",
")",
"==",
"1",
"assert",
"\"expect_table_row_count_to_be_between\"",
"in",
"expectations"
] | [
345,
0
] | [
365,
65
] | python | en | ['en', 'error', 'th'] | False |
test_primary_or_compound_key_not_found_in_columns | (cardinality_dataset) |
What does this test do and why?
Confirms that an error is raised if a primary_or_compound_key is specified with a column not found in the dataset
|
What does this test do and why?
Confirms that an error is raised if a primary_or_compound_key is specified with a column not found in the dataset
| def test_primary_or_compound_key_not_found_in_columns(cardinality_dataset):
    """
    What does this test do and why?
    Confirms that an error is raised if a primary_or_compound_key is specified with a column not found in the dataset
    """
    # regular case, should pass
    working_profiler = UserConfigurableProfiler(
        cardinality_dataset, primary_or_compound_key=["col_unique"]
    )
    assert working_profiler.primary_or_compound_key == ["col_unique"]
    # key includes a non-existent column, should fail
    with pytest.raises(ValueError) as e:
        bad_key_profiler = UserConfigurableProfiler(
            cardinality_dataset,
            primary_or_compound_key=["col_unique", "col_that_does_not_exist"],
        )
    assert e.value.args[0] == (
        f"Column col_that_does_not_exist not found. Please ensure that this column is in the PandasDataset if "
        f"you would like to use it as a primary_or_compound_key."
    )
    # key includes a column that exists, but is in ignored_columns, should pass
    ignored_column_profiler = UserConfigurableProfiler(
        cardinality_dataset,
        primary_or_compound_key=["col_unique", "col_one"],
        ignored_columns=["col_none", "col_one"],
    )
    assert ignored_column_profiler.primary_or_compound_key == ["col_unique", "col_one"] | […] | [368, 0] | [396, 87] | python | en | ['en', 'error', 'th'] | False
test_config_with_not_null_only | (nulls_dataset, possible_expectations_set) |
What does this test do and why?
Confirms that the not_null_only key in config works as expected.
|
What does this test do and why?
Confirms that the not_null_only key in config works as expected.
| def test_config_with_not_null_only(nulls_dataset, possible_expectations_set):
    """
    What does this test do and why?
    Confirms that the not_null_only key in config works as expected.
    """
    excluded_expectations = [i for i in possible_expectations_set if "null" not in i]
    batch_df = nulls_dataset
    profiler_without_not_null_only = UserConfigurableProfiler(
        batch_df, excluded_expectations, not_null_only=False
    )
    suite_without_not_null_only = profiler_without_not_null_only.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(
        suite_without_not_null_only
    )
    assert expectations == {
        "expect_column_values_to_be_null",
        "expect_column_values_to_not_be_null",
    }
    profiler_with_not_null_only = UserConfigurableProfiler(
        batch_df, excluded_expectations, not_null_only=True
    )
    not_null_only_suite = profiler_with_not_null_only.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(
        not_null_only_suite
    )
    assert expectations == {"expect_column_values_to_not_be_null"}
    no_config_profiler = UserConfigurableProfiler(batch_df)
    no_config_suite = no_config_profiler.build_suite()
    _, expectations = get_set_of_columns_and_expectations_from_suite(no_config_suite)
assert "expect_column_values_to_be_null" in expectations | [
"def",
"test_config_with_not_null_only",
"(",
"nulls_dataset",
",",
"possible_expectations_set",
")",
":",
"excluded_expectations",
"=",
"[",
"i",
"for",
"i",
"in",
"possible_expectations_set",
"if",
"\"null\"",
"not",
"in",
"i",
"]",
"batch_df",
"=",
"nulls_dataset",
"profiler_without_not_null_only",
"=",
"UserConfigurableProfiler",
"(",
"batch_df",
",",
"excluded_expectations",
",",
"not_null_only",
"=",
"False",
")",
"suite_without_not_null_only",
"=",
"profiler_without_not_null_only",
".",
"build_suite",
"(",
")",
"_",
",",
"expectations",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"suite_without_not_null_only",
")",
"assert",
"expectations",
"==",
"{",
"\"expect_column_values_to_be_null\"",
",",
"\"expect_column_values_to_not_be_null\"",
",",
"}",
"profiler_with_not_null_only",
"=",
"UserConfigurableProfiler",
"(",
"batch_df",
",",
"excluded_expectations",
",",
"not_null_only",
"=",
"True",
")",
"not_null_only_suite",
"=",
"profiler_with_not_null_only",
".",
"build_suite",
"(",
")",
"_",
",",
"expectations",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"not_null_only_suite",
")",
"assert",
"expectations",
"==",
"{",
"\"expect_column_values_to_not_be_null\"",
"}",
"no_config_profiler",
"=",
"UserConfigurableProfiler",
"(",
"batch_df",
")",
"no_config_suite",
"=",
"no_config_profiler",
".",
"build_suite",
"(",
")",
"_",
",",
"expectations",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"no_config_suite",
")",
"assert",
"\"expect_column_values_to_be_null\"",
"in",
"expectations"
] | [
399,
0
] | [
433,
60
] | python | en | ['en', 'error', 'th'] | False |
test_profiled_dataset_passes_own_validation | (cardinality_dataset, titanic_data_context) |
What does this test do and why?
Confirms that a suite created on a dataset with no config will pass when validated against itself
|
What does this test do and why?
Confirms that a suite created on a dataset with no config will pass when validated against itself
| def test_profiled_dataset_passes_own_validation(
    cardinality_dataset, titanic_data_context
):
    """
    What does this test do and why?
    Confirms that a suite created on a dataset with no config will pass when validated against itself
    """
    context = titanic_data_context
    profiler = UserConfigurableProfiler(
        cardinality_dataset, ignored_columns=["col_none"]
    )
    suite = profiler.build_suite()
    context.save_expectation_suite(suite)
    results = context.run_validation_operator(
        "action_list_operator", assets_to_validate=[cardinality_dataset]
    )
assert results["success"] | [
"def",
"test_profiled_dataset_passes_own_validation",
"(",
"cardinality_dataset",
",",
"titanic_data_context",
")",
":",
"context",
"=",
"titanic_data_context",
"profiler",
"=",
"UserConfigurableProfiler",
"(",
"cardinality_dataset",
",",
"ignored_columns",
"=",
"[",
"\"col_none\"",
"]",
")",
"suite",
"=",
"profiler",
".",
"build_suite",
"(",
")",
"context",
".",
"save_expectation_suite",
"(",
"suite",
")",
"results",
"=",
"context",
".",
"run_validation_operator",
"(",
"\"action_list_operator\"",
",",
"assets_to_validate",
"=",
"[",
"cardinality_dataset",
"]",
")",
"assert",
"results",
"[",
"\"success\"",
"]"
] | [
452,
0
] | [
470,
29
] | python | en | ['en', 'error', 'th'] | False |
test_profiler_all_expectation_types | (titanic_data_context, possible_expectations_set) |
What does this test do and why?
Ensures that all available expectation types work as expected
|
What does this test do and why?
Ensures that all available expectation types work as expected
| def test_profiler_all_expectation_types(
    titanic_data_context, possible_expectations_set
):
    """
    What does this test do and why?
    Ensures that all available expectation types work as expected
    """
    context = titanic_data_context
    df = ge.read_csv(
        file_relative_path(
            __file__,
            "../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv",
        )
    )
    batch_df = ge.dataset.PandasDataset(df)
    ignored_columns = [
        "pickup_location_id",
        "dropoff_location_id",
        "fare_amount",
        "extra",
        "mta_tax",
        "tip_amount",
        "tolls_amount",
        "improvement_surcharge",
        "congestion_surcharge",
    ]
    semantic_types = {
        "datetime": ["pickup_datetime", "dropoff_datetime"],
        "numeric": ["total_amount", "passenger_count"],
        "value_set": [
            "payment_type",
            "rate_code_id",
            "store_and_fwd_flag",
            "passenger_count",
        ],
        "boolean": ["store_and_fwd_flag"],
    }
    profiler = UserConfigurableProfiler(
        batch_df,
        semantic_types_dict=semantic_types,
        ignored_columns=ignored_columns,
        primary_or_compound_key=[
            "vendor_id",
            "pickup_datetime",
            "dropoff_datetime",
            "trip_distance",
            "pickup_location_id",
            "dropoff_location_id",
        ],
    )
    assert profiler.column_info.get("rate_code_id")
    suite = profiler.build_suite()
    assert len(suite.expectations) == 46
    (
        columns_with_expectations,
        expectations_from_suite,
    ) = get_set_of_columns_and_expectations_from_suite(suite)
    unexpected_expectations = {
        "expect_column_values_to_be_unique",
        "expect_column_values_to_be_null",
    }
    assert expectations_from_suite == {
        i for i in possible_expectations_set if i not in unexpected_expectations
    }
    ignored_included_columns_overlap = [
        i for i in columns_with_expectations if i in ignored_columns
    ]
    assert len(ignored_included_columns_overlap) == 0
    results = context.run_validation_operator(
        "action_list_operator", assets_to_validate=[batch_df]
    )
assert results["success"] | [
"def",
"test_profiler_all_expectation_types",
"(",
"titanic_data_context",
",",
"possible_expectations_set",
")",
":",
"context",
"=",
"titanic_data_context",
"df",
"=",
"ge",
".",
"read_csv",
"(",
"file_relative_path",
"(",
"__file__",
",",
"\"../test_sets/taxi_yellow_trip_data_samples/yellow_trip_data_sample_2019-01.csv\"",
",",
")",
")",
"batch_df",
"=",
"ge",
".",
"dataset",
".",
"PandasDataset",
"(",
"df",
")",
"ignored_columns",
"=",
"[",
"\"pickup_location_id\"",
",",
"\"dropoff_location_id\"",
",",
"\"fare_amount\"",
",",
"\"extra\"",
",",
"\"mta_tax\"",
",",
"\"tip_amount\"",
",",
"\"tolls_amount\"",
",",
"\"improvement_surcharge\"",
",",
"\"congestion_surcharge\"",
",",
"]",
"semantic_types",
"=",
"{",
"\"datetime\"",
":",
"[",
"\"pickup_datetime\"",
",",
"\"dropoff_datetime\"",
"]",
",",
"\"numeric\"",
":",
"[",
"\"total_amount\"",
",",
"\"passenger_count\"",
"]",
",",
"\"value_set\"",
":",
"[",
"\"payment_type\"",
",",
"\"rate_code_id\"",
",",
"\"store_and_fwd_flag\"",
",",
"\"passenger_count\"",
",",
"]",
",",
"\"boolean\"",
":",
"[",
"\"store_and_fwd_flag\"",
"]",
",",
"}",
"profiler",
"=",
"UserConfigurableProfiler",
"(",
"batch_df",
",",
"semantic_types_dict",
"=",
"semantic_types",
",",
"ignored_columns",
"=",
"ignored_columns",
",",
"primary_or_compound_key",
"=",
"[",
"\"vendor_id\"",
",",
"\"pickup_datetime\"",
",",
"\"dropoff_datetime\"",
",",
"\"trip_distance\"",
",",
"\"pickup_location_id\"",
",",
"\"dropoff_location_id\"",
",",
"]",
",",
")",
"assert",
"profiler",
".",
"column_info",
".",
"get",
"(",
"\"rate_code_id\"",
")",
"suite",
"=",
"profiler",
".",
"build_suite",
"(",
")",
"assert",
"len",
"(",
"suite",
".",
"expectations",
")",
"==",
"46",
"(",
"columns_with_expectations",
",",
"expectations_from_suite",
",",
")",
"=",
"get_set_of_columns_and_expectations_from_suite",
"(",
"suite",
")",
"unexpected_expectations",
"=",
"{",
"\"expect_column_values_to_be_unique\"",
",",
"\"expect_column_values_to_be_null\"",
",",
"}",
"assert",
"expectations_from_suite",
"==",
"{",
"i",
"for",
"i",
"in",
"possible_expectations_set",
"if",
"i",
"not",
"in",
"unexpected_expectations",
"}",
"ignored_included_columns_overlap",
"=",
"[",
"i",
"for",
"i",
"in",
"columns_with_expectations",
"if",
"i",
"in",
"ignored_columns",
"]",
"assert",
"len",
"(",
"ignored_included_columns_overlap",
")",
"==",
"0",
"results",
"=",
"context",
".",
"run_validation_operator",
"(",
"\"action_list_operator\"",
",",
"assets_to_validate",
"=",
"[",
"batch_df",
"]",
")",
"assert",
"results",
"[",
"\"success\"",
"]"
] | [
473,
0
] | [
551,
29
] | python | en | ['en', 'error', 'th'] | False |
regression_errors | (y, y_hat, smoothing_window=0.01, smooth=True) | Compute an array of absolute errors comparing predictions and expected output.
If smooth is True, apply EWMA to the resulting array of errors.
Args:
y (ndarray):
Ground truth.
y_hat (ndarray):
Predicted values.
smoothing_window (float):
Optional. Size of the smoothing window, expressed as a proportion of the total
length of y. If not given, 0.01 is used.
smooth (bool):
Optional. Indicates whether the returned errors should be smoothed with EWMA.
If not given, `True` is used.
Returns:
ndarray:
Array of errors.
| Compute an array of absolute errors comparing predictions and expected output. | def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute an array of absolute errors comparing predictions and expected output.

    If smooth is True, apply EWMA to the resulting array of errors.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values.
        smoothing_window (float):
            Optional. Size of the smoothing window, expressed as a proportion of the total
            length of y. If not given, 0.01 is used.
        smooth (bool):
            Optional. Indicates whether the returned errors should be smoothed with EWMA.
            If not given, `True` is used.

    Returns:
        ndarray:
            Array of errors.
    """
    errors = np.abs(y - y_hat)[:, 0]

    if not smooth:
        return errors

    smoothing_window = int(smoothing_window * len(y))
    return pd.Series(errors).ewm(span=smoothing_window).mean().values | […] | [12, 0] | [40, 69] | python | en | ['en', 'en', 'en'] | True
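A small usage sketch with illustrative inputs; note that regression_errors expects column vectors because of the [:, 0] indexing above:

import numpy as np

y = np.sin(np.linspace(0, 10, 200)).reshape(-1, 1)
y_hat = y + np.random.default_rng(0).normal(0, 0.1, size=y.shape)

errors = regression_errors(y, y_hat, smoothing_window=0.05)
print(errors.shape)  # (200,) -- one smoothed absolute error per timestamp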
_point_wise_error | (y, y_hat) | Compute point-wise error between predicted and expected values.
The computed error is the absolute difference between predicted
and expected values.
Args:
y (ndarray):
Ground truth.
y_hat (ndarray):
Predicted values.
Returns:
ndarray:
An array of smoothed point-wise error.
| Compute point-wise error between predicted and expected values. | def _point_wise_error(y, y_hat):
    """Compute point-wise error between predicted and expected values.

    The computed error is the absolute difference between predicted
    and expected values.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values.

    Returns:
        ndarray:
            An array of point-wise error.
    """
    return abs(y - y_hat) | […] | [43, 0] | [59, 25] | python | en | ['en', 'en', 'en'] | True
_area_error | (y, y_hat, score_window=10) | Compute area error between predicted and expected values.
The computed error is calculated as the area difference between predicted
and expected values with a smoothing factor.
Args:
y (ndarray):
Ground truth.
y_hat (ndarray):
Predicted values.
score_window (int):
Optional. Size of the window over which the scores are calculated.
If not given, 10 is used.
Returns:
ndarray:
An array of area error.
| Compute area error between predicted and expected values. | def _area_error(y, y_hat, score_window=10):
    """Compute area error between predicted and expected values.

    The computed error is calculated as the area difference between predicted
    and expected values with a smoothing factor.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values.
        score_window (int):
            Optional. Size of the window over which the scores are calculated.
            If not given, 10 is used.

    Returns:
        ndarray:
            An array of area error.
    """
    smooth_y = pd.Series(y).rolling(
        score_window, center=True, min_periods=score_window // 2).apply(integrate.trapz)
    smooth_y_hat = pd.Series(y_hat).rolling(
        score_window, center=True, min_periods=score_window // 2).apply(integrate.trapz)

    errors = abs(smooth_y - smooth_y_hat)
    return errors | […] | [62, 0] | [88, 17] | python | en | ['en', 'en', 'en'] | True
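Because each centered window is integrated with the trapezoidal rule before differencing, isolated point spikes matter less than sustained deviations. An illustrative check with a small constant offset:

import numpy as np

t = np.linspace(0, 2 * np.pi, 100)
errors = _area_error(np.sin(t), np.sin(t) + 0.01, score_window=10)
print(float(errors.mean()))  # small: the constant offset shows up as a small area difference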
_dtw_error | (y, y_hat, score_window=10) | Compute dtw error between predicted and expected values.
The computed error is calculated as the dynamic time warping distance
between predicted and expected values with a smoothing factor.
Args:
y (ndarray):
Ground truth.
y_hat (ndarray):
Predicted values.
score_window (int):
Optional. Size of the window over which the scores are calculated.
If not given, 10 is used.
Returns:
ndarray:
An array of dtw error.
| Compute dtw error between predicted and expected values. | def _dtw_error(y, y_hat, score_window=10):
    """Compute dtw error between predicted and expected values.

    The computed error is calculated as the dynamic time warping distance
    between predicted and expected values with a smoothing factor.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values.
        score_window (int):
            Optional. Size of the window over which the scores are calculated.
            If not given, 10 is used.

    Returns:
        ndarray:
            An array of dtw error.
    """
    length_dtw = (score_window // 2) * 2 + 1
    half_length_dtw = length_dtw // 2

    # add padding so that a centered window exists for every timestamp
    y_pad = np.pad(y, (half_length_dtw, half_length_dtw),
                   'constant', constant_values=(0, 0))
    y_hat_pad = np.pad(y_hat, (half_length_dtw, half_length_dtw),
                       'constant', constant_values=(0, 0))

    i = 0
    similarity_dtw = list()
    while i < len(y) - length_dtw:
        true_data = y_pad[i:i + length_dtw]
        true_data = true_data.flatten()

        pred_data = y_hat_pad[i:i + length_dtw]
        pred_data = pred_data.flatten()

        dist = dtw(true_data, pred_data)
        similarity_dtw.append(dist)
        i += 1

    # zero-pad both ends so the error array matches the input length
    errors = ([0] * half_length_dtw + similarity_dtw +
              [0] * (len(y) - len(similarity_dtw) - half_length_dtw))
    return errors | […] | [91, 0] | [135, 17] | python | en | ['en', 'en', 'en'] | True
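A sketch of how this is typically called, assuming the module-level `dtw` is a callable that returns a scalar alignment distance between two 1-D arrays (its import is not shown in this extract):

import numpy as np

y = np.sin(np.linspace(0, 6, 300))
y_hat = np.sin(np.linspace(0, 6, 300) + 0.2)  # small phase shift

errors = _dtw_error(y, y_hat, score_window=10)
print(len(errors))  # 300 -- zero-padded at both ends to match the input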
reconstruction_errors | (y, y_hat, step_size=1, score_window=10, smoothing_window=0.01,
smooth=True, rec_error_type='point') | Compute an array of reconstruction errors.
Compute the discrepancies between the expected and the
predicted values according to the reconstruction error type.
Args:
y (ndarray):
Ground truth.
y_hat (ndarray):
Predicted values. Each timestamp has multiple predictions.
step_size (int):
Optional. Number of steps between windows in the predicted values.
If not given, 1 is used.
score_window (int):
Optional. Size of the window over which the scores are calculated.
If not given, 10 is used.
smoothing_window (float or int):
Optional. Size of the smoothing window, when float it is expressed as a proportion
of the total length of y. If not given, 0.01 is used.
smooth (bool):
Optional. Indicates whether the returned errors should be smoothed.
If not given, `True` is used.
rec_error_type (str):
Optional. Reconstruction error types ``["point", "area", "dtw"]``.
If not given, "point" is used.
Returns:
ndarray:
Array of reconstruction errors.
| Compute an array of reconstruction errors. | def reconstruction_errors(y, y_hat, step_size=1, score_window=10, smoothing_window=0.01,
                          smooth=True, rec_error_type='point'):
    """Compute an array of reconstruction errors.

    Compute the discrepancies between the expected and the
    predicted values according to the reconstruction error type.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values. Each timestamp has multiple predictions.
        step_size (int):
            Optional. Number of steps between windows in the predicted values.
            If not given, 1 is used.
        score_window (int):
            Optional. Size of the window over which the scores are calculated.
            If not given, 10 is used.
        smoothing_window (float or int):
            Optional. Size of the smoothing window, when float it is expressed as a proportion
            of the total length of y. If not given, 0.01 is used.
        smooth (bool):
            Optional. Indicates whether the returned errors should be smoothed.
            If not given, `True` is used.
        rec_error_type (str):
            Optional. Reconstruction error types ``["point", "area", "dtw"]``.
            If not given, "point" is used.

    Returns:
        ndarray:
            Array of reconstruction errors.
    """
    if isinstance(smoothing_window, float):
        smoothing_window = min(math.trunc(len(y) * smoothing_window), 200)

    # Reassemble the ground-truth signal from the overlapping windows.
    true = [item[0] for item in y.reshape((y.shape[0], -1))]
    for item in y[-1][1:]:
        true.extend(item)

    predictions = []
    predictions_vs = []
    pred_length = y_hat.shape[1]
    num_errors = y_hat.shape[1] + step_size * (y_hat.shape[0] - 1)

    # For each timestamp, aggregate every prediction that covers it.
    for i in range(num_errors):
        intermediate = []
        for j in range(max(0, i - num_errors + pred_length), min(i + 1, pred_length)):
            intermediate.append(y_hat[i - j, j])

        if intermediate:
            predictions.append(np.median(np.asarray(intermediate)))
            predictions_vs.append([[
                np.min(np.asarray(intermediate)),
                np.percentile(np.asarray(intermediate), 25),
                np.percentile(np.asarray(intermediate), 50),
                np.percentile(np.asarray(intermediate), 75),
                np.max(np.asarray(intermediate))
            ]])

    true = np.asarray(true)
    predictions = np.asarray(predictions)
    predictions_vs = np.asarray(predictions_vs)

    # Compute reconstruction errors
    if rec_error_type.lower() == "point":
        errors = _point_wise_error(true, predictions)

    elif rec_error_type.lower() == "area":
        errors = _area_error(true, predictions, score_window)

    elif rec_error_type.lower() == "dtw":
        errors = _dtw_error(true, predictions, score_window)

    else:
        # Guard added here: without it, an unrecognized type would raise a NameError below.
        raise ValueError('Unknown rec_error_type: {}'.format(rec_error_type))

    # Apply smoothing
    if smooth:
        errors = pd.Series(errors).rolling(
            smoothing_window, center=True, min_periods=smoothing_window // 2).mean().values
    return errors, predictions_vs | […] | [138, 0] | [217, 33] | python | en | ['en', 'en', 'en'] | True
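An illustrative call with ten overlapping windows of length five over a short signal (step_size=1); the shapes follow the conventions above, and the values are arbitrary:

import numpy as np

signal = np.sin(np.linspace(0, 3, 14))
y = np.stack([signal[i:i + 5] for i in range(10)])[..., None]  # (10, 5, 1)
y_hat = y[:, :, 0] + 0.05                                      # (10, 5)

errors, intervals = reconstruction_errors(
    y, y_hat, step_size=1, smoothing_window=3, rec_error_type="point")
print(errors.shape, intervals.shape)  # (14,) and (14, 1, 5)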
mpjpe | (predicted, target) |
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
|
Mean per-joint position error (i.e. mean Euclidean distance),
often referred to as "Protocol #1" in many papers.
| def mpjpe(predicted, target):
    """
    Mean per-joint position error (i.e. mean Euclidean distance),
    often referred to as "Protocol #1" in many papers.
    """
    assert predicted.shape == target.shape
    # l2_error = torch.mean(torch.norm((predicted - target), dim=len(target.shape) - 1), -1).squeeze()
    # print('each joint error:', torch.norm((predicted - target), dim=len(target.shape) - 1))
    # index = np.where(l2_error.cpu().detach().numpy() > 0.3)  # mean body l2 distance larger than 300mm
    # value = l2_error[l2_error > 0.3]
    # print('Index of mean body l2 distance larger than 300mm', index, value)
    return torch.mean(torch.norm((predicted - target), dim=len(target.shape) - 1)) | […] | [13, 0] | [24, 82] | python | en | ['en', 'error', 'th'] | False
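A quick sanity check (the shapes here — batch, frames, joints, xyz — are illustrative): a constant 10 mm offset on every joint should yield an MPJPE of exactly 0.01 m.

import torch

target = torch.zeros(2, 8, 17, 3)
predicted = target + torch.tensor([0.01, 0.0, 0.0])  # 10 mm shift along x

print(mpjpe(predicted, target))  # tensor(0.0100)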
mpjae | (predicted, target) |
Mean per-joint angle error (3D bone-vector angle error between the ground truth and the prediction)
|
Mean per-joint angle error (3D bone-vector angle error between the ground truth and the prediction)
| def mpjae(predicted, target):
    """
    Mean per-joint angle error (3D bone-vector angle error between the ground truth and the prediction)
    """
    assert predicted.shape == target.shape  # [B, T, K]
    joint_error = torch.mean(torch.abs(predicted - target).cuda(), dim=0)  # per-joint angle error
    print('each bone angle error:', joint_error)
    return torch.mean(joint_error) | […] | [27, 0] | [34, 34] | python | en | ['en', 'error', 'th'] | False
mpjpe_smooth | (predicted, target, threshold, mi, L1) |
Smooth per-joint loss, as referred to in the 3D pose triangulation paper
|
Smooth per-joint loss, as referred to in the 3D pose triangulation paper
| def mpjpe_smooth(predicted, target, threshold, mi, L1):
    """
    Smooth per-joint loss, as referred to in the 3D pose triangulation paper
    """
    assert predicted.shape == target.shape
    if L1:
        # torch.abs takes no `dim` argument; the element-wise absolute error is intended here
        diff = torch.abs(predicted - target)
    else:  # MSE
        diff = (predicted - target) ** 2
    # Errors above the threshold are compressed by the exponent mi
    diff[diff > threshold] = torch.pow(diff[diff > threshold], mi) * (threshold ** (1 - mi))
    loss = torch.mean(diff)
    return loss | […] | [40, 0] | [52, 15] | python | en | ['en', 'error', 'th'] | False
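With the fix above, the threshold behaviour can be checked directly: errors below `threshold` pass through as plain MSE, while larger ones are compressed by the exponent `mi`. The values here are illustrative.

import torch

predicted = torch.tensor([[0.1, 1.0]])
target = torch.zeros(1, 2)

# Squared errors are [0.01, 1.0]; only 1.0 exceeds the threshold and is
# rescaled to 1.0 ** 0.5 * 0.25 ** 0.5 = 0.5, so the mean is 0.255.
print(mpjpe_smooth(predicted, target, threshold=0.25, mi=0.5, L1=False))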
p_mpjpe | (predicted, target) |
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
|
Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
often referred to as "Protocol #2" in many papers.
| def p_mpjpe(predicted, target):
    """
    Pose error: MPJPE after rigid alignment (scale, rotation, and translation),
    often referred to as "Protocol #2" in many papers.
    """
    assert predicted.shape == target.shape  # (3071, 17, 3)

    muX = np.mean(target, axis=1, keepdims=True)
    muY = np.mean(predicted, axis=1, keepdims=True)

    X0 = target - muX
    Y0 = predicted - muY

    # Remove scale (the epsilon guards against division by zero)
    normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True))
    normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True))
    X0 /= (normX + 1e-8)
    Y0 /= (normY + 1e-8)

    # Optimum rotation matrix of Y0
    H = np.matmul(X0.transpose(0, 2, 1), Y0)
    U, s, Vt = np.linalg.svd(H)
    V = Vt.transpose(0, 2, 1)
    R = np.matmul(V, U.transpose(0, 2, 1))  # Rotation

    # Avoid improper rotations (reflections), i.e. rotations with det(R) = -1
    sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
    V[:, :, -1] *= sign_detR
    s[:, -1] *= sign_detR.flatten()
    R = np.matmul(V, U.transpose(0, 2, 1))  # Rotation

    tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)

    a = tr * normX / normY  # Scale
    t = muX - a * np.matmul(muY, R)  # Translation

    # Standardized distance between X0 and a*Y0*R + c (not used below)
    d = 1 - tr ** 2

    # Perform rigid transformation on the input
    predicted_aligned = a * np.matmul(predicted, R)
    trans_aligned = predicted_aligned + t
    error = np.mean(np.linalg.norm(trans_aligned - target, axis=len(target.shape) - 1))
    # Return MPJPE
    return error, torch.from_numpy(trans_aligned).unsqueeze(dim=0).cuda() | […] | [185, 0] | [231, 73] | python | en | ['en', 'error', 'th'] | False
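A sanity check for the Procrustes alignment (note that the trailing .cuda() call means this only runs on a machine with a CUDA device): a rotated, scaled, and shifted copy of the target should align back to near-zero error.

import numpy as np

rng = np.random.default_rng(0)
target = rng.standard_normal((4, 17, 3))

theta = np.pi / 6
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
predicted = 0.5 * target @ R.T + np.array([1.0, -2.0, 0.3])

error, aligned = p_mpjpe(predicted, target)
print(error)  # ~0 up to numerical precision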
n_mpjpe | (predicted, target) |
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
|
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
| def n_mpjpe(predicted, target):
    """
    Normalized MPJPE (scale only), adapted from:
    https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
    """
    assert predicted.shape == target.shape  # [1, 1703, 17, 3]

    norm_predicted = torch.mean(torch.sum(predicted ** 2, dim=3, keepdim=True), dim=2, keepdim=True)
    norm_target = torch.mean(torch.sum(target * predicted, dim=3, keepdim=True), dim=2, keepdim=True)
    scale = norm_target / norm_predicted
    out = torch.mean(torch.norm((scale * predicted - target), dim=len(target.shape) - 1))
return out | [
"def",
"n_mpjpe",
"(",
"predicted",
",",
"target",
")",
":",
"assert",
"predicted",
".",
"shape",
"==",
"target",
".",
"shape",
"# [1, 1703, 17, 3]",
"norm_predicted",
"=",
"torch",
".",
"mean",
"(",
"torch",
".",
"sum",
"(",
"predicted",
"**",
"2",
",",
"dim",
"=",
"3",
",",
"keepdim",
"=",
"True",
")",
",",
"dim",
"=",
"2",
",",
"keepdim",
"=",
"True",
")",
"norm_target",
"=",
"torch",
".",
"mean",
"(",
"torch",
".",
"sum",
"(",
"target",
"*",
"predicted",
",",
"dim",
"=",
"3",
",",
"keepdim",
"=",
"True",
")",
",",
"dim",
"=",
"2",
",",
"keepdim",
"=",
"True",
")",
"scale",
"=",
"norm_target",
"/",
"norm_predicted",
"out",
"=",
"torch",
".",
"mean",
"(",
"torch",
".",
"norm",
"(",
"(",
"scale",
"*",
"predicted",
"-",
"target",
")",
",",
"dim",
"=",
"len",
"(",
"target",
".",
"shape",
")",
"-",
"1",
")",
")",
"return",
"out"
] | [
234,
0
] | [
244,
14
] | python | en | ['en', 'error', 'th'] | False |
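n_mpjpe compensates only for a global scale: the closed-form least-squares scale minimizing ||s * predicted - target||^2 is s* = mean(target . predicted) / mean(predicted . predicted), which is exactly the `norm_target / norm_predicted` ratio in the entry above. A tiny sanity check follows; the (batch, frames, joints, 3) layout is taken from the inline shape comment, and the toy tensors are illustrative only.

```python
import torch

pred = torch.tensor([[[[2.0, 0.0, 0.0]]]])    # shape (1, 1, 1, 3)
target = torch.tensor([[[[1.0, 0.0, 0.0]]]])

norm_pred = torch.mean(torch.sum(pred ** 2, dim=3, keepdim=True), dim=2, keepdim=True)
norm_tgt = torch.mean(torch.sum(target * pred, dim=3, keepdim=True), dim=2, keepdim=True)
scale = norm_tgt / norm_pred                  # optimal scale, 0.5 here

assert torch.allclose(scale * pred, target)   # rescaled prediction matches exactly
```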
mean_velocity_error | (predicted, target) |
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
|
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
| def mean_velocity_error(predicted, target):
"""
Mean per-joint velocity error (i.e. mean Euclidean distance of the 1st derivative)
"""
assert predicted.shape == target.shape
velocity_predicted = np.diff(predicted, axis=0)
velocity_target = np.diff(target, axis=0)
return np.mean(np.linalg.norm(velocity_predicted - velocity_target, axis=len(target.shape) - 1)) | [
"def",
"mean_velocity_error",
"(",
"predicted",
",",
"target",
")",
":",
"assert",
"predicted",
".",
"shape",
"==",
"target",
".",
"shape",
"velocity_predicted",
"=",
"np",
".",
"diff",
"(",
"predicted",
",",
"axis",
"=",
"0",
")",
"velocity_target",
"=",
"np",
".",
"diff",
"(",
"target",
",",
"axis",
"=",
"0",
")",
"return",
"np",
".",
"mean",
"(",
"np",
".",
"linalg",
".",
"norm",
"(",
"velocity_predicted",
"-",
"velocity_target",
",",
"axis",
"=",
"len",
"(",
"target",
".",
"shape",
")",
"-",
"1",
")",
")"
] | [
247,
0
] | [
254,
100
] | python | en | ['en', 'error', 'th'] | False |
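Because `np.diff(..., axis=0)` turns a (frames, joints, 3) trajectory into (frames-1, joints, 3) frame-to-frame velocities, this metric is simply MPJPE applied to first differences. A quick illustrative check (the shapes are assumptions, not from the dataset):

```python
import numpy as np

target = np.zeros((3, 1, 3))                             # static 1-joint pose
predicted = np.array([[[0.0, 0.0, 0.0]],
                      [[1.0, 0.0, 0.0]],
                      [[2.0, 0.0, 0.0]]])                # moves 1 unit per frame

v_pred = np.diff(predicted, axis=0)                      # both steps are (1, 0, 0)
v_tgt = np.diff(target, axis=0)                          # all zeros
print(np.mean(np.linalg.norm(v_pred - v_tgt, axis=-1)))  # -> 1.0
```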
WindowGenerator.example | (self) | Get and cache an example batch of `inputs, labels` for plotting. | Get and cache an example batch of `inputs, labels` for plotting. | def example(self):
"""Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_example', None)
if result is None:
# No example batch was found, so get one from the `.train` dataset
result = next(iter(self.train))
# And cache it for next time
self._example = result
return result | [
"def",
"example",
"(",
"self",
")",
":",
"result",
"=",
"getattr",
"(",
"self",
",",
"'_example'",
",",
"None",
")",
"if",
"result",
"is",
"None",
":",
"# No example batch was found, so get one from the `.train` dataset",
"result",
"=",
"next",
"(",
"iter",
"(",
"self",
".",
"train",
")",
")",
"# And cache it for next time",
"self",
".",
"_example",
"=",
"result",
"return",
"result"
] | [
142,
4
] | [
150,
21
] | python | en | ['en', 'en', 'en'] | True |
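The property above memoizes one training batch through a getattr/setattr pair, so repeated plotting calls reuse the same `(inputs, labels)` tuple instead of advancing the dataset iterator. A generic sketch of the same pattern (the class and dataset here are illustrative stand-ins, not part of the dataset entry):

```python
class LazyExample:
    def __init__(self, dataset):
        self.dataset = dataset                 # any iterable of batches

    @property
    def example(self):
        result = getattr(self, "_example", None)
        if result is None:
            result = next(iter(self.dataset))  # fetch one batch on first access
            self._example = result             # cache it for next time
        return result

win = LazyExample(dataset=[("inputs", "labels")])
assert win.example is win.example              # second access hits the cache
```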
datasource | () | Datasource operations | Datasource operations | def datasource():
"""Datasource operations"""
pass | [
"def",
"datasource",
"(",
")",
":",
"pass"
] | [
74,
0
] | [
76,
8
] | python | en | ['en', 'en', 'en'] | False |
datasource_new | (directory) | Add a new datasource to the data context. | Add a new datasource to the data context. | def datasource_new(directory):
"""Add a new datasource to the data context."""
context = toolkit.load_data_context_with_error_handling(directory)
datasource_name, data_source_type = add_datasource(context)
if datasource_name:
cli_message(
"A new datasource '{}' was added to your project.".format(datasource_name)
)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.new", success=True
)
else: # no datasource was created
toolkit.send_usage_message(
data_context=context, event="cli.datasource.new", success=False
)
sys.exit(1) | [
"def",
"datasource_new",
"(",
"directory",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"datasource_name",
",",
"data_source_type",
"=",
"add_datasource",
"(",
"context",
")",
"if",
"datasource_name",
":",
"cli_message",
"(",
"\"A new datasource '{}' was added to your project.\"",
".",
"format",
"(",
"datasource_name",
")",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.new\"",
",",
"success",
"=",
"True",
")",
"else",
":",
"# no datasource was created",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.new\"",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | [
86,
0
] | [
102,
19
] | python | en | ['en', 'en', 'en'] | True |
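These two entries are click constructs: `datasource` is a command group whose body is just `pass`, and `datasource_new` is registered beneath it. Here is a hedged sketch of that wiring, exercised in-process with click's test runner; the decorator options shown are illustrative assumptions, not copied from the project's configuration.

```python
import click
from click.testing import CliRunner

@click.group()
def datasource():
    """Datasource operations"""
    pass

@datasource.command(name="new")
@click.option("--directory", "-d", default=None, help="Project directory.")
def datasource_new(directory):
    """Add a new datasource to the data context."""
    click.echo(f"would add a datasource under {directory!r}")

runner = CliRunner()
result = runner.invoke(datasource, ["new", "--directory", "."])
print(result.output)    # -> would add a datasource under '.'
```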
delete_datasource | (directory, datasource) | Delete the datasource specified as an argument | Delete the datasource specified as an argument | def delete_datasource(directory, datasource):
"""Delete the datasource specified as an argument"""
context = toolkit.load_data_context_with_error_handling(directory)
try:
context.delete_datasource(datasource)
except ValueError:
cli_message(
"<red>{}</red>".format(
"Datasource {} could not be found.".format(datasource)
)
)
sys.exit(1)
try:
context.get_datasource(datasource)
except ValueError:
cli_message("<green>{}</green>".format("Datasource deleted successfully."))
sys.exit(1)
else:
cli_message("<red>{}</red>".format("Datasource not deleted."))
sys.exit(1) | [
"def",
"delete_datasource",
"(",
"directory",
",",
"datasource",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"try",
":",
"context",
".",
"delete_datasource",
"(",
"datasource",
")",
"except",
"ValueError",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"\"Datasource {} could not be found.\"",
".",
"format",
"(",
"datasource",
")",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"context",
".",
"get_datasource",
"(",
"datasource",
")",
"except",
"ValueError",
":",
"cli_message",
"(",
"\"<green>{}</green>\"",
".",
"format",
"(",
"\"Datasource deleted successfully.\"",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"\"Datasource not deleted.\"",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | [
113,
0
] | [
132,
19
] | python | en | ['en', 'en', 'en'] | True |
datasource_list | (directory) | List known datasources. | List known datasources. | def datasource_list(directory):
"""List known datasources."""
context = toolkit.load_data_context_with_error_handling(directory)
datasources = context.list_datasources()
datasource_count = len(datasources)
if datasource_count == 0:
list_intro_string = "No Datasources found"
else:
list_intro_string = _build_datasource_intro_string(datasource_count)
cli_message(list_intro_string)
for datasource in datasources:
cli_message("")
cli_message_dict(datasource)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.list", success=True
) | [
"def",
"datasource_list",
"(",
"directory",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"datasources",
"=",
"context",
".",
"list_datasources",
"(",
")",
"datasource_count",
"=",
"len",
"(",
"datasources",
")",
"if",
"datasource_count",
"==",
"0",
":",
"list_intro_string",
"=",
"\"No Datasources found\"",
"else",
":",
"list_intro_string",
"=",
"_build_datasource_intro_string",
"(",
"datasource_count",
")",
"cli_message",
"(",
"list_intro_string",
")",
"for",
"datasource",
"in",
"datasources",
":",
"cli_message",
"(",
"\"\"",
")",
"cli_message_dict",
"(",
"datasource",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.list\"",
",",
"success",
"=",
"True",
")"
] | [
142,
0
] | [
160,
5
] | python | en | ['en', 'en', 'en'] | True |
datasource_profile | (
datasource,
batch_kwargs_generator_name,
data_assets,
profile_all_data_assets,
directory,
view,
additional_batch_kwargs,
assume_yes,
) |
Profile a datasource (Experimental)
If the optional data_assets and profile_all_data_assets arguments are not specified, the profiler will check
if the number of data assets in the datasource exceeds the internally defined limit. If it does, it will
prompt the user to either specify the list of data assets to profile or to profile all.
If the limit is not exceeded, the profiler will profile all data assets in the datasource.
|
Profile a datasource (Experimental) | def datasource_profile(
datasource,
batch_kwargs_generator_name,
data_assets,
profile_all_data_assets,
directory,
view,
additional_batch_kwargs,
assume_yes,
):
"""
Profile a datasource (Experimental)
If the optional data_assets and profile_all_data_assets arguments are not specified, the profiler will check
if the number of data assets in the datasource exceeds the internally defined limit. If it does, it will
prompt the user to either specify the list of data assets to profile or to profile all.
If the limit is not exceeded, the profiler will profile all data assets in the datasource.
"""
context = toolkit.load_data_context_with_error_handling(directory)
try:
if additional_batch_kwargs is not None:
# TODO refactor out json load check in suite edit and add here
additional_batch_kwargs = json.loads(additional_batch_kwargs)
# TODO refactor batch load check in suite edit and add here
if datasource is None:
datasources = [
_datasource["name"] for _datasource in context.list_datasources()
]
if not datasources:
cli_message(NO_DATASOURCES_FOUND)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.profile", success=False
)
sys.exit(1)
elif len(datasources) > 1:
cli_message(
"<red>Error: please specify the datasource to profile. "
"Available datasources: " + ", ".join(datasources) + "</red>"
)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.profile", success=False
)
sys.exit(1)
else:
profile_datasource(
context,
datasources[0],
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
open_docs=view,
additional_batch_kwargs=additional_batch_kwargs,
skip_prompt_flag=assume_yes,
)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.profile", success=True
)
else:
profile_datasource(
context,
datasource,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
open_docs=view,
additional_batch_kwargs=additional_batch_kwargs,
skip_prompt_flag=assume_yes,
)
toolkit.send_usage_message(
data_context=context, event="cli.datasource.profile", success=True
)
except Exception as e:
toolkit.send_usage_message(
data_context=context, event="cli.datasource.profile", success=False
)
raise e | [
"def",
"datasource_profile",
"(",
"datasource",
",",
"batch_kwargs_generator_name",
",",
"data_assets",
",",
"profile_all_data_assets",
",",
"directory",
",",
"view",
",",
"additional_batch_kwargs",
",",
"assume_yes",
",",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"try",
":",
"if",
"additional_batch_kwargs",
"is",
"not",
"None",
":",
"# TODO refactor out json load check in suite edit and add here",
"additional_batch_kwargs",
"=",
"json",
".",
"loads",
"(",
"additional_batch_kwargs",
")",
"# TODO refactor batch load check in suite edit and add here",
"if",
"datasource",
"is",
"None",
":",
"datasources",
"=",
"[",
"_datasource",
"[",
"\"name\"",
"]",
"for",
"_datasource",
"in",
"context",
".",
"list_datasources",
"(",
")",
"]",
"if",
"not",
"datasources",
":",
"cli_message",
"(",
"NO_DATASOURCES_FOUND",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.profile\"",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"len",
"(",
"datasources",
")",
">",
"1",
":",
"cli_message",
"(",
"\"<red>Error: please specify the datasource to profile. \"",
"\"Available datasources: \"",
"+",
"\", \"",
".",
"join",
"(",
"datasources",
")",
"+",
"\"</red>\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.profile\"",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"profile_datasource",
"(",
"context",
",",
"datasources",
"[",
"0",
"]",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_assets",
"=",
"data_assets",
",",
"profile_all_data_assets",
"=",
"profile_all_data_assets",
",",
"open_docs",
"=",
"view",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
"skip_prompt_flag",
"=",
"assume_yes",
",",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.profile\"",
",",
"success",
"=",
"True",
")",
"else",
":",
"profile_datasource",
"(",
"context",
",",
"datasource",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_assets",
"=",
"data_assets",
",",
"profile_all_data_assets",
"=",
"profile_all_data_assets",
",",
"open_docs",
"=",
"view",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
"skip_prompt_flag",
"=",
"assume_yes",
",",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.profile\"",
",",
"success",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.datasource.profile\"",
",",
"success",
"=",
"False",
")",
"raise",
"e"
] | [
219,
0
] | [
296,
15
] | python | en | ['en', 'error', 'th'] | False |
add_datasource | (context, choose_one_data_asset=False) |
Interactive flow for adding a datasource to an existing context.
:param context:
:param choose_one_data_asset: optional - if True, this signals the method that the intent
is to let the user choose just one data asset (e.g., a file) and there is no need
to configure a batch kwargs generator that comprehensively scans the datasource for data assets
:return: a tuple: datasource_name, data_source_type
|
Interactive flow for adding a datasource to an existing context. | def add_datasource(context, choose_one_data_asset=False):
"""
Interactive flow for adding a datasource to an existing context.
:param context:
:param choose_one_data_asset: optional - if True, this signals the method that the intent
is to let the user choose just one data asset (e.g., a file) and there is no need
to configure a batch kwargs generator that comprehensively scans the datasource for data assets
:return: a tuple: datasource_name, data_source_type
"""
msg_prompt_where_is_your_data = """
What data would you like Great Expectations to connect to?
1. Files on a filesystem (for processing with Pandas or Spark)
2. Relational database (SQL)
"""
msg_prompt_files_compute_engine = """
What are you processing your files with?
1. Pandas
2. PySpark
"""
data_source_location_selection = click.prompt(
msg_prompt_where_is_your_data, type=click.Choice(["1", "2"]), show_choices=False
)
datasource_name = None
data_source_type = None
if data_source_location_selection == "1":
data_source_compute_selection = click.prompt(
msg_prompt_files_compute_engine,
type=click.Choice(["1", "2"]),
show_choices=False,
)
if data_source_compute_selection == "1": # pandas
data_source_type = DatasourceTypes.PANDAS
datasource_name = _add_pandas_datasource(
context, passthrough_generator_only=choose_one_data_asset
)
elif data_source_compute_selection == "2": # Spark
data_source_type = DatasourceTypes.SPARK
datasource_name = _add_spark_datasource(
context, passthrough_generator_only=choose_one_data_asset
)
else:
data_source_type = DatasourceTypes.SQL
datasource_name = _add_sqlalchemy_datasource(context)
return datasource_name, data_source_type | [
"def",
"add_datasource",
"(",
"context",
",",
"choose_one_data_asset",
"=",
"False",
")",
":",
"msg_prompt_where_is_your_data",
"=",
"\"\"\"\nWhat data would you like Great Expectations to connect to?\n 1. Files on a filesystem (for processing with Pandas or Spark)\n 2. Relational database (SQL)\n\"\"\"",
"msg_prompt_files_compute_engine",
"=",
"\"\"\"\nWhat are you processing your files with?\n 1. Pandas\n 2. PySpark\n\"\"\"",
"data_source_location_selection",
"=",
"click",
".",
"prompt",
"(",
"msg_prompt_where_is_your_data",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
"]",
")",
",",
"show_choices",
"=",
"False",
")",
"datasource_name",
"=",
"None",
"data_source_type",
"=",
"None",
"if",
"data_source_location_selection",
"==",
"\"1\"",
":",
"data_source_compute_selection",
"=",
"click",
".",
"prompt",
"(",
"msg_prompt_files_compute_engine",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
"]",
")",
",",
"show_choices",
"=",
"False",
",",
")",
"if",
"data_source_compute_selection",
"==",
"\"1\"",
":",
"# pandas",
"data_source_type",
"=",
"DatasourceTypes",
".",
"PANDAS",
"datasource_name",
"=",
"_add_pandas_datasource",
"(",
"context",
",",
"passthrough_generator_only",
"=",
"choose_one_data_asset",
")",
"elif",
"data_source_compute_selection",
"==",
"\"2\"",
":",
"# Spark",
"data_source_type",
"=",
"DatasourceTypes",
".",
"SPARK",
"datasource_name",
"=",
"_add_spark_datasource",
"(",
"context",
",",
"passthrough_generator_only",
"=",
"choose_one_data_asset",
")",
"else",
":",
"data_source_type",
"=",
"DatasourceTypes",
".",
"SQL",
"datasource_name",
"=",
"_add_sqlalchemy_datasource",
"(",
"context",
")",
"return",
"datasource_name",
",",
"data_source_type"
] | [
299,
0
] | [
355,
44
] | python | en | ['en', 'error', 'th'] | False |
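The interactive menus in `add_datasource` rely on `click.prompt` with a `click.Choice` type and `show_choices=False`, so the numbered options live in the prompt text itself while click still validates (and re-asks on) invalid answers. A minimal standalone version of that idiom, with an abbreviated illustrative message:

```python
import click

msg = """
What data would you like Great Expectations to connect to?
    1. Files on a filesystem (for processing with Pandas or Spark)
    2. Relational database (SQL)
"""

# click rejects anything outside the Choice values and prompts again.
selection = click.prompt(msg, type=click.Choice(["1", "2"]), show_choices=False)
print("picked option", selection)
```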
_should_hide_input | () |
This is a workaround to help identify Windows and adjust the prompts accordingly
since hidden prompts may freeze in certain Windows terminals
|
This is a workaround to help identify Windows and adjust the prompts accordingly
since hidden prompts may freeze in certain Windows terminals
| def _should_hide_input():
"""
This is a workaround to help identify Windows and adjust the prompts accordingly
since hidden prompts may freeze in certain Windows terminals
"""
if "windows" in platform.platform().lower():
return False
return True | [
"def",
"_should_hide_input",
"(",
")",
":",
"if",
"\"windows\"",
"in",
"platform",
".",
"platform",
"(",
")",
".",
"lower",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | [
593,
0
] | [
600,
15
] | python | en | ['en', 'error', 'th'] | False |
get_batch_kwargs | (
context,
datasource_name=None,
batch_kwargs_generator_name=None,
data_asset_name=None,
additional_batch_kwargs=None,
) |
This method manages the interaction with the user necessary to obtain batch_kwargs for a batch of a data asset.
In order to get batch_kwargs this method needs datasource_name, batch_kwargs_generator_name and data_asset_name
to combine them into a fully-qualified data asset identifier (datasource_name/batch_kwargs_generator_name/data_asset_name).
All three arguments are optional. If they are present, the method uses their values. Otherwise, the method
prompts the user to enter them interactively. Since it is possible for any of these three components to be
passed to this method as empty values and to get their values after interacting with the user, this method
returns these components' values in case they changed.
If the datasource has batch_kwargs_generators that can list available data asset names, the method lets the user choose a name
from that list (note: if there are multiple batch_kwargs_generators, the user has to choose one first). If a name known to
the chosen batch_kwargs_generator is selected, that generator will be able to yield batch_kwargs. The method also gives the user
an alternative to selecting the data asset name from the generator's list - the user can type in a name for their
data asset. In this case a passthrough batch kwargs generator will be used to construct a fully-qualified data asset
identifier (note: if the datasource has no passthrough batch kwargs generator configured, the method will exit with a failure).
Since no batch_kwargs_generator can yield batch_kwargs for this data asset name, the method prompts the user to specify batch_kwargs
by choosing a file (if the datasource is pandas or spark) or by writing a SQL query (if the datasource points
to a database).
:param context:
:param datasource_name:
:param batch_kwargs_generator_name:
:param data_asset_name:
:param additional_batch_kwargs:
:return: a tuple: (datasource_name, batch_kwargs_generator_name, data_asset_name, batch_kwargs). The components
of the tuple were passed into the method as optional arguments, but their values might
have changed after this method's execution. If the returned batch_kwargs is None, it means
that the batch_kwargs_generator will know to yield batch_kwargs when called.
|
This method manages the interaction with the user necessary to obtain batch_kwargs for a batch of a data asset. | def get_batch_kwargs(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
data_asset_name=None,
additional_batch_kwargs=None,
):
"""
This method manages the interaction with the user necessary to obtain batch_kwargs for a batch of a data asset.
In order to get batch_kwargs this method needs datasource_name, batch_kwargs_generator_name and data_asset_name
to combine them into a fully-qualified data asset identifier (datasource_name/batch_kwargs_generator_name/data_asset_name).
All three arguments are optional. If they are present, the method uses their values. Otherwise, the method
prompts the user to enter them interactively. Since it is possible for any of these three components to be
passed to this method as empty values and to get their values after interacting with the user, this method
returns these components' values in case they changed.
If the datasource has batch_kwargs_generators that can list available data asset names, the method lets the user choose a name
from that list (note: if there are multiple batch_kwargs_generators, the user has to choose one first). If a name known to
the chosen batch_kwargs_generator is selected, that generator will be able to yield batch_kwargs. The method also gives the user
an alternative to selecting the data asset name from the generator's list - the user can type in a name for their
data asset. In this case a passthrough batch kwargs generator will be used to construct a fully-qualified data asset
identifier (note: if the datasource has no passthrough batch kwargs generator configured, the method will exit with a failure).
Since no batch_kwargs_generator can yield batch_kwargs for this data asset name, the method prompts the user to specify batch_kwargs
by choosing a file (if the datasource is pandas or spark) or by writing a SQL query (if the datasource points
to a database).
:param context:
:param datasource_name:
:param batch_kwargs_generator_name:
:param data_asset_name:
:param additional_batch_kwargs:
:return: a tuple: (datasource_name, batch_kwargs_generator_name, data_asset_name, batch_kwargs). The components
of the tuple were passed into the method as optional arguments, but their values might
have changed after this method's execution. If the returned batch_kwargs is None, it means
that the batch_kwargs_generator will know to yield batch_kwargs when called.
"""
try:
available_data_assets_dict = context.get_available_data_asset_names(
datasource_names=datasource_name
)
except ValueError:
# the datasource has no batch_kwargs_generators
available_data_assets_dict = {datasource_name: {}}
data_source = toolkit.select_datasource(context, datasource_name=datasource_name)
datasource_name = data_source.name
if batch_kwargs_generator_name is None:
batch_kwargs_generator_name = select_batch_kwargs_generator(
context,
datasource_name,
available_data_assets_dict=available_data_assets_dict,
)
# if the user provided us with the batch kwargs generator name and the data asset, we have everything we need -
# let's ask the generator to build batch kwargs for this asset - we are done.
if batch_kwargs_generator_name is not None and data_asset_name is not None:
generator = data_source.get_batch_kwargs_generator(batch_kwargs_generator_name)
batch_kwargs = generator.build_batch_kwargs(
data_asset_name, **additional_batch_kwargs
)
return batch_kwargs
if isinstance(
context.get_datasource(datasource_name), (PandasDatasource, SparkDFDatasource)
):
(
data_asset_name,
batch_kwargs,
) = _get_batch_kwargs_from_generator_or_from_file_path(
context,
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
)
elif isinstance(context.get_datasource(datasource_name), SqlAlchemyDatasource):
data_asset_name, batch_kwargs = _get_batch_kwargs_for_sqlalchemy_datasource(
context, datasource_name, additional_batch_kwargs=additional_batch_kwargs
)
else:
raise ge_exceptions.DataContextError(
"Datasource {:s} is expected to be a PandasDatasource or SparkDFDatasource, but is {:s}".format(
datasource_name, str(type(context.get_datasource(datasource_name)))
)
)
return (datasource_name, batch_kwargs_generator_name, data_asset_name, batch_kwargs) | [
"def",
"get_batch_kwargs",
"(",
"context",
",",
"datasource_name",
"=",
"None",
",",
"batch_kwargs_generator_name",
"=",
"None",
",",
"data_asset_name",
"=",
"None",
",",
"additional_batch_kwargs",
"=",
"None",
",",
")",
":",
"try",
":",
"available_data_assets_dict",
"=",
"context",
".",
"get_available_data_asset_names",
"(",
"datasource_names",
"=",
"datasource_name",
")",
"except",
"ValueError",
":",
"# the datasource has no batch_kwargs_generators",
"available_data_assets_dict",
"=",
"{",
"datasource_name",
":",
"{",
"}",
"}",
"data_source",
"=",
"toolkit",
".",
"select_datasource",
"(",
"context",
",",
"datasource_name",
"=",
"datasource_name",
")",
"datasource_name",
"=",
"data_source",
".",
"name",
"if",
"batch_kwargs_generator_name",
"is",
"None",
":",
"batch_kwargs_generator_name",
"=",
"select_batch_kwargs_generator",
"(",
"context",
",",
"datasource_name",
",",
"available_data_assets_dict",
"=",
"available_data_assets_dict",
",",
")",
"# if the user provided us with the batch kwargs generator name and the data asset, we have everything we need -",
"# let's ask the generator to build batch kwargs for this asset - we are done.",
"if",
"batch_kwargs_generator_name",
"is",
"not",
"None",
"and",
"data_asset_name",
"is",
"not",
"None",
":",
"generator",
"=",
"data_source",
".",
"get_batch_kwargs_generator",
"(",
"batch_kwargs_generator_name",
")",
"batch_kwargs",
"=",
"generator",
".",
"build_batch_kwargs",
"(",
"data_asset_name",
",",
"*",
"*",
"additional_batch_kwargs",
")",
"return",
"batch_kwargs",
"if",
"isinstance",
"(",
"context",
".",
"get_datasource",
"(",
"datasource_name",
")",
",",
"(",
"PandasDatasource",
",",
"SparkDFDatasource",
")",
")",
":",
"(",
"data_asset_name",
",",
"batch_kwargs",
",",
")",
"=",
"_get_batch_kwargs_from_generator_or_from_file_path",
"(",
"context",
",",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
")",
"elif",
"isinstance",
"(",
"context",
".",
"get_datasource",
"(",
"datasource_name",
")",
",",
"SqlAlchemyDatasource",
")",
":",
"data_asset_name",
",",
"batch_kwargs",
"=",
"_get_batch_kwargs_for_sqlalchemy_datasource",
"(",
"context",
",",
"datasource_name",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
")",
"else",
":",
"raise",
"ge_exceptions",
".",
"DataContextError",
"(",
"\"Datasource {:s} is expected to be a PandasDatasource or SparkDFDatasource, but is {:s}\"",
".",
"format",
"(",
"datasource_name",
",",
"str",
"(",
"type",
"(",
"context",
".",
"get_datasource",
"(",
"datasource_name",
")",
")",
")",
")",
")",
"return",
"(",
"datasource_name",
",",
"batch_kwargs_generator_name",
",",
"data_asset_name",
",",
"batch_kwargs",
")"
] | [
961,
0
] | [
1049,
88
] | python | en | ['en', 'error', 'th'] | False |
profile_datasource | (
context,
datasource_name,
batch_kwargs_generator_name=None,
data_assets=None,
profile_all_data_assets=False,
max_data_assets=20,
additional_batch_kwargs=None,
open_docs=False,
skip_prompt_flag=False,
) | Profile a named datasource using the specified context | Profile a named datasource using the specified context | def profile_datasource(
context,
datasource_name,
batch_kwargs_generator_name=None,
data_assets=None,
profile_all_data_assets=False,
max_data_assets=20,
additional_batch_kwargs=None,
open_docs=False,
skip_prompt_flag=False,
):
"""Profile a named datasource using the specified context"""
# Note we are explicitly not using a logger in all CLI output to have
# more control over console UI.
logging.getLogger("great_expectations.profile.basic_dataset_profiler").setLevel(
logging.INFO
)
msg_intro = "Profiling '{0:s}' will create expectations and documentation."
msg_confirm_ok_to_proceed = """Would you like to profile '{0:s}'?"""
msg_skipping = (
"Skipping profiling for now. You can always do this later "
"by running `<green>great_expectations datasource profile</green>`."
)
msg_some_data_assets_not_found = """Some of the data assets you specified were not found: {0:s}
"""
msg_too_many_data_assets = """There are {0:d} data assets in {1:s}. Profiling all of them might take too long.
"""
msg_error_multiple_generators_found = """<red>More than one batch kwargs generator found in datasource {0:s}.
Specify the one you want the profiler to use in batch_kwargs_generator_name argument.</red>
"""
msg_error_no_generators_found = """<red>No batch kwargs generators can list available data assets in datasource
{0:s}. The datasource might be empty or a batch kwargs generator not configured in the config file.</red>
"""
msg_prompt_enter_data_asset_list = """Enter comma-separated list of data asset names (e.g., {0:s})
"""
msg_options = """Choose how to proceed:
1. Specify a list of the data assets to profile
2. Exit and profile later
3. Profile ALL data assets (this might take a while)
"""
msg_data_doc_intro = """
<cyan>========== Data Docs ==========</cyan>
Great Expectations is building Data Docs from the data you just profiled!"""
cli_message(msg_intro.format(datasource_name))
if data_assets:
data_assets = [item.strip() for item in data_assets.split(",")]
# Call the data context's profiling method to check if the arguments are valid
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=True,
additional_batch_kwargs=additional_batch_kwargs,
)
if (
profiling_results["success"] is True
): # data context is ready to profile - run profiling
if (
data_assets
or profile_all_data_assets
or skip_prompt_message(
skip_prompt_flag, msg_confirm_ok_to_proceed.format(datasource_name)
)
):
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=False,
additional_batch_kwargs=additional_batch_kwargs,
)
else:
cli_message(msg_skipping)
return
else: # we need to get arguments from user interactively
do_exit = False
while not do_exit:
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
cli_message(
msg_some_data_assets_not_found.format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_TOO_MANY_DATA_ASSETS
):
cli_message(
msg_too_many_data_assets.format(
profiling_results["error"]["num_data_assets"], datasource_name
)
)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_MULTIPLE_BATCH_KWARGS_GENERATORS_FOUND
):
cli_message(msg_error_multiple_generators_found.format(datasource_name))
sys.exit(1)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_NO_BATCH_KWARGS_GENERATORS_FOUND
):
cli_message(msg_error_no_generators_found.format(datasource_name))
sys.exit(1)
else: # unknown error
raise ValueError(
"Unknown profiling error code: "
+ profiling_results["error"]["code"]
)
option_selection = click.prompt(
msg_options, type=click.Choice(["1", "2", "3"]), show_choices=False
)
if option_selection == "1":
data_assets = click.prompt(
msg_prompt_enter_data_asset_list.format(
", ".join(
[
data_asset[0]
for data_asset in profiling_results["error"][
"data_assets"
]
][:3]
)
),
show_default=False,
)
if data_assets:
data_assets = [item.strip() for item in data_assets.split(",")]
elif option_selection == "3":
profile_all_data_assets = True
data_assets = None
elif option_selection == "2": # skip
cli_message(msg_skipping)
return
else:
raise ValueError("Unrecognized option: " + option_selection)
# after getting the arguments from the user, let's try to run profiling again
# (no dry run this time)
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=False,
additional_batch_kwargs=additional_batch_kwargs,
)
if profiling_results["success"]: # data context is ready to profile
break
cli_message(msg_data_doc_intro.format(rtd_url_ge_version))
build_docs(context, view=open_docs, assume_yes=skip_prompt_flag)
if open_docs: # This is mostly to keep tests from spawning windows
context.open_data_docs() | [
"def",
"profile_datasource",
"(",
"context",
",",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"None",
",",
"data_assets",
"=",
"None",
",",
"profile_all_data_assets",
"=",
"False",
",",
"max_data_assets",
"=",
"20",
",",
"additional_batch_kwargs",
"=",
"None",
",",
"open_docs",
"=",
"False",
",",
"skip_prompt_flag",
"=",
"False",
",",
")",
":",
"# Note we are explicitly not using a logger in all CLI output to have",
"# more control over console UI.",
"logging",
".",
"getLogger",
"(",
"\"great_expectations.profile.basic_dataset_profiler\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"msg_intro",
"=",
"\"Profiling '{0:s}' will create expectations and documentation.\"",
"msg_confirm_ok_to_proceed",
"=",
"\"\"\"Would you like to profile '{0:s}'?\"\"\"",
"msg_skipping",
"=",
"(",
"\"Skipping profiling for now. You can always do this later \"",
"\"by running `<green>great_expectations datasource profile</green>`.\"",
")",
"msg_some_data_assets_not_found",
"=",
"\"\"\"Some of the data assets you specified were not found: {0:s}\n\"\"\"",
"msg_too_many_data_assets",
"=",
"\"\"\"There are {0:d} data assets in {1:s}. Profiling all of them might take too long.\n\"\"\"",
"msg_error_multiple_generators_found",
"=",
"\"\"\"<red>More than one batch kwargs generator found in datasource {0:s}.\nSpecify the one you want the profiler to use in batch_kwargs_generator_name argument.</red>\n\"\"\"",
"msg_error_no_generators_found",
"=",
"\"\"\"<red>No batch kwargs generators can list available data assets in datasource\n {0:s}. The datasource might be empty or a batch kwargs generator not configured in the config file.</red>\n\"\"\"",
"msg_prompt_enter_data_asset_list",
"=",
"\"\"\"Enter comma-separated list of data asset names (e.g., {0:s})\n\"\"\"",
"msg_options",
"=",
"\"\"\"Choose how to proceed:\n 1. Specify a list of the data assets to profile\n 2. Exit and profile later\n 3. Profile ALL data assets (this might take a while)\n\"\"\"",
"msg_data_doc_intro",
"=",
"\"\"\"\n<cyan>========== Data Docs ==========</cyan>\n\nGreat Expectations is building Data Docs from the data you just profiled!\"\"\"",
"cli_message",
"(",
"msg_intro",
".",
"format",
"(",
"datasource_name",
")",
")",
"if",
"data_assets",
":",
"data_assets",
"=",
"[",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"data_assets",
".",
"split",
"(",
"\",\"",
")",
"]",
"# Call the data context's profiling method to check if the arguments are valid",
"profiling_results",
"=",
"context",
".",
"profile_datasource",
"(",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_assets",
"=",
"data_assets",
",",
"profile_all_data_assets",
"=",
"profile_all_data_assets",
",",
"max_data_assets",
"=",
"max_data_assets",
",",
"dry_run",
"=",
"True",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
")",
"if",
"(",
"profiling_results",
"[",
"\"success\"",
"]",
"is",
"True",
")",
":",
"# data context is ready to profile - run profiling",
"if",
"(",
"data_assets",
"or",
"profile_all_data_assets",
"or",
"skip_prompt_message",
"(",
"skip_prompt_flag",
",",
"msg_confirm_ok_to_proceed",
".",
"format",
"(",
"datasource_name",
")",
")",
")",
":",
"profiling_results",
"=",
"context",
".",
"profile_datasource",
"(",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_assets",
"=",
"data_assets",
",",
"profile_all_data_assets",
"=",
"profile_all_data_assets",
",",
"max_data_assets",
"=",
"max_data_assets",
",",
"dry_run",
"=",
"False",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
")",
"else",
":",
"cli_message",
"(",
"msg_skipping",
")",
"return",
"else",
":",
"# we need to get arguments from user interactively",
"do_exit",
"=",
"False",
"while",
"not",
"do_exit",
":",
"if",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"code\"",
"]",
"==",
"DataContext",
".",
"PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND",
")",
":",
"cli_message",
"(",
"msg_some_data_assets_not_found",
".",
"format",
"(",
"\",\"",
".",
"join",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"not_found_data_assets\"",
"]",
")",
")",
")",
"elif",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"code\"",
"]",
"==",
"DataContext",
".",
"PROFILING_ERROR_CODE_TOO_MANY_DATA_ASSETS",
")",
":",
"cli_message",
"(",
"msg_too_many_data_assets",
".",
"format",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"num_data_assets\"",
"]",
",",
"datasource_name",
")",
")",
"elif",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"code\"",
"]",
"==",
"DataContext",
".",
"PROFILING_ERROR_CODE_MULTIPLE_BATCH_KWARGS_GENERATORS_FOUND",
")",
":",
"cli_message",
"(",
"msg_error_multiple_generators_found",
".",
"format",
"(",
"datasource_name",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"elif",
"(",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"code\"",
"]",
"==",
"DataContext",
".",
"PROFILING_ERROR_CODE_NO_BATCH_KWARGS_GENERATORS_FOUND",
")",
":",
"cli_message",
"(",
"msg_error_no_generators_found",
".",
"format",
"(",
"datasource_name",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"else",
":",
"# unknown error",
"raise",
"ValueError",
"(",
"\"Unknown profiling error code: \"",
"+",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"code\"",
"]",
")",
"option_selection",
"=",
"click",
".",
"prompt",
"(",
"msg_options",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"\"1\"",
",",
"\"2\"",
",",
"\"3\"",
"]",
")",
",",
"show_choices",
"=",
"False",
")",
"if",
"option_selection",
"==",
"\"1\"",
":",
"data_assets",
"=",
"click",
".",
"prompt",
"(",
"msg_prompt_enter_data_asset_list",
".",
"format",
"(",
"\", \"",
".",
"join",
"(",
"[",
"data_asset",
"[",
"0",
"]",
"for",
"data_asset",
"in",
"profiling_results",
"[",
"\"error\"",
"]",
"[",
"\"data_assets\"",
"]",
"]",
"[",
":",
"3",
"]",
")",
")",
",",
"show_default",
"=",
"False",
",",
")",
"if",
"data_assets",
":",
"data_assets",
"=",
"[",
"item",
".",
"strip",
"(",
")",
"for",
"item",
"in",
"data_assets",
".",
"split",
"(",
"\",\"",
")",
"]",
"elif",
"option_selection",
"==",
"\"3\"",
":",
"profile_all_data_assets",
"=",
"True",
"data_assets",
"=",
"None",
"elif",
"option_selection",
"==",
"\"2\"",
":",
"# skip",
"cli_message",
"(",
"msg_skipping",
")",
"return",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized option: \"",
"+",
"option_selection",
")",
"# after getting the arguments from the user, let's try to run profiling again",
"# (no dry run this time)",
"profiling_results",
"=",
"context",
".",
"profile_datasource",
"(",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_assets",
"=",
"data_assets",
",",
"profile_all_data_assets",
"=",
"profile_all_data_assets",
",",
"max_data_assets",
"=",
"max_data_assets",
",",
"dry_run",
"=",
"False",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
")",
"if",
"profiling_results",
"[",
"\"success\"",
"]",
":",
"# data context is ready to profile",
"break",
"cli_message",
"(",
"msg_data_doc_intro",
".",
"format",
"(",
"rtd_url_ge_version",
")",
")",
"build_docs",
"(",
"context",
",",
"view",
"=",
"open_docs",
",",
"assume_yes",
"=",
"skip_prompt_flag",
")",
"if",
"open_docs",
":",
"# This is mostly to keep tests from spawning windows",
"context",
".",
"open_data_docs",
"(",
")"
] | [
1438,
0
] | [
1616,
32
] | python | en | ['en', 'en', 'en'] | True |
_fn_matches | (fn, glob) | Return whether the supplied file name fn matches pattern filename. | Return whether the supplied file name fn matches pattern filename. | def _fn_matches(fn, glob):
"""Return whether the supplied file name fn matches pattern filename."""
if glob not in _pattern_cache:
pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
return pattern.match(fn)
return _pattern_cache[glob].match(fn) | [
"def",
"_fn_matches",
"(",
"fn",
",",
"glob",
")",
":",
"if",
"glob",
"not",
"in",
"_pattern_cache",
":",
"pattern",
"=",
"_pattern_cache",
"[",
"glob",
"]",
"=",
"re",
".",
"compile",
"(",
"fnmatch",
".",
"translate",
"(",
"glob",
")",
")",
"return",
"pattern",
".",
"match",
"(",
"fn",
")",
"return",
"_pattern_cache",
"[",
"glob",
"]",
".",
"match",
"(",
"fn",
")"
] | [
30,
0
] | [
35,
41
] | python | en | ['en', 'en', 'en'] | True |
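`_fn_matches` memoizes compiled glob patterns: `fnmatch.translate` converts a shell glob into an anchored regex once, and every later call with the same glob reuses the cached compiled pattern. A standalone check of that behavior:

```python
import fnmatch
import re

_pattern_cache = {}

def fn_matches(fn, glob):
    # Translate the glob to a regex once, then reuse the compiled pattern.
    if glob not in _pattern_cache:
        _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return _pattern_cache[glob].match(fn)

print(bool(fn_matches("example.py", "*.py")))   # True
print(bool(fn_matches("example.pyc", "*.py")))  # False - the regex is anchored
```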
_load_lexers | (module_name) | Load a lexer (and all others in the module too). | Load a lexer (and all others in the module too). | def _load_lexers(module_name):
"""Load a lexer (and all others in the module too)."""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls | [
"def",
"_load_lexers",
"(",
"module_name",
")",
":",
"mod",
"=",
"__import__",
"(",
"module_name",
",",
"None",
",",
"None",
",",
"[",
"'__all__'",
"]",
")",
"for",
"lexer_name",
"in",
"mod",
".",
"__all__",
":",
"cls",
"=",
"getattr",
"(",
"mod",
",",
"lexer_name",
")",
"_lexer_cache",
"[",
"cls",
".",
"name",
"]",
"=",
"cls"
] | [
38,
0
] | [
43,
36
] | python | en | ['en', 'en', 'en'] | True |
get_all_lexers | () | Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
| Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
| def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
for item in itervalues(LEXERS):
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes | [
"def",
"get_all_lexers",
"(",
")",
":",
"for",
"item",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"yield",
"item",
"[",
"1",
":",
"]",
"for",
"lexer",
"in",
"find_plugin_lexers",
"(",
")",
":",
"yield",
"lexer",
".",
"name",
",",
"lexer",
".",
"aliases",
",",
"lexer",
".",
"filenames",
",",
"lexer",
".",
"mimetypes"
] | [
46,
0
] | [
53,
73
] | python | en | ['en', 'en', 'en'] | True |
find_lexer_class | (name) | Lookup a lexer class by name.
Return None if not found.
| Lookup a lexer class by name. | def find_lexer_class(name):
"""Lookup a lexer class by name.
Return None if not found.
"""
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
for module_name, lname, aliases, _, _ in itervalues(LEXERS):
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if cls.name == name:
return cls | [
"def",
"find_lexer_class",
"(",
"name",
")",
":",
"if",
"name",
"in",
"_lexer_cache",
":",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# lookup builtin lexers",
"for",
"module_name",
",",
"lname",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"name",
"==",
"lname",
":",
"_load_lexers",
"(",
"module_name",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"# continue with lexers from setuptools entrypoints",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"cls",
".",
"name",
"==",
"name",
":",
"return",
"cls"
] | [
56,
0
] | [
71,
22
] | python | en | ['en', 'en', 'en'] | True |
get_lexer_by_name | (_alias, **options) | Get a lexer by an alias.
Raises ClassNotFound if not found.
| Get a lexer by an alias. | def get_lexer_by_name(_alias, **options):
"""Get a lexer by an alias.
Raises ClassNotFound if not found.
"""
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
for module_name, name, aliases, _, _ in itervalues(LEXERS):
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
return _lexer_cache[name](**options)
# continue with lexers from setuptools entrypoints
for cls in find_plugin_lexers():
if _alias.lower() in cls.aliases:
return cls(**options)
raise ClassNotFound('no lexer for alias %r found' % _alias) | [
"def",
"get_lexer_by_name",
"(",
"_alias",
",",
"*",
"*",
"options",
")",
":",
"if",
"not",
"_alias",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer for alias %r found'",
"%",
"_alias",
")",
"# lookup builtin lexers",
"for",
"module_name",
",",
"name",
",",
"aliases",
",",
"_",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"_alias",
".",
"lower",
"(",
")",
"in",
"aliases",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"module_name",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"(",
"*",
"*",
"options",
")",
"# continue with lexers from setuptools entrypoints",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"_alias",
".",
"lower",
"(",
")",
"in",
"cls",
".",
"aliases",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"raise",
"ClassNotFound",
"(",
"'no lexer for alias %r found'",
"%",
"_alias",
")"
] | [
74,
0
] | [
92,
63
] | python | en | ['en', 'gd', 'en'] | True |
find_lexer_class_for_filename | (_fn, code=None) | Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
| Get a lexer for a filename. | def find_lexer_class_for_filename(_fn, code=None):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Returns None if not found.
"""
matches = []
fn = basename(_fn)
for modname, name, _, filenames, _ in itervalues(LEXERS):
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
_load_lexers(modname)
matches.append((_lexer_cache[name], filename))
for cls in find_plugin_lexers():
for filename in cls.filenames:
if _fn_matches(fn, filename):
matches.append((cls, filename))
if sys.version_info > (3,) and isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
def get_rating(info):
cls, filename = info
# explicit patterns get a bonus
bonus = '*' not in filename and 0.5 or 0
# The class _always_ defines analyse_text because it's included in
# the Lexer class. The default implementation returns None which
# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
# to find lexers which need it overridden.
if code:
return cls.analyse_text(code) + bonus
return cls.priority + bonus
if matches:
matches.sort(key=get_rating)
# print "Possible lexers, after sort:", matches
return matches[-1][0] | [
"def",
"find_lexer_class_for_filename",
"(",
"_fn",
",",
"code",
"=",
"None",
")",
":",
"matches",
"=",
"[",
"]",
"fn",
"=",
"basename",
"(",
"_fn",
")",
"for",
"modname",
",",
"name",
",",
"_",
",",
"filenames",
",",
"_",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"modname",
")",
"matches",
".",
"append",
"(",
"(",
"_lexer_cache",
"[",
"name",
"]",
",",
"filename",
")",
")",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"for",
"filename",
"in",
"cls",
".",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"matches",
".",
"append",
"(",
"(",
"cls",
",",
"filename",
")",
")",
"if",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
")",
"and",
"isinstance",
"(",
"code",
",",
"bytes",
")",
":",
"# decode it, since all analyse_text functions expect unicode",
"code",
"=",
"guess_decode",
"(",
"code",
")",
"def",
"get_rating",
"(",
"info",
")",
":",
"cls",
",",
"filename",
"=",
"info",
"# explicit patterns get a bonus",
"bonus",
"=",
"'*'",
"not",
"in",
"filename",
"and",
"0.5",
"or",
"0",
"# The class _always_ defines analyse_text because it's included in",
"# the Lexer class. The default implementation returns None which",
"# gets turned into 0.0. Run scripts/detect_missing_analyse_text.py",
"# to find lexers which need it overridden.",
"if",
"code",
":",
"return",
"cls",
".",
"analyse_text",
"(",
"code",
")",
"+",
"bonus",
"return",
"cls",
".",
"priority",
"+",
"bonus",
"if",
"matches",
":",
"matches",
".",
"sort",
"(",
"key",
"=",
"get_rating",
")",
"# print \"Possible lexers, after sort:\", matches",
"return",
"matches",
"[",
"-",
"1",
"]",
"[",
"0",
"]"
] | [
95,
0
] | [
135,
29
] | python | en | ['en', 'pt', 'en'] | True |
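The nested `get_rating` helper uses the legacy `cond and a or b` ternary (safe here only because `0.5` is truthy), and candidates are then sorted ascending by a (score, primary-pattern flag, priority, class name) tuple, so `matches[-1]` is the best lexer. The tie-breaking can be demonstrated with plain tuples:

```python
# (analyse score, is primary filename pattern, priority, class name);
# Python compares tuples element-wise, and True sorts above False.
candidates = [
    (0.5, False, 0.0, "DiffLexer"),
    (0.5, True, 0.0, "PropertiesLexer"),
    (0.0, True, 0.0, "IniLexer"),
]
candidates.sort()
print(candidates[-1][3])   # PropertiesLexer: same score, but a primary pattern
```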
get_lexer_for_filename | (_fn, code=None, **options) | Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
| Get a lexer for a filename. | def get_lexer_for_filename(_fn, code=None, **options):
"""Get a lexer for a filename.
If multiple lexers match the filename pattern, use ``analyse_text()`` to
figure out which one is more appropriate.
Raises ClassNotFound if not found.
"""
res = find_lexer_class_for_filename(_fn, code)
if not res:
raise ClassNotFound('no lexer for filename %r found' % _fn)
return res(**options) | [
"def",
"get_lexer_for_filename",
"(",
"_fn",
",",
"code",
"=",
"None",
",",
"*",
"*",
"options",
")",
":",
"res",
"=",
"find_lexer_class_for_filename",
"(",
"_fn",
",",
"code",
")",
"if",
"not",
"res",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer for filename %r found'",
"%",
"_fn",
")",
"return",
"res",
"(",
"*",
"*",
"options",
")"
] | [
138,
0
] | [
149,
25
] | python | en | ['en', 'pt', 'en'] | True |
get_lexer_for_mimetype | (_mime, **options) | Get a lexer for a mimetype.
Raises ClassNotFound if not found.
| Get a lexer for a mimetype. | def get_lexer_for_mimetype(_mime, **options):
"""Get a lexer for a mimetype.
Raises ClassNotFound if not found.
"""
for modname, name, _, _, mimetypes in itervalues(LEXERS):
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
return _lexer_cache[name](**options)
for cls in find_plugin_lexers():
if _mime in cls.mimetypes:
return cls(**options)
raise ClassNotFound('no lexer for mimetype %r found' % _mime) | [
"def",
"get_lexer_for_mimetype",
"(",
"_mime",
",",
"*",
"*",
"options",
")",
":",
"for",
"modname",
",",
"name",
",",
"_",
",",
"_",
",",
"mimetypes",
"in",
"itervalues",
"(",
"LEXERS",
")",
":",
"if",
"_mime",
"in",
"mimetypes",
":",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"modname",
")",
"return",
"_lexer_cache",
"[",
"name",
"]",
"(",
"*",
"*",
"options",
")",
"for",
"cls",
"in",
"find_plugin_lexers",
"(",
")",
":",
"if",
"_mime",
"in",
"cls",
".",
"mimetypes",
":",
"return",
"cls",
"(",
"*",
"*",
"options",
")",
"raise",
"ClassNotFound",
"(",
"'no lexer for mimetype %r found'",
"%",
"_mime",
")"
] | [
152,
0
] | [
165,
65
] | python | en | ['en', 'en', 'en'] | True |
_iter_lexerclasses | (plugins=True) | Return an iterator over all lexer classes. | Return an iterator over all lexer classes. | def _iter_lexerclasses(plugins=True):
"""Return an iterator over all lexer classes."""
for key in sorted(LEXERS):
module_name, name = LEXERS[key][:2]
if name not in _lexer_cache:
_load_lexers(module_name)
yield _lexer_cache[name]
if plugins:
for lexer in find_plugin_lexers():
yield lexer | [
"def",
"_iter_lexerclasses",
"(",
"plugins",
"=",
"True",
")",
":",
"for",
"key",
"in",
"sorted",
"(",
"LEXERS",
")",
":",
"module_name",
",",
"name",
"=",
"LEXERS",
"[",
"key",
"]",
"[",
":",
"2",
"]",
"if",
"name",
"not",
"in",
"_lexer_cache",
":",
"_load_lexers",
"(",
"module_name",
")",
"yield",
"_lexer_cache",
"[",
"name",
"]",
"if",
"plugins",
":",
"for",
"lexer",
"in",
"find_plugin_lexers",
"(",
")",
":",
"yield",
"lexer"
] | [
168,
0
] | [
177,
23
] | python | en | ['en', 'en', 'en'] | True |
guess_lexer_for_filename | (_fn, _text, **options) |
Lookup all lexers that handle the given filename as a primary (``filenames``)
or secondary (``alias_filenames``) pattern. Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
|
Lookup all lexers that handle the given filename as a primary (``filenames``)
or secondary (``alias_filenames``) pattern. Then run a text analysis for those
lexers and choose the best result. | def guess_lexer_for_filename(_fn, _text, **options):
"""
Lookup all lexers that handle the given filename as a primary (``filenames``)
or secondary (``alias_filenames``) pattern. Then run a text analysis for those
lexers and choose the best result.
usage::
>>> from pygments.lexers import guess_lexer_for_filename
>>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
<pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
>>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
<pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
>>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
<pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
"""
fn = basename(_fn)
primary = {}
matching_lexers = set()
for lexer in _iter_lexerclasses():
for filename in lexer.filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = True
for filename in lexer.alias_filenames:
if _fn_matches(fn, filename):
matching_lexers.add(lexer)
primary[lexer] = False
if not matching_lexers:
raise ClassNotFound('no lexer for filename %r found' % fn)
if len(matching_lexers) == 1:
return matching_lexers.pop()(**options)
result = []
for lexer in matching_lexers:
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
result.append((rv, lexer))
def type_sort(t):
# sort by:
# - analyse score
# - is primary filename pattern?
# - priority
# - last resort: class name
return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
result.sort(key=type_sort)
return result[-1][1](**options) | [
"def",
"guess_lexer_for_filename",
"(",
"_fn",
",",
"_text",
",",
"*",
"*",
"options",
")",
":",
"fn",
"=",
"basename",
"(",
"_fn",
")",
"primary",
"=",
"{",
"}",
"matching_lexers",
"=",
"set",
"(",
")",
"for",
"lexer",
"in",
"_iter_lexerclasses",
"(",
")",
":",
"for",
"filename",
"in",
"lexer",
".",
"filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"matching_lexers",
".",
"add",
"(",
"lexer",
")",
"primary",
"[",
"lexer",
"]",
"=",
"True",
"for",
"filename",
"in",
"lexer",
".",
"alias_filenames",
":",
"if",
"_fn_matches",
"(",
"fn",
",",
"filename",
")",
":",
"matching_lexers",
".",
"add",
"(",
"lexer",
")",
"primary",
"[",
"lexer",
"]",
"=",
"False",
"if",
"not",
"matching_lexers",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer for filename %r found'",
"%",
"fn",
")",
"if",
"len",
"(",
"matching_lexers",
")",
"==",
"1",
":",
"return",
"matching_lexers",
".",
"pop",
"(",
")",
"(",
"*",
"*",
"options",
")",
"result",
"=",
"[",
"]",
"for",
"lexer",
"in",
"matching_lexers",
":",
"rv",
"=",
"lexer",
".",
"analyse_text",
"(",
"_text",
")",
"if",
"rv",
"==",
"1.0",
":",
"return",
"lexer",
"(",
"*",
"*",
"options",
")",
"result",
".",
"append",
"(",
"(",
"rv",
",",
"lexer",
")",
")",
"def",
"type_sort",
"(",
"t",
")",
":",
"# sort by:",
"# - analyse score",
"# - is primary filename pattern?",
"# - priority",
"# - last resort: class name",
"return",
"(",
"t",
"[",
"0",
"]",
",",
"primary",
"[",
"t",
"[",
"1",
"]",
"]",
",",
"t",
"[",
"1",
"]",
".",
"priority",
",",
"t",
"[",
"1",
"]",
".",
"__name__",
")",
"result",
".",
"sort",
"(",
"key",
"=",
"type_sort",
")",
"return",
"result",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"(",
"*",
"*",
"options",
")"
] | [
180,
0
] | [
228,
35
] | python | en | ['en', 'error', 'th'] | False |
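A minimal usage sketch for the row above (not part of the dataset; assumes the pygments package), showing the ClassNotFound fallback when no filename pattern matches:

from pygments.util import ClassNotFound
from pygments.lexers import guess_lexer_for_filename

try:
    lexer = guess_lexer_for_filename('notes.unknownext', 'plain text')
except ClassNotFound:
    lexer = None  # no lexer claims this filename; the caller must fall back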
guess_lexer | (_text, **options) | Guess a lexer by strong distinctions in the text (e.g., shebang). | Guess a lexer by strong distinctions in the text (e.g., shebang). | def guess_lexer(_text, **options):
    """Guess a lexer by strong distinctions in the text (e.g., shebang)."""
# try to get a vim modeline first
ft = get_filetype_from_buffer(_text)
if ft is not None:
try:
return get_lexer_by_name(ft, **options)
except ClassNotFound:
pass
best_lexer = [0.0, None]
for lexer in _iter_lexerclasses():
rv = lexer.analyse_text(_text)
if rv == 1.0:
return lexer(**options)
if rv > best_lexer[0]:
best_lexer[:] = (rv, lexer)
if not best_lexer[0] or best_lexer[1] is None:
raise ClassNotFound('no lexer matching the text found')
return best_lexer[1](**options) | [
"def",
"guess_lexer",
"(",
"_text",
",",
"*",
"*",
"options",
")",
":",
"# try to get a vim modeline first",
"ft",
"=",
"get_filetype_from_buffer",
"(",
"_text",
")",
"if",
"ft",
"is",
"not",
"None",
":",
"try",
":",
"return",
"get_lexer_by_name",
"(",
"ft",
",",
"*",
"*",
"options",
")",
"except",
"ClassNotFound",
":",
"pass",
"best_lexer",
"=",
"[",
"0.0",
",",
"None",
"]",
"for",
"lexer",
"in",
"_iter_lexerclasses",
"(",
")",
":",
"rv",
"=",
"lexer",
".",
"analyse_text",
"(",
"_text",
")",
"if",
"rv",
"==",
"1.0",
":",
"return",
"lexer",
"(",
"*",
"*",
"options",
")",
"if",
"rv",
">",
"best_lexer",
"[",
"0",
"]",
":",
"best_lexer",
"[",
":",
"]",
"=",
"(",
"rv",
",",
"lexer",
")",
"if",
"not",
"best_lexer",
"[",
"0",
"]",
"or",
"best_lexer",
"[",
"1",
"]",
"is",
"None",
":",
"raise",
"ClassNotFound",
"(",
"'no lexer matching the text found'",
")",
"return",
"best_lexer",
"[",
"1",
"]",
"(",
"*",
"*",
"options",
")"
] | [
231,
0
] | [
252,
35
] | python | en | ['en', 'en', 'en'] | True |
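A hedged sketch of the shebang shortcut described above (assumes pygments); a perfect analyse_text score returns immediately:

from pygments.lexers import guess_lexer

source = '#!/usr/bin/env python\nprint("hello")\n'
lexer = guess_lexer(source)
print(lexer.name)  # expected "Python": the shebang scores 1.0 for that lexer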
XvfbDisplay.__init__ | (self, size=(1024, 768), color_depth=24,
bgcolor='black', fbdir=None) |
:param bgcolor: 'black' or 'white'
:param fbdir: If non-null, the virtual screen is memory-mapped
to a file in the given directory ('-fbdir' option)
|
:param bgcolor: 'black' or 'white'
:param fbdir: If non-null, the virtual screen is memory-mapped
to a file in the given directory ('-fbdir' option)
| def __init__(self, size=(1024, 768), color_depth=24,
bgcolor='black', fbdir=None):
'''
:param bgcolor: 'black' or 'white'
:param fbdir: If non-null, the virtual screen is memory-mapped
to a file in the given directory ('-fbdir' option)
'''
self.screen = 0
self.size = size
self.color_depth = color_depth
self.process = None
self.bgcolor = bgcolor
self.display = None
self.fbdir = fbdir
AbstractDisplay.__init__(self) | [
"def",
"__init__",
"(",
"self",
",",
"size",
"=",
"(",
"1024",
",",
"768",
")",
",",
"color_depth",
"=",
"24",
",",
"bgcolor",
"=",
"'black'",
",",
"fbdir",
"=",
"None",
")",
":",
"self",
".",
"screen",
"=",
"0",
"self",
".",
"size",
"=",
"size",
"self",
".",
"color_depth",
"=",
"color_depth",
"self",
".",
"process",
"=",
"None",
"self",
".",
"bgcolor",
"=",
"bgcolor",
"self",
".",
"display",
"=",
"None",
"self",
".",
"fbdir",
"=",
"fbdir",
"AbstractDisplay",
".",
"__init__",
"(",
"self",
")"
] | [
14,
4
] | [
28,
38
] | python | en | ['en', 'error', 'th'] | False |
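A usage sketch for this backend via pyvirtualdisplay's public wrapper (an assumption: exact keyword names vary across pyvirtualdisplay versions, and the Xvfb binary must be installed):

from pyvirtualdisplay import Display

# visible=False selects the Xvfb backend whose __init__ is shown above
with Display(visible=False, size=(1024, 768), color_depth=24) as disp:
    pass  # GUI code launched here renders to the in-memory virtual screen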
TIMESAT_stats | (dataarray, time_dim='time') |
For a 1D array of values for a vegetation index - for which higher values tend to
indicate more vegetation - determine several statistics:
1. Beginning of Season (BOS): The time index of the beginning of the growing season.
(The downward inflection point before the maximum vegetation index value)
2. End of Season (EOS): The time index of the end of the growing season.
(The upward inflection point after the maximum vegetation index value)
3. Middle of Season (MOS): The time index of the maximum vegetation index value.
4. Length of Season (EOS-BOS): The time length of the season (index difference).
5. Base Value (BASE): The minimum vegetation index value.
6. Max Value (MAX): The maximum vegetation index value (the value at MOS).
7. Amplitude (AMP): The difference between BASE and MAX.
Parameters
----------
dataarray: xarray.DataArray
The 1D array of non-NaN values to determine the statistics for.
time_dim: string
The name of the time dimension in `dataarray`.
Returns
-------
stats: dict
A dictionary mapping statistic names to values.
|
For a 1D array of values for a vegetation index - for which higher values tend to
indicate more vegetation - determine several statistics:
1. Beginning of Season (BOS): The time index of the beginning of the growing season.
(The downward inflection point before the maximum vegetation index value)
2. End of Season (EOS): The time index of the end of the growing season.
(The upward inflection point after the maximum vegetation index value)
3. Middle of Season (MOS): The time index of the maximum vegetation index value.
4. Length of Season (EOS-BOS): The time length of the season (index difference).
5. Base Value (BASE): The minimum vegetation index value.
6. Max Value (MAX): The maximum vegetation index value (the value at MOS).
7. Amplitude (AMP): The difference between BASE and MAX.
Parameters
----------
dataarray: xarray.DataArray
The 1D array of non-NaN values to determine the statistics for.
time_dim: string
The name of the time dimension in `dataarray`. | def TIMESAT_stats(dataarray, time_dim='time'):
"""
For a 1D array of values for a vegetation index - for which higher values tend to
indicate more vegetation - determine several statistics:
1. Beginning of Season (BOS): The time index of the beginning of the growing season.
(The downward inflection point before the maximum vegetation index value)
2. End of Season (EOS): The time index of the end of the growing season.
(The upward inflection point after the maximum vegetation index value)
3. Middle of Season (MOS): The time index of the maximum vegetation index value.
4. Length of Season (EOS-BOS): The time length of the season (index difference).
5. Base Value (BASE): The minimum vegetation index value.
6. Max Value (MAX): The maximum vegetation index value (the value at MOS).
7. Amplitude (AMP): The difference between BASE and MAX.
Parameters
----------
dataarray: xarray.DataArray
The 1D array of non-NaN values to determine the statistics for.
time_dim: string
The name of the time dimension in `dataarray`.
Returns
-------
stats: dict
A dictionary mapping statistic names to values.
"""
assert time_dim in dataarray.dims, "The parameter `time_dim` is \"{}\", " \
"but that dimension does not exist in the data.".format(time_dim)
stats = {}
data_np_arr = dataarray.values
time_np_arr = _n64_datetime_to_scalar(dataarray[time_dim].values)
data_inds = np.arange(len(data_np_arr))
# Obtain the first and second derivatives.
fst_deriv = np.gradient(data_np_arr, time_np_arr)
pos_fst_deriv = fst_deriv > 0
neg_fst_deriv = 0 > fst_deriv
snd_deriv = np.gradient(fst_deriv, time_np_arr)
pos_snd_deriv = snd_deriv > 0
neg_snd_deriv = 0 > snd_deriv
# Determine MOS.
# MOS is the index of the highest value.
idxmos = np.argmax(data_np_arr)
stats['Middle of Season'] = idxmos
data_inds_before_mos = data_inds[:idxmos]
data_inds_after_mos = data_inds[idxmos:]
# Determine BOS.
# BOS is the last negative inflection point before the MOS.
# If that point does not exist, choose the first positive
# first derivative point before the MOS. If that point does
# not exist, the BOS is the MOS (there is no point before the MOS in this case).
snd_deriv_neg_infl = np.concatenate((np.array([False]), neg_snd_deriv[1:] & ~neg_snd_deriv[:-1]))
if snd_deriv_neg_infl[data_inds_before_mos].sum() > 0:
idxbos = data_inds_before_mos[len(data_inds_before_mos) - 1 -
np.argmax(snd_deriv_neg_infl[data_inds_before_mos][::-1])]
elif pos_fst_deriv[data_inds_before_mos].sum() > 0:
idxbos = np.argmax(pos_fst_deriv[data_inds_before_mos])
else:
idxbos = idxmos
stats['Beginning of Season'] = idxbos
# Determine EOS.
# EOS is the first positive inflection point after the MOS.
# If that point does not exist, choose the last negative
# first derivative point after the MOS. If that point does
# not exist, the EOS is the MOS (there is no point after the MOS in this case).
snd_deriv_pos_infl = np.concatenate((np.array([False]), pos_snd_deriv[1:] & ~pos_snd_deriv[:-1]))
if snd_deriv_pos_infl[data_inds_after_mos].sum() > 0:
idxeos = data_inds_after_mos[np.argmax(snd_deriv_pos_infl[data_inds_after_mos])]
elif neg_fst_deriv[data_inds_after_mos].sum() > 0:
idxeos = np.argmax(neg_fst_deriv[data_inds_after_mos])
else:
idxeos = idxmos
stats['End of Season'] = idxeos
# Determine EOS-BOS.
stats['Length of Season'] = idxeos - idxbos
# Determine BASE.
stats['Base Value'] = data_np_arr.min()
# Determine MAX.
stats['Max Value'] = data_np_arr.max()
# Determine AMP.
stats['Amplitude'] = stats['Max Value'] - stats['Base Value']
return stats | [
"def",
"TIMESAT_stats",
"(",
"dataarray",
",",
"time_dim",
"=",
"'time'",
")",
":",
"assert",
"time_dim",
"in",
"dataarray",
".",
"dims",
",",
"\"The parameter `time_dim` is \\\"{}\\\", \"",
"\"but that dimension does not exist in the data.\"",
".",
"format",
"(",
"time_dim",
")",
"stats",
"=",
"{",
"}",
"data_np_arr",
"=",
"dataarray",
".",
"values",
"time_np_arr",
"=",
"_n64_datetime_to_scalar",
"(",
"dataarray",
"[",
"time_dim",
"]",
".",
"values",
")",
"data_inds",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data_np_arr",
")",
")",
"# Obtain the first and second derivatives.",
"fst_deriv",
"=",
"np",
".",
"gradient",
"(",
"data_np_arr",
",",
"time_np_arr",
")",
"pos_fst_deriv",
"=",
"fst_deriv",
">",
"0",
"neg_fst_deriv",
"=",
"0",
">",
"fst_deriv",
"snd_deriv",
"=",
"np",
".",
"gradient",
"(",
"fst_deriv",
",",
"time_np_arr",
")",
"pos_snd_deriv",
"=",
"snd_deriv",
">",
"0",
"neg_snd_deriv",
"=",
"0",
">",
"snd_deriv",
"# Determine MOS.",
"# MOS is the index of the highest value.",
"idxmos",
"=",
"np",
".",
"argmax",
"(",
"data_np_arr",
")",
"stats",
"[",
"'Middle of Season'",
"]",
"=",
"idxmos",
"data_inds_before_mos",
"=",
"data_inds",
"[",
":",
"idxmos",
"]",
"data_inds_after_mos",
"=",
"data_inds",
"[",
"idxmos",
":",
"]",
"# Determine BOS.",
"# BOS is the last negative inflection point before the MOS.",
"# If that point does not exist, choose the first positive",
"# first derivative point before the MOS. If that point does",
"# not exist, the BOS is the MOS (there is no point before the MOS in this case).",
"snd_deriv_neg_infl",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"False",
"]",
")",
",",
"neg_snd_deriv",
"[",
"1",
":",
"]",
"&",
"~",
"neg_snd_deriv",
"[",
":",
"-",
"1",
"]",
")",
")",
"if",
"snd_deriv_neg_infl",
"[",
"data_inds_before_mos",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"idxbos",
"=",
"data_inds_before_mos",
"[",
"len",
"(",
"data_inds_before_mos",
")",
"-",
"1",
"-",
"np",
".",
"argmax",
"(",
"snd_deriv_neg_infl",
"[",
"data_inds_before_mos",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
"]",
"elif",
"pos_fst_deriv",
"[",
"data_inds_before_mos",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"idxbos",
"=",
"np",
".",
"argmax",
"(",
"pos_fst_deriv",
"[",
"data_inds_before_mos",
"]",
")",
"else",
":",
"idxbos",
"=",
"idxmos",
"stats",
"[",
"'Beginning of Season'",
"]",
"=",
"idxbos",
"# Determine EOS. ",
"# EOS is the first positive inflection point after the MOS.",
"# If that point does not exist, choose the last negative",
"# first derivative point after the MOS. If that point does",
"# not exist, the EOS is the MOS (there is no point after the MOS in this case).",
"snd_deriv_pos_infl",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"False",
"]",
")",
",",
"pos_snd_deriv",
"[",
"1",
":",
"]",
"&",
"~",
"pos_snd_deriv",
"[",
":",
"-",
"1",
"]",
")",
")",
"if",
"snd_deriv_pos_infl",
"[",
"data_inds_after_mos",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"idxeos",
"=",
"data_inds_after_mos",
"[",
"np",
".",
"argmax",
"(",
"snd_deriv_pos_infl",
"[",
"data_inds_after_mos",
"]",
")",
"]",
"elif",
"neg_fst_deriv",
"[",
"data_inds_after_mos",
"]",
".",
"sum",
"(",
")",
">",
"0",
":",
"idxeos",
"=",
"np",
".",
"argmax",
"(",
"neg_fst_deriv",
"[",
"data_inds_after_mos",
"]",
")",
"else",
":",
"idxeos",
"=",
"idxmos",
"stats",
"[",
"'End of Season'",
"]",
"=",
"idxeos",
"# Determine EOS-BOS.",
"stats",
"[",
"'Length of Season'",
"]",
"=",
"idxeos",
"-",
"idxbos",
"# Determine BASE.",
"stats",
"[",
"'Base Value'",
"]",
"=",
"data_np_arr",
".",
"min",
"(",
")",
"# Determine MAX.",
"stats",
"[",
"'Max Value'",
"]",
"=",
"data_np_arr",
".",
"max",
"(",
")",
"# Determine AMP.",
"stats",
"[",
"'Amplitude'",
"]",
"=",
"stats",
"[",
"'Max Value'",
"]",
"-",
"stats",
"[",
"'Base Value'",
"]",
"return",
"stats"
] | [
24,
0
] | [
111,
16
] | python | en | ['en', 'error', 'th'] | False |
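A minimal sketch exercising TIMESAT_stats on a synthetic series (assumes numpy and xarray, and that the module above, including its _n64_datetime_to_scalar helper, is importable; values are illustrative):

import numpy as np
import xarray as xr

times = np.arange('2020-01', '2020-08', dtype='datetime64[M]').astype('datetime64[ns]')
ndvi = xr.DataArray([0.2, 0.3, 0.5, 0.8, 0.6, 0.35, 0.25],
                    coords={'time': times}, dims='time')

stats = TIMESAT_stats(ndvi)
print(stats['Middle of Season'], stats['Amplitude'])  # 3 and ~0.6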
IRIPAllowDeny.__init__ | (self, ir: 'IR', aconf: Config,
rkey: str="ir.ipallowdeny",
name: str="ir.ipallowdeny",
kind: str="IRIPAllowDeny",
parent: IRResource=None,
action: str=None,
**kwargs) |
Initialize an IRIPAllowDeny. In addition to the usual IRFilter parameters,
parent and action are required:
parent is the IRResource in which the IRIPAllowDeny is defined; at present,
this will be the Ambassador module. It's required because it's where errors
should be posted.
action must be either "ALLOW" or "DENY". This action will be normalized to
all-uppercase in setup().
|
Initialize an IRIPAllowDeny. In addition to the usual IRFilter parameters,
parent and action are required: | def __init__(self, ir: 'IR', aconf: Config,
rkey: str="ir.ipallowdeny",
name: str="ir.ipallowdeny",
kind: str="IRIPAllowDeny",
parent: IRResource=None,
action: str=None,
**kwargs) -> None:
"""
Initialize an IRIPAllowDeny. In addition to the usual IRFilter parameters,
parent and action are required:
parent is the IRResource in which the IRIPAllowDeny is defined; at present,
this will be the Ambassador module. It's required because it's where errors
should be posted.
action must be either "ALLOW" or "DENY". This action will be normalized to
all-uppercase in setup().
"""
assert parent is not None
assert action is not None
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
parent=parent, action=action, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"ir",
":",
"'IR'",
",",
"aconf",
":",
"Config",
",",
"rkey",
":",
"str",
"=",
"\"ir.ipallowdeny\"",
",",
"name",
":",
"str",
"=",
"\"ir.ipallowdeny\"",
",",
"kind",
":",
"str",
"=",
"\"IRIPAllowDeny\"",
",",
"parent",
":",
"IRResource",
"=",
"None",
",",
"action",
":",
"str",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"assert",
"parent",
"is",
"not",
"None",
"assert",
"action",
"is",
"not",
"None",
"super",
"(",
")",
".",
"__init__",
"(",
"ir",
"=",
"ir",
",",
"aconf",
"=",
"aconf",
",",
"rkey",
"=",
"rkey",
",",
"kind",
"=",
"kind",
",",
"name",
"=",
"name",
",",
"parent",
"=",
"parent",
",",
"action",
"=",
"action",
",",
"*",
"*",
"kwargs",
")"
] | [
28,
4
] | [
52,
51
] | python | en | ['en', 'error', 'th'] | False |
IRIPAllowDeny.setup | (self, ir: 'IR', aconf: Config) |
Set up an IRIPAllowDeny based on the action and principals passed into
__init__.
|
Set up an IRIPAllowDeny based on the action and principals passed into
__init__.
| def setup(self, ir: 'IR', aconf: Config) -> bool:
"""
Set up an IRIPAllowDeny based on the action and principals passed into
__init__.
"""
assert self.parent
# These pops will crash if the action or principals are missing. That's
# OK -- they're required elements.
action: Optional[str] = self.pop("action")
principals: Optional[List[Dict[str, str]]] = self.pop("principals")
assert action is not None
assert principals is not None
action = action.upper()
if (action != "ALLOW") and (action != "DENY"):
raise RuntimeError(f"IRIPAllowDeny action must be ALLOW or DENY, not {action}")
self.action = action
self.principals = []
ir.logger.debug(f"PRINCIPALS: {principals}")
# principals looks like
#
# [
# { 'peer': '127.0.0.1' },
# { 'remote': '192.68.0.0/24' },
# { 'remote': '::1' }
# ]
#
# or the like, where the key in the dict specifies how Envoy will handle the
# IP match, and the value is a CIDRRange spec.
for pdict in principals:
# If we have more than one thing in the dict, that's an error.
first = True
for kind, spec in pdict.items():
if not first:
self.parent.post_error(f"ip{self.action.lower()} principals must be separate list elements")
break
first = False
envoy_kind = IRIPAllowDeny.EnvoyTypeMap.get(kind, None)
if not envoy_kind:
self.parent.post_error(f"ip{self.action.lower()} principal type {kind} unknown: must be peer or remote")
continue
cidrrange = CIDRRange(spec)
if cidrrange:
self.principals.append((envoy_kind, cidrrange))
else:
self.parent.post_error(f"ip_{self.action.lower()} principal {spec} is not valid: {cidrrange.error}")
if len(self.principals) > 0:
return True
else:
return False | [
"def",
"setup",
"(",
"self",
",",
"ir",
":",
"'IR'",
",",
"aconf",
":",
"Config",
")",
"->",
"bool",
":",
"assert",
"self",
".",
"parent",
"# These pops will crash if the action or principals are missing. That's",
"# OK -- they're required elements.",
"action",
":",
"Optional",
"[",
"str",
"]",
"=",
"self",
".",
"pop",
"(",
"\"action\"",
")",
"principals",
":",
"Optional",
"[",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"]",
"=",
"self",
".",
"pop",
"(",
"\"principals\"",
")",
"assert",
"action",
"is",
"not",
"None",
"assert",
"principals",
"is",
"not",
"None",
"action",
"=",
"action",
".",
"upper",
"(",
")",
"if",
"(",
"action",
"!=",
"\"ALLOW\"",
")",
"and",
"(",
"action",
"!=",
"\"DENY\"",
")",
":",
"raise",
"RuntimeError",
"(",
"f\"IRIPAllowDeny action must be ALLOW or DENY, not {action}\"",
")",
"self",
".",
"action",
"=",
"action",
"self",
".",
"principals",
"=",
"[",
"]",
"ir",
".",
"logger",
".",
"debug",
"(",
"f\"PRINCIPALS: {principals}\"",
")",
"# principals looks like",
"#",
"# [ ",
"# { 'peer': '127.0.0.1' },",
"# { 'remote': '192.68.0.0/24' },",
"# { 'remote': '::1' }",
"# ]",
"#",
"# or the like, where the key in the dict specifies how Envoy will handle the ",
"# IP match, and the value is a CIDRRange spec.",
"for",
"pdict",
"in",
"principals",
":",
"# If we have more than one thing in the dict, that's an error.",
"first",
"=",
"True",
"for",
"kind",
",",
"spec",
"in",
"pdict",
".",
"items",
"(",
")",
":",
"if",
"not",
"first",
":",
"self",
".",
"parent",
".",
"post_error",
"(",
"f\"ip{self.action.lower()} principals must be separate list elements\"",
")",
"break",
"first",
"=",
"False",
"envoy_kind",
"=",
"IRIPAllowDeny",
".",
"EnvoyTypeMap",
".",
"get",
"(",
"kind",
",",
"None",
")",
"if",
"not",
"envoy_kind",
":",
"self",
".",
"parent",
".",
"post_error",
"(",
"f\"ip{self.action.lower()} principal type {kind} unknown: must be peer or remote\"",
")",
"continue",
"cidrrange",
"=",
"CIDRRange",
"(",
"spec",
")",
"if",
"cidrrange",
":",
"self",
".",
"principals",
".",
"append",
"(",
"(",
"envoy_kind",
",",
"cidrrange",
")",
")",
"else",
":",
"self",
".",
"parent",
".",
"post_error",
"(",
"f\"ip_{self.action.lower()} principal {spec} is not valid: {cidrrange.error}\"",
")",
"if",
"len",
"(",
"self",
".",
"principals",
")",
">",
"0",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | [
54,
4
] | [
119,
24
] | python | en | ['en', 'error', 'th'] | False |
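A sketch of the principals value setup() expects, restating the comment above (the Envoy-side values in EnvoyTypeMap are not shown in this row, so only the input shape is illustrated):

# each principal is a single-key dict; 'peer' matches the immediate
# connection address, 'remote' the original client behind any proxies
principals = [
    {"peer": "127.0.0.1"},
    {"remote": "192.68.0.0/24"},
    {"remote": "::1"},
]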
complex_flat_schema | () | This includes some descriptions. | This includes some descriptions. | def complex_flat_schema():
"""This includes some descriptions."""
return {
"$id": "https://example.com/address.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "An address",
"type": "object",
"properties": {
"post-office-box": {"type": "string"},
"street-name": {"type": "string"},
"street-number": {
"type": "integer",
"description": "Only the address number.",
},
"locality": {"type": "string"},
"region": {"type": "string"},
"postal-code": {"type": "string"},
"country-name": {"type": "string"},
},
"required": ["locality", "region", "country-name"],
} | [
"def",
"complex_flat_schema",
"(",
")",
":",
"return",
"{",
"\"$id\"",
":",
"\"https://example.com/address.schema.json\"",
",",
"\"$schema\"",
":",
"\"http://json-schema.org/draft-07/schema#\"",
",",
"\"description\"",
":",
"\"An address\"",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"post-office-box\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"street-name\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"street-number\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"description\"",
":",
"\"Only the address number.\"",
",",
"}",
",",
"\"locality\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"region\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"postal-code\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"country-name\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"}",
",",
"\"required\"",
":",
"[",
"\"locality\"",
",",
"\"region\"",
",",
"\"country-name\"",
"]",
",",
"}"
] | [
22,
0
] | [
42,
5
] | python | en | ['en', 'en', 'en'] | True |
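A validation sketch against the fixture above (assumes the jsonschema package; the fixture function is called directly here, outside pytest):

import jsonschema

schema = complex_flat_schema()
address = {"locality": "Springfield", "region": "IL", "country-name": "US"}
jsonschema.validate(instance=address, schema=schema)  # passes: all required keys set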
string_lengths_schema | () |
This fixture has various combinations of string lengths.
https://json-schema.org/understanding-json-schema/reference/string.html#length
|
This fixture has various combinations of string lengths.
https://json-schema.org/understanding-json-schema/reference/string.html#length
| def string_lengths_schema():
"""
    This fixture has various combinations of string lengths.
https://json-schema.org/understanding-json-schema/reference/string.html#length
"""
return {
"$id": "https://example.com/address.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"comments-no-constraints": {"type": "string"},
"state-abbreviation-equal-min-max": {
"type": "string",
"minLength": 2,
"maxLength": 2,
},
"ICD10-code-3-7": {"type": "string", "minLength": 3, "maxLength": 7},
"name-no-max": {"type": "string", "minLength": 1},
"password-max-33": {"type": "string", "maxLength": 33},
"optional-min-1": {
"anyOf": [
{
"type": "string",
"minLength": 1,
},
{
"type": "null",
},
]
},
},
} | [
"def",
"string_lengths_schema",
"(",
")",
":",
"return",
"{",
"\"$id\"",
":",
"\"https://example.com/address.schema.json\"",
",",
"\"$schema\"",
":",
"\"http://json-schema.org/draft-07/schema#\"",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"comments-no-constraints\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
"}",
",",
"\"state-abbreviation-equal-min-max\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"minLength\"",
":",
"2",
",",
"\"maxLength\"",
":",
"2",
",",
"}",
",",
"\"ICD10-code-3-7\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"minLength\"",
":",
"3",
",",
"\"maxLength\"",
":",
"7",
"}",
",",
"\"name-no-max\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"minLength\"",
":",
"1",
"}",
",",
"\"password-max-33\"",
":",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"maxLength\"",
":",
"33",
"}",
",",
"\"optional-min-1\"",
":",
"{",
"\"anyOf\"",
":",
"[",
"{",
"\"type\"",
":",
"\"string\"",
",",
"\"minLength\"",
":",
"1",
",",
"}",
",",
"{",
"\"type\"",
":",
"\"null\"",
",",
"}",
",",
"]",
"}",
",",
"}",
",",
"}"
] | [
95,
0
] | [
126,
5
] | python | en | ['en', 'error', 'th'] | False |
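A sketch of the length constraints in action (assumes jsonschema):

import jsonschema

schema = string_lengths_schema()
jsonschema.validate({"state-abbreviation-equal-min-max": "CA"}, schema)  # passes
try:
    jsonschema.validate({"state-abbreviation-equal-min-max": "CAL"}, schema)
except jsonschema.ValidationError as exc:
    print(exc.message)  # 'CAL' is too long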
integer_ranges_schema | () |
This fixture has various combinations of integer ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
|
This fixture has various combinations of integer ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
| def integer_ranges_schema():
"""
This fixture has various combinations of integer ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
"""
return {
"$id": "https://example.com/address.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "An address similar to http://microformats.org/wiki/h-card",
"type": "object",
"properties": {
"favorite-number": {"type": "integer"},
"age-0-130": {"type": "integer", "minimum": 0, "maximum": 130},
"wheel-count-0-plus": {"type": "integer", "minimum": 0},
"rpm-max-7000": {"type": "integer", "maximum": 7000},
"lake-depth-max-minus-100": {"type": "integer", "maximum": -100},
"floor-exclusive-min-0": {"type": "integer", "exclusiveMinimum": 0},
"floor-exclusive-max-100": {"type": "integer", "exclusiveMaximum": 100},
"gear-exclusive-0-6": {
"type": "integer",
"exclusiveMinimum": 0,
"exclusiveMaximum": 6,
},
"optional-min-1": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
},
{
"type": "null",
},
]
},
},
} | [
"def",
"integer_ranges_schema",
"(",
")",
":",
"return",
"{",
"\"$id\"",
":",
"\"https://example.com/address.schema.json\"",
",",
"\"$schema\"",
":",
"\"http://json-schema.org/draft-07/schema#\"",
",",
"\"description\"",
":",
"\"An address similar to http://microformats.org/wiki/h-card\"",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"favorite-number\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
"}",
",",
"\"age-0-130\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"minimum\"",
":",
"0",
",",
"\"maximum\"",
":",
"130",
"}",
",",
"\"wheel-count-0-plus\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"minimum\"",
":",
"0",
"}",
",",
"\"rpm-max-7000\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"maximum\"",
":",
"7000",
"}",
",",
"\"lake-depth-max-minus-100\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"maximum\"",
":",
"-",
"100",
"}",
",",
"\"floor-exclusive-min-0\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"exclusiveMinimum\"",
":",
"0",
"}",
",",
"\"floor-exclusive-max-100\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"exclusiveMaximum\"",
":",
"100",
"}",
",",
"\"gear-exclusive-0-6\"",
":",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"exclusiveMinimum\"",
":",
"0",
",",
"\"exclusiveMaximum\"",
":",
"6",
",",
"}",
",",
"\"optional-min-1\"",
":",
"{",
"\"anyOf\"",
":",
"[",
"{",
"\"type\"",
":",
"\"integer\"",
",",
"\"minimum\"",
":",
"1",
",",
"}",
",",
"{",
"\"type\"",
":",
"\"null\"",
",",
"}",
",",
"]",
"}",
",",
"}",
",",
"}"
] | [
130,
0
] | [
165,
5
] | python | en | ['en', 'error', 'th'] | False |
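A sketch of the exclusive bounds (assumes jsonschema); exclusiveMinimum and exclusiveMaximum reject the boundary values themselves:

import jsonschema

schema = integer_ranges_schema()
jsonschema.validate({"gear-exclusive-0-6": 1}, schema)  # 1 through 5 are valid
try:
    jsonschema.validate({"gear-exclusive-0-6": 0}, schema)
except jsonschema.ValidationError:
    print("0 fails: the boundary itself is excluded")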
number_ranges_schema | () |
This fixture has various combinations of number ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
|
This fixture has various combinations of number ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
| def number_ranges_schema():
"""
This fixture has various combinations of number ranges.
https://json-schema.org/understanding-json-schema/reference/numeric.html#range
"""
return {
"$id": "https://example.com/address.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"description": "An address similar to http://microformats.org/wiki/h-card",
"type": "object",
"properties": {
"favorite-number": {"type": "number"},
"age-0-130": {"type": "number", "minimum": 0.5, "maximum": 130.5},
"wheel-count-0-plus": {"type": "number", "minimum": 0.5},
"rpm-max-7000": {"type": "number", "maximum": 7000.5},
"lake-depth-max-minus-100": {"type": "number", "maximum": -100.5},
"floor-exclusive-min-0": {"type": "number", "exclusiveMinimum": 0.5},
"floor-exclusive-max-100": {"type": "number", "exclusiveMaximum": 100.5},
"gear-exclusive-0-6": {
"type": "number",
"exclusiveMinimum": 0.5,
"exclusiveMaximum": 6.5,
},
"optional-min-half": {
"anyOf": [
{
"type": "number",
"minimum": 0.5,
},
{
"type": "null",
},
]
},
},
} | [
"def",
"number_ranges_schema",
"(",
")",
":",
"return",
"{",
"\"$id\"",
":",
"\"https://example.com/address.schema.json\"",
",",
"\"$schema\"",
":",
"\"http://json-schema.org/draft-07/schema#\"",
",",
"\"description\"",
":",
"\"An address similar to http://microformats.org/wiki/h-card\"",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"favorite-number\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
"}",
",",
"\"age-0-130\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"minimum\"",
":",
"0.5",
",",
"\"maximum\"",
":",
"130.5",
"}",
",",
"\"wheel-count-0-plus\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"minimum\"",
":",
"0.5",
"}",
",",
"\"rpm-max-7000\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"maximum\"",
":",
"7000.5",
"}",
",",
"\"lake-depth-max-minus-100\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"maximum\"",
":",
"-",
"100.5",
"}",
",",
"\"floor-exclusive-min-0\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"exclusiveMinimum\"",
":",
"0.5",
"}",
",",
"\"floor-exclusive-max-100\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"exclusiveMaximum\"",
":",
"100.5",
"}",
",",
"\"gear-exclusive-0-6\"",
":",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"exclusiveMinimum\"",
":",
"0.5",
",",
"\"exclusiveMaximum\"",
":",
"6.5",
",",
"}",
",",
"\"optional-min-half\"",
":",
"{",
"\"anyOf\"",
":",
"[",
"{",
"\"type\"",
":",
"\"number\"",
",",
"\"minimum\"",
":",
"0.5",
",",
"}",
",",
"{",
"\"type\"",
":",
"\"null\"",
",",
"}",
",",
"]",
"}",
",",
"}",
",",
"}"
] | [
169,
0
] | [
204,
5
] | python | en | ['en', 'error', 'th'] | False |
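A companion sketch for the inclusive float bounds and the anyOf-with-null pattern (assumes jsonschema):

import jsonschema

schema = number_ranges_schema()
jsonschema.validate({"age-0-130": 0.5}, schema)           # inclusive minimum passes
jsonschema.validate({"optional-min-half": None}, schema)  # anyOf admits null
try:
    jsonschema.validate({"age-0-130": 0.25}, schema)
except jsonschema.ValidationError:
    print("0.25 is below the inclusive minimum of 0.5")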
null_fields_schema | () |
This fixture has null fields.
https://json-schema.org/understanding-json-schema/reference/null.html
|
This fixture has null fields.
https://json-schema.org/understanding-json-schema/reference/null.html
| def null_fields_schema():
"""
This fixture has null fields.
https://json-schema.org/understanding-json-schema/reference/null.html
"""
return {
"$id": "https://example.com/null.schema.json",
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"null": {"type": "null"},
"string-or-null": {"type": ["string", "null"]},
"int-or-null": {"type": ["integer", "null"]},
"number-or-null": {"type": ["number", "null"]},
"enum-or-null": {"anyOf": [{"enum": ["a", "b", "c"]}, {"type": "null"}]},
},
} | [
"def",
"null_fields_schema",
"(",
")",
":",
"return",
"{",
"\"$id\"",
":",
"\"https://example.com/null.schema.json\"",
",",
"\"$schema\"",
":",
"\"http://json-schema.org/draft-07/schema#\"",
",",
"\"type\"",
":",
"\"object\"",
",",
"\"properties\"",
":",
"{",
"\"null\"",
":",
"{",
"\"type\"",
":",
"\"null\"",
"}",
",",
"\"string-or-null\"",
":",
"{",
"\"type\"",
":",
"[",
"\"string\"",
",",
"\"null\"",
"]",
"}",
",",
"\"int-or-null\"",
":",
"{",
"\"type\"",
":",
"[",
"\"integer\"",
",",
"\"null\"",
"]",
"}",
",",
"\"number-or-null\"",
":",
"{",
"\"type\"",
":",
"[",
"\"number\"",
",",
"\"null\"",
"]",
"}",
",",
"\"enum-or-null\"",
":",
"{",
"\"anyOf\"",
":",
"[",
"{",
"\"enum\"",
":",
"[",
"\"a\"",
",",
"\"b\"",
",",
"\"c\"",
"]",
"}",
",",
"{",
"\"type\"",
":",
"\"null\"",
"}",
"]",
"}",
",",
"}",
",",
"}"
] | [
208,
0
] | [
224,
5
] | python | en | ['en', 'error', 'th'] | False |
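A sketch of the null-union behavior (assumes jsonschema):

import jsonschema

schema = null_fields_schema()
jsonschema.validate({"string-or-null": None}, schema)  # the type union admits null
jsonschema.validate({"enum-or-null": "b"}, schema)     # an enum member passes
try:
    jsonschema.validate({"enum-or-null": "z"}, schema)
except jsonschema.ValidationError:
    print("'z' matches neither the enum nor null")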
open_tcp_listeners | (port, *, host=None, backlog=None) | Create :class:`SocketListener` objects to listen for TCP connections.
Args:
port (int): The port to listen on.
If you use 0 as your port, then the kernel will automatically pick
an arbitrary open port. But be careful: if you use this feature when
binding to multiple IP addresses, then each IP address will get its
own random port, and the returned listeners will probably be
listening on different ports. In particular, this will happen if you
use ``host=None`` – which is the default – because in this case
:func:`open_tcp_listeners` will bind to both the IPv4 wildcard
address (``0.0.0.0``) and also the IPv6 wildcard address (``::``).
host (str, bytes-like, or None): The local interface to bind to. This is
passed to :func:`~socket.getaddrinfo` with the ``AI_PASSIVE`` flag
set.
If you want to bind to the wildcard address on both IPv4 and IPv6,
in order to accept connections on all available interfaces, then
pass ``None``. This is the default.
If you have a specific interface you want to bind to, pass its IP
address or hostname here. If a hostname resolves to multiple IP
addresses, this function will open one listener on each of them.
If you want to use only IPv4, or only IPv6, but want to accept on
all interfaces, pass the family-specific wildcard address:
``"0.0.0.0"`` for IPv4-only and ``"::"`` for IPv6-only.
backlog (int or None): The listen backlog to use. If you leave this as
``None`` then Trio will pick a good default. (Currently: whatever
your system has configured as the maximum backlog.)
Returns:
list of :class:`SocketListener`
| Create :class:`SocketListener` objects to listen for TCP connections. | async def open_tcp_listeners(port, *, host=None, backlog=None):
"""Create :class:`SocketListener` objects to listen for TCP connections.
Args:
port (int): The port to listen on.
If you use 0 as your port, then the kernel will automatically pick
an arbitrary open port. But be careful: if you use this feature when
binding to multiple IP addresses, then each IP address will get its
own random port, and the returned listeners will probably be
listening on different ports. In particular, this will happen if you
use ``host=None`` – which is the default – because in this case
:func:`open_tcp_listeners` will bind to both the IPv4 wildcard
address (``0.0.0.0``) and also the IPv6 wildcard address (``::``).
host (str, bytes-like, or None): The local interface to bind to. This is
passed to :func:`~socket.getaddrinfo` with the ``AI_PASSIVE`` flag
set.
If you want to bind to the wildcard address on both IPv4 and IPv6,
in order to accept connections on all available interfaces, then
pass ``None``. This is the default.
If you have a specific interface you want to bind to, pass its IP
address or hostname here. If a hostname resolves to multiple IP
addresses, this function will open one listener on each of them.
If you want to use only IPv4, or only IPv6, but want to accept on
all interfaces, pass the family-specific wildcard address:
``"0.0.0.0"`` for IPv4-only and ``"::"`` for IPv6-only.
backlog (int or None): The listen backlog to use. If you leave this as
``None`` then Trio will pick a good default. (Currently: whatever
your system has configured as the maximum backlog.)
Returns:
list of :class:`SocketListener`
"""
# getaddrinfo sometimes allows port=None, sometimes not (depending on
# whether host=None). And on some systems it treats "" as 0, others it
# doesn't:
# http://klickverbot.at/blog/2012/01/getaddrinfo-edge-case-behavior-on-windows-linux-and-osx/
if not isinstance(port, int):
raise TypeError("port must be an int not {!r}".format(port))
backlog = _compute_backlog(backlog)
addresses = await tsocket.getaddrinfo(
host, port, type=tsocket.SOCK_STREAM, flags=tsocket.AI_PASSIVE
)
listeners = []
unsupported_address_families = []
try:
for family, type, proto, _, sockaddr in addresses:
try:
sock = tsocket.socket(family, type, proto)
except OSError as ex:
if ex.errno == errno.EAFNOSUPPORT:
# If a system only supports IPv4, or only IPv6, it
# is still likely that getaddrinfo will return
# both an IPv4 and an IPv6 address. As long as at
# least one of the returned addresses can be
# turned into a socket, we won't complain about a
# failure to create the other.
unsupported_address_families.append(ex)
continue
else:
raise
try:
# See https://github.com/python-trio/trio/issues/39
if sys.platform != "win32":
sock.setsockopt(tsocket.SOL_SOCKET, tsocket.SO_REUSEADDR, 1)
if family == tsocket.AF_INET6:
sock.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, 1)
await sock.bind(sockaddr)
sock.listen(backlog)
listeners.append(trio.SocketListener(sock))
except:
sock.close()
raise
except:
for listener in listeners:
listener.socket.close()
raise
if unsupported_address_families and not listeners:
raise OSError(
errno.EAFNOSUPPORT,
"This system doesn't support any of the kinds of "
"socket that that address could use",
) from trio.MultiError(unsupported_address_families)
return listeners | [
"async",
"def",
"open_tcp_listeners",
"(",
"port",
",",
"*",
",",
"host",
"=",
"None",
",",
"backlog",
"=",
"None",
")",
":",
"# getaddrinfo sometimes allows port=None, sometimes not (depending on",
"# whether host=None). And on some systems it treats \"\" as 0, others it",
"# doesn't:",
"# http://klickverbot.at/blog/2012/01/getaddrinfo-edge-case-behavior-on-windows-linux-and-osx/",
"if",
"not",
"isinstance",
"(",
"port",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"port must be an int not {!r}\"",
".",
"format",
"(",
"port",
")",
")",
"backlog",
"=",
"_compute_backlog",
"(",
"backlog",
")",
"addresses",
"=",
"await",
"tsocket",
".",
"getaddrinfo",
"(",
"host",
",",
"port",
",",
"type",
"=",
"tsocket",
".",
"SOCK_STREAM",
",",
"flags",
"=",
"tsocket",
".",
"AI_PASSIVE",
")",
"listeners",
"=",
"[",
"]",
"unsupported_address_families",
"=",
"[",
"]",
"try",
":",
"for",
"family",
",",
"type",
",",
"proto",
",",
"_",
",",
"sockaddr",
"in",
"addresses",
":",
"try",
":",
"sock",
"=",
"tsocket",
".",
"socket",
"(",
"family",
",",
"type",
",",
"proto",
")",
"except",
"OSError",
"as",
"ex",
":",
"if",
"ex",
".",
"errno",
"==",
"errno",
".",
"EAFNOSUPPORT",
":",
"# If a system only supports IPv4, or only IPv6, it",
"# is still likely that getaddrinfo will return",
"# both an IPv4 and an IPv6 address. As long as at",
"# least one of the returned addresses can be",
"# turned into a socket, we won't complain about a",
"# failure to create the other.",
"unsupported_address_families",
".",
"append",
"(",
"ex",
")",
"continue",
"else",
":",
"raise",
"try",
":",
"# See https://github.com/python-trio/trio/issues/39",
"if",
"sys",
".",
"platform",
"!=",
"\"win32\"",
":",
"sock",
".",
"setsockopt",
"(",
"tsocket",
".",
"SOL_SOCKET",
",",
"tsocket",
".",
"SO_REUSEADDR",
",",
"1",
")",
"if",
"family",
"==",
"tsocket",
".",
"AF_INET6",
":",
"sock",
".",
"setsockopt",
"(",
"tsocket",
".",
"IPPROTO_IPV6",
",",
"tsocket",
".",
"IPV6_V6ONLY",
",",
"1",
")",
"await",
"sock",
".",
"bind",
"(",
"sockaddr",
")",
"sock",
".",
"listen",
"(",
"backlog",
")",
"listeners",
".",
"append",
"(",
"trio",
".",
"SocketListener",
"(",
"sock",
")",
")",
"except",
":",
"sock",
".",
"close",
"(",
")",
"raise",
"except",
":",
"for",
"listener",
"in",
"listeners",
":",
"listener",
".",
"socket",
".",
"close",
"(",
")",
"raise",
"if",
"unsupported_address_families",
"and",
"not",
"listeners",
":",
"raise",
"OSError",
"(",
"errno",
".",
"EAFNOSUPPORT",
",",
"\"This system doesn't support any of the kinds of \"",
"\"socket that that address could use\"",
",",
")",
"from",
"trio",
".",
"MultiError",
"(",
"unsupported_address_families",
")",
"return",
"listeners"
] | [
44,
0
] | [
142,
20
] | python | en | ['en', 'en', 'en'] | True |
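A hedged serving sketch for the listeners returned above (assumes a recent trio; handler is a hypothetical per-connection coroutine):

import trio

async def handler(stream):
    # hypothetical handler: send one greeting, then close the connection
    await stream.send_all(b"hello\n")
    await stream.aclose()

async def main():
    listeners = await trio.open_tcp_listeners(12345, host="127.0.0.1")
    await trio.serve_listeners(handler, listeners)  # accepts until cancelled

# trio.run(main)  # commented out: this serves forever on 127.0.0.1:12345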