Dataset columns:
- identifier: string, lengths 1 to 155
- parameters: string, lengths 2 to 6.09k
- docstring: string, lengths 11 to 63.4k
- docstring_summary: string, lengths 0 to 63.4k
- function: string, lengths 29 to 99.8k
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string, 1 class
- docstring_language: string, lengths 2 to 7
- docstring_language_predictions: string, lengths 18 to 23
- is_langid_reliable: string, 2 classes

identifier: ACMapping.__init__
docstring_summary: Initialize an ACMapping from the raw fields of its ACResource.
def __init__(self, rkey: str, location: str, *,
             name: str,
             kind: str="Mapping",
             apiVersion: Optional[str]=None,
             serialization: Optional[str]=None,
             service: str,
             prefix: str,
             prefix_regex: bool=False,
             rewrite: Optional[str]="/",
             case_sensitive: bool=False,
             grpc: bool=False,
             bypass_auth: bool=False,
             bypass_error_response_overrides: bool=False,
             # We don't list "method" or "method_regex" above because if they're
             # not present, we want them to be _not present_. Having them be always
             # present with an optional method is too annoying for schema validation
             # at this point.
             **kwargs) -> None:
    """
    Initialize an ACMapping from the raw fields of its ACResource.
    """
    # print("ACMapping __init__ (%s %s)" % (kind, name))

    # First init our superclass...
    super().__init__(rkey, location,
                     kind=kind,
                     name=name,
                     apiVersion=apiVersion,
                     serialization=serialization,
                     service=service,
                     prefix=prefix,
                     prefix_regex=prefix_regex,
                     rewrite=rewrite,
                     case_sensitive=case_sensitive,
                     grpc=grpc,
                     bypass_auth=bypass_auth,
                     bypass_error_response_overrides=bypass_error_response_overrides,
                     **kwargs)
"def",
"__init__",
"(",
"self",
",",
"rkey",
":",
"str",
",",
"location",
":",
"str",
",",
"*",
",",
"name",
":",
"str",
",",
"kind",
":",
"str",
"=",
"\"Mapping\"",
",",
"apiVersion",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"serialization",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"service",
":",
"str",
",",
"prefix",
":",
"str",
",",
"prefix_regex",
":",
"bool",
"=",
"False",
",",
"rewrite",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"/\"",
",",
"case_sensitive",
":",
"bool",
"=",
"False",
",",
"grpc",
":",
"bool",
"=",
"False",
",",
"bypass_auth",
":",
"bool",
"=",
"False",
",",
"bypass_error_response_overrides",
":",
"bool",
"=",
"False",
",",
"# We don't list \"method\" or \"method_regex\" above because if they're",
"# not present, we want them to be _not present_. Having them be always",
"# present with an optional method is too annoying for schema validation",
"# at this point.",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"# print(\"ACMapping __init__ (%s %s)\" % (kind, name))",
"# First init our superclass...",
"super",
"(",
")",
".",
"__init__",
"(",
"rkey",
",",
"location",
",",
"kind",
"=",
"kind",
",",
"name",
"=",
"name",
",",
"apiVersion",
"=",
"apiVersion",
",",
"serialization",
"=",
"serialization",
",",
"service",
"=",
"service",
",",
"prefix",
"=",
"prefix",
",",
"prefix_regex",
"=",
"prefix_regex",
",",
"rewrite",
"=",
"rewrite",
",",
"case_sensitive",
"=",
"case_sensitive",
",",
"grpc",
"=",
"grpc",
",",
"bypass_auth",
"=",
"bypass_auth",
",",
"bypass_error_response_overrides",
"=",
"bypass_error_response_overrides",
",",
"*",
"*",
"kwargs",
")"
] | [
45,
4
] | [
87,
34
] | python | en | ['en', 'error', 'th'] | False |

identifier: split_command
docstring_summary: Split a command string into a string list using shlex; a command that is already a string list passes through unchanged.
def split_command(cmd, posix=None):
    '''
    - cmd is string list -> nothing to do
    - cmd is string -> split it using shlex
    :param cmd: string ('ls -l') or list of strings (['ls','-l'])
    :rtype: string list
    '''
    if not isinstance(cmd, string_types):
        # cmd is string list
        pass
    else:
        if not PY3:
            # cmd is string
            # The shlex module currently does not support Unicode input in 2.x
            if isinstance(cmd, unicode):  # noqa: ignore=F821
                try:
                    cmd = unicodedata.normalize(
                        'NFKD', cmd).encode('ascii', 'strict')
                except UnicodeEncodeError:
                    raise EasyProcessUnicodeError(
                        'unicode command "%s" can not be processed.' % cmd + ''
                        'Use string list instead of string')
        if posix is None:
            posix = 'win' not in sys.platform
        cmd = shlex.split(cmd, posix=posix)
    return cmd
"def",
"split_command",
"(",
"cmd",
",",
"posix",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"cmd",
",",
"string_types",
")",
":",
"# cmd is string list",
"pass",
"else",
":",
"if",
"not",
"PY3",
":",
"# cmd is string",
"# The shlex module currently does not support Unicode input in 2.x",
"if",
"isinstance",
"(",
"cmd",
",",
"unicode",
")",
":",
"# noqa: ignore=F821",
"try",
":",
"cmd",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"cmd",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'strict'",
")",
"except",
"UnicodeEncodeError",
":",
"raise",
"EasyProcessUnicodeError",
"(",
"'unicode command \"%s\" can not be processed.'",
"%",
"cmd",
"+",
"''",
"'Use string list instead of string'",
")",
"if",
"posix",
"is",
"None",
":",
"posix",
"=",
"'win'",
"not",
"in",
"sys",
".",
"platform",
"cmd",
"=",
"shlex",
".",
"split",
"(",
"cmd",
",",
"posix",
"=",
"posix",
")",
"return",
"cmd"
] | [
17,
0
] | [
42,
14
] | python | en | ['en', 'error', 'th'] | False |
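
A quick usage sketch on a POSIX platform (inputs are illustrative; the function relies on its module's shlex, sys, and string_types imports):

    split_command('ls -l /tmp')    # -> ['ls', '-l', '/tmp']
    split_command(['ls', '-l'])    # already a list -> returned unchanged
    split_command('echo "a b"')    # shlex honors quoting -> ['echo', 'a b']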

identifier: LocationManager.push_reset
docstring_summary: Like push, but simply resets ocount keeping the current filename. Useful for changing resource types.
def push_reset(self) -> ContextManager[Location]:
    """
    Like push, but simply resets ocount keeping the current filename. Useful
    for changing resource types.
    """
    return self.push(filename=self.current.filename)
"def",
"push_reset",
"(",
"self",
")",
"->",
"ContextManager",
"[",
"Location",
"]",
":",
"return",
"self",
".",
"push",
"(",
"filename",
"=",
"self",
".",
"current",
".",
"filename",
")"
] | [
49,
4
] | [
54,
56
] | python | en | ['en', 'error', 'th'] | False |

identifier: LocationManager.mark_annotated
docstring_summary: Keeps the current stack, adding an annotation flag to the end of the filename.
def mark_annotated(self) -> ContextManager[Location]:
    """
    Keeps the current stack, adding an annotation flag to the end of the
    filename.
    """
    previous_filename = self.current.filename

    if self.current.filename and not self.current.filename.endswith(':annotation'):
        self.current.filename += ':annotation'

    @contextlib.contextmanager
    def cleaner():
        yield previous_filename
        self.current.filename = previous_filename

    return cleaner()
"def",
"mark_annotated",
"(",
"self",
")",
"->",
"ContextManager",
"[",
"Location",
"]",
":",
"previous_filename",
"=",
"self",
".",
"current",
".",
"filename",
"if",
"self",
".",
"current",
".",
"filename",
"and",
"not",
"self",
".",
"current",
".",
"filename",
".",
"endswith",
"(",
"':annotation'",
")",
":",
"self",
".",
"current",
".",
"filename",
"+=",
"':annotation'",
"@",
"contextlib",
".",
"contextmanager",
"def",
"cleaner",
"(",
")",
":",
"yield",
"previous_filename",
"self",
".",
"current",
".",
"filename",
"=",
"previous_filename",
"return",
"cleaner",
"(",
")"
] | [
61,
4
] | [
75,
24
] | python | en | ['en', 'error', 'th'] | False |
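
A minimal usage sketch (the manager instance and filename are hypothetical):

    # lm is a LocationManager whose current filename is "svc.yaml"
    with lm.mark_annotated():
        assert lm.current.filename == "svc.yaml:annotation"
    # on exit, the filename is restored to "svc.yaml"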

identifier: init
docstring_summary: Initialize a new Great Expectations project.
def init(ctx, usage_stats):
    """
    Initialize a new Great Expectations project.

    This guided input walks the user through setting up a new project and also
    onboards a new developer in an existing project.

    It scaffolds directories, sets up notebooks, creates a project file, and
    appends to a `.gitignore` file.
    """
    directory = toolkit.parse_cli_config_file_location(
        config_file_location=ctx.obj.config_file_location
    ).get("directory")
    if directory is None:
        directory = os.getcwd()
    target_directory = os.path.abspath(directory)
    ge_dir = _get_full_path_to_ge_dir(target_directory)
    cli_message(GREETING)

    if DataContext.does_config_exist_on_disk(ge_dir):
        message = (
            f"""Warning. An existing `{DataContext.GE_YML}` was found here: {ge_dir}."""
        )
        warnings.warn(message)
        try:
            project_file_structure_exists = (
                DataContext.does_config_exist_on_disk(ge_dir)
                and DataContext.all_uncommitted_directories_exist(ge_dir)
                and DataContext.config_variables_yml_exist(ge_dir)
            )
            if project_file_structure_exists:
                cli_message(PROJECT_IS_COMPLETE)
                sys.exit(0)
            else:
                # Prompt to modify the project to add missing files
                if not ctx.obj.assume_yes:
                    if not click.confirm(COMPLETE_ONBOARDING_PROMPT, default=True):
                        cli_message(RUN_INIT_AGAIN)
                        exit(0)
        except (DataContextError, DatasourceInitializationError) as e:
            cli_message("<red>{}</red>".format(e.message))
            sys.exit(1)

        try:
            DataContext.create(target_directory, usage_statistics_enabled=usage_stats)
            cli_message(ONBOARDING_COMPLETE)
        except DataContextError as e:
            cli_message("<red>{}</red>".format(e.message))
            # TODO ensure this is covered by a test
            exit(5)
    else:
        if not ctx.obj.assume_yes:
            if not click.confirm(LETS_BEGIN_PROMPT, default=True):
                cli_message(RUN_INIT_AGAIN)
                exit(0)

        try:
            context = DataContext.create(
                target_directory, usage_statistics_enabled=usage_stats
            )
            toolkit.send_usage_message(
                data_context=context, event="cli.init.create", success=True
            )
        except DataContextError as e:
            # TODO ensure this is covered by a test
            cli_message("<red>{}</red>".format(e))

    cli_message(SECTION_SEPARATOR)
    cli_message(READY_FOR_CUSTOMIZATION)
    cli_message(HOW_TO_CUSTOMIZE)
    sys.exit(0)
"def",
"init",
"(",
"ctx",
",",
"usage_stats",
")",
":",
"directory",
"=",
"toolkit",
".",
"parse_cli_config_file_location",
"(",
"config_file_location",
"=",
"ctx",
".",
"obj",
".",
"config_file_location",
")",
".",
"get",
"(",
"\"directory\"",
")",
"if",
"directory",
"is",
"None",
":",
"directory",
"=",
"os",
".",
"getcwd",
"(",
")",
"target_directory",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"directory",
")",
"ge_dir",
"=",
"_get_full_path_to_ge_dir",
"(",
"target_directory",
")",
"cli_message",
"(",
"GREETING",
")",
"if",
"DataContext",
".",
"does_config_exist_on_disk",
"(",
"ge_dir",
")",
":",
"message",
"=",
"(",
"f\"\"\"Warning. An existing `{DataContext.GE_YML}` was found here: {ge_dir}.\"\"\"",
")",
"warnings",
".",
"warn",
"(",
"message",
")",
"try",
":",
"project_file_structure_exists",
"=",
"(",
"DataContext",
".",
"does_config_exist_on_disk",
"(",
"ge_dir",
")",
"and",
"DataContext",
".",
"all_uncommitted_directories_exist",
"(",
"ge_dir",
")",
"and",
"DataContext",
".",
"config_variables_yml_exist",
"(",
"ge_dir",
")",
")",
"if",
"project_file_structure_exists",
":",
"cli_message",
"(",
"PROJECT_IS_COMPLETE",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"else",
":",
"# Prompt to modify the project to add missing files",
"if",
"not",
"ctx",
".",
"obj",
".",
"assume_yes",
":",
"if",
"not",
"click",
".",
"confirm",
"(",
"COMPLETE_ONBOARDING_PROMPT",
",",
"default",
"=",
"True",
")",
":",
"cli_message",
"(",
"RUN_INIT_AGAIN",
")",
"exit",
"(",
"0",
")",
"except",
"(",
"DataContextError",
",",
"DatasourceInitializationError",
")",
"as",
"e",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"e",
".",
"message",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"DataContext",
".",
"create",
"(",
"target_directory",
",",
"usage_statistics_enabled",
"=",
"usage_stats",
")",
"cli_message",
"(",
"ONBOARDING_COMPLETE",
")",
"except",
"DataContextError",
"as",
"e",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"e",
".",
"message",
")",
")",
"# TODO ensure this is covered by a test",
"exit",
"(",
"5",
")",
"else",
":",
"if",
"not",
"ctx",
".",
"obj",
".",
"assume_yes",
":",
"if",
"not",
"click",
".",
"confirm",
"(",
"LETS_BEGIN_PROMPT",
",",
"default",
"=",
"True",
")",
":",
"cli_message",
"(",
"RUN_INIT_AGAIN",
")",
"exit",
"(",
"0",
")",
"try",
":",
"context",
"=",
"DataContext",
".",
"create",
"(",
"target_directory",
",",
"usage_statistics_enabled",
"=",
"usage_stats",
")",
"toolkit",
".",
"send_usage_message",
"(",
"data_context",
"=",
"context",
",",
"event",
"=",
"\"cli.init.create\"",
",",
"success",
"=",
"True",
")",
"except",
"DataContextError",
"as",
"e",
":",
"# TODO ensure this is covered by a test",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"e",
")",
")",
"cli_message",
"(",
"SECTION_SEPARATOR",
")",
"cli_message",
"(",
"READY_FOR_CUSTOMIZATION",
")",
"cli_message",
"(",
"HOW_TO_CUSTOMIZE",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
41,
0
] | [
113,
15
] | python | en | ['en', 'error', 'th'] | False |

identifier: TemporalModelBase.receptive_field
docstring_summary: Return the total receptive field of this model as # of frames.
def receptive_field(self):
    """
    Return the total receptive field of this model as # of frames.
    """
    frames = 0
    for f in self.pad:
        frames += f
    self.frames = frames
    return 1 + 2 * frames
"def",
"receptive_field",
"(",
"self",
")",
":",
"frames",
"=",
"0",
"for",
"f",
"in",
"self",
".",
"pad",
":",
"frames",
"+=",
"f",
"self",
".",
"frames",
"=",
"frames",
"return",
"1",
"+",
"2",
"*",
"frames"
] | [
46,
4
] | [
54,
29
] | python | en | ['en', 'error', 'th'] | False |
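
A worked example, assuming self.pad is seeded with filter_widths[0] // 2 and then grows by the per-block pads appended in the __init__ methods below:

    # filter_widths = [3, 3, 3]  ->  self.pad = [1, 3, 9]
    # receptive_field() = 1 + 2 * (1 + 3 + 9) = 27 frames, i.e. 3 * 3 * 3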

identifier: TemporalModelBase.total_causal_shift
docstring_summary: Return the asymmetric offset for sequence padding.
def total_causal_shift(self):
    """
    Return the asymmetric offset for sequence padding.

    The returned value is typically 0 if causal convolutions are disabled,
    otherwise it is half the receptive field.
    """
    frames = self.causal_shift[0]
    next_dilation = self.filter_widths[0]
    for i in range(1, len(self.filter_widths)):
        frames += self.causal_shift[i] * next_dilation
        next_dilation *= self.filter_widths[i]
    return frames
"def",
"total_causal_shift",
"(",
"self",
")",
":",
"frames",
"=",
"self",
".",
"causal_shift",
"[",
"0",
"]",
"next_dilation",
"=",
"self",
".",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"self",
".",
"filter_widths",
")",
")",
":",
"frames",
"+=",
"self",
".",
"causal_shift",
"[",
"i",
"]",
"*",
"next_dilation",
"next_dilation",
"*=",
"self",
".",
"filter_widths",
"[",
"i",
"]",
"return",
"frames"
] | [
56,
4
] | [
67,
21
] | python | en | ['en', 'error', 'th'] | False |

identifier: TemporalModelBase._get_next_seq
docstring_summary: Generate the input information of the next layer.
def _get_next_seq(self, inc, channels, out_seq):
    """
    Generate the input information of the next layer.

    :param inc: input channel size of each group, type: list, e.g. [455, 569]
    :param channels: output channel size of the whole layer, type: int, e.g. 1024
    :param out_seq: output sequence index of each group, which decides how to group
        with those indexes, type: list, e.g. [[0], [1]]
    :return: next input sequence and next output sequence index
    """
    in_ch_sum = 0
    for index, i in enumerate(out_seq):
        in_ch_sum += sum(map(lambda x: inc[x], i))

    out_chs = []
    next_seq = []
    for index, i in enumerate(out_seq):
        in_ch = sum(map(lambda x: inc[x], i))
        if len(out_seq) == 1:
            out_ch = channels
        elif index == len(out_seq) - 1:
            out_ch = channels - sum(out_chs)
        else:
            out_ch = int(in_ch / in_ch_sum * channels)
        out_chs.append(out_ch)
        next_seq.append([index])
    return out_chs, next_seq
"def",
"_get_next_seq",
"(",
"self",
",",
"inc",
",",
"channels",
",",
"out_seq",
")",
":",
"in_ch_sum",
"=",
"0",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"out_seq",
")",
":",
"in_ch_sum",
"+=",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"inc",
"[",
"x",
"]",
",",
"i",
")",
")",
"out_chs",
"=",
"[",
"]",
"next_seq",
"=",
"[",
"]",
"for",
"index",
",",
"i",
"in",
"enumerate",
"(",
"out_seq",
")",
":",
"in_ch",
"=",
"sum",
"(",
"map",
"(",
"lambda",
"x",
":",
"inc",
"[",
"x",
"]",
",",
"i",
")",
")",
"if",
"len",
"(",
"out_seq",
")",
"==",
"1",
":",
"out_ch",
"=",
"channels",
"elif",
"index",
"==",
"len",
"(",
"out_seq",
")",
"-",
"1",
":",
"out_ch",
"=",
"channels",
"-",
"sum",
"(",
"out_chs",
")",
"else",
":",
"out_ch",
"=",
"int",
"(",
"in_ch",
"/",
"in_ch_sum",
"*",
"channels",
")",
"out_chs",
".",
"append",
"(",
"out_ch",
")",
"next_seq",
".",
"append",
"(",
"[",
"index",
"]",
")",
"return",
"out_chs",
",",
"next_seq"
] | [
69,
4
] | [
92,
32
] | python | en | ['en', 'error', 'th'] | False |
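
Tracing the docstring's own numbers through the method:

    # inc = [455, 569], channels = 1024, out_seq = [[0], [1]]
    # in_ch_sum = 455 + 569 = 1024
    # group 0 (not last): out_ch = int(455 / 1024 * 1024) = 455
    # group 1 (last):     out_ch = 1024 - 455 = 569
    # -> returns ([455, 569], [[0], [1]])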

identifier: TemporalModelBase._get_all_seq
docstring_summary: Get all sequence info for a model.
def _get_all_seq(self, inc, channels, out_seq, filter_widths):
    """
    :return: Get all sequence info for a model.
    """
    in_out_seq = []
    in_out_seq.append(self._get_next_seq(inc, channels, out_seq))
    # Generate input sequence and output sequence of each layer
    for i in range(1, len(filter_widths)):
        in_out_seq.append(self._get_next_seq(in_out_seq[2*i-2][0], channels, in_out_seq[2*i-2][1]))
        in_out_seq.append(self._get_next_seq(in_out_seq[2*i-1][0], channels, in_out_seq[2*i-1][1]))
    # For the final layer:
    in_out_seq.append(self._get_next_seq(in_out_seq[2*i][0], channels, in_out_seq[2*i][1]))
    return in_out_seq
"def",
"_get_all_seq",
"(",
"self",
",",
"inc",
",",
"channels",
",",
"out_seq",
",",
"filter_widths",
")",
":",
"in_out_seq",
"=",
"[",
"]",
"in_out_seq",
".",
"append",
"(",
"self",
".",
"_get_next_seq",
"(",
"inc",
",",
"channels",
",",
"out_seq",
")",
")",
"# Generate input sequence and output sequence of each layer",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"in_out_seq",
".",
"append",
"(",
"self",
".",
"_get_next_seq",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"1",
"]",
")",
")",
"in_out_seq",
".",
"append",
"(",
"self",
".",
"_get_next_seq",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"1",
"]",
")",
")",
"# For Final layer:",
"in_out_seq",
".",
"append",
"(",
"self",
".",
"_get_next_seq",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"]",
"[",
"1",
"]",
")",
")",
"return",
"in_out_seq"
] | [
94,
4
] | [
106,
25
] | python | en | ['en', 'error', 'th'] | False |

identifier: TemporalModel.__init__
docstring_summary: Initialize this model.
def __init__(self, num_joints_in, in_features, num_joints_out,
             filter_widths, causal=False, dropout=0.25, channels=1024, dense=False):
    """
    Initialize this model.

    Arguments:
    num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
    in_features -- number of input features for each joint (typically 2 for 2D input)
    num_joints_out -- number of output joints (can be different than input)
    filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
    causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
    dropout -- dropout probability
    channels -- number of convolution channels
    dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
    """
    super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)

    self.expand_conv = FlexGroupLayer(self.conv_inc, channels, self.conv_seq, kernel_size=filter_widths[0],
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=False)
    in_out_seq = self._get_all_seq(self.conv_inc, channels, self.conv_seq, filter_widths)

    layers_conv = []
    layers_bn = []

    self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
    next_dilation = filter_widths[0]
    for i in range(1, len(filter_widths)):
        self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
        self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-2][0], channels, in_out_seq[2*i-2][1],
                                          kernel_size=filter_widths[0], dilation=next_dilation,
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-1][0], channels, in_out_seq[2*i-1][1],
                                          kernel_size=1, dilation=1,
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        next_dilation *= filter_widths[i]

    self.final_layer = FlexGroupLayer(in_out_seq[-1][0], self.final_outc, in_out_seq[2*i][1],
                                      kernel_size=1, dilation=1,
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=True)

    self.layers_conv = nn.ModuleList(layers_conv)
    self.layers_bn = nn.ModuleList(layers_bn)
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
",",
"dense",
"=",
"False",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"FlexGroupLayer",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
"in_out_seq",
"=",
"self",
".",
"_get_all_seq",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"filter_widths",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
")",
"//",
"2",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
"*",
"next_dilation",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"dilation",
"=",
"next_dilation",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"self",
".",
"final_layer",
"=",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"self",
".",
"final_outc",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"True",
")",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")"
] | [
138,
4
] | [
191,
49
] | python | en | ['en', 'error', 'th'] | False |
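
A hypothetical instantiation sketch (it assumes the module-level args namespace consumed by FlexGroupLayer is already configured, and that self.pad is seeded as described above); five width-3 blocks give a 3^5 = 243-frame receptive field:

    model = TemporalModel(num_joints_in=17, in_features=2, num_joints_out=17,
                          filter_widths=[3, 3, 3, 3, 3], causal=False,
                          dropout=0.25, channels=1024)
    assert model.receptive_field() == 243  # 1 + 2 * (1 + 3 + 9 + 27 + 81)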

identifier: Same_Model.__init__
docstring_summary: Initialize this model.
def __init__(self, num_joints_in, in_features, num_joints_out,
             filter_widths, causal=False, dropout=0.25, channels=1024, dense=False):
    """
    Initialize this model.

    Arguments:
    num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
    in_features -- number of input features for each joint (typically 2 for 2D input)
    num_joints_out -- number of output joints (can be different than input)
    filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
    causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
    dropout -- dropout probability
    channels -- number of convolution channels
    dense -- use regular dense convolutions instead of dilated convolutions (ablation experiment)
    """
    super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)

    self.expand_conv = FlexGroupLayer(self.conv_inc, channels, self.conv_seq, kernel_size=filter_widths[0],
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=False)
    in_out_seq = self._get_all_seq(self.conv_inc, channels, self.conv_seq, filter_widths)

    layers_conv = []
    layers_bn = []
    self.ref_pad = []

    self.causal_shift = [(filter_widths[0]) // 2 if causal else 0]
    next_dilation = filter_widths[0]
    for i in range(1, len(filter_widths)):
        self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
        self.causal_shift.append((filter_widths[i] // 2 * next_dilation) if causal else 0)
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-2][0], channels, in_out_seq[2*i-2][1],
                                          kernel_size=filter_widths[0], dilation=next_dilation,
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-1][0], channels, in_out_seq[2*i-1][1],
                                          kernel_size=1, dilation=1,
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        # self.ref_pad.append(nn.ReplicationPad1d(next_dilation))
        self.ref_pad.append(nn.ReflectionPad1d(next_dilation))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        next_dilation *= filter_widths[i]

    self.final_layer = FlexGroupLayer(in_out_seq[-1][0], self.final_outc, in_out_seq[2*i][1],
                                      kernel_size=1, dilation=1,
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=True)
    self.reflec = nn.ReflectionPad1d(1)
    # self.reflec = nn.ReplicationPad1d(1)

    self.layers_conv = nn.ModuleList(layers_conv)
    self.layers_bn = nn.ModuleList(layers_bn)
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
",",
"dense",
"=",
"False",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"FlexGroupLayer",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
"in_out_seq",
"=",
"self",
".",
"_get_all_seq",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"filter_widths",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"ref_pad",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
")",
"//",
"2",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
"*",
"next_dilation",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"dilation",
"=",
"next_dilation",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"#self.ref_pad.append(nn.ReplicationPad1d(next_dilation))",
"self",
".",
"ref_pad",
".",
"append",
"(",
"nn",
".",
"ReflectionPad1d",
"(",
"next_dilation",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"self",
".",
"final_layer",
"=",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"self",
".",
"final_outc",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"True",
")",
"self",
".",
"reflec",
"=",
"nn",
".",
"ReflectionPad1d",
"(",
"1",
")",
"#self.reflec = nn.ReplicationPad1d(1)",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")"
] | [
211,
4
] | [
268,
49
] | python | en | ['en', 'error', 'th'] | False |

identifier: TemporalModelOptimized1f.__init__
docstring_summary: Initialize this model.
def __init__(self, num_joints_in, in_features, num_joints_out,
             filter_widths, causal=False, dropout=0.25, channels=1024):
    """
    Initialize this model.

    Arguments:
    num_joints_in -- number of input joints (e.g. 17 for Human3.6M)
    in_features -- number of input features for each joint (typically 2 for 2D input)
    num_joints_out -- number of output joints (can be different than input)
    filter_widths -- list of convolution widths, which also determines the # of blocks and receptive field
    causal -- use causal convolutions instead of symmetric convolutions (for real-time applications)
    dropout -- dropout probability
    channels -- number of convolution channels
    """
    super().__init__(num_joints_in, in_features, num_joints_out, filter_widths, causal, dropout, channels)

    self.expand_conv = FlexGroupLayer(self.conv_inc, channels, self.conv_seq,
                                      kernel_size=filter_widths[0], stride=filter_widths[0],
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=False)
    in_out_seq = self._get_all_seq(self.conv_inc, channels, self.conv_seq, filter_widths)

    layers_conv = []
    layers_bn = []

    self.causal_shift = [(filter_widths[0] // 2) if causal else 0]
    next_dilation = filter_widths[0]
    for i in range(1, len(filter_widths)):
        self.pad.append((filter_widths[i] - 1) * next_dilation // 2)
        self.causal_shift.append((filter_widths[i] // 2) if causal else 0)
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-2][0], channels, in_out_seq[2*i-2][1],
                                          kernel_size=filter_widths[0], stride=filter_widths[0],
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        layers_conv.append(FlexGroupLayer(in_out_seq[2*i-1][0], channels, in_out_seq[2*i-1][1],
                                          kernel_size=1, dilation=1,
                                          feature_split=args.split, recombine=args.recombine,
                                          fix_seq=self.conv_seq, mean_func=args.mean_func,
                                          ups_mean=args.ups_mean, bias=False))
        layers_bn.append(nn.BatchNorm1d(channels, momentum=0.1))
        next_dilation *= filter_widths[i]

    self.final_layer = FlexGroupLayer(in_out_seq[-1][0], self.final_outc, in_out_seq[2*i][1],
                                      kernel_size=1, dilation=1,
                                      feature_split=args.split, recombine=args.recombine,
                                      fix_seq=self.conv_seq, mean_func=args.mean_func,
                                      ups_mean=args.ups_mean, bias=True)

    self.layers_conv = nn.ModuleList(layers_conv)
    self.layers_bn = nn.ModuleList(layers_bn)
"def",
"__init__",
"(",
"self",
",",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
"=",
"False",
",",
"dropout",
"=",
"0.25",
",",
"channels",
"=",
"1024",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"num_joints_in",
",",
"in_features",
",",
"num_joints_out",
",",
"filter_widths",
",",
"causal",
",",
"dropout",
",",
"channels",
")",
"self",
".",
"expand_conv",
"=",
"FlexGroupLayer",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"stride",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
"in_out_seq",
"=",
"self",
".",
"_get_all_seq",
"(",
"self",
".",
"conv_inc",
",",
"channels",
",",
"self",
".",
"conv_seq",
",",
"filter_widths",
")",
"layers_conv",
"=",
"[",
"]",
"layers_bn",
"=",
"[",
"]",
"self",
".",
"causal_shift",
"=",
"[",
"(",
"filter_widths",
"[",
"0",
"]",
"//",
"2",
")",
"if",
"causal",
"else",
"0",
"]",
"next_dilation",
"=",
"filter_widths",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"filter_widths",
")",
")",
":",
"self",
".",
"pad",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"-",
"1",
")",
"*",
"next_dilation",
"//",
"2",
")",
"self",
".",
"causal_shift",
".",
"append",
"(",
"(",
"filter_widths",
"[",
"i",
"]",
"//",
"2",
")",
"if",
"causal",
"else",
"0",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"2",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"stride",
"=",
"filter_widths",
"[",
"0",
"]",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"layers_conv",
".",
"append",
"(",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"channels",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"-",
"1",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"False",
")",
")",
"layers_bn",
".",
"append",
"(",
"nn",
".",
"BatchNorm1d",
"(",
"channels",
",",
"momentum",
"=",
"0.1",
")",
")",
"next_dilation",
"*=",
"filter_widths",
"[",
"i",
"]",
"self",
".",
"final_layer",
"=",
"FlexGroupLayer",
"(",
"in_out_seq",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
",",
"self",
".",
"final_outc",
",",
"in_out_seq",
"[",
"2",
"*",
"i",
"]",
"[",
"1",
"]",
",",
"kernel_size",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"feature_split",
"=",
"args",
".",
"split",
",",
"recombine",
"=",
"args",
".",
"recombine",
",",
"fix_seq",
"=",
"self",
".",
"conv_seq",
",",
"mean_func",
"=",
"args",
".",
"mean_func",
",",
"ups_mean",
"=",
"args",
".",
"ups_mean",
",",
"bias",
"=",
"True",
")",
"self",
".",
"layers_conv",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_conv",
")",
"self",
".",
"layers_bn",
"=",
"nn",
".",
"ModuleList",
"(",
"layers_bn",
")"
] | [
295,
4
] | [
339,
49
] | python | en | ['en', 'error', 'th'] | False |

identifier: test_database_evaluation_parameter_store_store_backend_id
docstring_summary: A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
def test_database_evaluation_parameter_store_store_backend_id(in_memory_param_store):
    """
    What does this test and why?

    A Store should be able to report its store_backend_id,
    which is set when the StoreBackend is instantiated.
    """
    # Check that store_backend_id exists and can be read
    assert in_memory_param_store.store_backend_id is not None
    # Check that store_backend_id is a valid UUID
    assert test_utils.validate_uuid4(in_memory_param_store.store_backend_id)
"def",
"test_database_evaluation_parameter_store_store_backend_id",
"(",
"in_memory_param_store",
")",
":",
"# Check that store_backend_id exists can be read",
"assert",
"in_memory_param_store",
".",
"store_backend_id",
"is",
"not",
"None",
"# Check that store_backend_id is a valid UUID",
"assert",
"test_utils",
".",
"validate_uuid4",
"(",
"in_memory_param_store",
".",
"store_backend_id",
")"
] | [
186,
0
] | [
195,
76
] | python | en | ['en', 'error', 'th'] | False |

identifier: parse_feature_annotation
docstring_summary: Parse a docstring and return a feature annotation.

def parse_feature_annotation(docstring: Union[str, List[str], None]):
    """Parse a docstring and return a feature annotation."""
    list_of_annotations = []
    id_val = ""
    if docstring is None:
        return
    if isinstance(docstring, str):
        for matches in re.findall(annotation_regex_compiled, docstring):
            annotation_dict = dict()  # create new dictionary for each match
            maturity_details_dict = dict()
            for matched_line in matches:
                if not matched_line:
                    continue
                # split matched line_fields
                matched_line_fields = matched_line.split(":")
                this_key = matched_line_fields[0].strip()
                this_val = (
                    ""
                    if "TODO" in (":".join(matched_line_fields[1:]).strip())
                    else (":".join(matched_line_fields[1:]).strip())
                )
                if this_key == "id":
                    id_val = this_val

                if this_key in maturity_details_keys:
                    maturity_details_dict[this_key] = this_val
                elif this_key == "icon":  # icon is a special case
                    if this_val == "":
                        annotation_dict[
                            this_key
                        ] = f"https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/feature_maturity_icons/{id_val}.png"
                    else:
                        annotation_dict[this_key] = this_val
                else:
                    annotation_dict[this_key] = this_val
            annotation_dict["maturity_details"] = maturity_details_dict

            if annotation_dict is not None:
                list_of_annotations.append(annotation_dict)
    return list_of_annotations
"def",
"parse_feature_annotation",
"(",
"docstring",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
",",
"None",
"]",
")",
":",
"list_of_annotations",
"=",
"[",
"]",
"id_val",
"=",
"\"\"",
"if",
"docstring",
"is",
"None",
":",
"return",
"if",
"isinstance",
"(",
"docstring",
",",
"str",
")",
":",
"for",
"matches",
"in",
"re",
".",
"findall",
"(",
"annotation_regex_compiled",
",",
"docstring",
")",
":",
"annotation_dict",
"=",
"dict",
"(",
")",
"# create new dictionary for each match",
"maturity_details_dict",
"=",
"dict",
"(",
")",
"for",
"matched_line",
"in",
"matches",
":",
"if",
"not",
"matched_line",
":",
"continue",
"# split matched line_fields",
"matched_line_fields",
"=",
"matched_line",
".",
"split",
"(",
"\":\"",
")",
"this_key",
"=",
"matched_line_fields",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"this_val",
"=",
"(",
"\"\"",
"if",
"\"TODO\"",
"in",
"(",
"\":\"",
".",
"join",
"(",
"matched_line_fields",
"[",
"1",
":",
"]",
")",
".",
"strip",
"(",
")",
")",
"else",
"(",
"\":\"",
".",
"join",
"(",
"matched_line_fields",
"[",
"1",
":",
"]",
")",
".",
"strip",
"(",
")",
")",
")",
"if",
"this_key",
"==",
"\"id\"",
":",
"id_val",
"=",
"this_val",
"if",
"this_key",
"in",
"maturity_details_keys",
":",
"maturity_details_dict",
"[",
"this_key",
"]",
"=",
"this_val",
"elif",
"this_key",
"==",
"\"icon\"",
":",
"# icon is a special cases",
"if",
"this_val",
"==",
"\"\"",
":",
"annotation_dict",
"[",
"this_key",
"]",
"=",
"f\"https://great-expectations-web-assets.s3.us-east-2.amazonaws.com/feature_maturity_icons/{id_val}.png\"",
"else",
":",
"annotation_dict",
"[",
"this_key",
"]",
"=",
"this_val",
"else",
":",
"annotation_dict",
"[",
"this_key",
"]",
"=",
"this_val",
"annotation_dict",
"[",
"\"maturity_details\"",
"]",
"=",
"maturity_details_dict",
"if",
"annotation_dict",
"is",
"not",
"None",
":",
"list_of_annotations",
".",
"append",
"(",
"annotation_dict",
")",
"return",
"list_of_annotations"
] | [
42,
0
] | [
81,
30
] | python | en | ['en', 'en', 'en'] | True |
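
The key/value handling can be seen in isolation on one hypothetical matched line (the actual annotation_regex_compiled pattern lives elsewhere in the module):

    matched_line = "short_description: Validate data in S3"   # illustrative only
    fields = matched_line.split(":")
    key = fields[0].strip()               # "short_description"
    val = ":".join(fields[1:]).strip()    # "Validate data in S3"; a value containing "TODO" would be blanked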

identifier: open_unix_socket
docstring_summary: Opens a connection to the specified Unix domain socket.
async def open_unix_socket(filename):
    """Opens a connection to the specified
    `Unix domain socket <https://en.wikipedia.org/wiki/Unix_domain_socket>`__.

    You must have read/write permission on the specified file to connect.

    Args:
      filename (str or bytes): The filename to open the connection to.

    Returns:
      SocketStream: a :class:`~trio.abc.Stream` connected to the given file.

    Raises:
      OSError: If the socket file could not be connected to.
      RuntimeError: If AF_UNIX sockets are not supported.
    """
    if not has_unix:
        raise RuntimeError("Unix sockets are not supported on this platform")

    # much more simplified logic vs tcp sockets - one socket type and only one
    # possible location to connect to
    sock = socket(AF_UNIX, SOCK_STREAM)
    with close_on_error(sock):
        await sock.connect(os.fspath(filename))
    return trio.SocketStream(sock)
"async",
"def",
"open_unix_socket",
"(",
"filename",
")",
":",
"if",
"not",
"has_unix",
":",
"raise",
"RuntimeError",
"(",
"\"Unix sockets are not supported on this platform\"",
")",
"# much more simplified logic vs tcp sockets - one socket type and only one",
"# possible location to connect to",
"sock",
"=",
"socket",
"(",
"AF_UNIX",
",",
"SOCK_STREAM",
")",
"with",
"close_on_error",
"(",
"sock",
")",
":",
"await",
"sock",
".",
"connect",
"(",
"os",
".",
"fspath",
"(",
"filename",
")",
")",
"return",
"trio",
".",
"SocketStream",
"(",
"sock",
")"
] | [
23,
0
] | [
48,
34
] | python | en | ['en', 'en', 'en'] | True |
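
A usage sketch (the socket path is hypothetical and a server must already be listening there):

    import trio

    async def main():
        stream = await trio.open_unix_socket("/tmp/example.sock")
        async with stream:
            await stream.send_all(b"ping\n")
            print(await stream.receive_some(4096))

    trio.run(main)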

identifier: load_mpi_test
docstring_summary: Load one test section and return normalized 2D/3D poses, normalization params, and camera intrinsics.
def load_mpi_test(file_path, seq, norm):
    """
    Usage: Load a section once.

    :param file_path: path to the packed test-set file (loaded with np.load)
    :param seq: There are six sequences (seq=0,1,2,3,4,5), and 2935 poses in a unique set (seq==7).
        If you want to evaluate by scene setting, you can use the sequencewise evaluation
        to convert to these numbers by doing
        #1: Studio with Green Screen    (TS1*603 + TS2*540) / (603+540)
        #2: Studio without Green Screen (TS3*505 + TS4*553) / (505+553)
        #3: Outdoor                     (TS5*276 + TS6*452) / (276+452)
    :param norm: normalization mode ('base' or a pixel-normalization scheme)
    :return: Normalized 2d/3d pose, normalization params and camera intrinsics. All types: List
    """
    info = np.load(file_path, allow_pickle=True)
    if seq in range(0, 6):
        pose_3d = info['pose3d_univ'][seq]
        pose_2d = info['pose2d'][seq]
        if seq in [0, 1, 2, 3]:
            img_w, img_h = 2048, 2048
            cam_intri = np.array([1500.0686135995716, 1500.6590966853348, 1017.3794860438494, 1043.062824876024, 1, 1, 1, 1, 1])
        elif seq in [4, 5]:
            img_w, img_h = 1920, 1080
            cam_intri = np.array([1683.482559482185, 1671.927242063379, 939.9278168524228, 560.2072491988034, 1, 1, 1, 1, 1])
    elif seq == 7:
        pose_3d = info['pose3d_univ'][0]
        pose_2d = info['pose2d'][0]
        img_w, img_h = 2048, 2048
        cam_intri = np.array([1504.1479043534127, 1556.86936732066, 991.7469587022122, 872.994958045596, 1, 1, 1, 1, 1])

    params = {}
    if norm == 'base':
        # Remove global offset, but keep trajectory in first position
        pose_3d[:, 1:] -= pose_3d[:, :1]
        normed_pose_3d = pose_3d / 1000
        normed_pose_2d = normalize_screen_coordinates(pose_2d[..., :2], w=img_w, h=img_h)
        params['intrinsic'] = cam_intri
    else:
        normed_pose_3d, normed_pose_2d, pixel_ratio, rescale_ratio, offset_2d, abs_root_Z = norm_to_pixel(pose_3d / 1000, pose_2d, cam_intri, norm)
        norm_params = np.concatenate((pixel_ratio, rescale_ratio, offset_2d, abs_root_Z), axis=-1)  # [T, 1, 5], len() == 4
        params['intrinsic'] = cam_intri
        params['normalization_params'] = norm_params
    return normed_pose_3d, normed_pose_2d, params
"def",
"load_mpi_test",
"(",
"file_path",
",",
"seq",
",",
"norm",
")",
":",
"info",
"=",
"np",
".",
"load",
"(",
"file_path",
",",
"allow_pickle",
"=",
"True",
")",
"if",
"seq",
"in",
"range",
"(",
"0",
",",
"6",
")",
":",
"pose_3d",
"=",
"info",
"[",
"'pose3d_univ'",
"]",
"[",
"seq",
"]",
"pose_2d",
"=",
"info",
"[",
"'pose2d'",
"]",
"[",
"seq",
"]",
"if",
"seq",
"in",
"[",
"0",
",",
"1",
",",
"2",
",",
"3",
"]",
":",
"img_w",
",",
"img_h",
"=",
"2048",
",",
"2048",
"cam_intri",
"=",
"np",
".",
"array",
"(",
"[",
"1500.0686135995716",
",",
"1500.6590966853348",
",",
"1017.3794860438494",
",",
"1043.062824876024",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"elif",
"seq",
"in",
"[",
"4",
",",
"5",
"]",
":",
"img_w",
",",
"img_h",
"=",
"1920",
",",
"1080",
"cam_intri",
"=",
"np",
".",
"array",
"(",
"[",
"1683.482559482185",
",",
"1671.927242063379",
",",
"939.9278168524228",
",",
"560.2072491988034",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"elif",
"seq",
"==",
"7",
":",
"pose_3d",
"=",
"info",
"[",
"'pose3d_univ'",
"]",
"[",
"0",
"]",
"pose_2d",
"=",
"info",
"[",
"'pose2d'",
"]",
"[",
"0",
"]",
"img_w",
",",
"img_h",
"=",
"2048",
",",
"2048",
"cam_intri",
"=",
"np",
".",
"array",
"(",
"[",
"1504.1479043534127",
",",
"1556.86936732066",
",",
"991.7469587022122",
",",
"872.994958045596",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
",",
"1",
"]",
")",
"params",
"=",
"{",
"}",
"if",
"norm",
"==",
"'base'",
":",
"# Remove global offset, but keep trajectory in first position",
"pose_3d",
"[",
":",
",",
"1",
":",
"]",
"-=",
"pose_3d",
"[",
":",
",",
":",
"1",
"]",
"normed_pose_3d",
"=",
"pose_3d",
"/",
"1000",
"normed_pose_2d",
"=",
"normalize_screen_coordinates",
"(",
"pose_2d",
"[",
"...",
",",
":",
"2",
"]",
",",
"w",
"=",
"img_w",
",",
"h",
"=",
"img_h",
")",
"params",
"[",
"'intrinsic'",
"]",
"=",
"cam_intri",
"else",
":",
"normed_pose_3d",
",",
"normed_pose_2d",
",",
"pixel_ratio",
",",
"rescale_ratio",
",",
"offset_2d",
",",
"abs_root_Z",
"=",
"norm_to_pixel",
"(",
"pose_3d",
"/",
"1000",
",",
"pose_2d",
",",
"cam_intri",
",",
"norm",
")",
"norm_params",
"=",
"np",
".",
"concatenate",
"(",
"(",
"pixel_ratio",
",",
"rescale_ratio",
",",
"offset_2d",
",",
"abs_root_Z",
")",
",",
"axis",
"=",
"-",
"1",
")",
"# [T, 1, 5], len()==4",
"params",
"[",
"'intrinsic'",
"]",
"=",
"cam_intri",
"params",
"[",
"'normalization_params'",
"]",
"=",
"norm_params",
"return",
"normed_pose_3d",
",",
"normed_pose_2d",
",",
"params"
] | [
4,
0
] | [
44,
49
] | python | en | ['en', 'error', 'th'] | False |
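
The scene-setting conversion from the docstring, with hypothetical per-sequence errors for TS1..TS6:

    e = {1: 62.2, 2: 58.1, 3: 70.4, 4: 66.3, 5: 79.5, 6: 85.0}   # illustrative values only
    studio_green    = (e[1] * 603 + e[2] * 540) / (603 + 540)    # Studio with green screen
    studio_no_green = (e[3] * 505 + e[4] * 553) / (505 + 553)    # Studio without green screen
    outdoor         = (e[5] * 276 + e[6] * 452) / (276 + 452)    # Outdoor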

identifier: download_selenium_server
docstring_summary: Downloads the Selenium Server JAR file from its online location and stores it locally.
def download_selenium_server():
    """
    Downloads the Selenium Server JAR file from its
    online location and stores it locally.
    """
    try:
        local_file = open(JAR_FILE, 'wb')
        remote_file = urlopen(SELENIUM_JAR)
        print('Downloading the Selenium Server JAR file...\n')
        local_file.write(remote_file.read())
        local_file.close()
        remote_file.close()
        print('Download Complete!')
    except Exception:
        raise Exception("Error downloading the Selenium Server JAR file.\n"
                        "Details: %s" % sys.exc_info()[1])
"def",
"download_selenium_server",
"(",
")",
":",
"try",
":",
"local_file",
"=",
"open",
"(",
"JAR_FILE",
",",
"'wb'",
")",
"remote_file",
"=",
"urlopen",
"(",
"SELENIUM_JAR",
")",
"print",
"(",
"'Downloading the Selenium Server JAR file...\\n'",
")",
"local_file",
".",
"write",
"(",
"remote_file",
".",
"read",
"(",
")",
")",
"local_file",
".",
"close",
"(",
")",
"remote_file",
".",
"close",
"(",
")",
"print",
"(",
"'Download Complete!'",
")",
"except",
"Exception",
":",
"raise",
"Exception",
"(",
"\"Error downloading the Selenium Server JAR file.\\n\"",
"\"Details: %s\"",
"%",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")"
] | [
20,
0
] | [
35,
58
] | python | en | ['en', 'error', 'th'] | False |

identifier: impl_details.is_defined_in_xxx
docstring_summary: Small helper method that checks whether the class `cls` is defined under the `::xxx` namespace.
def is_defined_in_xxx(xxx, cls):
    """
    Small helper method that checks whether the class `cls` is defined
    under `::xxx` namespace.
    """
    if not cls.parent:
        return False
    if not isinstance(cls.parent, namespace.namespace_t):
        return False
    if xxx != cls.parent.name:
        return False
    xxx_ns = cls.parent
    if not xxx_ns.parent:
        return False
    if not isinstance(xxx_ns.parent, namespace.namespace_t):
        return False
    if xxx_ns.parent.name != '::':
        return False
    global_ns = xxx_ns.parent
    return None is global_ns.parent
"def",
"is_defined_in_xxx",
"(",
"xxx",
",",
"cls",
")",
":",
"if",
"not",
"cls",
".",
"parent",
":",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"cls",
".",
"parent",
",",
"namespace",
".",
"namespace_t",
")",
":",
"return",
"False",
"if",
"xxx",
"!=",
"cls",
".",
"parent",
".",
"name",
":",
"return",
"False",
"xxx_ns",
"=",
"cls",
".",
"parent",
"if",
"not",
"xxx_ns",
".",
"parent",
":",
"return",
"False",
"if",
"not",
"isinstance",
"(",
"xxx_ns",
".",
"parent",
",",
"namespace",
".",
"namespace_t",
")",
":",
"return",
"False",
"if",
"xxx_ns",
".",
"parent",
".",
"name",
"!=",
"'::'",
":",
"return",
"False",
"global_ns",
"=",
"xxx_ns",
".",
"parent",
"return",
"None",
"is",
"global_ns",
".",
"parent"
] | [
16,
4
] | [
41,
39
] | python | en | ['en', 'error', 'th'] | False |
impl_details.find_value_type | (global_ns, value_type_str) | implementation details | implementation details | def find_value_type(global_ns, value_type_str):
"""implementation details"""
if not value_type_str.startswith('::'):
value_type_str = '::' + value_type_str
found = global_ns.decls(
name=value_type_str,
function=lambda decl: not isinstance(decl, calldef.calldef_t),
allow_empty=True)
if not found:
no_global_ns_value_type_str = value_type_str[2:]
if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
elif type_traits.is_std_string(value_type_str):
string_ = global_ns.typedef('::std::string')
return type_traits.remove_declarated(string_)
elif type_traits.is_std_wstring(value_type_str):
string_ = global_ns.typedef('::std::wstring')
return type_traits.remove_declarated(string_)
else:
value_type_str = no_global_ns_value_type_str
has_const = value_type_str.startswith('const ')
if has_const:
value_type_str = value_type_str[len('const '):]
has_pointer = value_type_str.endswith('*')
if has_pointer:
value_type_str = value_type_str[:-1]
found = None
if has_const or has_pointer:
found = impl_details.find_value_type(
global_ns,
value_type_str)
if not found:
return None
else:
if isinstance(found, class_declaration.class_types):
return cpptypes.declarated_t(found)
if has_const:
return cpptypes.const_t(found)
if has_pointer:
return cpptypes.pointer_t(found)
if len(found) == 1:
return found[0]
else:
return None | [
"def",
"find_value_type",
"(",
"global_ns",
",",
"value_type_str",
")",
":",
"if",
"not",
"value_type_str",
".",
"startswith",
"(",
"'::'",
")",
":",
"value_type_str",
"=",
"'::'",
"+",
"value_type_str",
"found",
"=",
"global_ns",
".",
"decls",
"(",
"name",
"=",
"value_type_str",
",",
"function",
"=",
"lambda",
"decl",
":",
"not",
"isinstance",
"(",
"decl",
",",
"calldef",
".",
"calldef_t",
")",
",",
"allow_empty",
"=",
"True",
")",
"if",
"not",
"found",
":",
"no_global_ns_value_type_str",
"=",
"value_type_str",
"[",
"2",
":",
"]",
"if",
"no_global_ns_value_type_str",
"in",
"cpptypes",
".",
"FUNDAMENTAL_TYPES",
":",
"return",
"cpptypes",
".",
"FUNDAMENTAL_TYPES",
"[",
"no_global_ns_value_type_str",
"]",
"elif",
"type_traits",
".",
"is_std_string",
"(",
"value_type_str",
")",
":",
"string_",
"=",
"global_ns",
".",
"typedef",
"(",
"'::std::string'",
")",
"return",
"type_traits",
".",
"remove_declarated",
"(",
"string_",
")",
"elif",
"type_traits",
".",
"is_std_wstring",
"(",
"value_type_str",
")",
":",
"string_",
"=",
"global_ns",
".",
"typedef",
"(",
"'::std::wstring'",
")",
"return",
"type_traits",
".",
"remove_declarated",
"(",
"string_",
")",
"else",
":",
"value_type_str",
"=",
"no_global_ns_value_type_str",
"has_const",
"=",
"value_type_str",
".",
"startswith",
"(",
"'const '",
")",
"if",
"has_const",
":",
"value_type_str",
"=",
"value_type_str",
"[",
"len",
"(",
"'const '",
")",
":",
"]",
"has_pointer",
"=",
"value_type_str",
".",
"endswith",
"(",
"'*'",
")",
"if",
"has_pointer",
":",
"value_type_str",
"=",
"value_type_str",
"[",
":",
"-",
"1",
"]",
"found",
"=",
"None",
"if",
"has_const",
"or",
"has_pointer",
":",
"found",
"=",
"impl_details",
".",
"find_value_type",
"(",
"global_ns",
",",
"value_type_str",
")",
"if",
"not",
"found",
":",
"return",
"None",
"else",
":",
"if",
"isinstance",
"(",
"found",
",",
"class_declaration",
".",
"class_types",
")",
":",
"return",
"cpptypes",
".",
"declarated_t",
"(",
"found",
")",
"if",
"has_const",
":",
"return",
"cpptypes",
".",
"const_t",
"(",
"found",
")",
"if",
"has_pointer",
":",
"return",
"cpptypes",
".",
"pointer_t",
"(",
"found",
")",
"if",
"len",
"(",
"found",
")",
"==",
"1",
":",
"return",
"found",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] | [
44,
4
] | [
87,
23
] | python | da | ['eo', 'da', 'en'] | False |
internal_handler | (f) |
Reject requests where the remote address is not localhost. See the docstring
for _is_local_request() for important caveats!
|
Reject requests where the remote address is not localhost. See the docstring
for _is_local_request() for important caveats!
| def internal_handler(f):
"""
Reject requests where the remote address is not localhost. See the docstring
for _is_local_request() for important caveats!
"""
func_name = getattr(f, '__name__', '<anonymous>')
@functools.wraps(f)
def wrapper(*args, **kwds):
if not _is_local_request():
return "Forbidden\n", 403
return f(*args, **kwds)
return wrapper | [
"def",
"internal_handler",
"(",
"f",
")",
":",
"func_name",
"=",
"getattr",
"(",
"f",
",",
"'__name__'",
",",
"'<anonymous>'",
")",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"not",
"_is_local_request",
"(",
")",
":",
"return",
"\"Forbidden\\n\"",
",",
"403",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"return",
"wrapper"
] | [
614,
0
] | [
627,
18
] | python | en | ['en', 'error', 'th'] | False |
_is_local_request | () |
Determine if this request originated with localhost.
When we are not running in LEGACY_MODE, we rely on healthcheck_server.go setting
the X-Ambassador-Diag-IP header for us (and we rely on it overwriting anything
that's already there!).
When we _are_ running in LEGACY_MODE, we rely on an implementation detail of
Flask (or maybe of GUnicorn?): the existence of the REMOTE_ADDR environment
variable. This may not work as intended on other WSGI implementations, though if
the environment variable is missing entirely, the effect is to fail closed, i.e.
all requests will be rejected, in which case the problem will become apparent
very quickly.
It might be possible to consider the environment variables SERVER_NAME and
SERVER_PORT instead, as those are allegedly required by WSGI... but attempting
to do so in Flask/GUnicorn yielded a worse implementation that was still not
portable.
|
Determine if this request originated with localhost. | def _is_local_request() -> bool:
"""
Determine if this request originated with localhost.
When we are not running in LEGACY_MODE, we rely on healthcheck_server.go setting
the X-Ambassador-Diag-IP header for us (and we rely on it overwriting anything
that's already there!).
When we _are_ running in LEGACY_MODE, we rely on an implementation detail of
Flask (or maybe of GUnicorn?): the existence of the REMOTE_ADDR environment
variable. This may not work as intended on other WSGI implementations, though if
the environment variable is missing entirely, the effect is to fail closed, i.e.
all requests will be rejected, in which case the problem will become apparent
very quickly.
It might be possible to consider the environment variables SERVER_NAME and
SERVER_PORT instead, as those are allegedly required by WSGI... but attempting
to do so in Flask/GUnicorn yielded a worse implementation that was still not
portable.
"""
remote_addr: Optional[str] = ""
if not app.legacy_mode:
remote_addr = request.headers.get("X-Ambassador-Diag-IP")
else:
remote_addr = request.environ.get("REMOTE_ADDR")
return remote_addr == "127.0.0.1" | [
"def",
"_is_local_request",
"(",
")",
"->",
"bool",
":",
"remote_addr",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"\"",
"if",
"not",
"app",
".",
"legacy_mode",
":",
"remote_addr",
"=",
"request",
".",
"headers",
".",
"get",
"(",
"\"X-Ambassador-Diag-IP\"",
")",
"else",
":",
"remote_addr",
"=",
"request",
".",
"environ",
".",
"get",
"(",
"\"REMOTE_ADDR\"",
")",
"return",
"remote_addr",
"==",
"\"127.0.0.1\""
] | [
633,
0
] | [
661,
37
] | python | en | ['en', 'error', 'th'] | False |
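Taken together, internal_handler and _is_local_request form a localhost-only guard for Flask routes: the decorator consults the resolved remote address and returns 403 for anything that is not 127.0.0.1. A minimal sketch of how such a decorator is applied (the app and route below are illustrative, not taken from this codebase):

from flask import Flask

app = Flask(__name__)

@app.route("/_internal/status")
@internal_handler  # returns ("Forbidden\n", 403) unless the caller resolves to 127.0.0.1
def internal_status():
    return "OK\n", 200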
_allow_diag_ui | () |
Helper function to check whether diag UI traffic is allowed or not,
based on the different flags from the config:
* diagnostics.enabled: Enables the diag UI by adding mappings
* diagnostics.allow_non_local: Allow non-local traffic
even when the diagnostics UI is disabled.
Mappings are not added for the diag UI,
but the diagnostics UI is still exposed on
the pod IP via the admin port.
* local traffic or not: When diagnostics is disabled and allow_non_local is false,
allow traffic only from localhost clients
|
Helper function to check whether diag UI traffic is allowed or not,
based on the different flags from the config:
* diagnostics.enabled: Enables the diag UI by adding mappings
* diagnostics.allow_non_local: Allow non-local traffic
even when the diagnostics UI is disabled.
Mappings are not added for the diag UI,
but the diagnostics UI is still exposed on
the pod IP via the admin port.
* local traffic or not: When diagnostics is disabled and allow_non_local is false,
allow traffic only from localhost clients
| def _allow_diag_ui() -> bool:
"""
Helper function to check whether diag UI traffic is allowed or not,
based on the different flags from the config:
* diagnostics.enabled: Enables the diag UI by adding mappings
* diagnostics.allow_non_local: Allow non-local traffic
even when the diagnostics UI is disabled.
Mappings are not added for the diag UI,
but the diagnostics UI is still exposed on
the pod IP via the admin port.
* local traffic or not: When diagnostics is disabled and allow_non_local is false,
allow traffic only from localhost clients
"""
enabled = False
allow_non_local = False
ir = app.ir
if ir:
enabled = ir.ambassador_module.diagnostics.get("enabled", False)
allow_non_local = ir.ambassador_module.diagnostics.get("allow_non_local", False)
if not enabled and not _is_local_request() and not allow_non_local:
return False
return True | [
"def",
"_allow_diag_ui",
"(",
")",
"->",
"bool",
":",
"enabled",
"=",
"False",
"allow_non_local",
"=",
"False",
"ir",
"=",
"app",
".",
"ir",
"if",
"ir",
":",
"enabled",
"=",
"ir",
".",
"ambassador_module",
".",
"diagnostics",
".",
"get",
"(",
"\"enabled\"",
",",
"False",
")",
"allow_non_local",
"=",
"ir",
".",
"ambassador_module",
".",
"diagnostics",
".",
"get",
"(",
"\"allow_non_local\"",
",",
"False",
")",
"if",
"not",
"enabled",
"and",
"not",
"_is_local_request",
"(",
")",
"and",
"not",
"allow_non_local",
":",
"return",
"False",
"return",
"True"
] | [
664,
0
] | [
685,
15
] | python | en | ['en', 'error', 'th'] | False |
drop_serializer_key | (d: Dict[Any, Any]) |
Delete the "serialization" key (if present) in any dictionary passed in and
return that dictionary. This function is intended to be used as the
object_hook value for json.load[s].
|
Delete the "serialization" key (if present) in any dictionary passed in and
return that dictionary. This function is intended to be used as the
object_hook value for json.load[s].
| def drop_serializer_key(d: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Delete the "serialization" key (if present) in any dictionary passed in and
return that dictionary. This function is intended to be used as the
object_hook value for json.load[s].
"""
_ = d.pop("serialization", None)
return d | [
"def",
"drop_serializer_key",
"(",
"d",
":",
"Dict",
"[",
"Any",
",",
"Any",
"]",
")",
"->",
"Dict",
"[",
"Any",
",",
"Any",
"]",
":",
"_",
"=",
"d",
".",
"pop",
"(",
"\"serialization\"",
",",
"None",
")",
"return",
"d"
] | [
813,
0
] | [
820,
12
] | python | en | ['en', 'error', 'th'] | False |
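Because json.loads calls object_hook on every JSON object it decodes, drop_serializer_key strips the key at every nesting level, not just the top. A small worked example:

import json

raw = '{"kind": "Mapping", "serialization": "...", "spec": {"serialization": "...", "prefix": "/"}}'
clean = json.loads(raw, object_hook=drop_serializer_key)
# clean == {'kind': 'Mapping', 'spec': {'prefix': '/'}}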
_main | (snapshot_path=None, bootstrap_path=None, ads_path=None,
*, dev_magic=False, config_path=None, ambex_pid=0, kick=None,
banner_endpoint="http://127.0.0.1:8500/banner", metrics_endpoint="http://127.0.0.1:8500/metrics", k8s=False,
no_checks=False, no_envoy=False, reload=False, debug=False, verbose=False,
workers=None, port=-1, host="", notices=None,
validation_retries=5, allow_fs_commands=False, local_scout=False,
report_action_keys=False) |
Run the diagnostic daemon.
:param snapshot_path: Path to directory in which to save configuration snapshots and dynamic secrets
:param bootstrap_path: Path to which to write bootstrap Envoy configuration
:param ads_path: Path to which to write ADS Envoy configuration
:param config_path: Optional configuration path to scan for Ambassador YAML files
:param k8s: If True, assume config_path contains Kubernetes resources (only relevant with config_path)
:param ambex_pid: Optional PID to signal with HUP after updating Envoy configuration
:param kick: Optional command to run after updating Envoy configuration
:param banner_endpoint: Optional endpoint of extra banner to include
:param metrics_endpoint: Optional endpoint of extra prometheus metrics to include
:param no_checks: If True, don't do Envoy-cluster health checking
:param no_envoy: If True, don't interact with Envoy at all
:param reload: If True, run Flask in debug mode for live reloading
:param debug: If True, do debug logging
:param dev_magic: If True, override a bunch of things for Datawire dev-loop stuff
:param verbose: If True, do really verbose debug logging
:param workers: Number of workers; default is based on the number of CPUs present
:param host: Interface on which to listen
:param port: Port on which to listen
:param notices: Optional file to read for local notices
:param validation_retries: Number of times to retry Envoy configuration validation after a timeout
:param allow_fs_commands: If True, allow CONFIG_FS to support debug/testing commands
:param local_scout: Don't talk to remote Scout at all; keep everything purely local
:param report_action_keys: Report action keys when chiming
|
Run the diagnostic daemon. | def _main(snapshot_path=None, bootstrap_path=None, ads_path=None,
*, dev_magic=False, config_path=None, ambex_pid=0, kick=None,
banner_endpoint="http://127.0.0.1:8500/banner", metrics_endpoint="http://127.0.0.1:8500/metrics", k8s=False,
no_checks=False, no_envoy=False, reload=False, debug=False, verbose=False,
workers=None, port=-1, host="", notices=None,
validation_retries=5, allow_fs_commands=False, local_scout=False,
report_action_keys=False):
"""
Run the diagnostic daemon.
:param snapshot_path: Path to directory in which to save configuration snapshots and dynamic secrets
:param bootstrap_path: Path to which to write bootstrap Envoy configuration
:param ads_path: Path to which to write ADS Envoy configuration
:param config_path: Optional configuration path to scan for Ambassador YAML files
:param k8s: If True, assume config_path contains Kubernetes resources (only relevant with config_path)
:param ambex_pid: Optional PID to signal with HUP after updating Envoy configuration
:param kick: Optional command to run after updating Envoy configuration
:param banner_endpoint: Optional endpoint of extra banner to include
:param metrics_endpoint: Optional endpoint of extra prometheus metrics to include
:param no_checks: If True, don't do Envoy-cluster health checking
:param no_envoy: If True, don't interact with Envoy at all
:param reload: If True, run Flask in debug mode for live reloading
:param debug: If True, do debug logging
:param dev_magic: If True, override a bunch of things for Datawire dev-loop stuff
:param verbose: If True, do really verbose debug logging
:param workers: Number of workers; default is based on the number of CPUs present
:param host: Interface on which to listen
:param port: Port on which to listen
:param notices: Optional file to read for local notices
:param validation_retries: Number of times to retry Envoy configuration validation after a timeout
:param allow_fs_commands: If True, allow CONFIG_FS to support debug/testing commands
:param local_scout: Don't talk to remote Scout at all; keep everything purely local
:param report_action_keys: Report action keys when chiming
"""
enable_fast_reconfigure = parse_bool(os.environ.get("AMBASSADOR_FAST_RECONFIGURE", "false"))
legacy_mode = parse_bool(os.environ.get("AMBASSADOR_LEGACY_MODE", "false"))
if port < 0:
port = Constants.DIAG_PORT if not enable_fast_reconfigure else Constants.DIAG_PORT_ALT
# port = Constants.DIAG_PORT
if not host:
host = '0.0.0.0' if not enable_fast_reconfigure else '127.0.0.1'
if dev_magic:
# Override the world.
os.environ['SCOUT_HOST'] = '127.0.0.1:9999'
os.environ['SCOUT_HTTPS'] = 'no'
no_checks = True
no_envoy = True
os.makedirs('/tmp/snapshots', mode=0o755, exist_ok=True)
snapshot_path = '/tmp/snapshots'
bootstrap_path = '/tmp/boot.json'
ads_path = '/tmp/ads.json'
port = 9998
allow_fs_commands = True
local_scout = True
report_action_keys = True
if no_envoy:
no_checks = True
# Create the application itself.
app.setup(snapshot_path, bootstrap_path, ads_path, config_path, ambex_pid, kick, banner_endpoint,
metrics_endpoint, k8s, not no_checks, no_envoy, reload, debug, verbose, notices,
validation_retries, allow_fs_commands, local_scout, report_action_keys,
enable_fast_reconfigure, legacy_mode)
if not workers:
workers = number_of_workers()
gunicorn_config = {
'bind': '%s:%s' % (host, port),
# 'workers': 1,
'threads': workers,
}
app.logger.info("thread count %d, listening on %s" % (gunicorn_config['threads'], gunicorn_config['bind']))
StandaloneApplication(app, gunicorn_config).run() | [
"def",
"_main",
"(",
"snapshot_path",
"=",
"None",
",",
"bootstrap_path",
"=",
"None",
",",
"ads_path",
"=",
"None",
",",
"*",
",",
"dev_magic",
"=",
"False",
",",
"config_path",
"=",
"None",
",",
"ambex_pid",
"=",
"0",
",",
"kick",
"=",
"None",
",",
"banner_endpoint",
"=",
"\"http://127.0.0.1:8500/banner\"",
",",
"metrics_endpoint",
"=",
"\"http://127.0.0.1:8500/metrics\"",
",",
"k8s",
"=",
"False",
",",
"no_checks",
"=",
"False",
",",
"no_envoy",
"=",
"False",
",",
"reload",
"=",
"False",
",",
"debug",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"workers",
"=",
"None",
",",
"port",
"=",
"-",
"1",
",",
"host",
"=",
"\"\"",
",",
"notices",
"=",
"None",
",",
"validation_retries",
"=",
"5",
",",
"allow_fs_commands",
"=",
"False",
",",
"local_scout",
"=",
"False",
",",
"report_action_keys",
"=",
"False",
")",
":",
"enable_fast_reconfigure",
"=",
"parse_bool",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"\"AMBASSADOR_FAST_RECONFIGURE\"",
",",
"\"false\"",
")",
")",
"legacy_mode",
"=",
"parse_bool",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"\"AMBASSADOR_LEGACY_MODE\"",
",",
"\"false\"",
")",
")",
"if",
"port",
"<",
"0",
":",
"port",
"=",
"Constants",
".",
"DIAG_PORT",
"if",
"not",
"enable_fast_reconfigure",
"else",
"Constants",
".",
"DIAG_PORT_ALT",
"# port = Constants.DIAG_PORT",
"if",
"not",
"host",
":",
"host",
"=",
"'0.0.0.0'",
"if",
"not",
"enable_fast_reconfigure",
"else",
"'127.0.0.1'",
"if",
"dev_magic",
":",
"# Override the world.",
"os",
".",
"environ",
"[",
"'SCOUT_HOST'",
"]",
"=",
"'127.0.0.1:9999'",
"os",
".",
"environ",
"[",
"'SCOUT_HTTPS'",
"]",
"=",
"'no'",
"no_checks",
"=",
"True",
"no_envoy",
"=",
"True",
"os",
".",
"makedirs",
"(",
"'/tmp/snapshots'",
",",
"mode",
"=",
"0o755",
",",
"exist_ok",
"=",
"True",
")",
"snapshot_path",
"=",
"'/tmp/snapshots'",
"bootstrap_path",
"=",
"'/tmp/boot.json'",
"ads_path",
"=",
"'/tmp/ads.json'",
"port",
"=",
"9998",
"allow_fs_commands",
"=",
"True",
"local_scout",
"=",
"True",
"report_action_keys",
"=",
"True",
"if",
"no_envoy",
":",
"no_checks",
"=",
"True",
"# Create the application itself.",
"app",
".",
"setup",
"(",
"snapshot_path",
",",
"bootstrap_path",
",",
"ads_path",
",",
"config_path",
",",
"ambex_pid",
",",
"kick",
",",
"banner_endpoint",
",",
"metrics_endpoint",
",",
"k8s",
",",
"not",
"no_checks",
",",
"no_envoy",
",",
"reload",
",",
"debug",
",",
"verbose",
",",
"notices",
",",
"validation_retries",
",",
"allow_fs_commands",
",",
"local_scout",
",",
"report_action_keys",
",",
"enable_fast_reconfigure",
",",
"legacy_mode",
")",
"if",
"not",
"workers",
":",
"workers",
"=",
"number_of_workers",
"(",
")",
"gunicorn_config",
"=",
"{",
"'bind'",
":",
"'%s:%s'",
"%",
"(",
"host",
",",
"port",
")",
",",
"# 'workers': 1,",
"'threads'",
":",
"workers",
",",
"}",
"app",
".",
"logger",
".",
"info",
"(",
"\"thread count %d, listening on %s\"",
"%",
"(",
"gunicorn_config",
"[",
"'threads'",
"]",
",",
"gunicorn_config",
"[",
"'bind'",
"]",
")",
")",
"StandaloneApplication",
"(",
"app",
",",
"gunicorn_config",
")",
".",
"run",
"(",
")"
] | [
2103,
0
] | [
2188,
53
] | python | en | ['en', 'error', 'th'] | False |
DiagApp.diag | (self) |
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you.
You MUST NOT already hold the config_lock or the diag_lock when
trying to read app.diag.
You MUST already have loaded an IR.
|
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you. | def diag(self) -> Optional[Diagnostics]:
"""
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you.
You MUST NOT already hold the config_lock or the diag_lock when
trying to read app.diag.
You MUST already have loaded an IR.
"""
# The config_lock is meant to make sure that we don't ever update
# self.diag in two places at once, so grab that first.
with self.config_lock:
# If we've already generated diagnostics...
if app._diag:
# ...then we're good to go.
return app._diag
# If here, we have _not_ generated diagnostics, and we've dropped the
# config lock so as not to block anyone else. Next up: grab the diag
# lock, because we'd rather not have two diag generations happening at
# once.
with self.diag_lock:
# Did someone else sneak in between releasing the config lock and
# grabbing the diag lock?
if app._diag:
# Yup. Use their work.
return app._diag
# OK, go generate diagnostics.
_diag = self._generate_diagnostics()
# If that didn't work, no point in messing with the config lock.
if not _diag:
return None
# Once here, we need to - once again - grab the config lock to update
# app._diag. This is safe because this is the only place we ever mess
# with the diag lock, so nowhere else will try to grab the diag lock
# while holding the config lock.
with app.config_lock:
app._diag = _diag
# Finally, we can return app._diag to our caller.
return app._diag | [
"def",
"diag",
"(",
"self",
")",
"->",
"Optional",
"[",
"Diagnostics",
"]",
":",
"# The config_lock is meant to make sure that we don't ever update",
"# self.diag in two places at once, so grab that first.",
"with",
"self",
".",
"config_lock",
":",
"# If we've already generated diagnostics...",
"if",
"app",
".",
"_diag",
":",
"# ...then we're good to go.",
"return",
"app",
".",
"_diag",
"# If here, we have _not_ generated diagnostics, and we've dropped the",
"# config lock so as not to block anyone else. Next up: grab the diag",
"# lock, because we'd rather not have two diag generations happening at",
"# once.",
"with",
"self",
".",
"diag_lock",
":",
"# Did someone else sneak in between releasing the config lock and",
"# grabbing the diag lock?",
"if",
"app",
".",
"_diag",
":",
"# Yup. Use their work.",
"return",
"app",
".",
"_diag",
"# OK, go generate diagnostics.",
"_diag",
"=",
"self",
".",
"_generate_diagnostics",
"(",
")",
"# If that didn't work, no point in messing with the config lock.",
"if",
"not",
"_diag",
":",
"return",
"None",
"# Once here, we need to - once again - grab the config lock to update",
"# app._diag. This is safe because this is the only place we ever mess",
"# with the diag lock, so nowhere else will try to grab the diag lock",
"# while holding the config lock.",
"with",
"app",
".",
"config_lock",
":",
"app",
".",
"_diag",
"=",
"_diag",
"# Finally, we can return app._diag to our caller.",
"return",
"app",
".",
"_diag"
] | [
294,
4
] | [
340,
28
] | python | en | ['en', 'error', 'th'] | False |
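The property above is a double-checked locking pattern: a cheap existence check under the config lock, an expensive build serialized by the diag lock, and a re-check in between so a concurrent builder's work is reused. A self-contained sketch of the same idea with generic names (an illustration, not the Ambassador classes):

import threading

class LazyHolder:
    def __init__(self, factory):
        self._factory = factory                  # expensive constructor
        self._value = None
        self._value_lock = threading.Lock()      # guards reads/writes of _value
        self._build_lock = threading.Lock()      # serializes expensive builds

    @property
    def value(self):
        with self._value_lock:                   # fast path: already built
            if self._value is not None:
                return self._value
        with self._build_lock:                   # slow path: one builder at a time
            with self._value_lock:               # did someone build it while we waited?
                if self._value is not None:
                    return self._value
            built = self._factory()              # expensive work, outside _value_lock
            with self._value_lock:
                self._value = built
            return built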
DiagApp.diag | (self, diag: Optional[Diagnostics]) |
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you.
You MUST already hold the config_lock when trying to update app.diag.
You MUST NOT hold the diag_lock.
|
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you. | def diag(self, diag: Optional[Diagnostics]) -> None:
"""
It turns out to be expensive to generate the Diagnostics class, so
app.diag is a property that does it on demand, handling Timers and
the config lock for you.
You MUST already hold the config_lock when trying to update app.diag.
You MUST NOT hold the diag_lock.
"""
self._diag = diag | [
"def",
"diag",
"(",
"self",
",",
"diag",
":",
"Optional",
"[",
"Diagnostics",
"]",
")",
"->",
"None",
":",
"self",
".",
"_diag",
"=",
"diag"
] | [
343,
4
] | [
352,
25
] | python | en | ['en', 'error', 'th'] | False |
DiagApp._generate_diagnostics | (self) |
Do the heavy lifting of generating Diagnostics for our current configuration.
Really, only the diag() property should be calling this method, but if you
are convinced that you need to call it from elsewhere:
1. You're almost certainly wrong about needing to call it from elsewhere.
2. You MUST hold the diag_lock when calling this method.
3. You MUST NOT hold the config_lock when calling this method.
4. No, really, you're wrong. Don't call this method from anywhere but the
diag() property.
|
Do the heavy lifting of generating Diagnostics for our current configuration.
Really, only the diag() property should be calling this method, but if you
are convinced that you need to call it from elsewhere: | def _generate_diagnostics(self) -> Optional[Diagnostics]:
"""
Do the heavy lifting of generating Diagnostics for our current configuration.
Really, only the diag() property should be calling this method, but if you
are convinced that you need to call it from elsewhere:
1. You're almost certainly wrong about needing to call it from elsewhere.
2. You MUST hold the diag_lock when calling this method.
3. You MUST NOT hold the config_lock when calling this method.
4. No, really, you're wrong. Don't call this method from anywhere but the
diag() property.
"""
# Make sure we have an IR and econf to work with.
if not app.ir or not app.econf:
# Nope, bail.
return None
# OK, go ahead and generate diagnostics. Use the diag_timer to time
# this.
with self.diag_timer:
_diag = Diagnostics(app.ir, app.econf)
# Update some metrics data points given the new generated Diagnostics
diag_dict = _diag.as_dict()
self.diag_errors.set(len(diag_dict.get("errors", [])))
self.diag_notices.set(len(diag_dict.get("notices", [])))
# Note that we've updated diagnostics, since that might trigger a
# timer log.
self.reconf_stats.mark("diag")
return _diag | [
"def",
"_generate_diagnostics",
"(",
"self",
")",
"->",
"Optional",
"[",
"Diagnostics",
"]",
":",
"# Make sure we have an IR and econf to work with.",
"if",
"not",
"app",
".",
"ir",
"or",
"not",
"app",
".",
"econf",
":",
"# Nope, bail.",
"return",
"None",
"# OK, go ahead and generate diagnostics. Use the diag_timer to time",
"# this.",
"with",
"self",
".",
"diag_timer",
":",
"_diag",
"=",
"Diagnostics",
"(",
"app",
".",
"ir",
",",
"app",
".",
"econf",
")",
"# Update some metrics data points given the new generated Diagnostics",
"diag_dict",
"=",
"_diag",
".",
"as_dict",
"(",
")",
"self",
".",
"diag_errors",
".",
"set",
"(",
"len",
"(",
"diag_dict",
".",
"get",
"(",
"\"errors\"",
",",
"[",
"]",
")",
")",
")",
"self",
".",
"diag_notices",
".",
"set",
"(",
"len",
"(",
"diag_dict",
".",
"get",
"(",
"\"notices\"",
",",
"[",
"]",
")",
")",
")",
"# Note that we've updated diagnostics, since that might trigger a",
"# timer log.",
"self",
".",
"reconf_stats",
".",
"mark",
"(",
"\"diag\"",
")",
"return",
"_diag"
] | [
354,
4
] | [
386,
24
] | python | en | ['en', 'error', 'th'] | False |
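The `with self.diag_timer:` block implies a context-manager timer around the expensive Diagnostics construction. A generic stand-in that behaves the same way (the project's actual timer class may differ):

import time
from contextlib import contextmanager

@contextmanager
def timed(name, log=print):
    # Times the enclosed block and reports elapsed wall-clock seconds.
    start = time.perf_counter()
    try:
        yield
    finally:
        log(f"{name} took {time.perf_counter() - start:.3f}s")

# with timed("diagnostics"):
#     diag = Diagnostics(ir, econf)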
create_expectation_suite | (
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
) |
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
|
Create a new expectation suite. | def create_expectation_suite(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
):
"""
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
"""
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if show_intro_message and not empty_suite:
cli_message(
"\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
# select_datasource takes care of displaying an error message, so all that is left here is to exit.
sys.exit(1)
datasource_name = data_source.name
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
sys.exit(1)
if (
batch_kwargs_generator_name is None
or data_asset_name is None
or batch_kwargs is None
):
(
datasource_name,
batch_kwargs_generator_name,
data_asset_name,
batch_kwargs,
) = get_batch_kwargs(
context,
datasource_name=datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
additional_batch_kwargs=additional_batch_kwargs,
)
# In this case, we have "consumed" the additional_batch_kwargs
additional_batch_kwargs = {}
if expectation_suite_name is None:
default_expectation_suite_name = _get_default_expectation_suite_name(
batch_kwargs, data_asset_name
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
else:
break
if empty_suite:
create_empty_suite(context, expectation_suite_name, batch_kwargs)
return True, expectation_suite_name, None
profiling_results = _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
)
if flag_build_docs:
build_docs(context, view=False)
if open_docs:
attempt_to_open_validation_results_in_data_docs(context, profiling_results)
return True, expectation_suite_name, profiling_results | [
"def",
"create_expectation_suite",
"(",
"context",
",",
"datasource_name",
"=",
"None",
",",
"batch_kwargs_generator_name",
"=",
"None",
",",
"generator_asset",
"=",
"None",
",",
"batch_kwargs",
"=",
"None",
",",
"expectation_suite_name",
"=",
"None",
",",
"additional_batch_kwargs",
"=",
"None",
",",
"empty_suite",
"=",
"False",
",",
"show_intro_message",
"=",
"False",
",",
"flag_build_docs",
"=",
"True",
",",
"open_docs",
"=",
"False",
",",
"profiler_configuration",
"=",
"\"demo\"",
",",
"data_asset_name",
"=",
"None",
",",
")",
":",
"if",
"generator_asset",
":",
"warnings",
".",
"warn",
"(",
"\"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. \"",
"\"Please update code accordingly.\"",
",",
"DeprecationWarning",
",",
")",
"data_asset_name",
"=",
"generator_asset",
"if",
"show_intro_message",
"and",
"not",
"empty_suite",
":",
"cli_message",
"(",
"\"\\n<cyan>========== Create sample Expectations ==========</cyan>\\n\\n\"",
")",
"data_source",
"=",
"select_datasource",
"(",
"context",
",",
"datasource_name",
"=",
"datasource_name",
")",
"if",
"data_source",
"is",
"None",
":",
"# select_datasource takes care of displaying an error message, so all is left here is to exit.",
"sys",
".",
"exit",
"(",
"1",
")",
"datasource_name",
"=",
"data_source",
".",
"name",
"if",
"expectation_suite_name",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
":",
"tell_user_suite_exists",
"(",
"expectation_suite_name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"(",
"batch_kwargs_generator_name",
"is",
"None",
"or",
"data_asset_name",
"is",
"None",
"or",
"batch_kwargs",
"is",
"None",
")",
":",
"(",
"datasource_name",
",",
"batch_kwargs_generator_name",
",",
"data_asset_name",
",",
"batch_kwargs",
",",
")",
"=",
"get_batch_kwargs",
"(",
"context",
",",
"datasource_name",
"=",
"datasource_name",
",",
"batch_kwargs_generator_name",
"=",
"batch_kwargs_generator_name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
"additional_batch_kwargs",
"=",
"additional_batch_kwargs",
",",
")",
"# In this case, we have \"consumed\" the additional_batch_kwargs",
"additional_batch_kwargs",
"=",
"{",
"}",
"if",
"expectation_suite_name",
"is",
"None",
":",
"default_expectation_suite_name",
"=",
"_get_default_expectation_suite_name",
"(",
"batch_kwargs",
",",
"data_asset_name",
")",
"while",
"True",
":",
"expectation_suite_name",
"=",
"click",
".",
"prompt",
"(",
"\"\\nName the new Expectation Suite\"",
",",
"default",
"=",
"default_expectation_suite_name",
",",
")",
"if",
"expectation_suite_name",
"in",
"context",
".",
"list_expectation_suite_names",
"(",
")",
":",
"tell_user_suite_exists",
"(",
"expectation_suite_name",
")",
"else",
":",
"break",
"if",
"empty_suite",
":",
"create_empty_suite",
"(",
"context",
",",
"expectation_suite_name",
",",
"batch_kwargs",
")",
"return",
"True",
",",
"expectation_suite_name",
",",
"None",
"profiling_results",
"=",
"_profile_to_create_a_suite",
"(",
"additional_batch_kwargs",
",",
"batch_kwargs",
",",
"batch_kwargs_generator_name",
",",
"context",
",",
"datasource_name",
",",
"expectation_suite_name",
",",
"data_asset_name",
",",
"profiler_configuration",
",",
")",
"if",
"flag_build_docs",
":",
"build_docs",
"(",
"context",
",",
"view",
"=",
"False",
")",
"if",
"open_docs",
":",
"attempt_to_open_validation_results_in_data_docs",
"(",
"context",
",",
"profiling_results",
")",
"return",
"True",
",",
"expectation_suite_name",
",",
"profiling_results"
] | [
61,
0
] | [
162,
58
] | python | en | ['en', 'error', 'th'] | False |
load_expectation_suite | (
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) |
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
|
Load an expectation suite from a given context. | def load_expectation_suite(
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) -> ExpectationSuite:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
"""
if suite_name.endswith(".json"):
suite_name = suite_name[:-5]
try:
suite = context.get_expectation_suite(suite_name)
return suite
except ge_exceptions.DataContextError as e:
exit_with_failure_message_and_stats(
context,
usage_event,
f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
) | [
"def",
"load_expectation_suite",
"(",
"# TODO consolidate all the myriad CLI tests into this",
"context",
":",
"DataContext",
",",
"suite_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
")",
"->",
"ExpectationSuite",
":",
"if",
"suite_name",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"suite_name",
"=",
"suite_name",
"[",
":",
"-",
"5",
"]",
"try",
":",
"suite",
"=",
"context",
".",
"get_expectation_suite",
"(",
"suite_name",
")",
"return",
"suite",
"except",
"ge_exceptions",
".",
"DataContextError",
"as",
"e",
":",
"exit_with_failure_message_and_stats",
"(",
"context",
",",
"usage_event",
",",
"f\"<red>Could not find a suite named `{suite_name}`.</red> Please check \"",
"\"the name by running `great_expectations suite list` and try again.\"",
",",
")"
] | [
318,
0
] | [
341,
9
] | python | en | ['en', 'error', 'th'] | False |
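The manual `suite_name[:-5]` slice predates str.removesuffix; on Python 3.9+ the same normalization can be written more directly (a sketch, not the project's code):

def normalize_suite_name(suite_name: str) -> str:
    # "warnings.json" -> "warnings"; names without the suffix pass through unchanged.
    return suite_name.removesuffix(".json")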
load_checkpoint | (
context: DataContext,
checkpoint_name: str,
usage_event: str,
) | Load a checkpoint or raise helpful errors. | Load a checkpoint or raise helpful errors. | def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
context,
usage_event,
f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
)
except ge_exceptions.CheckpointError as e:
exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>") | [
"def",
"load_checkpoint",
"(",
"context",
":",
"DataContext",
",",
"checkpoint_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
")",
"->",
"Union",
"[",
"Checkpoint",
",",
"LegacyCheckpoint",
"]",
":",
"try",
":",
"checkpoint",
":",
"Union",
"[",
"Checkpoint",
",",
"LegacyCheckpoint",
"]",
"=",
"context",
".",
"get_checkpoint",
"(",
"name",
"=",
"checkpoint_name",
")",
"return",
"checkpoint",
"except",
"(",
"ge_exceptions",
".",
"CheckpointNotFoundError",
",",
"ge_exceptions",
".",
"InvalidCheckpointConfigError",
",",
")",
":",
"exit_with_failure_message_and_stats",
"(",
"context",
",",
"usage_event",
",",
"f\"\"\"\\\n<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:\n - `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists\n - `<green>great_expectations checkpoint new</green>` to configure a new checkpoint\"\"\"",
",",
")",
"except",
"ge_exceptions",
".",
"CheckpointError",
"as",
"e",
":",
"exit_with_failure_message_and_stats",
"(",
"context",
",",
"usage_event",
",",
"f\"<red>{e}</red>\"",
")"
] | [
352,
0
] | [
376,
84
] | python | en | ['en', 'en', 'en'] | True |
select_datasource | (context: DataContext, datasource_name: str = None) | Select a datasource interactively. | Select a datasource interactively. | def select_datasource(context: DataContext, datasource_name: str = None) -> Datasource:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source = None
if datasource_name is None:
data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
if len(data_sources) == 0:
cli_message(
"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0]["name"]
else:
choices = "\n".join(
[
" {}. {}".format(i, data_source["name"])
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection = click.prompt(
"Select a datasource" + "\n" + choices + "\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1]["name"]
if datasource_name is not None:
data_source = context.get_datasource(datasource_name)
return data_source | [
"def",
"select_datasource",
"(",
"context",
":",
"DataContext",
",",
"datasource_name",
":",
"str",
"=",
"None",
")",
"->",
"Datasource",
":",
"# TODO consolidate all the myriad CLI tests into this",
"data_source",
"=",
"None",
"if",
"datasource_name",
"is",
"None",
":",
"data_sources",
"=",
"sorted",
"(",
"context",
".",
"list_datasources",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"\"name\"",
"]",
")",
"if",
"len",
"(",
"data_sources",
")",
"==",
"0",
":",
"cli_message",
"(",
"\"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>\"",
")",
"elif",
"len",
"(",
"data_sources",
")",
"==",
"1",
":",
"datasource_name",
"=",
"data_sources",
"[",
"0",
"]",
"[",
"\"name\"",
"]",
"else",
":",
"choices",
"=",
"\"\\n\"",
".",
"join",
"(",
"[",
"\" {}. {}\"",
".",
"format",
"(",
"i",
",",
"data_source",
"[",
"\"name\"",
"]",
")",
"for",
"i",
",",
"data_source",
"in",
"enumerate",
"(",
"data_sources",
",",
"1",
")",
"]",
")",
"option_selection",
"=",
"click",
".",
"prompt",
"(",
"\"Select a datasource\"",
"+",
"\"\\n\"",
"+",
"choices",
"+",
"\"\\n\"",
",",
"type",
"=",
"click",
".",
"Choice",
"(",
"[",
"str",
"(",
"i",
")",
"for",
"i",
",",
"data_source",
"in",
"enumerate",
"(",
"data_sources",
",",
"1",
")",
"]",
")",
",",
"show_choices",
"=",
"False",
",",
")",
"datasource_name",
"=",
"data_sources",
"[",
"int",
"(",
"option_selection",
")",
"-",
"1",
"]",
"[",
"\"name\"",
"]",
"if",
"datasource_name",
"is",
"not",
"None",
":",
"data_source",
"=",
"context",
".",
"get_datasource",
"(",
"datasource_name",
")",
"return",
"data_source"
] | [
379,
0
] | [
411,
22
] | python | en | ['en', 'en', 'en'] | True |
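The interactive branch above is a reusable numbered-menu idiom with click: render " N. name" lines, then constrain input with click.Choice while suppressing the choice list click would otherwise append to the prompt. A standalone sketch:

import click

def pick_one(prompt, names):
    # Build " 1. name" lines and accept only a valid 1-based index.
    menu = "\n".join(f" {i}. {name}" for i, name in enumerate(names, 1))
    selection = click.prompt(
        f"{prompt}\n{menu}\n",
        type=click.Choice([str(i) for i in range(1, len(names) + 1)]),
        show_choices=False,
    )
    return names[int(selection) - 1]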
load_data_context_with_error_handling | (
directory: str, from_cli_upgrade_command: bool = False
) | Return a DataContext with good error handling and exit codes. | Return a DataContext with good error handling and exit codes. | def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
"""Return a DataContext with good error handling and exit codes."""
try:
context: DataContext = DataContext(context_root_dir=directory)
ge_config_version: int = context.get_config().config_version
if (
from_cli_upgrade_command
and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
):
directory = directory or context.root_directory
(
increment_version,
exception_occurred,
) = upgrade_project_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
context = DataContext(context_root_dir=directory)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
else:
cli_message("<red>{}</red>".format(err.message))
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message("<red>{}</red>".format(err.message))
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(f"<red>{str(err)}</red>")
sys.exit(1) | [
"def",
"load_data_context_with_error_handling",
"(",
"directory",
":",
"str",
",",
"from_cli_upgrade_command",
":",
"bool",
"=",
"False",
")",
"->",
"DataContext",
":",
"try",
":",
"context",
":",
"DataContext",
"=",
"DataContext",
"(",
"context_root_dir",
"=",
"directory",
")",
"ge_config_version",
":",
"int",
"=",
"context",
".",
"get_config",
"(",
")",
".",
"config_version",
"if",
"(",
"from_cli_upgrade_command",
"and",
"int",
"(",
"ge_config_version",
")",
"<",
"CURRENT_GE_CONFIG_VERSION",
")",
":",
"directory",
"=",
"directory",
"or",
"context",
".",
"root_directory",
"(",
"increment_version",
",",
"exception_occurred",
",",
")",
"=",
"upgrade_project_one_version_increment",
"(",
"context_root_dir",
"=",
"directory",
",",
"ge_config_version",
"=",
"ge_config_version",
",",
"continuation_message",
"=",
"EXIT_UPGRADE_CONTINUATION_MESSAGE",
",",
"from_cli_upgrade_command",
"=",
"from_cli_upgrade_command",
",",
")",
"if",
"not",
"exception_occurred",
"and",
"increment_version",
":",
"context",
"=",
"DataContext",
"(",
"context_root_dir",
"=",
"directory",
")",
"return",
"context",
"except",
"ge_exceptions",
".",
"UnsupportedConfigVersionError",
"as",
"err",
":",
"directory",
"=",
"directory",
"or",
"DataContext",
".",
"find_context_root_dir",
"(",
")",
"ge_config_version",
"=",
"DataContext",
".",
"get_ge_config_version",
"(",
"context_root_dir",
"=",
"directory",
")",
"upgrade_helper_class",
"=",
"(",
"GE_UPGRADE_HELPER_VERSION_MAP",
".",
"get",
"(",
"int",
"(",
"ge_config_version",
")",
")",
"if",
"ge_config_version",
"else",
"None",
")",
"if",
"upgrade_helper_class",
"and",
"ge_config_version",
"<",
"CURRENT_GE_CONFIG_VERSION",
":",
"upgrade_project",
"(",
"context_root_dir",
"=",
"directory",
",",
"ge_config_version",
"=",
"ge_config_version",
",",
"from_cli_upgrade_command",
"=",
"from_cli_upgrade_command",
",",
")",
"else",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"err",
".",
"message",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"(",
"ge_exceptions",
".",
"ConfigNotFoundError",
",",
"ge_exceptions",
".",
"InvalidConfigError",
",",
")",
"as",
"err",
":",
"cli_message",
"(",
"\"<red>{}</red>\"",
".",
"format",
"(",
"err",
".",
"message",
")",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"ge_exceptions",
".",
"PluginModuleNotFoundError",
"as",
"err",
":",
"cli_message",
"(",
"err",
".",
"cli",
".",
"v012_colored_message",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"ge_exceptions",
".",
"PluginClassNotFoundError",
"as",
"err",
":",
"cli_message",
"(",
"err",
".",
"cli",
".",
"v012_colored_message",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"except",
"ge_exceptions",
".",
"InvalidConfigurationYamlError",
"as",
"err",
":",
"cli_message",
"(",
"f\"<red>{str(err)}</red>\"",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | [
414,
0
] | [
471,
19
] | python | en | ['en', 'en', 'en'] | True |
confirm_proceed_or_exit | (
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
) |
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you have seen one command, you know what to expect from all the others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
|
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation | def confirm_proceed_or_exit(
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
) -> Optional[bool]:
"""
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you have seen one command, you know what to expect from all the others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
"""
confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
continuation_message_colorized = cli_colorize_string(continuation_message)
if not click.confirm(confirm_prompt_colorized, default=True):
if exit_on_no:
cli_message(continuation_message_colorized)
sys.exit(exit_code)
else:
return False
return True | [
"def",
"confirm_proceed_or_exit",
"(",
"confirm_prompt",
":",
"str",
"=",
"\"Would you like to proceed?\"",
",",
"continuation_message",
":",
"str",
"=",
"\"Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !\"",
",",
"exit_on_no",
":",
"bool",
"=",
"True",
",",
"exit_code",
":",
"int",
"=",
"0",
",",
")",
"->",
"Optional",
"[",
"bool",
"]",
":",
"confirm_prompt_colorized",
"=",
"cli_colorize_string",
"(",
"confirm_prompt",
")",
"continuation_message_colorized",
"=",
"cli_colorize_string",
"(",
"continuation_message",
")",
"if",
"not",
"click",
".",
"confirm",
"(",
"confirm_prompt_colorized",
",",
"default",
"=",
"True",
")",
":",
"if",
"exit_on_no",
":",
"cli_message",
"(",
"continuation_message_colorized",
")",
"sys",
".",
"exit",
"(",
"exit_code",
")",
"else",
":",
"return",
"False",
"return",
"True"
] | [
599,
0
] | [
626,
15
] | python | en | ['en', 'error', 'th'] | False |
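A typical call site follows the pattern the docstring prescribes: state the side effects first, then gate on confirmation. In this sketch the messages and the cleanup hook are hypothetical:

cli_message("This will overwrite the existing expectation suite file.")
proceed = confirm_proceed_or_exit(
    confirm_prompt="Overwrite and continue?",
    exit_on_no=False,  # let the caller clean up instead of exiting here
)
if not proceed:
    remove_partial_files()  # hypothetical cleanup before the caller exits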
build_parameter_container_for_variables | (
variables_configs: Dict[str, Any]
) |
Build a ParameterContainer for all of the profiler config variables passed as key value pairs
Args:
variables_configs: Variable key: value pairs e.g. {"variable_name": variable_value, ...}
Returns:
ParameterContainer containing all variables
|
Build a ParameterContainer for all of the profiler config variables passed as key value pairs
Args:
variables_configs: Variable key: value pairs e.g. {"variable_name": variable_value, ...} | def build_parameter_container_for_variables(
variables_configs: Dict[str, Any]
) -> ParameterContainer:
"""
Build a ParameterContainer for all of the profiler config variables passed as key value pairs
Args:
variables_configs: Variable key: value pairs e.g. {"variable_name": variable_value, ...}
Returns:
ParameterContainer containing all variables
"""
variable_config_key: str
variable_config_value: Any
parameter_values: Dict[str, Any] = {}
for variable_config_key, variable_config_value in variables_configs.items():
variable_config_key = f"{VARIABLES_KEY}{variable_config_key}"
parameter_values[variable_config_key] = variable_config_value
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
build_parameter_container(
parameter_container=parameter_container, parameter_values=parameter_values
)
return parameter_container | [
"def",
"build_parameter_container_for_variables",
"(",
"variables_configs",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"ParameterContainer",
":",
"variable_config_key",
":",
"str",
"variable_config_value",
":",
"Any",
"parameter_values",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"{",
"}",
"for",
"variable_config_key",
",",
"variable_config_value",
"in",
"variables_configs",
".",
"items",
"(",
")",
":",
"variable_config_key",
"=",
"f\"{VARIABLES_KEY}{variable_config_key}\"",
"parameter_values",
"[",
"variable_config_key",
"]",
"=",
"variable_config_value",
"parameter_container",
":",
"ParameterContainer",
"=",
"ParameterContainer",
"(",
"parameter_nodes",
"=",
"None",
")",
"build_parameter_container",
"(",
"parameter_container",
"=",
"parameter_container",
",",
"parameter_values",
"=",
"parameter_values",
")",
"return",
"parameter_container"
] | [
132,
0
] | [
155,
30
] | python | en | ['en', 'error', 'th'] | False |
build_parameter_container | (
parameter_container: ParameterContainer,
parameter_values: Dict[str, Any],
) |
Builds the ParameterNode trees, corresponding to the fully_qualified_parameter_name first-level keys.
:param parameter_container initialized ParameterContainer for all ParameterNode trees
:param parameter_values
Example of the name-value structure for building parameters (matching the type hint in the method signature):
{
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds.value": 100, # Actual value can of Any type.
# The "details" dictionary is Optional.
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds.details": {
"max_abs_error_time_milliseconds": {
"confidence": { # Arbitrary dictionary key
"success_ratio": 1.0, # Arbitrary entries
"comment": "matched template", # Arbitrary entries
}
},
},
# While highly recommended, the use of ".value" and ".details" keys is conventional (it is not enforced).
"$parameter.tolerances.mostly": 9.0e-1, # The key here does not end on ".value" and no ".details" is provided.
...
}
:return parameter_container holds the dictionary of ParameterNode objects corresponding to roots of parameter names
This function loops through the supplied pairs of fully-qualified parameter names and their corresponding values
(and any "details") and builds the tree under a single root-level ParameterNode object for a "name space".
In particular, if any ParameterNode object in the tree (starting with the root-level ParameterNode object) already
exists, it is reused; in other words, ParameterNode objects are unique per part of fully-qualified parameter names.
|
Builds the ParameterNode trees, corresponding to the fully_qualified_parameter_name first-level keys. | def build_parameter_container(
parameter_container: ParameterContainer,
parameter_values: Dict[str, Any],
):
"""
Builds the ParameterNode trees, corresponding to the fully_qualified_parameter_name first-level keys.
:param parameter_container initialized ParameterContainer for all ParameterNode trees
:param parameter_values
Example of the name-value structure for building parameters (matching the type hint in the method signature):
{
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds.value": 100, # Actual value can of Any type.
# The "details" dictionary is Optional.
"$parameter.date_strings.tolerances.max_abs_error_time_milliseconds.details": {
"max_abs_error_time_milliseconds": {
"confidence": { # Arbitrary dictionary key
"success_ratio": 1.0, # Arbitrary entries
"comment": "matched template", # Arbitrary entries
}
},
},
# While highly recommended, the use of ".value" and ".details" keys is conventional (it is not enforced).
"$parameter.tolerances.mostly": 9.0e-1, # The key here does not end on ".value" and no ".details" is provided.
...
}
:return parameter_container holds the dictionary of ParameterNode objects corresponding to roots of parameter names
This function loops through the supplied pairs of fully-qualified parameter names and their corresponding values
(and any "details") and builds the tree under a single root-level ParameterNode object for a "name space".
In particular, if any ParameterNode object in the tree (starting with the root-level ParameterNode object) already
exists, it is reused; in other words, ParameterNode objects are unique per part of fully-qualified parameter names.
"""
parameter_node: Optional[ParameterNode]
fully_qualified_parameter_name: str
parameter_value: Any
fully_qualified_parameter_name_as_list: List[str]
parameter_name_root: str
for (
fully_qualified_parameter_name,
parameter_value,
) in parameter_values.items():
validate_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name
)
fully_qualified_parameter_name_as_list = fully_qualified_parameter_name[
1:
].split(FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER)
parameter_name_root = fully_qualified_parameter_name_as_list[0]
parameter_node = parameter_container.get_parameter_node(
parameter_name_root=parameter_name_root
)
if parameter_node is None:
parameter_node = ParameterNode({})
parameter_container.set_parameter_node(
parameter_name_root=parameter_name_root, parameter_node=parameter_node
)
_build_parameter_node_tree_for_one_parameter(
parameter_node=parameter_node,
parameter_name_as_list=fully_qualified_parameter_name_as_list,
parameter_value=parameter_value,
) | [
"def",
"build_parameter_container",
"(",
"parameter_container",
":",
"ParameterContainer",
",",
"parameter_values",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
",",
")",
":",
"parameter_node",
":",
"Optional",
"[",
"ParameterNode",
"]",
"fully_qualified_parameter_name",
":",
"str",
"parameter_value",
":",
"Any",
"fully_qualified_parameter_name_as_list",
":",
"List",
"[",
"str",
"]",
"parameter_name_root",
":",
"str",
"for",
"(",
"fully_qualified_parameter_name",
",",
"parameter_value",
",",
")",
"in",
"parameter_values",
".",
"items",
"(",
")",
":",
"validate_fully_qualified_parameter_name",
"(",
"fully_qualified_parameter_name",
"=",
"fully_qualified_parameter_name",
")",
"fully_qualified_parameter_name_as_list",
"=",
"fully_qualified_parameter_name",
"[",
"1",
":",
"]",
".",
"split",
"(",
"FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER",
")",
"parameter_name_root",
"=",
"fully_qualified_parameter_name_as_list",
"[",
"0",
"]",
"parameter_node",
"=",
"parameter_container",
".",
"get_parameter_node",
"(",
"parameter_name_root",
"=",
"parameter_name_root",
")",
"if",
"parameter_node",
"is",
"None",
":",
"parameter_node",
"=",
"ParameterNode",
"(",
"{",
"}",
")",
"parameter_container",
".",
"set_parameter_node",
"(",
"parameter_name_root",
"=",
"parameter_name_root",
",",
"parameter_node",
"=",
"parameter_node",
")",
"_build_parameter_node_tree_for_one_parameter",
"(",
"parameter_node",
"=",
"parameter_node",
",",
"parameter_name_as_list",
"=",
"fully_qualified_parameter_name_as_list",
",",
"parameter_value",
"=",
"parameter_value",
",",
")"
] | [
158,
0
] | [
218,
9
] | python | en | ['en', 'error', 'th'] | False |
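The record above builds on Great Expectations' internal ParameterContainer and ParameterNode types. As a minimal, self-contained sketch of the same tree-building idea, the snippet below uses plain dicts; every name in it is illustrative rather than part of the recorded library.

from typing import Any, Dict

SEPARATOR = "."  # stands in for FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER

def build_tree(parameter_values: Dict[str, Any]) -> Dict[str, Any]:
    """Build nested dicts from "$"-prefixed, dot-delimited parameter names."""
    roots: Dict[str, Any] = {}
    for fq_name, value in parameter_values.items():
        parts = fq_name.lstrip("$").split(SEPARATOR)
        node = roots
        for part in parts[:-1]:      # reuse existing nodes, create missing ones
            node = node.setdefault(part, {})
        node[parts[-1]] = value      # the leaf holds the supplied value
    return roots

tree = build_tree({
    "$parameter.date_strings.tolerances.max_abs_error_time_milliseconds.value": 100,
    "$parameter.tolerances.mostly": 9.0e-1,
})
assert tree["parameter"]["tolerances"]["mostly"] == 0.9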
_build_parameter_node_tree_for_one_parameter | (
parameter_node: ParameterNode,
parameter_name_as_list: List[str],
parameter_value: Any,
) |
Recursively builds a tree of ParameterNode objects, creating new ParameterNode objects parsimoniously (i.e., only if
a ParameterNode object corresponding to a part of the fully-qualified parameter name in a "name space" does not already exist).
:param parameter_node: root-level ParameterNode for the sub-tree, characterized by the first parameter name in list
:param parameter_name_as_list: list of parts of a fully-qualified parameter name of sub-tree (or sub "name space")
:param parameter_value: value pertaining to the last part of the fully-qualified parameter name ("leaf node")
|
Recursively builds a tree of ParameterNode objects, creating new ParameterNode objects parsimoniously (i.e., only if
a ParameterNode object corresponding to a part of the fully-qualified parameter name in a "name space" does not already exist).
:param parameter_node: root-level ParameterNode for the sub-tree, characterized by the first parameter name in list
:param parameter_name_as_list: list of parts of a fully-qualified parameter name of sub-tree (or sub "name space")
:param parameter_value: value pertaining to the last part of the fully-qualified parameter name ("leaf node")
| def _build_parameter_node_tree_for_one_parameter(
parameter_node: ParameterNode,
parameter_name_as_list: List[str],
parameter_value: Any,
):
"""
Recursively builds a tree of ParameterNode objects, creating new ParameterNode objects parsimoniously (i.e., only if
a ParameterNode object corresponding to a part of the fully-qualified parameter name in a "name space" does not already exist).
:param parameter_node: root-level ParameterNode for the sub-tree, characterized by the first parameter name in list
:param parameter_name_as_list: list of parts of a fully-qualified parameter name of sub-tree (or sub "name space")
:param parameter_value: value pertaining to the last part of the fully-qualified parameter name ("leaf node")
"""
parameter_name_part: str = parameter_name_as_list[0]
# If the fully-qualified parameter name (or "name space") is still compound (i.e., not at "leaf node" / last part),
# then build the sub-tree, creating the descendant ParameterNode (to hold the sub-tree), if no descendants exist.
if len(parameter_name_as_list) > 1:
if parameter_name_part not in parameter_node:
parameter_node[parameter_name_part] = ParameterNode({})
_build_parameter_node_tree_for_one_parameter(
parameter_node=parameter_node[parameter_name_part],
parameter_name_as_list=parameter_name_as_list[1:],
parameter_value=parameter_value,
)
else:
# If the fully-qualified parameter name (or "name space") is trivial (i.e., at "leaf node" / last part), then
# store the supplied attribute value into the given ParameterNode using leaf "parameter_name_part" name as key.
parameter_node[parameter_name_part] = parameter_value | [
"def",
"_build_parameter_node_tree_for_one_parameter",
"(",
"parameter_node",
":",
"ParameterNode",
",",
"parameter_name_as_list",
":",
"List",
"[",
"str",
"]",
",",
"parameter_value",
":",
"Any",
",",
")",
":",
"parameter_name_part",
":",
"str",
"=",
"parameter_name_as_list",
"[",
"0",
"]",
"# If the fully-qualified parameter name (or \"name space\") is still compound (i.e., not at \"leaf node\" / last part),",
"# then build the sub-tree, creating the descendant ParameterNode (to hold the sub-tree), if no descendants exist.",
"if",
"len",
"(",
"parameter_name_as_list",
")",
">",
"1",
":",
"if",
"parameter_name_part",
"not",
"in",
"parameter_node",
":",
"parameter_node",
"[",
"parameter_name_part",
"]",
"=",
"ParameterNode",
"(",
"{",
"}",
")",
"_build_parameter_node_tree_for_one_parameter",
"(",
"parameter_node",
"=",
"parameter_node",
"[",
"parameter_name_part",
"]",
",",
"parameter_name_as_list",
"=",
"parameter_name_as_list",
"[",
"1",
":",
"]",
",",
"parameter_value",
"=",
"parameter_value",
",",
")",
"else",
":",
"# If the fully-qualified parameter name (or \"name space\") is trivial (i.e., at \"leaf node\" / last part), then",
"# store the supplied attribute value into the given ParameterNode using leaf \"parameter_name_part\" name as key.",
"parameter_node",
"[",
"parameter_name_part",
"]",
"=",
"parameter_value"
] | [
221,
0
] | [
248,
61
] | python | en | ['en', 'error', 'th'] | False |
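The recursive helper above has two cases: descend (creating the child lazily) while the name is still compound, and store the value at the leaf. The same logic mirrored on a plain dict, with illustrative names only:

from typing import Any, Dict, List

def insert_recursive(node: Dict[str, Any], parts: List[str], value: Any) -> None:
    head = parts[0]
    if len(parts) > 1:
        # Still compound: create the child node only if it does not exist yet.
        insert_recursive(node.setdefault(head, {}), parts[1:], value)
    else:
        # Trivial ("leaf") case: store the value under the last name part.
        node[head] = value

root: Dict[str, Any] = {}
insert_recursive(root, ["tolerances", "mostly"], 0.9)
insert_recursive(root, ["tolerances", "max_abs_error"], 100)
assert root == {"tolerances": {"mostly": 0.9, "max_abs_error": 100}}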
get_parameter_value_by_fully_qualified_parameter_name | (
fully_qualified_parameter_name: str,
domain: Optional[Domain] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) |
Get the parameter value from the current rule state using the fully-qualified parameter name.
A fully-qualified parameter name must be a dot-delimited string, or the name of a parameter (without the dots).
Args
:param fully_qualified_parameter_name: str -- A dot-separated string key starting with $ for fetching parameters
:param domain: Domain -- current Domain of interest
:param variables
:param parameters
:return: Optional[Union[Any, ParameterNode]] object corresponding to the last part of the fully-qualified parameter
name supplied as argument -- a value (of type "Any") or a ParameterNode object (containing the sub-tree structure).
|
Get the parameter value from the current rule state using the fully-qualified parameter name.
A fully-qualified parameter name must be a dot-delimited string, or the name of a parameter (without the dots).
Args
:param fully_qualified_parameter_name: str -- A dot-separated string key starting with $ for fetching parameters
:param domain: Domain -- current Domain of interest
:param variables
:param parameters
:return: Optional[Union[Any, ParameterNode]] object corresponding to the last part of the fully-qualified parameter
name supplied as argument -- a value (of type "Any") or a ParameterNode object (containing the sub-tree structure).
| def get_parameter_value_by_fully_qualified_parameter_name(
fully_qualified_parameter_name: str,
domain: Optional[Domain] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Union[Any, ParameterNode]]:
"""
Get the parameter value from the current rule state using the fully-qualified parameter name.
A fully-qualified parameter name must be a dot-delimited string, or the name of a parameter (without the dots).
Args
:param fully_qualified_parameter_name: str -- A dot-separated string key starting with $ for fetching parameters
:param domain: Domain -- current Domain of interest
:param variables
:param parameters
:return: Optional[Union[Any, ParameterNode]] object corresponding to the last part of the fully-qualified parameter
name supplied as argument -- a value (of type "Any") or a ParameterNode object (containing the sub-tree structure).
"""
validate_fully_qualified_parameter_name(
fully_qualified_parameter_name=fully_qualified_parameter_name
)
# Using "__getitem__" (bracket) notation instead of "__getattr__" (dot) notation in order to insure the
# compatibility of field names (e.g., "domain_kwargs") with user-facing syntax (as governed by the value of the
# DOMAIN_KWARGS_PARAMETER_NAME constant, which may change, requiring the same change to the field name).
if fully_qualified_parameter_name == DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME:
if domain:
# Supports the "$domain.domain_kwargs" style syntax.
return domain[DOMAIN_KWARGS_PARAMETER_NAME]
return None
if fully_qualified_parameter_name.startswith(
DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME
):
if domain and domain[DOMAIN_KWARGS_PARAMETER_NAME]:
# Supports the "$domain.domain_kwargs.column" style syntax.
return domain[DOMAIN_KWARGS_PARAMETER_NAME].get(
fully_qualified_parameter_name[
(len(DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME) + 1) :
]
)
return None
parameter_container: ParameterContainer
if fully_qualified_parameter_name.startswith(VARIABLES_KEY):
fully_qualified_parameter_name = fully_qualified_parameter_name[1:]
parameter_container = variables
else:
fully_qualified_parameter_name = fully_qualified_parameter_name[1:]
parameter_container = parameters[domain.id]
fully_qualified_parameter_name_as_list: List[
str
] = fully_qualified_parameter_name.split(
FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER
)
if len(fully_qualified_parameter_name_as_list) == 0:
return None
return _get_parameter_value_from_parameter_container(
fully_qualified_parameter_name=fully_qualified_parameter_name,
fully_qualified_parameter_name_as_list=fully_qualified_parameter_name_as_list,
parameter_container=parameter_container,
) | [
"def",
"get_parameter_value_by_fully_qualified_parameter_name",
"(",
"fully_qualified_parameter_name",
":",
"str",
",",
"domain",
":",
"Optional",
"[",
"Domain",
"]",
"=",
"None",
",",
"variables",
":",
"Optional",
"[",
"ParameterContainer",
"]",
"=",
"None",
",",
"parameters",
":",
"Optional",
"[",
"Dict",
"[",
"str",
",",
"ParameterContainer",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"Union",
"[",
"Any",
",",
"ParameterNode",
"]",
"]",
":",
"validate_fully_qualified_parameter_name",
"(",
"fully_qualified_parameter_name",
"=",
"fully_qualified_parameter_name",
")",
"# Using \"__getitem__\" (bracket) notation instead of \"__getattr__\" (dot) notation in order to insure the",
"# compatibility of field names (e.g., \"domain_kwargs\") with user-facing syntax (as governed by the value of the",
"# DOMAIN_KWARGS_PARAMETER_NAME constant, which may change, requiring the same change to the field name).",
"if",
"fully_qualified_parameter_name",
"==",
"DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME",
":",
"if",
"domain",
":",
"# Supports the \"$domain.domain_kwargs\" style syntax.",
"return",
"domain",
"[",
"DOMAIN_KWARGS_PARAMETER_NAME",
"]",
"return",
"None",
"if",
"fully_qualified_parameter_name",
".",
"startswith",
"(",
"DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME",
")",
":",
"if",
"domain",
"and",
"domain",
"[",
"DOMAIN_KWARGS_PARAMETER_NAME",
"]",
":",
"# Supports the \"$domain.domain_kwargs.column\" style syntax.",
"return",
"domain",
"[",
"DOMAIN_KWARGS_PARAMETER_NAME",
"]",
".",
"get",
"(",
"fully_qualified_parameter_name",
"[",
"(",
"len",
"(",
"DOMAIN_KWARGS_PARAMETER_FULLY_QUALIFIED_NAME",
")",
"+",
"1",
")",
":",
"]",
")",
"return",
"None",
"parameter_container",
":",
"ParameterContainer",
"if",
"fully_qualified_parameter_name",
".",
"startswith",
"(",
"VARIABLES_KEY",
")",
":",
"fully_qualified_parameter_name",
"=",
"fully_qualified_parameter_name",
"[",
"1",
":",
"]",
"parameter_container",
"=",
"variables",
"else",
":",
"fully_qualified_parameter_name",
"=",
"fully_qualified_parameter_name",
"[",
"1",
":",
"]",
"parameter_container",
"=",
"parameters",
"[",
"domain",
".",
"id",
"]",
"fully_qualified_parameter_name_as_list",
":",
"List",
"[",
"str",
"]",
"=",
"fully_qualified_parameter_name",
".",
"split",
"(",
"FULLY_QUALIFIED_PARAMETER_NAME_SEPARATOR_CHARACTER",
")",
"if",
"len",
"(",
"fully_qualified_parameter_name_as_list",
")",
"==",
"0",
":",
"return",
"None",
"return",
"_get_parameter_value_from_parameter_container",
"(",
"fully_qualified_parameter_name",
"=",
"fully_qualified_parameter_name",
",",
"fully_qualified_parameter_name_as_list",
"=",
"fully_qualified_parameter_name_as_list",
",",
"parameter_container",
"=",
"parameter_container",
",",
")"
] | [
251,
0
] | [
315,
5
] | python | en | ['en', 'error', 'th'] | False |
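Reading a value back is the reverse walk. The recorded function additionally resolves the "$variables" and "$domain.domain_kwargs" prefixes, which require the library's Domain and ParameterContainer types; the dict-based sketch below covers only the plain lookup path and is illustrative:

from typing import Any, Dict, Optional

def get_value(roots: Dict[str, Any], fq_name: str) -> Optional[Any]:
    node: Any = roots
    for part in fq_name.lstrip("$").split("."):
        if not isinstance(node, dict) or part not in node:
            return None          # unknown name part: nothing to return
        node = node[part]
    return node

roots = {"parameter": {"tolerances": {"mostly": 0.9}}}
assert get_value(roots, "$parameter.tolerances.mostly") == 0.9
assert get_value(roots, "$parameter.missing") is None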
linear_move | (joint: tuple, point: tuple) | Takes a joint and applies a Cartesian relative move | Takes a joint and applies a Cartesian relative move | def linear_move(joint: tuple, point: tuple) -> tuple:
"""Takes a joint and applies a Cartesian relative move"""
c_joint = forward_kinematics(*joint)
pos = inverse_kinematics(c_joint[0] + point[0], c_joint[1] + point[1])
return pos[0], pos[1], joint[2] | [
"def",
"linear_move",
"(",
"joint",
":",
"tuple",
",",
"point",
":",
"tuple",
")",
"->",
"tuple",
":",
"c_joint",
"=",
"forward_kinematics",
"(",
"*",
"joint",
")",
"pos",
"=",
"inverse_kinematics",
"(",
"c_joint",
"[",
"0",
"]",
"+",
"point",
"[",
"0",
"]",
",",
"c_joint",
"[",
"1",
"]",
"+",
"point",
"[",
"1",
"]",
")",
"return",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
",",
"joint",
"[",
"2",
"]"
] | [
8,
0
] | [
15,
35
] | python | en | ['en', 'en', 'en'] | True |
inverse_kinematics | (x: float, y: float) | Computes the inverse kinematics for a planar 2DOF arm | Computes the inverse kinematics for a planar 2DOF arm | def inverse_kinematics(x: float, y: float) -> tuple:
"""Computes the inverse kinematics for a planar 2DOF arm"""
try:
theta2 = round(math.acos((y ** 2 + x ** 2 - l1 ** 2 - l2 ** 2) / (2 * l1 * l2)), 3)
theta1 = round(math.atan2(x, y) - math.atan2(l2 * math.sin(theta2), (l1 + l2 * math.cos(theta2))), 3)
if theta1 < 0:
theta2 = -theta2
theta1 = round(math.atan2(x, y) - math.atan2(l2 * math.sin(theta2), (l1 + l2 * math.cos(theta2))), 3)
except ValueError:
raise ValueError("Unreachable goal")
theta1 = round((math.degrees(theta1) / 36) - OFFSET[1], 3)
theta2 = round((math.degrees(theta2) / 36) - OFFSET[0], 3)
return theta2, theta1, None | [
"def",
"inverse_kinematics",
"(",
"x",
":",
"float",
",",
"y",
":",
"float",
")",
"->",
"tuple",
":",
"try",
":",
"theta2",
"=",
"round",
"(",
"math",
".",
"acos",
"(",
"(",
"y",
"**",
"2",
"+",
"x",
"**",
"2",
"-",
"l1",
"**",
"2",
"-",
"l2",
"**",
"2",
")",
"/",
"(",
"2",
"*",
"l1",
"*",
"l2",
")",
")",
",",
"3",
")",
"theta1",
"=",
"round",
"(",
"math",
".",
"atan2",
"(",
"x",
",",
"y",
")",
"-",
"math",
".",
"atan2",
"(",
"l2",
"*",
"math",
".",
"sin",
"(",
"theta2",
")",
",",
"(",
"l1",
"+",
"l2",
"*",
"math",
".",
"cos",
"(",
"theta2",
")",
")",
")",
",",
"3",
")",
"if",
"theta1",
"<",
"0",
":",
"theta2",
"=",
"-",
"theta2",
"theta1",
"=",
"round",
"(",
"math",
".",
"atan2",
"(",
"x",
",",
"y",
")",
"-",
"math",
".",
"atan2",
"(",
"l2",
"*",
"math",
".",
"sin",
"(",
"theta2",
")",
",",
"(",
"l1",
"+",
"l2",
"*",
"math",
".",
"cos",
"(",
"theta2",
")",
")",
")",
",",
"3",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unreachable goal\"",
")",
"theta1",
"=",
"round",
"(",
"(",
"math",
".",
"degrees",
"(",
"theta1",
")",
"/",
"36",
")",
"-",
"OFFSET",
"[",
"1",
"]",
",",
"3",
")",
"theta2",
"=",
"round",
"(",
"(",
"math",
".",
"degrees",
"(",
"theta2",
")",
"/",
"36",
")",
"-",
"OFFSET",
"[",
"0",
"]",
",",
"3",
")",
"return",
"theta2",
",",
"theta1",
",",
"None"
] | [
18,
0
] | [
35,
31
] | python | en | ['en', 'en', 'en'] | True |
forward_kinematics | (theta1: float, theta2: float, theta3=0.0) | Computes the forward kinematics for a planar 2DOF arm | Computes the forward kinematics for a planar 2DOF arm | def forward_kinematics(theta1: float, theta2: float, theta3=0.0) -> tuple:
"""Computes the forward kinematics for a planar 2DOF arm"""
theta1 = math.radians((theta1 + OFFSET[0]) * 36)
theta2 = math.radians((theta2 + OFFSET[1]) * 36)
arm1_pos = (l1 * math.cos(theta2), l1 * math.sin(theta2))
arm2_pos = ((l2 * math.sin(theta1 + theta2)) + arm1_pos[1], (l2 * math.cos(theta1 + theta2)) + arm1_pos[0])
return arm2_pos | [
"def",
"forward_kinematics",
"(",
"theta1",
":",
"float",
",",
"theta2",
":",
"float",
",",
"theta3",
"=",
"0.0",
")",
"->",
"tuple",
":",
"theta1",
"=",
"math",
".",
"radians",
"(",
"(",
"theta1",
"+",
"OFFSET",
"[",
"0",
"]",
")",
"*",
"36",
")",
"theta2",
"=",
"math",
".",
"radians",
"(",
"(",
"theta2",
"+",
"OFFSET",
"[",
"1",
"]",
")",
"*",
"36",
")",
"arm1_pos",
"=",
"(",
"l1",
"*",
"math",
".",
"cos",
"(",
"theta2",
")",
",",
"l1",
"*",
"math",
".",
"sin",
"(",
"theta2",
")",
")",
"arm2_pos",
"=",
"(",
"(",
"l2",
"*",
"math",
".",
"sin",
"(",
"theta1",
"+",
"theta2",
")",
")",
"+",
"arm1_pos",
"[",
"1",
"]",
",",
"(",
"l2",
"*",
"math",
".",
"cos",
"(",
"theta1",
"+",
"theta2",
")",
")",
"+",
"arm1_pos",
"[",
"0",
"]",
")",
"return",
"arm2_pos"
] | [
38,
0
] | [
48,
19
] | python | en | ['en', 'ga', 'en'] | True |
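The three kinematics records above read module globals l1, l2 and OFFSET that the records do not show; the *36 scaling suggests joint units of one-tenth of a revolution rather than plain degrees. With assumed values for those globals, a round-trip check is a quick sanity test of the recorded functions:

import math

l1, l2 = 80.0, 80.0      # assumed link lengths (not shown in the records)
OFFSET = (0.0, 0.0)      # assumed joint offsets

# inverse_kinematics returns (theta2, theta1, None), which is the argument
# order forward_kinematics is called with in linear_move above.
theta2, theta1, _ = inverse_kinematics(60.0, 90.0)
x, y = forward_kinematics(theta2, theta1)
assert math.isclose(x, 60.0, abs_tol=0.5) and math.isclose(y, 90.0, abs_tol=0.5)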
gaussian_fit | (x, y, x_smooth=None, n_pts=200) |
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
|
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
| def gaussian_fit(x, y, x_smooth=None, n_pts=200):
"""
Fits a Gaussian to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
"""
if x_smooth is None:
x_smooth = np.linspace(x.min(), x.max(), n_pts)
mean, sigma = np.nanmean(y), np.nanstd(y)
popt, pcov = curve_fit(gauss, x, y, p0=[1, mean, sigma], maxfev=np.iinfo(np.int32).max)
return gauss(x_smooth,*popt) | [
"def",
"gaussian_fit",
"(",
"x",
",",
"y",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"200",
")",
":",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth",
"=",
"np",
".",
"linspace",
"(",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
",",
"n_pts",
")",
"mean",
",",
"sigma",
"=",
"np",
".",
"nanmean",
"(",
"y",
")",
",",
"np",
".",
"nanstd",
"(",
"y",
")",
"popt",
",",
"pcov",
"=",
"curve_fit",
"(",
"gauss",
",",
"x",
",",
"y",
",",
"p0",
"=",
"[",
"1",
",",
"mean",
",",
"sigma",
"]",
",",
"maxfev",
"=",
"np",
".",
"iinfo",
"(",
"np",
".",
"int32",
")",
".",
"max",
")",
"return",
"gauss",
"(",
"x_smooth",
",",
"*",
"popt",
")"
] | [
6,
0
] | [
25,
32
] | python | en | ['en', 'error', 'th'] | False |
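gaussian_fit calls a gauss() helper that the record does not include; the standard three-parameter bell curve below is an assumption, as is the record's reliance on numpy (np) and scipy.optimize.curve_fit being imported at module level. A usage sketch on synthetic data:

import numpy as np

def gauss(x, a, mu, sigma):
    # Assumed form of the missing helper: amplitude, centre, width.
    return a * np.exp(-((x - mu) ** 2) / (2.0 * sigma ** 2))

x = np.linspace(-5.0, 5.0, 101)
rng = np.random.default_rng(0)
y = gauss(x, 1.0, 0.5, 1.2) + rng.normal(0.0, 0.02, x.size)
y_smooth = gaussian_fit(x, y, n_pts=400)   # 400 interpolated points over [-5, 5]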
poly_fit | (x, y, degree, x_smooth=None, n_pts=200) |
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
|
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
| def poly_fit(x, y, degree, x_smooth=None, n_pts=200):
"""
Fits a polynomial of any positive integer degree to some data - x and y. Returns predicted interpolation values.
Parameters
----------
x: list-like
The x values of the data to fit to.
y: list-like
The y values of the data to fit to.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
degree: int
The degree of the polynomial to fit.
"""
if x_smooth is None:
x_smooth = np.linspace(x.min(), x.max(), n_pts)
return np.array([np.array([coef*(x_val**current_degree) for coef, current_degree in
zip(np.polyfit(x, y, degree), range(degree, -1, -1))]).sum() for x_val in x_smooth]) | [
"def",
"poly_fit",
"(",
"x",
",",
"y",
",",
"degree",
",",
"x_smooth",
"=",
"None",
",",
"n_pts",
"=",
"200",
")",
":",
"if",
"x_smooth",
"is",
"None",
":",
"x_smooth",
"=",
"np",
".",
"linspace",
"(",
"x",
".",
"min",
"(",
")",
",",
"x",
".",
"max",
"(",
")",
",",
"n_pts",
")",
"return",
"np",
".",
"array",
"(",
"[",
"np",
".",
"array",
"(",
"[",
"coef",
"*",
"(",
"x_val",
"**",
"current_degree",
")",
"for",
"coef",
",",
"current_degree",
"in",
"zip",
"(",
"np",
".",
"polyfit",
"(",
"x",
",",
"y",
",",
"degree",
")",
",",
"range",
"(",
"degree",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
"]",
")",
".",
"sum",
"(",
")",
"for",
"x_val",
"in",
"x_smooth",
"]",
")"
] | [
27,
0
] | [
47,
115
] | python | en | ['en', 'error', 'th'] | False |
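The nested comprehension in poly_fit evaluates the fitted polynomial term by term; numpy's polyfit/polyval pair expresses the same computation directly, which makes a convenient cross-check:

import numpy as np

x = np.linspace(0.0, 1.0, 50)
y = 3.0 * x ** 2 - 2.0 * x + 1.0
x_smooth = np.linspace(0.0, 1.0, 200)
assert np.allclose(poly_fit(x, y, 2, x_smooth=x_smooth),
                   np.polyval(np.polyfit(x, y, 2), x_smooth))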
test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_no_then_yes_to_fixing_them | (
caplog,
monkeypatch,
tmp_path,
) |
This test walks through the onboarding experience.
The user just checked an existing project out of source control and does
not yet have an uncommitted directory.
|
This test walks through the onboarding experience. | def test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_no_then_yes_to_fixing_them(
caplog,
monkeypatch,
tmp_path,
):
"""
This test walks through the onboarding experience.
The user just checked an existing project out of source control and does
not yet have an uncommitted directory.
"""
root_dir_path = tmp_path / "hiya"
root_dir_path.mkdir()
root_dir = str(root_dir_path)
# Create a new project from scratch that we will use for the test in the next step
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(root_dir)
result = runner.invoke(
cli,
["--v3-api", "init"],
input=f"Y\n",
catch_exceptions=False,
)
stdout = result.output
assert result.exit_code == 0
assert (
"Congratulations! You are now ready to customize your Great Expectations configuration."
in stdout
)
context = DataContext(os.path.join(root_dir, DataContext.GE_DIR))
uncommitted_dir = os.path.join(context.root_directory, "uncommitted")
shutil.rmtree(uncommitted_dir)
assert not os.path.isdir(uncommitted_dir)
# Test the second invocation of init (answer N to update broken init)
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
result = runner.invoke(
cli,
["--v3-api", "init"],
input="N\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
"It looks like you have a partially initialized Great Expectations project. Would you like to fix this automatically by adding the following missing files (existing files will not be modified)?"
in stdout
)
assert "Great Expectations added some missing files required to run." not in stdout
assert "OK. You must run" in stdout
assert "great_expectations init" in stdout
assert "to fix the missing files!" in stdout
assert "You may see new files in" not in stdout
assert "Would you like to build & view this project's Data Docs!?" not in stdout
assert not os.path.isdir(uncommitted_dir)
config_var_path = os.path.join(uncommitted_dir, "config_variables.yml")
assert not os.path.isfile(config_var_path)
# Test the third invocation of init (answer Yes to update broken init)
runner = CliRunner(mix_stderr=False)
monkeypatch.chdir(os.path.dirname(context.root_directory))
with pytest.warns(
UserWarning, match="Warning. An existing `great_expectations.yml` was found"
):
result = runner.invoke(
cli,
["--v3-api", "init"],
input="Y\n",
catch_exceptions=False,
)
stdout = result.stdout
assert result.exit_code == 0
assert (
"It looks like you have a partially initialized Great Expectations project. Would you like to fix this automatically by adding the following missing files (existing files will not be modified)?"
in stdout
)
assert "Great Expectations added some missing files required to run." in stdout
assert "You may see new files in" in stdout
assert "OK. You must run" not in stdout
assert "great_expectations init" not in stdout
assert "to fix the missing files!" not in stdout
assert "Would you like to build & view this project's Data Docs!?" not in stdout
assert os.path.isdir(uncommitted_dir)
config_var_path = os.path.join(uncommitted_dir, "config_variables.yml")
assert os.path.isfile(config_var_path)
with open(config_var_path) as f:
assert f.read() == CONFIG_VARIABLES_TEMPLATE
assert_no_logging_messages_or_tracebacks(caplog, result) | [
"def",
"test_cli_init_on_existing_project_with_no_uncommitted_dirs_answering_no_then_yes_to_fixing_them",
"(",
"caplog",
",",
"monkeypatch",
",",
"tmp_path",
",",
")",
":",
"root_dir_path",
"=",
"tmp_path",
"/",
"\"hiya\"",
"root_dir_path",
".",
"mkdir",
"(",
")",
"root_dir",
"=",
"str",
"(",
"root_dir_path",
")",
"# Create a new project from scratch that we will use for the test in the next step",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"monkeypatch",
".",
"chdir",
"(",
"root_dir",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"--v3-api\"",
",",
"\"init\"",
"]",
",",
"input",
"=",
"f\"Y\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"output",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"Congratulations! You are now ready to customize your Great Expectations configuration.\"",
"in",
"stdout",
")",
"context",
"=",
"DataContext",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"DataContext",
".",
"GE_DIR",
")",
")",
"uncommitted_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"\"uncommitted\"",
")",
"shutil",
".",
"rmtree",
"(",
"uncommitted_dir",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"uncommitted_dir",
")",
"# Test the second invocation of init (answer N to update broken init)",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"--v3-api\"",
",",
"\"init\"",
"]",
",",
"input",
"=",
"\"N\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"It looks like you have a partially initialized Great Expectations project. Would you like to fix this automatically by adding the following missing files (existing files will not be modified)?\"",
"in",
"stdout",
")",
"assert",
"\"Great Expectations added some missing files required to run.\"",
"not",
"in",
"stdout",
"assert",
"\"OK. You must run\"",
"in",
"stdout",
"assert",
"\"great_expectations init\"",
"in",
"stdout",
"assert",
"\"to fix the missing files!\"",
"in",
"stdout",
"assert",
"\"You may see new files in\"",
"not",
"in",
"stdout",
"assert",
"\"Would you like to build & view this project's Data Docs!?\"",
"not",
"in",
"stdout",
"assert",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"uncommitted_dir",
")",
"config_var_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncommitted_dir",
",",
"\"config_variables.yml\"",
")",
"assert",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"config_var_path",
")",
"# Test the third invocation of init (answer Yes to update broken init)",
"runner",
"=",
"CliRunner",
"(",
"mix_stderr",
"=",
"False",
")",
"monkeypatch",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"context",
".",
"root_directory",
")",
")",
"with",
"pytest",
".",
"warns",
"(",
"UserWarning",
",",
"match",
"=",
"\"Warning. An existing `great_expectations.yml` was found\"",
")",
":",
"result",
"=",
"runner",
".",
"invoke",
"(",
"cli",
",",
"[",
"\"--v3-api\"",
",",
"\"init\"",
"]",
",",
"input",
"=",
"\"Y\\n\"",
",",
"catch_exceptions",
"=",
"False",
",",
")",
"stdout",
"=",
"result",
".",
"stdout",
"assert",
"result",
".",
"exit_code",
"==",
"0",
"assert",
"(",
"\"It looks like you have a partially initialized Great Expectations project. Would you like to fix this automatically by adding the following missing files (existing files will not be modified)?\"",
"in",
"stdout",
")",
"assert",
"\"Great Expectations added some missing files required to run.\"",
"in",
"stdout",
"assert",
"\"You may see new files in\"",
"in",
"stdout",
"assert",
"\"OK. You must run\"",
"not",
"in",
"stdout",
"assert",
"\"great_expectations init\"",
"not",
"in",
"stdout",
"assert",
"\"to fix the missing files!\"",
"not",
"in",
"stdout",
"assert",
"\"Would you like to build & view this project's Data Docs!?\"",
"not",
"in",
"stdout",
"assert",
"os",
".",
"path",
".",
"isdir",
"(",
"uncommitted_dir",
")",
"config_var_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"uncommitted_dir",
",",
"\"config_variables.yml\"",
")",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"config_var_path",
")",
"with",
"open",
"(",
"config_var_path",
")",
"as",
"f",
":",
"assert",
"f",
".",
"read",
"(",
")",
"==",
"CONFIG_VARIABLES_TEMPLATE",
"assert_no_logging_messages_or_tracebacks",
"(",
"caplog",
",",
"result",
")"
] | [
187,
0
] | [
285,
60
] | python | en | ['en', 'error', 'th'] | False |
is_same_function | (f1, f2) | returns true if f1 and f2 are the same function
Use case: sometimes a user defines a virtual function in a base class and
overrides it in a derived one. Sometimes we need to know whether two
member functions are actually the same function.
| returns true if f1 and f2 are the same function | def is_same_function(f1, f2):
"""returns true if f1 and f2 are the same function
Use case: sometimes a user defines a virtual function in a base class and
overrides it in a derived one. Sometimes we need to know whether two
member functions are actually the same function.
"""
if f1 is f2:
return True
if f1.__class__ is not f2.__class__:
return False
if isinstance(f1, calldef_members.member_calldef_t) and \
f1.has_const != f2.has_const:
return False
if f1.name != f2.name:
return False
if not is_same_return_type(f1, f2):
return False
if len(f1.arguments) != len(f2.arguments):
return False
for f1_arg, f2_arg in zip(f1.arguments, f2.arguments):
if not type_traits.is_same(f1_arg.decl_type, f2_arg.decl_type):
return False
return True | [
"def",
"is_same_function",
"(",
"f1",
",",
"f2",
")",
":",
"if",
"f1",
"is",
"f2",
":",
"return",
"True",
"if",
"f1",
".",
"__class__",
"is",
"not",
"f2",
".",
"__class__",
":",
"return",
"False",
"if",
"isinstance",
"(",
"f1",
",",
"calldef_members",
".",
"member_calldef_t",
")",
"and",
"f1",
".",
"has_const",
"!=",
"f2",
".",
"has_const",
":",
"return",
"False",
"if",
"f1",
".",
"name",
"!=",
"f2",
".",
"name",
":",
"return",
"False",
"if",
"not",
"is_same_return_type",
"(",
"f1",
",",
"f2",
")",
":",
"return",
"False",
"if",
"len",
"(",
"f1",
".",
"arguments",
")",
"!=",
"len",
"(",
"f2",
".",
"arguments",
")",
":",
"return",
"False",
"for",
"f1_arg",
",",
"f2_arg",
"in",
"zip",
"(",
"f1",
".",
"arguments",
",",
"f2",
".",
"arguments",
")",
":",
"if",
"not",
"type_traits",
".",
"is_same",
"(",
"f1_arg",
".",
"decl_type",
",",
"f2_arg",
".",
"decl_type",
")",
":",
"return",
"False",
"return",
"True"
] | [
72,
0
] | [
95,
15
] | python | en | ['en', 'en', 'en'] | True |
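is_same_function compares parsed C++ declarations (pygccxml types). A rough Python-level analogue of the same idea, using inspect, is sketched below; it is illustrative only and not part of the recorded library:

import inspect

def same_signature(f1, f2) -> bool:
    if f1 is f2:
        return True
    if f1.__name__ != f2.__name__:
        return False
    s1, s2 = inspect.signature(f1), inspect.signature(f2)
    if s1.return_annotation != s2.return_annotation:
        return False
    p1, p2 = list(s1.parameters.values()), list(s2.parameters.values())
    return len(p1) == len(p2) and all(
        a.annotation == b.annotation for a, b in zip(p1, p2)
    )

def make(variant):
    def handler(x: int) -> int:
        return x + variant
    return handler

# Same name and signature, different bodies: treated as "the same function".
assert same_signature(make(0), make(1))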
from_dataset | (cls, dataset=None) | This base implementation naively passes arguments on to the real constructor, which
is really only suitable when a constructor knows to take its own type. In general, this should be overridden | This base implementation naively passes arguments on to the real constructor, which
is really only suitable when a constructor knows to take its own type. In general, this should be overridden | def from_dataset(cls, dataset=None):
"""This base implementation naively passes arguments on to the real constructor, which
is really only suitable when a constructor knows to take its own type. In general, this should be overridden"""
return cls(dataset) | [
"def",
"from_dataset",
"(",
"cls",
",",
"dataset",
"=",
"None",
")",
":",
"return",
"cls",
"(",
"dataset",
")"
] | [
246,
4
] | [
249,
27
] | python | en | ['en', 'en', 'en'] | True |
get_row_count | (self) | Returns: int, table row count | Returns: int, table row count | def get_row_count(self):
"""Returns: int, table row count"""
raise NotImplementedError | [
"def",
"get_row_count",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | [
251,
4
] | [
253,
33
] | python | en | ['en', 'en', 'en'] | True |
get_column_count | (self) | Returns: int, table column count | Returns: int, table column count | def get_column_count(self):
"""Returns: int, table column count"""
raise NotImplementedError | [
"def",
"get_column_count",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | [
255,
4
] | [
257,
33
] | python | en | ['en', 'en', 'en'] | True |
get_table_columns | (self) | Returns: List[str], list of column names | Returns: List[str], list of column names | def get_table_columns(self) -> List[str]:
"""Returns: List[str], list of column names"""
raise NotImplementedError | [
"def",
"get_table_columns",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"raise",
"NotImplementedError"
] | [
259,
4
] | [
261,
33
] | python | en | ['en', 'is', 'en'] | True |
get_column_nonnull_count | (self, column) | Returns: int | Returns: int | def get_column_nonnull_count(self, column):
"""Returns: int"""
raise NotImplementedError | [
"def",
"get_column_nonnull_count",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
263,
4
] | [
265,
33
] | python | en | ['en', 'bg', 'en'] | False |
get_column_mean | (self, column) | Returns: float | Returns: float | def get_column_mean(self, column):
"""Returns: float"""
raise NotImplementedError | [
"def",
"get_column_mean",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
267,
4
] | [
269,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_value_counts | (self, column, sort="value", collate=None) | Get a series containing the frequency counts of unique values from the named column.
Args:
column: the column for which to obtain value_counts
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
collate (string): the collate (sort) method to be used on supported backends (SqlAlchemy only)
Returns:
pd.Series of value counts for a column, sorted according to the value requested in sort
| Get a series containing the frequency counts of unique values from the named column. | def get_column_value_counts(self, column, sort="value", collate=None):
"""Get a series containing the frequency counts of unique values from the named column.
Args:
column: the column for which to obtain value_counts
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
collate (string): the collate (sort) method to be used on supported backends (SqlAlchemy only)
Returns:
pd.Series of value counts for a column, sorted according to the value requested in sort
"""
raise NotImplementedError | [
"def",
"get_column_value_counts",
"(",
"self",
",",
"column",
",",
"sort",
"=",
"\"value\"",
",",
"collate",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] | [
271,
4
] | [
286,
33
] | python | en | ['en', 'en', 'en'] | True |
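A minimal pandas rendering of the contract described above; the library's actual backends differ in detail, and the collate argument only applies to SQL backends:

import pandas as pd

def value_counts(series: pd.Series, sort: str = "value") -> pd.Series:
    if sort not in ("value", "count", "none"):
        raise ValueError("sort must be 'value', 'count', or 'none'")
    counts = series.value_counts()    # pandas already returns descending counts
    if sort == "value":
        counts = counts.sort_index()  # lexicographic order of the values
    return counts

print(value_counts(pd.Series(["b", "a", "a"]), sort="count"))   # a: 2, b: 1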
get_column_sum | (self, column) | Returns: float | Returns: float | def get_column_sum(self, column):
"""Returns: float"""
raise NotImplementedError | [
"def",
"get_column_sum",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
288,
4
] | [
290,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_max | (self, column, parse_strings_as_datetimes=False) | Returns: Any | Returns: Any | def get_column_max(self, column, parse_strings_as_datetimes=False):
"""Returns: Any"""
raise NotImplementedError | [
"def",
"get_column_max",
"(",
"self",
",",
"column",
",",
"parse_strings_as_datetimes",
"=",
"False",
")",
":",
"raise",
"NotImplementedError"
] | [
292,
4
] | [
294,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_min | (self, column, parse_strings_as_datetimes=False) | Returns: Any | Returns: Any | def get_column_min(self, column, parse_strings_as_datetimes=False):
"""Returns: Any"""
raise NotImplementedError | [
"def",
"get_column_min",
"(",
"self",
",",
"column",
",",
"parse_strings_as_datetimes",
"=",
"False",
")",
":",
"raise",
"NotImplementedError"
] | [
296,
4
] | [
298,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_unique_count | (self, column) | Returns: int | Returns: int | def get_column_unique_count(self, column):
"""Returns: int"""
raise NotImplementedError | [
"def",
"get_column_unique_count",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
300,
4
] | [
302,
33
] | python | en | ['en', 'bg', 'en'] | False |
get_column_modes | (self, column) | Returns: List[Any], list of modes (ties OK) | Returns: List[Any], list of modes (ties OK) | def get_column_modes(self, column):
"""Returns: List[Any], list of modes (ties OK)"""
raise NotImplementedError | [
"def",
"get_column_modes",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
304,
4
] | [
306,
33
] | python | en | ['en', 'et', 'en'] | True |
get_column_median | (self, column) | Returns: Any | Returns: Any | def get_column_median(self, column):
"""Returns: Any"""
raise NotImplementedError | [
"def",
"get_column_median",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
308,
4
] | [
310,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_quantiles | (
self, column, quantiles, allow_relative_error=False
) | Get the values in column closest to the requested quantiles
Args:
column (string): name of column
quantiles (tuple of float): the quantiles to return. quantiles \
*must* be a tuple to ensure caching is possible
Returns:
List[Any]: the nearest values in the dataset to those quantiles
| Get the values in column closest to the requested quantiles
Args:
column (string): name of column
quantiles (tuple of float): the quantiles to return. quantiles \
*must* be a tuple to ensure caching is possible | def get_column_quantiles(
self, column, quantiles, allow_relative_error=False
) -> List[Any]:
"""Get the values in column closest to the requested quantiles
Args:
column (string): name of column
quantiles (tuple of float): the quantiles to return. quantiles \
*must* be a tuple to ensure caching is possible
Returns:
List[Any]: the nearest values in the dataset to those quantiles
"""
raise NotImplementedError | [
"def",
"get_column_quantiles",
"(",
"self",
",",
"column",
",",
"quantiles",
",",
"allow_relative_error",
"=",
"False",
")",
"->",
"List",
"[",
"Any",
"]",
":",
"raise",
"NotImplementedError"
] | [
312,
4
] | [
324,
33
] | python | en | ['en', 'en', 'en'] | True |
get_column_stdev | (self, column) | Returns: float | Returns: float | def get_column_stdev(self, column):
"""Returns: float"""
raise NotImplementedError | [
"def",
"get_column_stdev",
"(",
"self",
",",
"column",
")",
":",
"raise",
"NotImplementedError"
] | [
326,
4
] | [
328,
33
] | python | en | ['en', 'no', 'en'] | False |
get_column_partition | (
self, column, bins="uniform", n_bins=10, allow_relative_error=False
) | Get a partition of the range of values in the specified column.
Args:
column: the name of the column
bins: 'uniform' for evenly spaced bins or 'quantile' for bins spaced according to quantiles
n_bins: the number of bins to produce
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A list of bins
| Get a partition of the range of values in the specified column. | def get_column_partition(
self, column, bins="uniform", n_bins=10, allow_relative_error=False
):
"""Get a partition of the range of values in the specified column.
Args:
column: the name of the column
bins: 'uniform' for evenly spaced bins or 'quantile' for bins spaced according to quantiles
n_bins: the number of bins to produce
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A list of bins
"""
if bins == "uniform":
# TODO: in the event that we shift the compute model for
# min and max to have a single pass, use that instead of
# quantiles for clarity
# min_ = self.get_column_min(column)
# max_ = self.get_column_max(column)
min_, max_ = self.get_column_quantiles(
column, (0.0, 1.0), allow_relative_error=allow_relative_error
)
# PRECISION NOTE: some implementations of quantiles could produce
# varying levels of precision (e.g. a NUMERIC column producing
# Decimal from a SQLAlchemy source, so we cast to float for numpy)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
elif bins in ["ntile", "quantile", "percentile"]:
bins = self.get_column_quantiles(
column,
tuple(np.linspace(start=0, stop=1, num=n_bins + 1)),
allow_relative_error=allow_relative_error,
)
elif bins == "auto":
# Use the method from numpy histogram_bin_edges
nonnull_count = self.get_column_nonnull_count(column)
sturges = np.log2(nonnull_count + 1)
min_, _25, _75, max_ = self.get_column_quantiles(
column,
(0.0, 0.25, 0.75, 1.0),
allow_relative_error=allow_relative_error,
)
iqr = _75 - _25
if iqr < 1e-10: # Consider IQR 0 and do not use variance-based estimator
n_bins = sturges
else:
fd = (2 * float(iqr)) / (nonnull_count ** (1 / 3))
n_bins = max(
int(np.ceil(sturges)), int(np.ceil(float(max_ - min_) / fd))
)
bins = np.linspace(start=float(min_), stop=float(max_), num=n_bins + 1)
else:
raise ValueError("Invalid parameter for bins argument")
return bins | [
"def",
"get_column_partition",
"(",
"self",
",",
"column",
",",
"bins",
"=",
"\"uniform\"",
",",
"n_bins",
"=",
"10",
",",
"allow_relative_error",
"=",
"False",
")",
":",
"if",
"bins",
"==",
"\"uniform\"",
":",
"# TODO: in the event that we shift the compute model for",
"# min and max to have a single pass, use that instead of",
"# quantiles for clarity",
"# min_ = self.get_column_min(column)",
"# max_ = self.get_column_max(column)",
"min_",
",",
"max_",
"=",
"self",
".",
"get_column_quantiles",
"(",
"column",
",",
"(",
"0.0",
",",
"1.0",
")",
",",
"allow_relative_error",
"=",
"allow_relative_error",
")",
"# PRECISION NOTE: some implementations of quantiles could produce",
"# varying levels of precision (e.g. a NUMERIC column producing",
"# Decimal from a SQLAlchemy source, so we cast to float for numpy)",
"bins",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"float",
"(",
"min_",
")",
",",
"stop",
"=",
"float",
"(",
"max_",
")",
",",
"num",
"=",
"n_bins",
"+",
"1",
")",
"elif",
"bins",
"in",
"[",
"\"ntile\"",
",",
"\"quantile\"",
",",
"\"percentile\"",
"]",
":",
"bins",
"=",
"self",
".",
"get_column_quantiles",
"(",
"column",
",",
"tuple",
"(",
"np",
".",
"linspace",
"(",
"start",
"=",
"0",
",",
"stop",
"=",
"1",
",",
"num",
"=",
"n_bins",
"+",
"1",
")",
")",
",",
"allow_relative_error",
"=",
"allow_relative_error",
",",
")",
"elif",
"bins",
"==",
"\"auto\"",
":",
"# Use the method from numpy histogram_bin_edges",
"nonnull_count",
"=",
"self",
".",
"get_column_nonnull_count",
"(",
"column",
")",
"sturges",
"=",
"np",
".",
"log2",
"(",
"nonnull_count",
"+",
"1",
")",
"min_",
",",
"_25",
",",
"_75",
",",
"max_",
"=",
"self",
".",
"get_column_quantiles",
"(",
"column",
",",
"(",
"0.0",
",",
"0.25",
",",
"0.75",
",",
"1.0",
")",
",",
"allow_relative_error",
"=",
"allow_relative_error",
",",
")",
"iqr",
"=",
"_75",
"-",
"_25",
"if",
"iqr",
"<",
"1e-10",
":",
"# Consider IQR 0 and do not use variance-based estimator",
"n_bins",
"=",
"sturges",
"else",
":",
"fd",
"=",
"(",
"2",
"*",
"float",
"(",
"iqr",
")",
")",
"/",
"(",
"nonnull_count",
"**",
"(",
"1",
"/",
"3",
")",
")",
"n_bins",
"=",
"max",
"(",
"int",
"(",
"np",
".",
"ceil",
"(",
"sturges",
")",
")",
",",
"int",
"(",
"np",
".",
"ceil",
"(",
"float",
"(",
"max_",
"-",
"min_",
")",
"/",
"fd",
")",
")",
")",
"bins",
"=",
"np",
".",
"linspace",
"(",
"start",
"=",
"float",
"(",
"min_",
")",
",",
"stop",
"=",
"float",
"(",
"max_",
")",
",",
"num",
"=",
"n_bins",
"+",
"1",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameter for bins argument\"",
")",
"return",
"bins"
] | [
330,
4
] | [
386,
19
] | python | en | ['en', 'en', 'en'] | True |
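The "auto" branch above mirrors numpy's histogram_bin_edges: take the larger bin count of Sturges' rule and the Freedman-Diaconis estimate. A stand-alone sketch follows; note it casts the degenerate-IQR fallback to int, which the recorded code leaves as a float:

import numpy as np

def auto_bin_edges(values: np.ndarray) -> np.ndarray:
    n = values.size
    sturges = np.log2(n + 1)
    q25, q75 = np.quantile(values, [0.25, 0.75])
    iqr = q75 - q25
    if iqr < 1e-10:
        # Degenerate IQR: fall back to Sturges alone.
        n_bins = int(np.ceil(sturges))
    else:
        fd = 2.0 * iqr / n ** (1.0 / 3.0)    # Freedman-Diaconis bin width
        n_bins = max(int(np.ceil(sturges)),
                     int(np.ceil((values.max() - values.min()) / fd)))
    return np.linspace(values.min(), values.max(), n_bins + 1)

edges = auto_bin_edges(np.random.default_rng(1).normal(size=1000))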
get_column_hist | (self, column, bins) | Get a histogram of column values
Args:
column: the column for which to generate the histogram
bins (tuple): the bins to slice the histogram. bins *must* be a tuple to ensure caching is possible
Returns: List[int], a list of counts corresponding to bins | Get a histogram of column values
Args:
column: the column for which to generate the histogram
bins (tuple): the bins to slice the histogram. bins *must* be a tuple to ensure caching is possible | def get_column_hist(self, column, bins):
"""Get a histogram of column values
Args:
column: the column for which to generate the histogram
bins (tuple): the bins to slice the histogram. bins *must* be a tuple to ensure caching is possible
Returns: List[int], a list of counts corresponding to bins"""
raise NotImplementedError | [
"def",
"get_column_hist",
"(",
"self",
",",
"column",
",",
"bins",
")",
":",
"raise",
"NotImplementedError"
] | [
388,
4
] | [
395,
33
] | python | en | ['en', 'ca', 'en'] | True |
get_column_count_in_range | (
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
) | Returns: int | Returns: int | def get_column_count_in_range(
self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
):
"""Returns: int"""
raise NotImplementedError | [
"def",
"get_column_count_in_range",
"(",
"self",
",",
"column",
",",
"min_val",
"=",
"None",
",",
"max_val",
"=",
"None",
",",
"strict_min",
"=",
"False",
",",
"strict_max",
"=",
"True",
")",
":",
"raise",
"NotImplementedError"
] | [
397,
4
] | [
401,
33
] | python | en | ['en', 'bg', 'en'] | False |
get_crosstab | (
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
) | Get crosstab of column_A and column_B, binning values if necessary | Get crosstab of column_A and column_B, binning values if necessary | def get_crosstab(
self,
column_A,
column_B,
bins_A=None,
bins_B=None,
n_bins_A=None,
n_bins_B=None,
):
"""Get crosstab of column_A and column_B, binning values if necessary"""
raise NotImplementedError | [
"def",
"get_crosstab",
"(",
"self",
",",
"column_A",
",",
"column_B",
",",
"bins_A",
"=",
"None",
",",
"bins_B",
"=",
"None",
",",
"n_bins_A",
"=",
"None",
",",
"n_bins_B",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
403,
4
] | [
413,
33
] | python | en | ['en', 'en', 'en'] | True |
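For a pandas-backed dataset, the operation this contract describes is essentially pd.crosstab (binning omitted for brevity):

import pandas as pd

df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 1]})
print(pd.crosstab(df["A"], df["B"]))    # counts of each (A, B) combination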
test_column_map_expectation_function | (self, function, *args, **kwargs) | Test a column map expectation function
Args:
function (func): The function to be tested. (Must be a valid column_map_expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
| Test a column map expectation function | def test_column_map_expectation_function(self, function, *args, **kwargs):
"""Test a column map expectation function
Args:
function (func): The function to be tested. (Must be a valid column_map_expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_map_expectation(function)
return new_function(self, *args, **kwargs) | [
"def",
"test_column_map_expectation_function",
"(",
"self",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"new_function",
"=",
"self",
".",
"column_map_expectation",
"(",
"function",
")",
"return",
"new_function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
415,
4
] | [
435,
50
] | python | en | ['ca', 'en', 'en'] | True |
test_column_aggregate_expectation_function | (self, function, *args, **kwargs) | Test a column aggregate expectation function
Args:
function (func): The function to be tested. (Must be a valid column_aggregate_expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
| Test a column aggregate expectation function | def test_column_aggregate_expectation_function(self, function, *args, **kwargs):
"""Test a column aggregate expectation function
Args:
function (func): The function to be tested. (Must be a valid column_aggregate_expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
An ExpectationSuiteValidationResult
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you'll still need to \
define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for more information.
"""
new_function = self.column_aggregate_expectation(function)
return new_function(self, *args, **kwargs) | [
"def",
"test_column_aggregate_expectation_function",
"(",
"self",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"new_function",
"=",
"self",
".",
"column_aggregate_expectation",
"(",
"function",
")",
"return",
"new_function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | [
437,
4
] | [
457,
50
] | python | en | ['es', 'en', 'en'] | True |
expect_column_to_exist | (
self,
column,
column_index=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the specified column to exist.
expect_column_to_exist is an :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column (str): \
The column name.
Other Parameters:
column_index (int or None): \
If not None, checks the order of the columns. The expectation will fail if the \
column is not in location column_index (zero-indexed).
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect the specified column to exist. | def expect_column_to_exist(
self,
column,
column_index=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the specified column to exist.
expect_column_to_exist is an :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column (str): \
The column name.
Other Parameters:
column_index (int or None): \
If not None, checks the order of the columns. The expectation will fail if the \
column is not in location column_index (zero-indexed).
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column in columns:
return {
# FIXME: list.index does not check for duplicate values.
"success": (column_index is None)
or (columns.index(column) == column_index)
}
else:
return {"success": False} | [
"def",
"expect_column_to_exist",
"(",
"self",
",",
"column",
",",
"column_index",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"columns",
"=",
"self",
".",
"get_table_columns",
"(",
")",
"if",
"column",
"in",
"columns",
":",
"return",
"{",
"# FIXME: list.index does not check for duplicate values.",
"\"success\"",
":",
"(",
"column_index",
"is",
"None",
")",
"or",
"(",
"columns",
".",
"index",
"(",
"column",
")",
"==",
"column_index",
")",
"}",
"else",
":",
"return",
"{",
"\"success\"",
":",
"False",
"}"
] | [
467,
4
] | [
518,
37
] | python | en | ['en', 'en', 'en'] | True |
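The success rule above is membership plus an optional zero-indexed position check. Assuming get_table_columns() returns ["id", "name", "value"], its behaviour reduces to:

columns = ["id", "name", "value"]        # assumed get_table_columns() output

def exists(column, column_index=None):
    # The record's FIXME applies here too: list.index finds only the first match.
    return column in columns and (
        column_index is None or columns.index(column) == column_index
    )

assert exists("name")
assert exists("name", column_index=1)
assert not exists("name", column_index=0)
assert not exists("missing")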
expect_table_columns_to_match_ordered_list | (
self,
column_list,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the columns to exactly match a specified list.
expect_table_columns_to_match_ordered_list is an :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_list (list of str): \
The column names, in the correct order.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect the columns to exactly match a specified list. | def expect_table_columns_to_match_ordered_list(
self,
column_list,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to exactly match a specified list.
expect_table_columns_to_match_ordered_list is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_list (list of str): \
The column names, in the correct order.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
columns = self.get_table_columns()
if column_list is None or list(columns) == list(column_list):
return {"success": True, "result": {"observed_value": list(columns)}}
else:
# In the case of differing column lengths between the defined expectation and the observed column set, the
# max is determined to generate the column_index.
number_of_columns = max(len(column_list), len(columns))
column_index = range(number_of_columns)
# Create a list of the mismatched details
compared_lists = list(
zip_longest(column_index, list(column_list), list(columns))
)
mismatched = [
{"Expected Column Position": i, "Expected": k, "Found": v}
for i, k, v in compared_lists
if k != v
]
return {
"success": False,
"result": {
"observed_value": list(columns),
"details": {"mismatched": mismatched},
},
} | [
"def",
"expect_table_columns_to_match_ordered_list",
"(",
"self",
",",
"column_list",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"columns",
"=",
"self",
".",
"get_table_columns",
"(",
")",
"if",
"column_list",
"is",
"None",
"or",
"list",
"(",
"columns",
")",
"==",
"list",
"(",
"column_list",
")",
":",
"return",
"{",
"\"success\"",
":",
"True",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"list",
"(",
"columns",
")",
"}",
"}",
"else",
":",
"# In the case of differing column lengths between the defined expectation and the observed column set, the",
"# max is determined to generate the column_index.",
"number_of_columns",
"=",
"max",
"(",
"len",
"(",
"column_list",
")",
",",
"len",
"(",
"columns",
")",
")",
"column_index",
"=",
"range",
"(",
"number_of_columns",
")",
"# Create a list of the mismatched details",
"compared_lists",
"=",
"list",
"(",
"zip_longest",
"(",
"column_index",
",",
"list",
"(",
"column_list",
")",
",",
"list",
"(",
"columns",
")",
")",
")",
"mismatched",
"=",
"[",
"{",
"\"Expected Column Position\"",
":",
"i",
",",
"\"Expected\"",
":",
"k",
",",
"\"Found\"",
":",
"v",
"}",
"for",
"i",
",",
"k",
",",
"v",
"in",
"compared_lists",
"if",
"k",
"!=",
"v",
"]",
"return",
"{",
"\"success\"",
":",
"False",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"list",
"(",
"columns",
")",
",",
"\"details\"",
":",
"{",
"\"mismatched\"",
":",
"mismatched",
"}",
",",
"}",
",",
"}"
] | [
522,
4
] | [
585,
13
] | python | en | ['en', 'en', 'en'] | True |
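The mismatch report above is plain `zip_longest` bookkeeping: positions are padded to the longer of the two lists, and any position where the expected and observed names differ (including a `None` pad) is recorded. A self-contained sketch of that logic::

    from itertools import zip_longest

    expected = ["id", "name", "email"]
    observed = ["id", "email"]

    n = max(len(expected), len(observed))
    mismatched = [
        {"Expected Column Position": i, "Expected": k, "Found": v}
        for i, k, v in zip_longest(range(n), expected, observed)
        if k != v
    ]
    print(mismatched)
    # [{'Expected Column Position': 1, 'Expected': 'name', 'Found': 'email'},
    #  {'Expected Column Position': 2, 'Expected': 'email', 'Found': None}]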
expect_table_columns_to_match_set | (
self,
column_set: Optional[Union[Set[str], List[str]]],
exact_match: Optional[bool] = True,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the columns to match a specified set.
expect_table_columns_to_match_set is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_set (set of str or list of str): \
The column names you wish to check. If given a list, it will be converted to \
a set before processing. Column names are case sensitive.
exact_match (bool): \
Whether to make sure there are no extra columns in either the dataset or in \
the column_set.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect the columns to match a specified set. | def expect_table_columns_to_match_set(
self,
column_set: Optional[Union[Set[str], List[str]]],
exact_match: Optional[bool] = True,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the columns to match a specified set.
expect_table_columns_to_match_set is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
column_set (set of str or list of str): \
The column names you wish to check. If given a list, it will be converted to \
a set before processing. Column names are case sensitive.
exact_match (bool): \
Whether to make sure there are no extra columns in either the dataset or in \
the column_set.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
column_set = set(column_set) if column_set is not None else set()
dataset_columns_list = self.get_table_columns()
dataset_columns_set = set(dataset_columns_list)
if (
(column_set is None) and (exact_match is not True)
) or dataset_columns_set == column_set:
return {"success": True, "result": {"observed_value": dataset_columns_list}}
else:
# Convert to lists and sort to lock order for testing and output rendering
# unexpected_list contains items from the dataset columns that are not in column_set
unexpected_list = sorted(list(dataset_columns_set - column_set))
# missing_list contains items from column_set that are not in the dataset columns
missing_list = sorted(list(column_set - dataset_columns_set))
# observed_value contains items that are in the dataset columns
observed_value = sorted(dataset_columns_list)
mismatched = {}
if len(unexpected_list) > 0:
mismatched["unexpected"] = unexpected_list
if len(missing_list) > 0:
mismatched["missing"] = missing_list
result = {
"observed_value": observed_value,
"details": {"mismatched": mismatched},
}
return_success = {
"success": True,
"result": result,
}
return_failed = {
"success": False,
"result": result,
}
if exact_match:
return return_failed
else:
# Failed if there are items in the missing list (but OK to have unexpected_list)
if len(missing_list) > 0:
return return_failed
# Passed if there are no items in the missing list
else:
return return_success | [
"def",
"expect_table_columns_to_match_set",
"(",
"self",
",",
"column_set",
":",
"Optional",
"[",
"Union",
"[",
"Set",
"[",
"str",
"]",
",",
"List",
"[",
"str",
"]",
"]",
"]",
",",
"exact_match",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"column_set",
"=",
"set",
"(",
"column_set",
")",
"if",
"column_set",
"is",
"not",
"None",
"else",
"set",
"(",
")",
"dataset_columns_list",
"=",
"self",
".",
"get_table_columns",
"(",
")",
"dataset_columns_set",
"=",
"set",
"(",
"dataset_columns_list",
")",
"if",
"(",
"(",
"column_set",
"is",
"None",
")",
"and",
"(",
"exact_match",
"is",
"not",
"True",
")",
")",
"or",
"dataset_columns_set",
"==",
"column_set",
":",
"return",
"{",
"\"success\"",
":",
"True",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"dataset_columns_list",
"}",
"}",
"else",
":",
"# Convert to lists and sort to lock order for testing and output rendering",
"# unexpected_list contains items from the dataset columns that are not in column_set",
"unexpected_list",
"=",
"sorted",
"(",
"list",
"(",
"dataset_columns_set",
"-",
"column_set",
")",
")",
"# missing_list contains items from column_set that are not in the dataset columns",
"missing_list",
"=",
"sorted",
"(",
"list",
"(",
"column_set",
"-",
"dataset_columns_set",
")",
")",
"# observed_value contains items that are in the dataset columns",
"observed_value",
"=",
"sorted",
"(",
"dataset_columns_list",
")",
"mismatched",
"=",
"{",
"}",
"if",
"len",
"(",
"unexpected_list",
")",
">",
"0",
":",
"mismatched",
"[",
"\"unexpected\"",
"]",
"=",
"unexpected_list",
"if",
"len",
"(",
"missing_list",
")",
">",
"0",
":",
"mismatched",
"[",
"\"missing\"",
"]",
"=",
"missing_list",
"result",
"=",
"{",
"\"observed_value\"",
":",
"observed_value",
",",
"\"details\"",
":",
"{",
"\"mismatched\"",
":",
"mismatched",
"}",
",",
"}",
"return_success",
"=",
"{",
"\"success\"",
":",
"True",
",",
"\"result\"",
":",
"result",
",",
"}",
"return_failed",
"=",
"{",
"\"success\"",
":",
"False",
",",
"\"result\"",
":",
"result",
",",
"}",
"if",
"exact_match",
":",
"return",
"return_failed",
"else",
":",
"# Failed if there are items in the missing list (but OK to have unexpected_list)",
"if",
"len",
"(",
"missing_list",
")",
">",
"0",
":",
"return",
"return_failed",
"# Passed if there are no items in the missing list",
"else",
":",
"return",
"return_success"
] | [
589,
4
] | [
678,
41
] | python | en | ['en', 'en', 'en'] | True |
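Two details of the implementation above deserve a note. First, `column_set` is coerced to a set (with `None` becoming the empty set) before the `column_set is None` test, so that branch of the success condition can never be true as written. Second, `exact_match` only relaxes the check in one direction: extra dataset columns are tolerated, missing ones never are. A sketch of the set arithmetic::

    column_set = {"id", "name"}
    dataset_columns = {"id", "name", "created_at"}

    unexpected = sorted(dataset_columns - column_set)   # ['created_at']
    missing = sorted(column_set - dataset_columns)      # []

    # exact_match=True  -> fail: observed columns are not exactly column_set
    # exact_match=False -> pass: nothing in column_set is missing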
expect_table_column_count_to_be_between | (
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the number of columns to be between two values.
expect_table_column_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of columns, inclusive.
max_value (int or None): \
The maximum number of columns, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable columns \
has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable columns \
has no maximum.
See Also:
expect_table_column_count_to_equal
| Expect the number of columns to be between two values. | def expect_table_column_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to be between two values.
expect_table_column_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of columns, inclusive.
max_value (int or None): \
The maximum number of columns, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable columns \
has no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable columns \
has no maximum.
See Also:
expect_table_column_count_to_equal
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
column_count = self.get_column_count()
if min_value is not None:
above_min = column_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = column_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": column_count}} | [
"def",
"expect_table_column_count_to_be_between",
"(",
"self",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"if",
"min_value",
"is",
"not",
"None",
":",
"if",
"not",
"float",
"(",
"min_value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"min_value must be integer\"",
")",
"if",
"max_value",
"is",
"not",
"None",
":",
"if",
"not",
"float",
"(",
"max_value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"max_value must be integer\"",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"min_value and max_value must be integers\"",
")",
"# check that min_value or max_value is set",
"# if min_value is None and max_value is None:",
"# raise Exception('Must specify either or both of min_value and max_value')",
"column_count",
"=",
"self",
".",
"get_column_count",
"(",
")",
"if",
"min_value",
"is",
"not",
"None",
":",
"above_min",
"=",
"column_count",
">=",
"min_value",
"else",
":",
"above_min",
"=",
"True",
"if",
"max_value",
"is",
"not",
"None",
":",
"below_max",
"=",
"column_count",
"<=",
"max_value",
"else",
":",
"below_max",
"=",
"True",
"outcome",
"=",
"above_min",
"and",
"below_max",
"return",
"{",
"\"success\"",
":",
"outcome",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"column_count",
"}",
"}"
] | [
683,
4
] | [
762,
79
] | python | en | ['en', 'en', 'en'] | True |
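The bound logic treats each limit independently, with `None` disabling that side and both limits inclusive. A hypothetical helper mirroring the body above::

    def count_within_bounds(count, min_value=None, max_value=None):
        above_min = True if min_value is None else count >= min_value
        below_max = True if max_value is None else count <= max_value
        return above_min and below_max

    print(count_within_bounds(7, min_value=5))               # True (no upper bound)
    print(count_within_bounds(7, min_value=5, max_value=6))  # False

Note that the validation accepts integer-valued floats, since `float(5.0).is_integer()` is `True`.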
expect_table_column_count_to_equal | (
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the number of columns to equal a value.
expect_table_column_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of columns.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_column_count_to_be_between
| Expect the number of columns to equal a value. | def expect_table_column_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of columns to equal a value.
expect_table_column_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of columns.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_column_count_to_be_between
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
column_count = self.get_column_count()
return {
"success": column_count == value,
"result": {"observed_value": column_count},
} | [
"def",
"expect_table_column_count_to_equal",
"(",
"self",
",",
"value",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"if",
"not",
"float",
"(",
"value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"value must be an integer\"",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"value must be an integer\"",
")",
"column_count",
"=",
"self",
".",
"get_column_count",
"(",
")",
"return",
"{",
"\"success\"",
":",
"column_count",
"==",
"value",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"column_count",
"}",
",",
"}"
] | [
767,
4
] | [
819,
9
] | python | en | ['en', 'en', 'en'] | True |
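The `float(value).is_integer()` guard has two paths into the same error: a fractional number fails the explicit check, while a non-numeric string makes `float()` itself raise `ValueError`, which the `except` clause re-raises with the identical message::

    print(float(4).is_integer())     # True  -> 4 is accepted
    print(float(4.0).is_integer())   # True  -> 4.0 is accepted too
    print(float(4.5).is_integer())   # False -> "value must be an integer"
    # float("four") raises ValueError directly; the except re-raises it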
expect_table_row_count_to_be_between | (
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the number of rows to be between two values.
expect_table_row_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of rows, inclusive.
max_value (int or None): \
The maximum number of rows, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
expect_table_row_count_to_equal
| Expect the number of rows to be between two values. | def expect_table_row_count_to_be_between(
self,
min_value=None,
max_value=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to be between two values.
expect_table_row_count_to_be_between is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Keyword Args:
min_value (int or None): \
The minimum number of rows, inclusive.
max_value (int or None): \
The maximum number of rows, inclusive.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and the number of acceptable rows has \
no minimum.
* If max_value is None, then min_value is treated as a lower bound, and the number of acceptable rows has \
no maximum.
See Also:
expect_table_row_count_to_equal
"""
try:
if min_value is not None:
if not float(min_value).is_integer():
raise ValueError("min_value must be integer")
if max_value is not None:
if not float(max_value).is_integer():
raise ValueError("max_value must be integer")
except ValueError:
raise ValueError("min_value and max_value must be integers")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
# check that min_value or max_value is set
# if min_value is None and max_value is None:
# raise Exception('Must specify either or both of min_value and max_value')
row_count = self.get_row_count()
if min_value is not None:
above_min = row_count >= min_value
else:
above_min = True
if max_value is not None:
below_max = row_count <= max_value
else:
below_max = True
outcome = above_min and below_max
return {"success": outcome, "result": {"observed_value": row_count}} | [
"def",
"expect_table_row_count_to_be_between",
"(",
"self",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"if",
"min_value",
"is",
"not",
"None",
":",
"if",
"not",
"float",
"(",
"min_value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"min_value must be integer\"",
")",
"if",
"max_value",
"is",
"not",
"None",
":",
"if",
"not",
"float",
"(",
"max_value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"max_value must be integer\"",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"min_value and max_value must be integers\"",
")",
"if",
"min_value",
"is",
"not",
"None",
"and",
"max_value",
"is",
"not",
"None",
"and",
"min_value",
">",
"max_value",
":",
"raise",
"ValueError",
"(",
"\"min_value cannot be greater than max_value\"",
")",
"# check that min_value or max_value is set",
"# if min_value is None and max_value is None:",
"# raise Exception('Must specify either or both of min_value and max_value')",
"row_count",
"=",
"self",
".",
"get_row_count",
"(",
")",
"if",
"min_value",
"is",
"not",
"None",
":",
"above_min",
"=",
"row_count",
">=",
"min_value",
"else",
":",
"above_min",
"=",
"True",
"if",
"max_value",
"is",
"not",
"None",
":",
"below_max",
"=",
"row_count",
"<=",
"max_value",
"else",
":",
"below_max",
"=",
"True",
"outcome",
"=",
"above_min",
"and",
"below_max",
"return",
"{",
"\"success\"",
":",
"outcome",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"row_count",
"}",
"}"
] | [
824,
4
] | [
906,
76
] | python | en | ['en', 'en', 'en'] | True |
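Unlike the column-count variant, this expectation also rejects `min_value > max_value` outright. A usage sketch, assuming the classic (pre-V3) `great_expectations` Pandas API with `ge.from_pandas`::

    import pandas as pd
    import great_expectations as ge

    df = ge.from_pandas(pd.DataFrame({"x": range(100)}))
    result = df.expect_table_row_count_to_be_between(min_value=50, max_value=150)
    print(result.success)                   # True
    print(result.result["observed_value"])  # 100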
expect_table_row_count_to_equal | (
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
| Expect the number of rows to equal a value. | def expect_table_row_count_to_equal(
self,
value,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows to equal a value.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
value (int): \
The expected number of rows.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
try:
if not float(value).is_integer():
raise ValueError("value must be an integer")
except ValueError:
raise ValueError("value must be an integer")
row_count = self.get_row_count()
return {"success": row_count == value, "result": {"observed_value": row_count}} | [
"def",
"expect_table_row_count_to_equal",
"(",
"self",
",",
"value",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"try",
":",
"if",
"not",
"float",
"(",
"value",
")",
".",
"is_integer",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"value must be an integer\"",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"value must be an integer\"",
")",
"row_count",
"=",
"self",
".",
"get_row_count",
"(",
")",
"return",
"{",
"\"success\"",
":",
"row_count",
"==",
"value",
",",
"\"result\"",
":",
"{",
"\"observed_value\"",
":",
"row_count",
"}",
"}"
] | [
911,
4
] | [
960,
87
] | python | en | ['en', 'en', 'en'] | True |
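The failing case returns the observed count alongside `success: False`, which is what downstream renderers use to report the discrepancy. Continuing under the same classic-API assumption as the sketch above::

    import pandas as pd
    import great_expectations as ge

    df = ge.from_pandas(pd.DataFrame({"x": [1, 2, 3]}))
    result = df.expect_table_row_count_to_equal(4)
    print(result.success)                   # False
    print(result.result["observed_value"])  # 3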
expect_column_values_to_be_unique | (
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
`unexpected_percent = 60.0`.
expect_column_values_to_be_unique is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect each column value to be unique. | def expect_column_values_to_be_unique(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect each column value to be unique.
This expectation detects duplicates. All duplicated values are counted as exceptions.
For example, `[1, 2, 3, 3, 3]` will return `[3, 3, 3]` in `result.exceptions_list`, with \
`unexpected_percent = 60.0`.
expect_column_values_to_be_unique is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_unique",
"(",
"self",
",",
"column",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
968,
4
] | [
1018,
33
] | python | en | ['en', 'en', 'en'] | True |
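The docstring's own numbers are easy to reproduce: with `[1, 2, 3, 3, 3]`, every copy of a duplicated value is unexpected, so the unexpected percentage is 60 and even `mostly=0.5` cannot rescue the expectation. A plain-Python sketch of the semantics (the real check is implemented per backend)::

    values = [1, 2, 3, 3, 3]
    unexpected = [v for v in values if values.count(v) > 1]     # [3, 3, 3]
    unexpected_percent = 100.0 * len(unexpected) / len(values)  # 60.0
    mostly = 0.5
    print(unexpected_percent <= (1 - mostly) * 100)             # False -> fails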
expect_column_values_to_not_be_null | (
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column values to not be null.
To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an
np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type.
expect_column_values_to_not_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_null>`
| Expect column values to not be null. | def expect_column_values_to_not_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to not be null.
To be counted as an exception, values must be explicitly null or missing, such as a NULL in PostgreSQL or an
np.NaN in pandas. Empty strings don't count as null unless they have been coerced to a null type.
expect_column_values_to_not_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_null>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_not_be_null",
"(",
"self",
",",
"column",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1020,
4
] | [
1073,
33
] | python | en | ['en', 'en', 'en'] | True |
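The point that empty strings do not count as null is visible directly in pandas, one of the backends that implements this abstract method::

    import numpy as np
    import pandas as pd

    s = pd.Series(["a", "", None, np.nan])
    print(s.isnull().tolist())   # [False, False, True, True]
    # "" is not null, so only the last two values would be unexpected here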
expect_column_values_to_be_null | (
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column values to be null.
expect_column_values_to_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_null>`
| Expect column values to be null. | def expect_column_values_to_be_null(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be null.
expect_column_values_to_be_null is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_null \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_null>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_null",
"(",
"self",
",",
"column",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1075,
4
] | [
1125,
33
] | python | en | ['en', 'en', 'en'] | True |
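Since this is the mirror image of the previous expectation, `mostly` applies to the null fraction instead. A sketch using pandas null semantics::

    import pandas as pd

    s = pd.Series([None, None, "x"])
    null_fraction = s.isnull().mean()   # 2/3 of the values are null
    print(null_fraction >= 0.5)         # True -> would pass with mostly=0.5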
expect_column_values_to_be_of_type | (
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect a column to contain values of a specified data type.
expect_column_values_to_be_of_type is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype and provided type_ are unambiguous constraints (any dtype
except 'object' or dtype of 'object' with type_ specified as 'object').
For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type\\_ (str): \
A string representing the data type that each column should have as entries. Valid types are defined
by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_in_type_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_type_list>`
| Expect a column to contain values of a specified data type. | def expect_column_values_to_be_of_type(
self,
column,
type_,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect a column to contain values of a specified data type.
expect_column_values_to_be_of_type is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype and provided type_ are unambiguous constraints (any dtype
except 'object' or dtype of 'object' with type_ specified as 'object').
For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type\\_ (str): \
A string representing the data type that each column should have as entries. Valid types are defined
by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_in_type_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_type_list>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_of_type",
"(",
"self",
",",
"column",
",",
"type_",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1127,
4
] | [
1192,
33
] | python | en | ['en', 'en', 'en'] | True |
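Because valid type names are backend-specific, the same logical check is spelled differently per backend. A hedged pandas example, again assuming the classic `great_expectations` API (the default integer dtype is platform-dependent, hence the caveat)::

    import pandas as pd
    import great_expectations as ge

    df = ge.from_pandas(pd.DataFrame({"n": [1, 2, 3]}))
    # for pandas, type names are numpy dtypes or native python type names
    result = df.expect_column_values_to_be_of_type("n", "int64")
    print(result.success)   # True where the default int dtype is int64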
expect_column_values_to_be_in_type_list | (
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype provides an unambiguous constraint (any dtype except
'object'). For PandasDataset columns with dtype of 'object' expect_column_values_to_be_in_type_list is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type_list (list of str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
| Expect a column to contain values from a specified type list. | def expect_column_values_to_be_in_type_list(
self,
column,
type_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect a column to contain values from a specified type list.
expect_column_values_to_be_in_type_list is a :func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>` for typed-column backends,
and also for PandasDataset where the column dtype provides an unambiguous constraints (any dtype except
'object'). For PandasDataset columns with dtype of 'object' expect_column_values_to_be_of_type is a
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>` and will
independently check each row's type.
Args:
column (str): \
The column name.
type_list (list of str): \
A list of strings representing the data type that each column should have as entries. Valid types are
defined by the current backend implementation and are dynamically loaded. For example, valid types for
PandasDataset include any numpy dtype values (such as 'int64') or native python types (such as 'int'),
whereas valid types for a SqlAlchemyDataset include types named by the current driver such as 'INTEGER'
in most SQL dialects and 'TEXT' in dialects such as postgresql. Valid types for SparkDFDataset include
'StringType', 'BooleanType' and other pyspark-defined type names.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See also:
:func:`expect_column_values_to_be_of_type \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_of_type>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_in_type_list",
"(",
"self",
",",
"column",
",",
"type_list",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1194,
4
] | [
1256,
33
] | python | en | ['en', 'en', 'en'] | True |
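Passing a list of acceptable type names is the usual way to absorb exactly that platform dependence::

    import pandas as pd
    import great_expectations as ge

    df = ge.from_pandas(pd.DataFrame({"n": [1, 2, 3]}))
    result = df.expect_column_values_to_be_in_type_list("n", ["int32", "int64"])
    print(result.success)   # True: the default int dtype is one of the two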
expect_column_values_to_be_in_set | (
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect each column value to be in a given set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_be_in_set(
"my_col",
[2,3]
)
{
"success": false
"result": {
"unexpected_count": 1
"unexpected_percent": 16.66666666666666666,
"unexpected_percent_nonmissing": 16.66666666666666666,
"partial_unexpected_list": [
1
],
},
}
expect_column_values_to_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
parse_strings_as_datetimes (boolean or None): If True, values provided in value_set will be parsed as \
datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_in_set>`
| Expect each column value to be in a given set. | def expect_column_values_to_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect each column value to be in a given set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_be_in_set(
"my_col",
[2,3]
)
{
"success": false
"result": {
"unexpected_count": 1
"unexpected_percent": 16.66666666666666666,
"unexpected_percent_nonmissing": 16.66666666666666666,
"partial_unexpected_list": [
1
],
},
}
expect_column_values_to_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
parse_strings_as_datetimes (boolean or None): If True, values provided in value_set will be parsed as \
datetimes before making comparisons.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_be_in_set>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_in_set",
"(",
"self",
",",
"column",
",",
"value_set",
",",
"mostly",
"=",
"None",
",",
"parse_strings_as_datetimes",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"# noinspection PyUnresolvedReferences",
"raise",
"NotImplementedError"
] | [
1264,
4
] | [
1341,
33
] | python | en | ['en', 'en', 'en'] | True |
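A sketch reproducing the docstring's own example under the same assumed setup (legacy pandas-backed Dataset API via ge.from_pandas); mostly loosens the check so a small unexpected fraction still passes. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"my_col": [1, 2, 2, 3, 3, 3]}))
    >>> my_df.expect_column_values_to_be_in_set("my_col", [2, 3]).success  # the lone 1 is unexpected
    False
    >>> # 5 of 6 values (about 83%) are in the set, which clears mostly=0.8
    >>> my_df.expect_column_values_to_be_in_set("my_col", [2, 3], mostly=0.8).success
    True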
expect_column_values_to_not_be_in_set | (
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to not be in the set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_not_be_in_set(
"my_col",
[1,2]
)
{
"success": false
"result": {
"unexpected_count": 3
"unexpected_percent": 50.0,
"unexpected_percent_nonmissing": 50.0,
"partial_unexpected_list": [
1, 2, 2
],
},
}
expect_column_values_to_not_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>`
| Expect column entries to not be in the set. | def expect_column_values_to_not_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# noinspection PyUnresolvedReferences
"""Expect column entries to not be in the set.
For example:
::
# my_df.my_col = [1,2,2,3,3,3]
>>> my_df.expect_column_values_to_not_be_in_set(
"my_col",
[1,2]
)
{
"success": false
"result": {
"unexpected_count": 3
"unexpected_percent": 50.0,
"unexpected_percent_nonmissing": 50.0,
"partial_unexpected_list": [
1, 2, 2
],
},
}
expect_column_values_to_not_be_in_set is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value_set (set-like): \
A set of objects used for comparison.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_not_be_in_set",
"(",
"self",
",",
"column",
",",
"value_set",
",",
"mostly",
"=",
"None",
",",
"parse_strings_as_datetimes",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"# noinspection PyUnresolvedReferences",
"raise",
"NotImplementedError"
] | [
1343,
4
] | [
1418,
33
] | python | en | ['en', 'en', 'en'] | True |
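The complementary check, sketched with the docstring's example data under the same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"my_col": [1, 2, 2, 3, 3, 3]}))
    >>> my_df.expect_column_values_to_not_be_in_set("my_col", [1, 2]).success  # 1, 2, 2 hit the set
    False
    >>> my_df.expect_column_values_to_not_be_in_set("my_col", [4, 5]).success
    True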
expect_column_values_to_be_between | (
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
# tolerance=1e-9,
allow_cross_type_comparisons=None,
parse_strings_as_datetimes=False,
output_strftime_format=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be between a minimum value and a maximum value (inclusive).
expect_column_values_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
min_value (comparable type or None): The minimum value for a column entry.
max_value (comparable type or None): The maximum value for a column entry.
Keyword Args:
strict_min (boolean):
If True, values must be strictly larger than min_value, default=False
strict_max (boolean):
If True, values must be strictly smaller than max_value, default=False
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
parse_strings_as_datetimes (boolean or None) : If True, parse min_value, max_value, and all non-null column\
values to datetimes before making comparisons.
output_strftime_format (str or None): \
A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum value checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum value checked.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
| Expect column entries to be between a minimum value and a maximum value (inclusive). | def expect_column_values_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
# tolerance=1e-9,
allow_cross_type_comparisons=None,
parse_strings_as_datetimes=False,
output_strftime_format=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be between a minimum value and a maximum value (inclusive).
expect_column_values_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
min_value (comparable type or None): The minimum value for a column entry.
max_value (comparable type or None): The maximum value for a column entry.
Keyword Args:
strict_min (boolean):
If True, values must be strictly larger than min_value, default=False
strict_max (boolean):
If True, values must be strictly smaller than max_value, default=False
allow_cross_type_comparisons (boolean or None) : If True, allow comparisons between types (e.g. integer and\
string). Otherwise, attempting such comparisons will raise an exception.
parse_strings_as_datetimes (boolean or None) : If True, parse min_value, max_value, and all non-null column\
values to datetimes before making comparisons.
output_strftime_format (str or None): \
A valid strftime format for datetime output. Only used if parse_strings_as_datetimes=True.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive unless strict_min or strict_max are set to True.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum value checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum value checked.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_between",
"(",
"self",
",",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"strict_min",
"=",
"False",
",",
"strict_max",
"=",
"False",
",",
"# tolerance=1e-9,",
"allow_cross_type_comparisons",
"=",
"None",
",",
"parse_strings_as_datetimes",
"=",
"False",
",",
"output_strftime_format",
"=",
"None",
",",
"mostly",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1420,
4
] | [
1496,
33
] | python | en | ['en', 'en', 'en'] | True |
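A sketch of the inclusive bounds and the strict_* switches, under the same assumed setup (legacy pandas-backed Dataset API; illustrative data). ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"n": [1, 5, 10]}))
    >>> my_df.expect_column_values_to_be_between("n", min_value=1, max_value=10).success  # bounds inclusive
    True
    >>> # strict_max=True demands n < 10, so the 10 now fails
    >>> my_df.expect_column_values_to_be_between("n", min_value=1, max_value=10, strict_max=True).success
    False
Passing min_value=None (or max_value=None) drops that side of the check entirely, as the Notes above describe.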
expect_column_values_to_be_increasing | (
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column values to be increasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly increasing--equal values are treated as failures.
expect_column_values_to_be_increasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly greater than previous values
parse_strings_as_datetimes (boolean or None) : \
If True, parse all non-null column values to datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_decreasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_decreasing>`
| Expect column values to be increasing. | def expect_column_values_to_be_increasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be increasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly increasing--equal values are treated as failures.
expect_column_values_to_be_increasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly greater than previous values
parse_strings_as_datetimes (boolean or None) : \
If True, parse all non-null column values to datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_decreasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_decreasing>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_increasing",
"(",
"self",
",",
"column",
",",
"strictly",
"=",
"None",
",",
"parse_strings_as_datetimes",
"=",
"False",
",",
"mostly",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1498,
4
] | [
1560,
33
] | python | en | ['en', 'en', 'en'] | True |
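A sketch contrasting the default (non-strict) behavior with strictly=True, same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"n": [1, 2, 2, 4]}))
    >>> my_df.expect_column_values_to_be_increasing("n").success  # ties allowed by default
    True
    >>> my_df.expect_column_values_to_be_increasing("n", strictly=True).success  # the repeated 2 fails
    False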
expect_column_values_to_be_decreasing | (
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column values to be decreasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly decreasing--equal values are treated as failures.
expect_column_values_to_be_decreasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly less than previous values
parse_strings_as_datetimes (boolean or None) : \
If True, parse all non-null column values to datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_increasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_increasing>`
| Expect column values to be decreasing. | def expect_column_values_to_be_decreasing(
self,
column,
strictly=None,
parse_strings_as_datetimes=False,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column values to be decreasing.
By default, this expectation only works for numeric or datetime data.
When `parse_strings_as_datetimes=True`, it can also parse strings to datetimes.
If `strictly=True`, then this expectation is only satisfied if each consecutive value
is strictly decreasing--equal values are treated as failures.
expect_column_values_to_be_decreasing is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
strictly (Boolean or None): \
If True, values must be strictly less than previous values
parse_strings_as_datetimes (boolean or None) : \
If True, parse all non-null column values to datetimes before making comparisons
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_increasing \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_increasing>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_decreasing",
"(",
"self",
",",
"column",
",",
"strictly",
"=",
"None",
",",
"parse_strings_as_datetimes",
"=",
"False",
",",
"mostly",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1562,
4
] | [
1624,
33
] | python | en | ['en', 'en', 'en'] | True |
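The mirror-image sketch for decreasing values, same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"n": [5, 4, 4, 2]}))
    >>> my_df.expect_column_values_to_be_decreasing("n").success  # ties allowed by default
    True
    >>> my_df.expect_column_values_to_be_decreasing("n", strictly=True).success  # the repeated 4 fails
    False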
expect_column_value_lengths_to_be_between | (
self,
column,
min_value=None,
max_value=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be strings with length between a minimum value and a maximum value (inclusive).
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
expect_column_value_lengths_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
min_value (int or None): \
The minimum value for a column entry length.
max_value (int or None): \
The maximum value for a column entry length.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum length \
checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum length \
checked.
See Also:
:func:`expect_column_value_lengths_to_equal \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_equal>`
| Expect column entries to be strings with length between a minimum value and a maximum value (inclusive). | def expect_column_value_lengths_to_be_between(
self,
column,
min_value=None,
max_value=None,
mostly=None,
row_condition=None,
condition_parser=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings with length between a minimum value and a maximum value (inclusive).
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
expect_column_value_lengths_to_be_between is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
min_value (int or None): \
The minimum value for a column entry length.
max_value (int or None): \
The maximum value for a column entry length.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
* min_value and max_value are both inclusive.
* If min_value is None, then max_value is treated as an upper bound, and there is no minimum length \
checked.
* If max_value is None, then min_value is treated as a lower bound, and there is no maximum length \
checked.
See Also:
:func:`expect_column_value_lengths_to_equal \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_equal>`
"""
raise NotImplementedError | [
"def",
"expect_column_value_lengths_to_be_between",
"(",
"self",
",",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"mostly",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1632,
4
] | [
1697,
33
] | python | en | ['en', 'en', 'en'] | True |
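A string-length sketch, same assumed setup; recall from the docstring that non-string columns raise a TypeError. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"s": ["ox", "cat", "horse"]}))
    >>> my_df.expect_column_value_lengths_to_be_between("s", min_value=2, max_value=5).success
    True
    >>> my_df.expect_column_value_lengths_to_be_between("s", min_value=3).success  # "ox" is too short
    False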
expect_column_value_lengths_to_equal | (
self,
column,
value,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be strings with length equal to the provided value.
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
expect_column_value_lengths_to_equal is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value (int or None): \
The expected value for a column entry length.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
| Expect column entries to be strings with length equal to the provided value. | def expect_column_value_lengths_to_equal(
self,
column,
value,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings with length equal to the provided value.
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
expect_column_value_lengths_to_equal is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
value (int or None): \
The expected value for a column entry length.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_value_lengths_to_be_between>`
"""
raise NotImplementedError | [
"def",
"expect_column_value_lengths_to_equal",
"(",
"self",
",",
"column",
",",
"value",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1699,
4
] | [
1754,
33
] | python | en | ['en', 'en', 'en'] | True |
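The fixed-width variant, sketched under the same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"code": ["AB", "CD", "EF"]}))  # illustrative data
    >>> my_df.expect_column_value_lengths_to_equal("code", 2).success
    True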
expect_column_values_to_match_regex | (
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be strings that match a given regular expression. Valid matches can be found \
anywhere in the string, for example "[at]+" will identify the following strings as expected: "cat", "hat", \
"aa", "a", and "t", and the following strings as unexpected: "fish", "dog".
expect_column_values_to_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
| Expect column entries to be strings that match a given regular expression. Valid matches can be found \
anywhere in the string, for example "[at]+" will identify the following strings as expected: "cat", "hat", \
"aa", "a", and "t", and the following strings as unexpected: "fish", "dog". | def expect_column_values_to_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings that match a given regular expression. Valid matches can be found \
anywhere in the string, for example "[at]+" will identify the following strings as expected: "cat", "hat", \
"aa", "a", and "t", and the following strings as unexpected: "fish", "dog".
expect_column_values_to_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_match_regex",
"(",
"self",
",",
"column",
",",
"regex",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1756,
4
] | [
1814,
33
] | python | en | ['en', 'en', 'en'] | True |
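A sketch of the match-anywhere semantics described above ("[at]+" accepts "cat" and "hat" but not "fish"), same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"s": ["cat", "hat", "fish"]}))
    >>> my_df.expect_column_values_to_match_regex("s", "[at]+").success  # "fish" never matches
    False
    >>> my_df.expect_column_values_to_match_regex("s", "[at]+", mostly=0.6).success  # 2 of 3 suffices
    True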
expect_column_values_to_not_match_regex | (
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be strings that do NOT match a given regular expression. The regex must not match \
any portion of the provided string. For example, "[at]+" would identify the following strings as expected: \
"fish", "dog", and the following as unexpected: "cat", "hat".
expect_column_values_to_not_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should NOT match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
| Expect column entries to be strings that do NOT match a given regular expression. The regex must not match \
any portion of the provided string. For example, "[at]+" would identify the following strings as expected: \
"fish", "dog", and the following as unexpected: "cat", "hat". | def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings that do NOT match a given regular expression. The regex must not match \
any portion of the provided string. For example, "[at]+" would identify the following strings as expected: \
"fish", "dog", and the following as unexpected: "cat", "hat".
expect_column_values_to_not_match_regex is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex (str): \
The regular expression the column entries should NOT match.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_not_match_regex",
"(",
"self",
",",
"column",
",",
"regex",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1816,
4
] | [
1874,
33
] | python | en | ['en', 'en', 'en'] | True |
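The negated form, sketched with the docstring's "fish"/"dog" data under the same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"s": ["fish", "dog"]}))
    >>> my_df.expect_column_values_to_not_match_regex("s", "[at]+").success
    True
    >>> my_df.expect_column_values_to_not_match_regex("s", "o").success  # "dog" contains an o
    False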
expect_column_values_to_match_regex_list | (
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the column entries to be strings that can be matched to either any of or all of a list of regular
expressions. Matches can be anywhere in the string.
expect_column_values_to_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should match
Keyword Args:
match_on (string): \
"any" or "all".
Use "any" if the value should match at least one regular expression in the list.
Use "all" if it should match each regular expression in the list.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
| Expect the column entries to be strings that can be matched to either any of or all of a list of regular
expressions. Matches can be anywhere in the string. | def expect_column_values_to_match_regex_list(
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column entries to be strings that can be matched to either any of or all of a list of regular
expressions. Matches can be anywhere in the string.
expect_column_values_to_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should match
Keyword Args:
match_on (string): \
"any" or "all".
Use "any" if the value should match at least one regular expression in the list.
Use "all" if it should match each regular expression in the list.
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_not_match_regex \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_not_match_regex>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_match_regex_list",
"(",
"self",
",",
"column",
",",
"regex_list",
",",
"match_on",
"=",
"\"any\"",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1876,
4
] | [
1938,
33
] | python | en | ['en', 'en', 'en'] | True |
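A sketch contrasting match_on="any" with match_on="all", same assumed setup. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"s": ["cat", "hat"]}))
    >>> # every value contains both an "a" and a "t", so match_on="all" passes
    >>> my_df.expect_column_values_to_match_regex_list("s", ["a", "t"], match_on="all").success
    True
    >>> # each value matches at least one of the prefixes, so match_on="any" passes
    >>> my_df.expect_column_values_to_match_regex_list("s", ["^c", "^h"], match_on="any").success
    True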
expect_column_values_to_not_match_regex_list | (
self,
column,
regex_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can
be anywhere in the string.
expect_column_values_to_not_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should not match
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
| Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can
be anywhere in the string. | def expect_column_values_to_not_match_regex_list(
self,
column,
regex_list,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can
be anywhere in the string.
expect_column_values_to_not_match_regex_list is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
regex_list (list): \
The list of regular expressions which the column entries should not match
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. \
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex_list \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_regex_list>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_not_match_regex_list",
"(",
"self",
",",
"column",
",",
"regex_list",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
1940,
4
] | [
1994,
33
] | python | en | ['en', 'en', 'en'] | True |
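The list-negated form, sketched under the same assumed setup; a value fails if any pattern in the list matches it. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"s": ["fish", "dog"]}))
    >>> my_df.expect_column_values_to_not_match_regex_list("s", ["[at]+", "^z"]).success  # no pattern matches
    True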
expect_column_values_to_match_strftime_format | (
self,
column,
strftime_format,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be strings representing a date or time with a given format.
expect_column_values_to_match_strftime_format is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
strftime_format (str): \
A strftime format string to use for matching
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect column entries to be strings representing a date or time with a given format. | def expect_column_values_to_match_strftime_format(
self,
column,
strftime_format,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be strings representing a date or time with a given format.
expect_column_values_to_match_strftime_format is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
strftime_format (str): \
A strftime format string to use for matching
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_match_strftime_format",
"(",
"self",
",",
"column",
",",
"strftime_format",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
2002,
4
] | [
2051,
33
] | python | en | ['en', 'en', 'en'] | True |
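A date-format sketch, same assumed setup; the format string follows standard strftime directives. ::
    >>> import pandas as pd
    >>> import great_expectations as ge  # legacy pandas-backed Dataset API assumed
    >>> my_df = ge.from_pandas(pd.DataFrame({"d": ["2021-01-01", "2021-12-31"]}))
    >>> my_df.expect_column_values_to_match_strftime_format("d", "%Y-%m-%d").success
    True
    >>> my_df.expect_column_values_to_match_strftime_format("d", "%d/%m/%Y").success  # wrong layout
    False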
expect_column_values_to_be_dateutil_parseable | (
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be parsable using dateutil.
expect_column_values_to_be_dateutil_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
| Expect column entries to be parsable using dateutil. | def expect_column_values_to_be_dateutil_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be parsable using dateutil.
expect_column_values_to_be_dateutil_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_dateutil_parseable",
"(",
"self",
",",
"column",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
2053,
4
] | [
2099,
33
] | python | en | ['en', 'en', 'en'] | True |
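
The same kind of sketch for the dateutil expectation above, under the same PandasDataset assumption; the data is invented.

from great_expectations.dataset import PandasDataset

df = PandasDataset({"event_time": ["2021-03-01T12:00:00", "March 5, 2021", "???"]})

# dateutil accepts many date formats, so both real timestamps parse; "???" fails.
result = df.expect_column_values_to_be_dateutil_parseable("event_time", mostly=0.6)
print(result.success)  # True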
expect_column_values_to_be_json_parseable | (
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be data written in JavaScript Object Notation.
expect_column_values_to_be_json_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_json_schema \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_json_schema>`
| Expect column entries to be data written in JavaScript Object Notation. | def expect_column_values_to_be_json_parseable(
self,
column,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be data written in JavaScript Object Notation.
expect_column_values_to_be_json_parseable is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_json_schema \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_match_json_schema>`
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_be_json_parseable",
"(",
"self",
",",
"column",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
2101,
4
] | [
2151,
33
] | python | en | ['en', 'en', 'en'] | True |
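
An illustrative sketch for the JSON-parseability expectation above, again assuming the v2 PandasDataset backend; the payloads are invented.

from great_expectations.dataset import PandasDataset

df = PandasDataset({"payload": ['{"id": 1}', "[1, 2, 3]", "not json"]})

result = df.expect_column_values_to_be_json_parseable("payload", mostly=0.6)
print(result.success)  # True: 2 of 3 values are valid JSON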
expect_column_values_to_match_json_schema | (
self,
column,
json_schema,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) | Expect column entries to be JSON objects matching a given JSON schema.
expect_column_values_to_match_json_schema is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_json_parseable \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_json_parseable>`
The `JSON-schema docs <http://json-schema.org/>`_.
| Expect column entries to be JSON objects matching a given JSON schema. | def expect_column_values_to_match_json_schema(
self,
column,
json_schema,
mostly=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect column entries to be JSON objects matching a given JSON schema.
expect_column_values_to_match_json_schema is a \
:func:`column_map_expectation <great_expectations.dataset.dataset.MetaDataset.column_map_expectation>`.
Args:
column (str): \
The column name.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_be_json_parseable \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_json_parseable>`
The `JSON-schema docs <http://json-schema.org/>`_.
"""
raise NotImplementedError | [
"def",
"expect_column_values_to_match_json_schema",
"(",
"self",
",",
"column",
",",
"json_schema",
",",
"mostly",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
2153,
4
] | [
2206,
33
] | python | en | ['en', 'en', 'en'] | True |
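
A sketch of the schema-matching expectation above, under the same PandasDataset assumption; the schema and payloads are invented.

from great_expectations.dataset import PandasDataset

df = PandasDataset({"payload": ['{"id": 1}', '{"id": "oops"}']})

# Each value must be a JSON object carrying an integer "id".
schema = {
    "type": "object",
    "properties": {"id": {"type": "integer"}},
    "required": ["id"],
}
result = df.expect_column_values_to_match_json_schema(
    "payload", json_schema=schema, mostly=0.5
)
print(result.success)  # True: 1 of 2 values matches, and 1/2 >= 0.5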
expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than | (
self,
column,
distribution,
p_value=0.05,
params=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
) |
Expect the column values to be distributed similarly to a scipy distribution. \
This expectation compares the provided column to the specified continuous distribution with a parametric \
Kolmogorov-Smirnov test. The K-S test compares the provided column to the cumulative distribution function (CDF) of \
the specified scipy distribution. If you don't know the desired distribution shape parameters, use the \
`ge.dataset.util.infer_distribution_parameters()` utility function to estimate them.
It returns 'success'=True if the p-value from the K-S test is greater than or equal to the provided p-value.
``expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than`` is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
distribution (str): \
The scipy distribution name. See: `<https://docs.scipy.org/doc/scipy/reference/stats.html>`_ Currently
supported distributions are listed in the Notes section below.
p_value (float): \
The threshold p-value for a passing test. Default is 0.05.
params (dict or list) : \
A dictionary or positional list of shape parameters that describe the distribution you want to test the\
data against. Include key values specific to the distribution from the appropriate scipy \
distribution CDF function. 'loc' and 'scale' are used as translational parameters.\
See `<https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions>`_
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"details":
"expected_params" (dict): The specified or inferred parameters of the distribution to test \
against
"ks_results" (dict): The raw result of stats.kstest()
}
* The Kolmogorov-Smirnov test's null hypothesis is that the column is similar to the provided distribution.
* Supported scipy distributions:
* norm
* beta
* gamma
* uniform
* chi2
* expon
|
Expect the column values to be distributed similarly to a scipy distribution. \ | def expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
self,
column,
distribution,
p_value=0.05,
params=None,
result_format=None,
row_condition=None,
condition_parser=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""
Expect the column values to be distributed similarly to a scipy distribution. \
This expectation compares the provided column to the specified continuous distribution with a parametric \
Kolmogorov-Smirnov test. The K-S test compares the provided column to the cumulative distribution function (CDF) of \
the specified scipy distribution. If you don't know the desired distribution shape parameters, use the \
`ge.dataset.util.infer_distribution_parameters()` utility function to estimate them.
It returns 'success'=True if the p-value from the K-S test is greater than or equal to the provided p-value.
``expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than`` is a \
:func:`column_aggregate_expectation \
<great_expectations.dataset.dataset.MetaDataset.column_aggregate_expectation>`.
Args:
column (str): \
The column name.
distribution (str): \
The scipy distribution name. See: `<https://docs.scipy.org/doc/scipy/reference/stats.html>`_ Currently
supported distributions are listed in the Notes section below.
p_value (float): \
The threshold p-value for a passing test. Default is 0.05.
params (dict or list) : \
A dictionary or positional list of shape parameters that describe the distribution you want to test the\
data against. Include key values specific to the distribution from the appropriate scipy \
distribution CDF function. 'loc' and 'scale' are used as translational parameters.\
See `<https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions>`_
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Notes:
These fields in the result object are customized for this expectation:
::
{
"details":
"expected_params" (dict): The specified or inferred parameters of the distribution to test \
against
"ks_results" (dict): The raw result of stats.kstest()
}
* The Kolmogorov-Smirnov test's null hypothesis is that the column is similar to the provided distribution.
* Supported scipy distributions:
* norm
* beta
* gamma
* uniform
* chi2
* expon
"""
raise NotImplementedError | [
"def",
"expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than",
"(",
"self",
",",
"column",
",",
"distribution",
",",
"p_value",
"=",
"0.05",
",",
"params",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"include_config",
"=",
"True",
",",
"catch_exceptions",
"=",
"None",
",",
"meta",
"=",
"None",
",",
")",
":",
"raise",
"NotImplementedError"
] | [
2214,
4
] | [
2297,
33
] | python | en | ['en', 'error', 'th'] | False |
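
A sketch of the K-S expectation above, assuming the v2 PandasDataset backend and using the infer_distribution_parameters helper that the docstring itself recommends; the data is synthetic.

import numpy as np
from great_expectations.dataset import PandasDataset
from great_expectations.dataset.util import infer_distribution_parameters

rng = np.random.default_rng(0)
df = PandasDataset({"latency": rng.normal(loc=100.0, scale=15.0, size=1000)})

# Estimate the shape parameters from the data, then test against scipy's "norm".
params = infer_distribution_parameters(df["latency"], "norm")
result = df.expect_column_parameterized_distribution_ks_test_p_value_to_be_greater_than(
    "latency", distribution="norm", p_value=0.05, params=params
)
print(result.success)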
MetaDataset.column_map_expectation | (cls, func) | Constructs an expectation using column-map semantics.
The column_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
truthiness of some condition on a per-row basis.
Args:
func (function): \
The function implementing a row-wise expectation. The function should take a column of data and \
return an equally-long column of boolean values corresponding to the truthiness of the \
underlying expectation.
Notes:
column_map_expectation intercepts and takes action based on the following parameters:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
column_map_expectation *excludes null values* from being passed to the function
Depending on the `result_format` selected, column_map_expectation can add additional data to a return object, \
including `element_count`, `nonnull_values`, `nonnull_count`, `success_count`, `unexpected_list`, and \
`unexpected_index_list`. \
See :func:`_format_map_output <great_expectations.data_asset.dataset.Dataset._format_map_output>`
See also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>` \
for an example of a column_map_expectation
| Constructs an expectation using column-map semantics. | def column_map_expectation(cls, func):
"""Constructs an expectation using column-map semantics.
The column_map_expectation decorator handles boilerplate issues surrounding the common pattern of evaluating
truthiness of some condition on a per-row basis.
Args:
func (function): \
The function implementing a row-wise expectation. The function should take a column of data and \
return an equally-long column of boolean values corresponding to the truthiness of the \
underlying expectation.
Notes:
column_map_expectation intercepts and takes action based on the following parameters:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
column_map_expectation *excludes null values* from being passed to the function
Depending on the `result_format` selected, column_map_expectation can add additional data to a return object, \
including `element_count`, `nonnull_values`, `nonnull_count`, `success_count`, `unexpected_list`, and \
`unexpected_index_list`. \
See :func:`_format_map_output <great_expectations.data_asset.dataset.Dataset._format_map_output>`
See also:
:func:`expect_column_values_to_be_in_set \
<great_expectations.dataset.dataset.Dataset.expect_column_values_to_be_in_set>` \
for an example of a column_map_expectation
"""
raise NotImplementedError | [
"def",
"column_map_expectation",
"(",
"cls",
",",
"func",
")",
":",
"raise",
"NotImplementedError"
] | [
40,
4
] | [
70,
33
] | python | en | ['en', 'lb', 'en'] | True |
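
To make the decorator contract concrete: a sketch of a custom column-map expectation, following the documented v2 pattern of subclassing PandasDataset and decorating with MetaPandasDataset.column_map_expectation; the expectation itself is invented.

from great_expectations.dataset import MetaPandasDataset, PandasDataset

class CustomPandasDataset(PandasDataset):
    _data_asset_type = "CustomPandasDataset"

    @MetaPandasDataset.column_map_expectation
    def expect_column_values_to_be_even(self, column):
        # Return an equally long boolean Series, one truth value per non-null row.
        return column % 2 == 0

df = CustomPandasDataset({"n": [2, 4, 5, None]})
# The null is excluded before the map function runs; 2 of 3 remaining are even.
print(df.expect_column_values_to_be_even("n", mostly=0.5).success)  # True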
MetaDataset.column_aggregate_expectation | (cls, func) | Constructs an expectation using column-aggregate semantics.
The column_aggregate_expectation decorator handles boilerplate issues surrounding the common pattern of \
evaluating truthiness of some condition on an aggregated-column basis.
Args:
func (function): \
The function implementing an expectation using an aggregate property of a column. \
The function should take a column of data and return the aggregate value it computes.
Notes:
column_aggregate_expectation *excludes null values* from being passed to the function
See also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>` \
for an example of a column_aggregate_expectation
| Constructs an expectation using column-aggregate semantics. | def column_aggregate_expectation(cls, func):
"""Constructs an expectation using column-aggregate semantics.
The column_aggregate_expectation decorator handles boilerplate issues surrounding the common pattern of \
evaluating truthiness of some condition on an aggregated-column basis.
Args:
func (function): \
The function implementing an expectation using an aggregate property of a column. \
The function should take a column of data and return the aggregate value it computes.
Notes:
column_aggregate_expectation *excludes null values* from being passed to the function
See also:
:func:`expect_column_mean_to_be_between \
<great_expectations.dataset.dataset.Dataset.expect_column_mean_to_be_between>` \
for an example of a column_aggregate_expectation
"""
argspec = inspect.getfullargspec(func)[0][1:]
@cls.expectation(argspec)
@wraps(func)
def inner_wrapper(
self,
column=None,
result_format=None,
row_condition=None,
condition_parser=None,
*args,
**kwargs
):
if result_format is None:
result_format = self.default_expectation_args["result_format"]
# Retain support for string-only output formats:
result_format = parse_result_format(result_format)
if row_condition and self._supports_row_condition:
self = self.query(row_condition, parser=condition_parser).reset_index(
drop=True
)
element_count = self.get_row_count()
if kwargs.get("column"):
column = kwargs.get("column")
if column is not None:
# We test whether the dataset is a sqlalchemy_dataset by seeing if it has an engine. We don't test
# whether it is actually an instance to avoid circular dependency issues.
if (
hasattr(self, "engine")
and self.batch_kwargs.get("use_quoted_name")
and quoted_name
):
column = quoted_name(column, quote=True)
nonnull_count = self.get_column_nonnull_count(
kwargs.get("column", column)
)
# column is treated specially as a positional argument in most expectations
args = tuple((column, *args))
elif kwargs.get("column_A") and kwargs.get("column_B"):
try:
nonnull_count = (
self[kwargs.get("column_A")].notnull()
& self[kwargs.get("column_B")].notnull()
).sum()
except TypeError:
nonnull_count = None
else:
raise ValueError(
"The column_aggregate_expectation wrapper requires either column or "
"both column_A and column_B as input."
)
if nonnull_count:
null_count = element_count - nonnull_count
else:
null_count = None
evaluation_result = func(self, *args, **kwargs)
if "success" not in evaluation_result:
raise ValueError(
"Column aggregate expectation failed to return required information: success"
)
if ("result" not in evaluation_result) or (
"observed_value" not in evaluation_result["result"]
):
raise ValueError(
"Column aggregate expectation failed to return required information: observed_value"
)
return_obj = {"success": bool(evaluation_result["success"])}
if result_format["result_format"] == "BOOLEAN_ONLY":
return return_obj
return_obj["result"] = {
"observed_value": evaluation_result["result"]["observed_value"],
"element_count": element_count,
}
if null_count:
return_obj["result"]["missing_count"] = null_count
if element_count > 0:
return_obj["result"]["missing_percent"] = (
null_count * 100.0 / element_count
)
else:
return_obj["result"]["missing_percent"] = None
else:
return_obj["result"]["missing_count"] = None
return_obj["result"]["missing_percent"] = None
if result_format["result_format"] == "BASIC":
return return_obj
if "details" in evaluation_result["result"]:
return_obj["result"]["details"] = evaluation_result["result"]["details"]
if result_format["result_format"] in ["SUMMARY", "COMPLETE"]:
return return_obj
raise ValueError(
"Unknown result_format %s." % result_format["result_format"]
)
return inner_wrapper | [
"def",
"column_aggregate_expectation",
"(",
"cls",
",",
"func",
")",
":",
"argspec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"func",
")",
"[",
"0",
"]",
"[",
"1",
":",
"]",
"@",
"cls",
".",
"expectation",
"(",
"argspec",
")",
"@",
"wraps",
"(",
"func",
")",
"def",
"inner_wrapper",
"(",
"self",
",",
"column",
"=",
"None",
",",
"result_format",
"=",
"None",
",",
"row_condition",
"=",
"None",
",",
"condition_parser",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"result_format",
"is",
"None",
":",
"result_format",
"=",
"self",
".",
"default_expectation_args",
"[",
"\"result_format\"",
"]",
"# Retain support for string-only output formats:",
"result_format",
"=",
"parse_result_format",
"(",
"result_format",
")",
"if",
"row_condition",
"and",
"self",
".",
"_supports_row_condition",
":",
"self",
"=",
"self",
".",
"query",
"(",
"row_condition",
",",
"parser",
"=",
"condition_parser",
")",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"element_count",
"=",
"self",
".",
"get_row_count",
"(",
")",
"if",
"kwargs",
".",
"get",
"(",
"\"column\"",
")",
":",
"column",
"=",
"kwargs",
".",
"get",
"(",
"\"column\"",
")",
"if",
"column",
"is",
"not",
"None",
":",
"# We test whether the dataset is a sqlalchemy_dataset by seeing if it has an engine. We don't test",
"# whether it is actually an instance to avoid circular dependency issues.",
"if",
"(",
"hasattr",
"(",
"self",
",",
"\"engine\"",
")",
"and",
"self",
".",
"batch_kwargs",
".",
"get",
"(",
"\"use_quoted_name\"",
")",
"and",
"quoted_name",
")",
":",
"column",
"=",
"quoted_name",
"(",
"column",
",",
"quote",
"=",
"True",
")",
"nonnull_count",
"=",
"self",
".",
"get_column_nonnull_count",
"(",
"kwargs",
".",
"get",
"(",
"\"column\"",
",",
"column",
")",
")",
"# column is treated specially as a positional argument in most expectations",
"args",
"=",
"tuple",
"(",
"(",
"column",
",",
"*",
"args",
")",
")",
"elif",
"kwargs",
".",
"get",
"(",
"\"column_A\"",
")",
"and",
"kwargs",
".",
"get",
"(",
"\"column_B\"",
")",
":",
"try",
":",
"nonnull_count",
"=",
"(",
"self",
"[",
"kwargs",
".",
"get",
"(",
"\"column_A\"",
")",
"]",
".",
"notnull",
"(",
")",
"&",
"self",
"[",
"kwargs",
".",
"get",
"(",
"\"column_B\"",
")",
"]",
".",
"notnull",
"(",
")",
")",
".",
"sum",
"(",
")",
"except",
"TypeError",
":",
"nonnull_count",
"=",
"None",
"else",
":",
"raise",
"ValueError",
"(",
"\"The column_aggregate_expectation wrapper requires either column or \"",
"\"both column_A and column_B as input.\"",
")",
"if",
"nonnull_count",
":",
"null_count",
"=",
"element_count",
"-",
"nonnull_count",
"else",
":",
"null_count",
"=",
"None",
"evaluation_result",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"\"success\"",
"not",
"in",
"evaluation_result",
":",
"raise",
"ValueError",
"(",
"\"Column aggregate expectation failed to return required information: success\"",
")",
"if",
"(",
"\"result\"",
"not",
"in",
"evaluation_result",
")",
"or",
"(",
"\"observed_value\"",
"not",
"in",
"evaluation_result",
"[",
"\"result\"",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"Column aggregate expectation failed to return required information: observed_value\"",
")",
"return_obj",
"=",
"{",
"\"success\"",
":",
"bool",
"(",
"evaluation_result",
"[",
"\"success\"",
"]",
")",
"}",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"BOOLEAN_ONLY\"",
":",
"return",
"return_obj",
"return_obj",
"[",
"\"result\"",
"]",
"=",
"{",
"\"observed_value\"",
":",
"evaluation_result",
"[",
"\"result\"",
"]",
"[",
"\"observed_value\"",
"]",
",",
"\"element_count\"",
":",
"element_count",
",",
"}",
"if",
"null_count",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_count\"",
"]",
"=",
"null_count",
"if",
"element_count",
">",
"0",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_percent\"",
"]",
"=",
"(",
"null_count",
"*",
"100.0",
"/",
"element_count",
")",
"else",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_percent\"",
"]",
"=",
"None",
"else",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_count\"",
"]",
"=",
"None",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"missing_percent\"",
"]",
"=",
"None",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"==",
"\"BASIC\"",
":",
"return",
"return_obj",
"if",
"\"details\"",
"in",
"evaluation_result",
"[",
"\"result\"",
"]",
":",
"return_obj",
"[",
"\"result\"",
"]",
"[",
"\"details\"",
"]",
"=",
"evaluation_result",
"[",
"\"result\"",
"]",
"[",
"\"details\"",
"]",
"if",
"result_format",
"[",
"\"result_format\"",
"]",
"in",
"[",
"\"SUMMARY\"",
",",
"\"COMPLETE\"",
"]",
":",
"return",
"return_obj",
"raise",
"ValueError",
"(",
"\"Unknown result_format %s.\"",
"%",
"result_format",
"[",
"\"result_format\"",
"]",
")",
"return",
"inner_wrapper"
] | [
73,
4
] | [
203,
28
] | python | en | ['en', 'en', 'en'] | True |
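
The matching sketch for the aggregate decorator: the wrapped function must return "success" plus result["observed_value"], exactly as the wrapper above enforces. Same subclassing assumption as before; the expectation is invented.

from great_expectations.dataset import MetaPandasDataset, PandasDataset

class CustomAggDataset(PandasDataset):
    _data_asset_type = "CustomAggDataset"

    @MetaPandasDataset.column_aggregate_expectation
    def expect_column_mean_to_be_positive(self, column):
        mean = column.mean()
        # Both keys below are required; omitting either raises ValueError.
        return {"success": mean > 0, "result": {"observed_value": mean}}

df = CustomAggDataset({"x": [1.0, 2.0, -0.5]})
print(df.expect_column_mean_to_be_positive("x").success)  # True: mean is ~0.83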
convert_error_to_string | (err, frames_to_skip_from_tail=None) |
:param frames_to_skip_from_tail: may be an int or a list of str; in the latter case, frames containing these strings are skipped
| def convert_error_to_string(err, frames_to_skip_from_tail=None):
"""
:param frames_to_skip_from_tail: may be an int or a list of str; in the latter case, frames containing these strings are skipped
"""
try:
if hasattr(err, "type") and hasattr(err, "value") and hasattr(err, "tb"):
exctype, value, tb = err.type, err.value, err.tb
else:
exctype, value, tb = err
trace = traceback.format_exception(exctype, value, tb)
if frames_to_skip_from_tail:
if isinstance(frames_to_skip_from_tail, list):
new_trace = []
for line in trace:
if len([w for w in frames_to_skip_from_tail if w in line]) > 0:
continue
else:
new_trace.append(line)
trace = new_trace
if isinstance(frames_to_skip_from_tail, int):
trace = trace[:-frames_to_skip_from_tail]
return ''.join(trace)
except Exception:
tb = traceback.format_exc()
return "*FAILED TO GET TRACEBACK*: " + tb | [
"def",
"convert_error_to_string",
"(",
"err",
",",
"frames_to_skip_from_tail",
"=",
"None",
")",
":",
"try",
":",
"if",
"hasattr",
"(",
"err",
",",
"\"type\"",
")",
"and",
"hasattr",
"(",
"err",
",",
"\"value\"",
")",
"and",
"hasattr",
"(",
"err",
",",
"\"tb\"",
")",
":",
"exctype",
",",
"value",
",",
"tb",
"=",
"err",
".",
"type",
",",
"err",
".",
"value",
",",
"err",
".",
"tb",
"else",
":",
"exctype",
",",
"value",
",",
"tb",
"=",
"err",
"trace",
"=",
"traceback",
".",
"format_exception",
"(",
"exctype",
",",
"value",
",",
"tb",
")",
"if",
"frames_to_skip_from_tail",
":",
"if",
"isinstance",
"(",
"frames_to_skip_from_tail",
",",
"list",
")",
":",
"new_trace",
"=",
"[",
"]",
"for",
"line",
"in",
"trace",
":",
"if",
"len",
"(",
"[",
"w",
"for",
"w",
"in",
"frames_to_skip_from_tail",
"if",
"w",
"in",
"line",
"]",
")",
">",
"0",
":",
"continue",
"else",
":",
"new_trace",
"+=",
"line",
"trace",
"=",
"new_trace",
"if",
"isinstance",
"(",
"frames_to_skip_from_tail",
",",
"int",
")",
":",
"trace",
"=",
"trace",
"[",
":",
"-",
"frames_to_skip_from_tail",
"]",
"return",
"''",
".",
"join",
"(",
"trace",
")",
"except",
"Exception",
":",
"tb",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"return",
"\"*FAILED TO GET TRACEBACK*: \"",
"+",
"tb"
] | [
125,
0
] | [
150,
49
] | python | en | ['en', 'error', 'th'] | False |
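
A usage sketch for convert_error_to_string as defined above. sys.exc_info() yields exactly the (exctype, value, tb) triple the function unpacks; the file name in the list form is hypothetical.

import sys

try:
    1 / 0
except ZeroDivisionError:
    # Full formatted traceback.
    print(convert_error_to_string(sys.exc_info()))
    # List form: drop frames whose text mentions "my_runner.py" (invented name).
    print(convert_error_to_string(sys.exc_info(), frames_to_skip_from_tail=["my_runner.py"]))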
|
load_expectation_suite | (
data_context: DataContext,
expectation_suite_name: str,
usage_event: str,
suppress_usage_message: Optional[bool] = False,
create_if_not_exist: Optional[bool] = True,
) |
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param data_context:
:param expectation_suite_name:
:param usage_event:
:param suppress_usage_message:
:param create_if_not_exist:
|
Load an expectation suite from a given context. | def load_expectation_suite(
data_context: DataContext,
expectation_suite_name: str,
usage_event: str,
suppress_usage_message: Optional[bool] = False,
create_if_not_exist: Optional[bool] = True,
) -> Optional[ExpectationSuite]:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param data_context:
:param expectation_suite_name:
:param usage_event:
:param suppress_usage_message:
:param create_if_not_exist:
"""
if expectation_suite_name.endswith(".json"):
expectation_suite_name = expectation_suite_name[:-5]
suite: Optional[ExpectationSuite]
try:
suite = data_context.get_expectation_suite(
expectation_suite_name=expectation_suite_name
)
return suite
except ge_exceptions.DataContextError:
if create_if_not_exist:
suite = data_context.create_expectation_suite(
expectation_suite_name=expectation_suite_name
)
return suite
else:
suite = None
exit_with_failure_message_and_stats(
data_context=data_context,
usage_event=usage_event,
suppress_usage_message=suppress_usage_message,
message=f"<red>Could not find a suite named `{expectation_suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
return suite | [
"def",
"load_expectation_suite",
"(",
"data_context",
":",
"DataContext",
",",
"expectation_suite_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
"suppress_usage_message",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"create_if_not_exist",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
")",
"->",
"Optional",
"[",
"ExpectationSuite",
"]",
":",
"if",
"expectation_suite_name",
".",
"endswith",
"(",
"\".json\"",
")",
":",
"expectation_suite_name",
"=",
"expectation_suite_name",
"[",
":",
"-",
"5",
"]",
"suite",
":",
"Optional",
"[",
"ExpectationSuite",
"]",
"try",
":",
"suite",
"=",
"data_context",
".",
"get_expectation_suite",
"(",
"expectation_suite_name",
"=",
"expectation_suite_name",
")",
"return",
"suite",
"except",
"ge_exceptions",
".",
"DataContextError",
":",
"if",
"create_if_not_exist",
":",
"suite",
"=",
"data_context",
".",
"create_expectation_suite",
"(",
"expectation_suite_name",
"=",
"expectation_suite_name",
")",
"return",
"suite",
"else",
":",
"suite",
"=",
"None",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"data_context",
",",
"usage_event",
"=",
"usage_event",
",",
"suppress_usage_message",
"=",
"suppress_usage_message",
",",
"message",
"=",
"f\"<red>Could not find a suite named `{expectation_suite_name}`.</red> Please check \"",
"\"the name by running `great_expectations suite list` and try again.\"",
",",
")",
"return",
"suite"
] | [
208,
0
] | [
249,
16
] | python | en | ['en', 'error', 'th'] | False |
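
A sketch of calling load_expectation_suite, assuming it runs from within an existing great_expectations project; the suite name and usage_event string are invented for illustration.

from great_expectations.data_context import DataContext

context = DataContext()  # loads the project's great_expectations.yml

# "my_suite.json" and "my_suite" resolve identically; the suffix is stripped.
suite = load_expectation_suite(
    data_context=context,
    expectation_suite_name="my_suite.json",
    usage_event="cli.suite.edit",  # hypothetical usage-stats event name
    create_if_not_exist=True,
)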
delete_checkpoint | (
context: DataContext,
checkpoint_name: str,
usage_event: str,
assume_yes: bool,
) | Delete a Checkpoint or raise helpful errors. | Delete a Checkpoint or raise helpful errors. | def delete_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
assume_yes: bool,
):
"""Delete a Checkpoint or raise helpful errors."""
validate_checkpoint(
context=context,
checkpoint_name=checkpoint_name,
usage_event=usage_event,
)
confirm_prompt: str = f"""\nAre you sure you want to delete the Checkpoint "{checkpoint_name}" (this action is irreversible)?"
"""
continuation_message: str = (
f'The Checkpoint "{checkpoint_name}" was not deleted. Exiting now.'
)
if not assume_yes:
confirm_proceed_or_exit(
confirm_prompt=confirm_prompt,
continuation_message=continuation_message,
data_context=context,
usage_stats_event=usage_event,
)
context.delete_checkpoint(name=checkpoint_name) | [
"def",
"delete_checkpoint",
"(",
"context",
":",
"DataContext",
",",
"checkpoint_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
"assume_yes",
":",
"bool",
",",
")",
":",
"validate_checkpoint",
"(",
"context",
"=",
"context",
",",
"checkpoint_name",
"=",
"checkpoint_name",
",",
"usage_event",
"=",
"usage_event",
",",
")",
"confirm_prompt",
":",
"str",
"=",
"f\"\"\"\\nAre you sure you want to delete the Checkpoint \"{checkpoint_name}\" (this action is irreversible)?\"\n\"\"\"",
"continuation_message",
":",
"str",
"=",
"(",
"f'The Checkpoint \"{checkpoint_name}\" was not deleted. Exiting now.'",
")",
"if",
"not",
"assume_yes",
":",
"confirm_proceed_or_exit",
"(",
"confirm_prompt",
"=",
"confirm_prompt",
",",
"continuation_message",
"=",
"continuation_message",
",",
"data_context",
"=",
"context",
",",
"usage_stats_event",
"=",
"usage_event",
",",
")",
"context",
".",
"delete_checkpoint",
"(",
"name",
"=",
"checkpoint_name",
")"
] | [
265,
0
] | [
289,
51
] | python | en | ['en', 'en', 'en'] | True |
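
Continuing the DataContext sketch above, a hedged example of delete_checkpoint; the checkpoint name and event string are invented.

# assume_yes=True skips the interactive confirmation prompt shown above.
delete_checkpoint(
    context=context,
    checkpoint_name="nightly_checkpoint",
    usage_event="cli.checkpoint.delete",  # hypothetical event name
    assume_yes=True,
)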
run_checkpoint | (
context: DataContext,
checkpoint_name: str,
usage_event: str,
) | Run a Checkpoint or raise helpful errors. | Run a Checkpoint or raise helpful errors. | def run_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> CheckpointResult:
"""Run a Checkpoint or raise helpful errors."""
failure_message: str = "Exception occurred while running Checkpoint."
validate_checkpoint(
context=context,
checkpoint_name=checkpoint_name,
usage_event=usage_event,
failure_message=failure_message,
)
try:
result: CheckpointResult = context.run_checkpoint(
checkpoint_name=checkpoint_name
)
return result
except ge_exceptions.CheckpointError as e:
cli_message(string=failure_message)
exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event,
message=f"<red>{e}.</red>",
) | [
"def",
"run_checkpoint",
"(",
"context",
":",
"DataContext",
",",
"checkpoint_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
")",
"->",
"CheckpointResult",
":",
"failure_message",
":",
"str",
"=",
"\"Exception occurred while running Checkpoint.\"",
"validate_checkpoint",
"(",
"context",
"=",
"context",
",",
"checkpoint_name",
"=",
"checkpoint_name",
",",
"usage_event",
"=",
"usage_event",
",",
"failure_message",
"=",
"failure_message",
",",
")",
"try",
":",
"result",
":",
"CheckpointResult",
"=",
"context",
".",
"run_checkpoint",
"(",
"checkpoint_name",
"=",
"checkpoint_name",
")",
"return",
"result",
"except",
"ge_exceptions",
".",
"CheckpointError",
"as",
"e",
":",
"cli_message",
"(",
"string",
"=",
"failure_message",
")",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event",
",",
"message",
"=",
"f\"<red>{e}.</red>\"",
",",
")"
] | [
292,
0
] | [
316,
9
] | python | en | ['en', 'gd', 'en'] | True |
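
Likewise for run_checkpoint, which returns the CheckpointResult typed above; the names are invented.

result = run_checkpoint(
    context=context,
    checkpoint_name="nightly_checkpoint",
    usage_event="cli.checkpoint.run",  # hypothetical event name
)
print(result.success)  # overall pass/fail across the checkpoint's validations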
load_checkpoint | (
context: DataContext,
checkpoint_name: str,
usage_event: str,
) | Load a Checkpoint or raise helpful errors. | Load a Checkpoint or raise helpful errors. | def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a Checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
data_context=context,
usage_event=usage_event,
message=f"""\
<red>Could not find Checkpoint `{checkpoint_name}` (or its configuration is invalid).</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your Checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new Checkpoint""",
) | [
"def",
"load_checkpoint",
"(",
"context",
":",
"DataContext",
",",
"checkpoint_name",
":",
"str",
",",
"usage_event",
":",
"str",
",",
")",
"->",
"Union",
"[",
"Checkpoint",
",",
"LegacyCheckpoint",
"]",
":",
"try",
":",
"checkpoint",
":",
"Union",
"[",
"Checkpoint",
",",
"LegacyCheckpoint",
"]",
"=",
"context",
".",
"get_checkpoint",
"(",
"name",
"=",
"checkpoint_name",
")",
"return",
"checkpoint",
"except",
"(",
"ge_exceptions",
".",
"CheckpointNotFoundError",
",",
"ge_exceptions",
".",
"InvalidCheckpointConfigError",
",",
")",
":",
"exit_with_failure_message_and_stats",
"(",
"data_context",
"=",
"context",
",",
"usage_event",
"=",
"usage_event",
",",
"message",
"=",
"f\"\"\"\\\n<red>Could not find Checkpoint `{checkpoint_name}` (or its configuration is invalid).</red> Try running:\n - `<green>great_expectations checkpoint list</green>` to verify your Checkpoint exists\n - `<green>great_expectations checkpoint new</green>` to configure a new Checkpoint\"\"\"",
",",
")"
] | [
340,
0
] | [
362,
9
] | python | en | ['en', 'en', 'en'] | True |
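
And a final sketch for load_checkpoint, which returns the Checkpoint object itself rather than running it; the names are invented.

checkpoint = load_checkpoint(
    context=context,
    checkpoint_name="nightly_checkpoint",
    usage_event="cli.checkpoint.script",  # hypothetical event name
)
print(checkpoint.name)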