| column | type |
|---|---|
| identifier | string (length 1–155) |
| parameters | string (length 2–6.09k) |
| docstring | string (length 11–63.4k) |
| docstring_summary | string (length 0–63.4k) |
| function | string (length 29–99.8k) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (length 2–7) |
| docstring_language_predictions | string (length 18–23) |
| is_langid_reliable | string (2 classes) |

identifier: find_container_traits
parameters: (cls_or_string)
function:

```python
def find_container_traits(cls_or_string):
    """
    Find the container traits type of a declaration.

    Args:
        cls_or_string (str | declarations.declaration_t): a string

    Returns:
        declarations.container_traits: a container traits
    """
    if utils.is_str(cls_or_string):
        if not templates.is_instantiation(cls_or_string):
            return None
        name = templates.name(cls_or_string)
        if name.startswith('std::'):
            name = name[len('std::'):]
        if name.startswith('std::tr1::'):
            name = name[len('std::tr1::'):]
        for cls_traits in all_container_traits:
            if cls_traits.name() == name:
                return cls_traits
    else:
        if isinstance(cls_or_string, class_declaration.class_types):
            # Look in the cache.
            if cls_or_string.cache.container_traits is not None:
                return cls_or_string.cache.container_traits

        # Look for a container traits
        for cls_traits in all_container_traits:
            if cls_traits.is_my_case(cls_or_string):
                # Store in the cache
                if isinstance(cls_or_string, class_declaration.class_types):
                    cls_or_string.cache.container_traits = cls_traits
                return cls_traits
```

start_point: [697, 0] | end_point: [732, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

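A brief usage sketch of the function above; it assumes pygccxml re-exports `find_container_traits` from its `declarations` package, and the template string is only an illustration.

```python
# Hedged sketch: resolve container traits for a template instantiation string.
from pygccxml import declarations

traits = declarations.find_container_traits("std::vector<int, std::allocator<int>>")
if traits is not None:
    print(traits.name())  # expected to print "vector"
```
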
identifier: container_traits_impl_t.__init__
parameters: (self, container_name, element_type_index, element_type_typedef, eraser, key_type_index=None, key_type_typedef=None, unordered_maps_and_sets=False)
function:

```python
def __init__(
        self,
        container_name,
        element_type_index,
        element_type_typedef,
        eraser,
        key_type_index=None,
        key_type_typedef=None,
        unordered_maps_and_sets=False):
    """
    :param container_name: std container name
    :param element_type_index: position of value\\mapped type within
        template arguments list
    :param element_type_typedef: class typedef to the value\\mapped type
    :param key_type_index: position of key type within template arguments
        list
    :param key_type_typedef: class typedef to the key type
    """
    self._name = container_name
    self.element_type_index = element_type_index
    self.element_type_typedef = element_type_typedef
    self.key_type_index = key_type_index
    self.key_type_typedef = key_type_typedef
    self.unordered_maps_and_sets = unordered_maps_and_sets
    # Get the method from defaults_eraser using its name
    self.remove_defaults_impl = getattr(
        defaults_eraser(unordered_maps_and_sets), eraser)
```

start_point: [342, 4] | end_point: [369, 61] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

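For context, a sketch of how one of these traits entries might be registered. The eraser name `'erase_allocator'` and the registration style are assumptions about pygccxml's internals, not shown in this row.

```python
# Hypothetical registration of the std::list traits (names assumed):
list_traits = container_traits_impl_t(
    'list',             # std container name
    0,                  # the value type is the first template argument
    'value_type',       # class typedef naming the value type
    'erase_allocator')  # defaults_eraser method used by remove_defaults()
```
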
identifier: container_traits_impl_t.get_container_or_none
parameters: (self, type_)
function:

```python
def get_container_or_none(self, type_):
    """
    Returns reference to the class declaration or None.
    """
    type_ = type_traits.remove_alias(type_)
    type_ = type_traits.remove_cv(type_)

    utils.loggers.queries_engine.debug(
        "Container traits: cleaned up search %s", type_)

    if isinstance(type_, cpptypes.declarated_t):
        cls_declaration = type_traits.remove_alias(type_.declaration)
    elif isinstance(type_, class_declaration.class_t):
        cls_declaration = type_
    elif isinstance(type_, class_declaration.class_declaration_t):
        cls_declaration = type_
    else:
        utils.loggers.queries_engine.debug(
            "Container traits: returning None, type not known\n")
        return

    if not cls_declaration.name.startswith(self.name() + '<'):
        utils.loggers.queries_engine.debug(
            "Container traits: returning None, " +
            "declaration starts with " + self.name() + '<\n')
        return

    # When using libstdc++, some container traits are defined in
    # std::tr1::. See remove_template_defaults_tester.py.
    # In this case the is_defined_in_xxx test needs to be done
    # on the parent
    decl = cls_declaration
    if isinstance(type_, class_declaration.class_declaration_t):
        is_ns = isinstance(type_.parent, namespace.namespace_t)
        if is_ns and type_.parent.name == "tr1":
            decl = cls_declaration.parent
    elif isinstance(type_, cpptypes.declarated_t):
        is_ns = isinstance(type_.declaration.parent, namespace.namespace_t)
        if is_ns and type_.declaration.parent.name == "tr1":
            decl = cls_declaration.parent

    for ns in std_namespaces:
        if traits_impl_details.impl_details.is_defined_in_xxx(ns, decl):
            utils.loggers.queries_engine.debug(
                "Container traits: get_container_or_none() will return " +
                cls_declaration.name)
            # The is_defined_in_xxx check is done on decl, but we return
            # the original declaration so that the rest of the algorithm
            # is able to work with it.
            return cls_declaration

    # This should not happen
    utils.loggers.queries_engine.debug(
        "Container traits: get_container_or_none() will return None\n")
```

start_point: [374, 4] | end_point: [429, 75] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: container_traits_impl_t.is_my_case
parameters: (self, type_)
function:

```python
def is_my_case(self, type_):
    """
    Checks whether the type is a STD container or not.
    """
    return bool(self.get_container_or_none(type_))
```

start_point: [431, 4] | end_point: [437, 54] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: container_traits_impl_t.class_declaration
parameters: (self, type_)
function:

```python
def class_declaration(self, type_):
    """
    Returns reference to the class declaration.
    """
    utils.loggers.queries_engine.debug(
        "Container traits: searching class declaration for %s", type_)

    cls_declaration = self.get_container_or_none(type_)

    if not cls_declaration:
        raise TypeError(
            'Type "%s" is not instantiation of std::%s' %
            (type_.decl_string, self.name()))
    return cls_declaration
```

start_point: [439, 4] | end_point: [453, 30] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: container_traits_impl_t.element_type
parameters: (self, type_)
function:

```python
def element_type(self, type_):
    """returns reference to the class value\\mapped type declaration"""
    return self.__find_xxx_type(
        type_,
        self.element_type_index,
        self.element_type_typedef,
        'container_element_type')
```

start_point: [487, 4] | end_point: [493, 37] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: container_traits_impl_t.key_type
parameters: (self, type_)
function:

```python
def key_type(self, type_):
    """returns reference to the class key type declaration"""
    if not self.is_mapping(type_):
        raise TypeError(
            'Type "%s" is not "mapping" container' %
            str(type_))
    return self.__find_xxx_type(
        type_,
        self.key_type_index,
        self.key_type_typedef,
        'container_key_type')
```

start_point: [495, 4] | end_point: [505, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

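A hedged sketch of how `element_type` and `key_type` pair up for a mapping container; `declarations.map_traits` and `map_decl` are assumed names, not taken from these rows.

```python
# map_decl is assumed to be a class declaration for some std::map< K, V >.
value_decl = declarations.map_traits.element_type(map_decl)  # mapped type V
key_decl = declarations.map_traits.key_type(map_decl)        # key type K
```
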
identifier: container_traits_impl_t.remove_defaults
parameters: (self, type_or_string)
function:

```python
def remove_defaults(self, type_or_string):
    """
    Removes template defaults from a templated class instantiation.

    For example:

    .. code-block:: c++

        std::vector< int, std::allocator< int > >

    will become:

    .. code-block:: c++

        std::vector< int >
    """
    name = type_or_string
    if not utils.is_str(type_or_string):
        name = self.class_declaration(type_or_string).name
    if not self.remove_defaults_impl:
        return name
    no_defaults = self.remove_defaults_impl(name)
    if not no_defaults:
        return name
    else:
        return no_defaults
```

start_point: [507, 4] | end_point: [532, 30] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

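A short sketch of the behaviour documented above; `declarations.vector_traits` is assumed to be the registered traits object for `std::vector`.

```python
from pygccxml import declarations

name = declarations.vector_traits.remove_defaults(
    "std::vector< int, std::allocator< int > >")
# name is expected to be "std::vector< int >"
```
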
identifier: linker_t.instance
parameters: (self, inst)
function:

```python
def instance(self, inst):
    """
    Called by __parse_xml_file in source_reader.
    """
    self.__inst = inst

    # use inst, to reduce attribute access time
    if isinstance(inst, declarations.declaration_t) and \
            inst.location is not None and \
            inst.location.file_name != '':
        inst.location.file_name = self.__files[inst.location.file_name]
```

start_point: [34, 4] | end_point: [46, 75] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: CIDRRange.__init__
parameters: (self, spec: str)
function:

```python
def __init__(self, spec: str) -> None:
    """
    Initialize a CIDRRange from a spec, which can look like any of:

    127.0.0.1       -- an exact IPv4 match
    ::1             -- an exact IPv6 match
    192.168.0.0/16  -- an IPv4 range
    2001:2000::/64  -- an IPv6 range

    If the prefix is not a valid IP address, or if the prefix length
    isn't a valid length for the class of IP address, the CIDRRange
    object will evaluate False, with information about the error in
    self.error.

    :param spec: string specifying the CIDR block in question
    """
    self.error: Optional[str] = None
    self.address: Optional[str] = None
    self.prefix_len: Optional[int] = None

    prefix: Optional[str] = None
    pfx_len: Optional[int] = None
    addr: Optional[Union[IPv4Address, IPv6Address]] = None

    if '/' in spec:
        # CIDR range! Try to separate the address and its length.
        address, lenstr = spec.split('/', 1)

        try:
            pfx_len = int(lenstr)
        except ValueError:
            self.error = f"CIDR range {spec} has an invalid length, ignoring"
            return
    else:
        address = spec

    try:
        addr = ip_address(address)
    except ValueError:
        pass

    if addr is None:
        self.error = f"Invalid IP address {address}"
        return

    if pfx_len is None:
        pfx_len = addr.max_prefixlen
    elif pfx_len > addr.max_prefixlen:
        self.error = f"Invalid prefix length for IPv{addr.version} address {address}/{pfx_len}"
        return

    # Convert the parsed address to a string, so that any normalization
    # appropriate to the IP version can happen.
    self.address = str(addr)
    self.prefix_len = pfx_len
```

start_point: [10, 4] | end_point: [65, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

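A minimal sketch exercising the constructor above; only the `CIDRRange` class itself is used (its import path depends on the repository layout and is omitted here).

```python
# Valid IPv4 range: parses cleanly and evaluates True.
r = CIDRRange("192.168.0.0/16")
assert bool(r) and r.address == "192.168.0.0" and r.prefix_len == 16

# Prefix length too long for IPv4: evaluates False, with the reason in .error.
bad = CIDRRange("10.0.0.0/64")
assert not bad and "Invalid prefix length" in bad.error
```
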
identifier: CIDRRange.__bool__
parameters: (self)
function:

```python
def __bool__(self) -> bool:
    """
    A CIDRRange will evaluate as True IFF there is no error, the address
    is not None, and the prefix_len is not None.
    """
    return ((not self.error) and
            (self.address is not None) and
            (self.prefix_len is not None))
```

start_point: [67, 4] | end_point: [75, 46] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: CIDRRange.as_dict
parameters: (self)
function:

```python
def as_dict(self) -> dict:
    """
    Return a dictionary version of a CIDRRange, suitable for use in
    an Envoy config as an envoy.api.v3.core.CidrRange.
    """
    return {
        "address_prefix": self.address,
        "prefix_len": self.prefix_len
    }
```

start_point: [83, 4] | end_point: [92, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

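Continuing the sketch above, `as_dict` produces the shape described in the docstring:

```python
CIDRRange("::1").as_dict()
# -> {"address_prefix": "::1", "prefix_len": 128}
```
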
identifier: UserConfigurableProfiler.__init__
parameters: (self, profile_dataset, excluded_expectations: list = None, ignored_columns: list = None, not_null_only: bool = False, primary_or_compound_key: list = False, semantic_types_dict: dict = None, table_expectations_only: bool = False, value_set_threshold: str = "MANY")
function:

```python
def __init__(
    self,
    profile_dataset,
    excluded_expectations: list = None,
    ignored_columns: list = None,
    not_null_only: bool = False,
    primary_or_compound_key: list = False,
    semantic_types_dict: dict = None,
    table_expectations_only: bool = False,
    value_set_threshold: str = "MANY",
):
    """
    The UserConfigurableProfiler is used to build an expectation suite from a dataset. The profiler may be
    instantiated with or without a config. The config may contain a semantic_types dict or not. Once a profiler is
    instantiated, if config items change, a new profiler will be needed.

    Write an entry on how to use the profiler for the GE docs site

    Args:
        profile_dataset: A Great Expectations Dataset or Validator object
        excluded_expectations: A list of expectations to not include in the suite
        ignored_columns: A list of columns for which you would like to NOT create expectations
        not_null_only: Boolean, default False. By default, each column is evaluated for nullity. If the column
            values contain fewer than 50% null values, then the profiler will add
            `expect_column_values_to_not_be_null`; if greater than 50% it will add
            `expect_column_values_to_be_null`. If not_null_only is set to True, the profiler will add a
            not_null expectation irrespective of the percent nullity (and therefore will not add an
            `expect_column_values_to_be_null`)
        primary_or_compound_key: A list containing one or more columns which are a dataset's primary or
            compound key. This will create an `expect_column_values_to_be_unique` or
            `expect_compound_columns_to_be_unique` expectation. This will occur even if one or more of the
            primary_or_compound_key columns are specified in ignored_columns
        semantic_types_dict: A dictionary where the keys are available semantic_types (see profiler.base.profiler_semantic_types)
            and the values are lists of columns for which you would like to create semantic_type specific
            expectations e.g.:
            "semantic_types": { "value_set": ["state","country"], "numeric":["age", "amount_due"]}
        table_expectations_only: Boolean, default False. If True, this will only create the two table level expectations
            available to this profiler (`expect_table_columns_to_match_ordered_list` and
            `expect_table_row_count_to_be_between`). If a primary_or_compound key is specified, it will create
            a uniqueness expectation for that column as well
        value_set_threshold: Takes a string from the following ordered list - "none", "one", "two",
            "very_few", "few", "many", "very_many", "unique". When the profiler runs without a semantic_types
            dict, each column is profiled for cardinality. This threshold determines the greatest cardinality
            for which to add `expect_column_values_to_be_in_set`. For example, if value_set_threshold is set to
            "unique", it will add a value_set expectation for every included column. If set to "few", it will
            add a value_set expectation for columns whose cardinality is one of "one", "two", "very_few" or
            "few". The default value is "many". For the purposes of comparing whether two tables are identical,
            it might make the most sense to set this to "unique"
    """
    self.column_info = {}
    self.profile_dataset = profile_dataset
    assert isinstance(self.profile_dataset, (Dataset, Validator, Batch))

    if isinstance(self.profile_dataset, Batch):
        self.profile_dataset = Validator(
            execution_engine=self.profile_dataset.data.execution_engine,
            batches=[self.profile_dataset],
        )
        self.all_table_columns = self.profile_dataset.get_metric(
            MetricConfiguration("table.columns", dict())
        )
    elif isinstance(self.profile_dataset, Validator):
        self.all_table_columns = self.profile_dataset.get_metric(
            MetricConfiguration("table.columns", dict())
        )
    else:
        self.all_table_columns = self.profile_dataset.get_table_columns()

    self.semantic_types_dict = semantic_types_dict
    assert isinstance(self.semantic_types_dict, (dict, type(None)))

    self.ignored_columns = ignored_columns or []
    assert isinstance(self.ignored_columns, list)

    self.excluded_expectations = excluded_expectations or []
    assert isinstance(self.excluded_expectations, list)

    assert isinstance(
        value_set_threshold, str
    ), "value_set_threshold must be a string"
    self.value_set_threshold = value_set_threshold.upper()
    assert (
        self.value_set_threshold in OrderedProfilerCardinality.__members__
    ), f"value_set_threshold must be one of {[i for i in OrderedProfilerCardinality.__members__]}"

    self.not_null_only = not_null_only
    assert isinstance(self.not_null_only, bool)

    self.table_expectations_only = table_expectations_only
    assert isinstance(self.table_expectations_only, bool)
    if self.table_expectations_only is True:
        logger.info(
            "table_expectations_only is set to True. When used to build a suite, this profiler will ignore all "
            "columns and create expectations only at the table level. If you would also like to create expectations "
            "at the column level, you can instantiate a new profiler with table_expectations_only set to False"
        )

    self.primary_or_compound_key = primary_or_compound_key or []
    assert isinstance(self.primary_or_compound_key, list)

    if self.table_expectations_only:
        self.ignored_columns = self.all_table_columns

    if self.primary_or_compound_key:
        for column in self.primary_or_compound_key:
            if column not in self.all_table_columns:
                raise ValueError(
                    f"Column {column} not found. Please ensure that this column is in the {type(profile_dataset).__name__} "
                    f"if you would like to use it as a primary_or_compound_key."
                )

    included_columns = [
        column_name
        for column_name in self.all_table_columns
        if column_name not in self.ignored_columns
    ]

    for column_name in included_columns:
        self._add_column_cardinality_to_column_info(
            self.profile_dataset, column_name
        )
        self._add_column_type_to_column_info(self.profile_dataset, column_name)

    if self.semantic_types_dict is not None:
        self._validate_semantic_types_dict(self.profile_dataset)
        for column_name in included_columns:
            self._add_semantic_types_by_column_from_config_to_column_info(
                column_name
            )

    self.semantic_type_functions = {
        "DATETIME": self._build_expectations_datetime,
        "NUMERIC": self._build_expectations_numeric,
        "STRING": self._build_expectations_string,
        "VALUE_SET": self._build_expectations_value_set,
        "BOOLEAN": self._build_expectations_value_set,
    }
```

start_point: [58, 4] | end_point: [192, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

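A hedged sketch of instantiating the profiler described above. The import path follows Great Expectations' usual layout, while `validator` and the column names are illustrative placeholders.

```python
from great_expectations.profile.user_configurable_profiler import (
    UserConfigurableProfiler,
)

# `validator` is a Validator/Dataset/Batch obtained elsewhere (assumed).
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    ignored_columns=["internal_id"],
    primary_or_compound_key=["order_id"],
    semantic_types_dict={
        "numeric": ["age", "amount_due"],
        "value_set": ["state", "country"],
    },
)
```
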
identifier: UserConfigurableProfiler.build_suite
parameters: (self)
function:

```python
def build_suite(self):
    """
    User-facing expectation-suite building function. Works with an instantiated UserConfigurableProfiler object.

    Args:

    Returns:
        An expectation suite built either with or without a semantic_types dict
    """
    if len(self.profile_dataset.get_expectation_suite().expectations) > 0:
        # noinspection PyProtectedMember
        suite_name = self.profile_dataset._expectation_suite.expectation_suite_name
        self.profile_dataset._expectation_suite = ExpectationSuite(suite_name)

    if self.semantic_types_dict:
        return self._build_expectation_suite_from_semantic_types_dict()

    return self._profile_and_build_expectation_suite()
```

start_point: [194, 4] | end_point: [211, 58] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

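Building and persisting the suite might then look like this; the `context` DataContext and the suite name are assumptions, not shown in these rows.

```python
suite = profiler.build_suite()
context.save_expectation_suite(
    expectation_suite=suite,
    expectation_suite_name="orders.profiled",
)
```
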
identifier: UserConfigurableProfiler._build_expectation_suite_from_semantic_types_dict
parameters: (self)
function:

```python
def _build_expectation_suite_from_semantic_types_dict(self):
    """
    Uses a semantic_type dict to determine which expectations to add to the suite, then builds the suite

    Args:

    Returns:
        An expectation suite built from a semantic_types dict
    """
    if not self.semantic_types_dict:
        raise ValueError(
            "A config with a semantic_types dict must be included in order to use this profiler."
        )

    self._build_expectations_table(self.profile_dataset)

    if self.value_set_threshold:
        logger.info(
            "Using this profiler with a semantic_types dict will ignore the value_set_threshold parameter. If "
            "you would like to include value_set expectations, you can include a 'value_set' entry in your "
            "semantic_types dict with any columns for which you would like a value_set expectation, or you can "
            "remove the semantic_types dict from the config."
        )

    if self.primary_or_compound_key:
        self._build_expectations_primary_or_compound_key(
            self.profile_dataset, self.primary_or_compound_key
        )

    with tqdm(
        desc="Profiling Columns", total=len(self.column_info), delay=5
    ) as pbar:
        for column_name, column_info in self.column_info.items():
            pbar.set_postfix_str(f"Column={column_name}")
            semantic_types = column_info.get("semantic_types")
            for semantic_type in semantic_types:
                semantic_type_fn = self.semantic_type_functions.get(semantic_type)
                semantic_type_fn(
                    profile_dataset=self.profile_dataset, column=column_name
                )
            self._build_expectations_for_all_column_types(
                self.profile_dataset, column_name
            )
            pbar.update()

    expectation_suite = self._build_column_description_metadata(
        self.profile_dataset
    )
    self._display_suite_by_column(suite=expectation_suite)
    return expectation_suite
```

start_point: [213, 4] | end_point: [260, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: UserConfigurableProfiler._profile_and_build_expectation_suite
parameters: (self)
function:

```python
def _profile_and_build_expectation_suite(self):
    """
    Profiles the provided dataset to determine which expectations to add to the suite, then builds the suite

    Args:

    Returns:
        An expectation suite built after profiling the dataset
    """
    if self.primary_or_compound_key:
        self._build_expectations_primary_or_compound_key(
            profile_dataset=self.profile_dataset,
            column_list=self.primary_or_compound_key,
        )

    self._build_expectations_table(profile_dataset=self.profile_dataset)

    with tqdm(desc="Profiling", total=len(self.column_info), delay=5) as pbar:
        for column_name, column_info in self.column_info.items():
            pbar.set_postfix_str(f"Column={column_name}")
            data_type = column_info.get("type")
            cardinality = column_info.get("cardinality")

            if data_type in ("FLOAT", "INT", "NUMERIC"):
                self._build_expectations_numeric(
                    profile_dataset=self.profile_dataset,
                    column=column_name,
                )

            if data_type == "DATETIME":
                self._build_expectations_datetime(
                    profile_dataset=self.profile_dataset,
                    column=column_name,
                )

            if (
                OrderedProfilerCardinality[self.value_set_threshold]
                >= OrderedProfilerCardinality[cardinality]
            ):
                self._build_expectations_value_set(
                    profile_dataset=self.profile_dataset, column=column_name
                )

            self._build_expectations_for_all_column_types(
                profile_dataset=self.profile_dataset, column=column_name
            )
            pbar.update()

    expectation_suite = self._build_column_description_metadata(
        self.profile_dataset
    )
    self._display_suite_by_column(
        suite=expectation_suite
    )  # include in the actual profiler
    return expectation_suite
```

start_point: [262, 4] | end_point: [313, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: UserConfigurableProfiler._validate_semantic_types_dict
parameters: (self, profile_dataset)
function:

```python
def _validate_semantic_types_dict(self, profile_dataset):
    """
    Validates a semantic_types dict to ensure correct formatting, that all semantic_types are recognized, and that
    the semantic_types align with the column data types

    Args:
        profile_dataset: A GE dataset
        config: A config dictionary

    Returns:
        The validated semantic_types dictionary
    """
    if not isinstance(self.semantic_types_dict, dict):
        raise ValueError(
            f"The semantic_types dict in the config must be a dictionary, but is currently a "
            f"{type(self.semantic_types_dict)}. Please reformat."
        )
    for k, v in self.semantic_types_dict.items():
        assert isinstance(v, list), (
            "Entries in semantic type dict must be lists of column names e.g. "
            "{'semantic_types': {'numeric': ['number_of_transactions']}}"
        )
        if k.upper() not in profiler_semantic_types:
            raise ValueError(
                f"{k} is not a recognized semantic_type. Please only include one of "
                f"{profiler_semantic_types}"
            )

    selected_columns = [
        column
        for column_list in self.semantic_types_dict.values()
        for column in column_list
    ]
    if selected_columns:
        for column in selected_columns:
            if column not in self.all_table_columns:
                raise ProfilerError(f"Column {column} does not exist.")
            elif column in self.ignored_columns:
                raise ValueError(
                    f"Column {column} is specified in both the semantic_types_dict and the list of "
                    f"ignored columns. Please remove one of these entries to proceed."
                )

    for semantic_type, column_list in self.semantic_types_dict.items():
        for column_name in column_list:
            processed_column = self.column_info.get(column_name)
            if semantic_type == "datetime":
                assert processed_column.get("type") in ("DATETIME", "STRING",), (
                    f"Column {column_name} must be a datetime column or a string but appears to be "
                    f"{processed_column.get('type')}"
                )
            elif semantic_type == "numeric":
                assert processed_column.get("type") in (
                    "INT",
                    "FLOAT",
                    "NUMERIC",
                ), f"Column {column_name} must be an int or a float but appears to be {processed_column.get('type')}"
            elif semantic_type in ("STRING", "VALUE_SET"):
                pass

    return self.semantic_types_dict
```

start_point: [315, 4] | end_point: [374, 39] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

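For reference, a dict that would pass the validation above, assuming the named columns exist, are not ignored, and have matching data types (all names here are illustrative):

```python
semantic_types_dict = {
    "numeric": ["age", "amount_due"],  # must be INT/FLOAT/NUMERIC columns
    "datetime": ["created_at"],        # must be DATETIME or STRING columns
    "value_set": ["state"],            # value_set expectations for these columns
}
```
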
identifier: UserConfigurableProfiler._add_column_type_to_column_info
parameters: (self, profile_dataset, column_name)
function:

```python
def _add_column_type_to_column_info(self, profile_dataset, column_name):
    """
    Adds the data type of a column to the column_info dictionary on self

    Args:
        profile_dataset: A GE dataset
        column_name: The name of the column for which to retrieve the data type

    Returns:
        The type of the column
    """
    if "expect_column_values_to_be_in_type_list" in self.excluded_expectations:
        logger.info(
            "expect_column_values_to_be_in_type_list is in the excluded_expectations list. This "
            "expectation is required to establish column data, so it will be run and then removed from the "
            "expectation suite."
        )
    column_info_entry = self.column_info.get(column_name)
    if not column_info_entry:
        column_info_entry = {}
        self.column_info[column_name] = column_info_entry

    column_type = column_info_entry.get("type")
    if not column_type:
        column_type = self._get_column_type(profile_dataset, column_name)
        column_info_entry["type"] = column_type

    return column_type
```

start_point: [376, 4] | end_point: [401, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

UserConfigurableProfiler._get_column_type | (self, profile_dataset, column) |
Determines the data type of a column by evaluating the success of `expect_column_values_to_be_in_type_list`.
In the case of type Decimal, this data type is returned as NUMERIC, which contains the type lists for both INTs
and FLOATs.
The type_list expectation used here is removed, since it will need to be built once the build_suite function is
actually called. This is because calling build_suite wipes any existing expectations, so expectations called
during the init of the profiler do not persist.
Args:
profile_dataset: A GE dataset
column: The column for which to get the data type
Returns:
The data type of the specified column
|
Determines the data type of a column by evaluating the success of `expect_column_values_to_be_in_type_list`.
In the case of type Decimal, this data type is returned as NUMERIC, which contains the type lists for both INTs
and FLOATs. | def _get_column_type(self, profile_dataset, column):
"""
Determines the data type of a column by evaluating the success of `expect_column_values_to_be_in_type_list`.
In the case of type Decimal, this data type is returned as NUMERIC, which contains the type lists for both INTs
and FLOATs.
The type_list expectation used here is removed, since it will need to be built once the build_suite function is
actually called. This is because calling build_suite wipes any existing expectations, so expectations called
during the init of the profiler do not persist.
Args:
profile_dataset: A GE dataset
column: The column for which to get the data type
Returns:
The data type of the specified column
"""
# list of types is used to support pandas and sqlalchemy
type_ = None
try:
if (
profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
).success
and profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES))
).success
):
type_ = "NUMERIC"
elif profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
).success:
type_ = "INT"
elif profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES))
).success:
type_ = "FLOAT"
elif profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.STRING_TYPE_NAMES))
).success:
type_ = "STRING"
elif profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.BOOLEAN_TYPE_NAMES))
).success:
type_ = "BOOLEAN"
elif profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=sorted(list(ProfilerTypeMapping.DATETIME_TYPE_NAMES))
).success:
type_ = "DATETIME"
else:
type_ = "UNKNOWN"
except NotImplementedError:
type_ = "unknown"
if type_ == "NUMERIC":
profile_dataset.expect_column_values_to_be_in_type_list(
column,
type_list=sorted(list(ProfilerTypeMapping.INT_TYPE_NAMES))
+ sorted(list(ProfilerTypeMapping.FLOAT_TYPE_NAMES)),
)
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_type_list",
kwargs={"column": column},
)
)
return type_ | [
"def",
"_get_column_type",
"(",
"self",
",",
"profile_dataset",
",",
"column",
")",
":",
"# list of types is used to support pandas and sqlalchemy",
"type_",
"=",
"None",
"try",
":",
"if",
"(",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"INT_TYPE_NAMES",
")",
")",
")",
".",
"success",
"and",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"FLOAT_TYPE_NAMES",
")",
")",
")",
".",
"success",
")",
":",
"type_",
"=",
"\"NUMERIC\"",
"elif",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"INT_TYPE_NAMES",
")",
")",
")",
".",
"success",
":",
"type_",
"=",
"\"INT\"",
"elif",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"FLOAT_TYPE_NAMES",
")",
")",
")",
".",
"success",
":",
"type_",
"=",
"\"FLOAT\"",
"elif",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"STRING_TYPE_NAMES",
")",
")",
")",
".",
"success",
":",
"type_",
"=",
"\"STRING\"",
"elif",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"BOOLEAN_TYPE_NAMES",
")",
")",
")",
".",
"success",
":",
"type_",
"=",
"\"BOOLEAN\"",
"elif",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"DATETIME_TYPE_NAMES",
")",
")",
")",
".",
"success",
":",
"type_",
"=",
"\"DATETIME\"",
"else",
":",
"type_",
"=",
"\"UNKNOWN\"",
"except",
"NotImplementedError",
":",
"type_",
"=",
"\"unknown\"",
"if",
"type_",
"==",
"\"NUMERIC\"",
":",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"INT_TYPE_NAMES",
")",
")",
"+",
"sorted",
"(",
"list",
"(",
"ProfilerTypeMapping",
".",
"FLOAT_TYPE_NAMES",
")",
")",
",",
")",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_values_to_be_in_type_list\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
")",
"return",
"type_"
] | [
403,
4
] | [
477,
20
] | python | en | ['en', 'error', 'th'] | False |
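A minimal standalone sketch of the type-detection cascade above. It is decoupled from Great Expectations: the pandas dtype checks and the make_pandas_checker/classify_column helpers are illustrative assumptions, not the profiler's own machinery. A column that passes both the INT and FLOAT checks is reported as NUMERIC; otherwise the first matching kind wins, and anything unmatched falls through to UNKNOWN.

import pandas as pd

def make_pandas_checker(df):
    # Hypothetical helper: maps a coarse kind name to a pandas dtype test.
    tests = {
        "INT": pd.api.types.is_integer_dtype,
        "FLOAT": pd.api.types.is_float_dtype,
        "STRING": pd.api.types.is_string_dtype,
        "BOOLEAN": pd.api.types.is_bool_dtype,
        "DATETIME": pd.api.types.is_datetime64_any_dtype,
    }
    return lambda column, kind: bool(tests[kind](df[column]))

def classify_column(check_type, column):
    # Mirrors the cascade above: satisfying both the INT and FLOAT checks means
    # NUMERIC (e.g. Decimal columns); otherwise the first matching kind wins.
    if check_type(column, "INT") and check_type(column, "FLOAT"):
        return "NUMERIC"
    for kind in ("INT", "FLOAT", "STRING", "BOOLEAN", "DATETIME"):
        if check_type(column, kind):
            return kind
    return "UNKNOWN"

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
check = make_pandas_checker(df)
print(classify_column(check, "a"))  # INT
print(classify_column(check, "b"))  # STRING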
UserConfigurableProfiler._add_column_cardinality_to_column_info | (self, profile_dataset, column_name) |
Adds the cardinality of a column to the column_info dictionary on self
Args:
profile_dataset: A GE Dataset
column_name: The name of the column for which to add cardinality
Returns:
The cardinality of the column
|
Adds the cardinality of a column to the column_info dictionary on self
Args:
profile_dataset: A GE Dataset
column_name: The name of the column for which to add cardinality | def _add_column_cardinality_to_column_info(self, profile_dataset, column_name):
"""
Adds the cardinality of a column to the column_info dictionary on self
Args:
profile_dataset: A GE Dataset
column_name: The name of the column for which to add cardinality
Returns:
The cardinality of the column
"""
column_info_entry = self.column_info.get(column_name)
if not column_info_entry:
column_info_entry = {}
self.column_info[column_name] = column_info_entry
column_cardinality = column_info_entry.get("cardinality")
if not column_cardinality:
column_cardinality = self._get_column_cardinality(
profile_dataset, column_name
)
column_info_entry["cardinality"] = column_cardinality
# remove the expectations
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_unique_value_count_to_be_between",
kwargs={"column": column_name},
)
)
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_proportion_of_unique_values_to_be_between",
kwargs={"column": column_name},
)
)
return column_cardinality | [
"def",
"_add_column_cardinality_to_column_info",
"(",
"self",
",",
"profile_dataset",
",",
"column_name",
")",
":",
"column_info_entry",
"=",
"self",
".",
"column_info",
".",
"get",
"(",
"column_name",
")",
"if",
"not",
"column_info_entry",
":",
"column_info_entry",
"=",
"{",
"}",
"self",
".",
"column_info",
"[",
"column_name",
"]",
"=",
"column_info_entry",
"column_cardinality",
"=",
"column_info_entry",
".",
"get",
"(",
"\"cardinality\"",
")",
"if",
"not",
"column_cardinality",
":",
"column_cardinality",
"=",
"self",
".",
"_get_column_cardinality",
"(",
"profile_dataset",
",",
"column_name",
")",
"column_info_entry",
"[",
"\"cardinality\"",
"]",
"=",
"column_cardinality",
"# remove the expectations",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_unique_value_count_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column_name",
"}",
",",
")",
")",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_proportion_of_unique_values_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column_name",
"}",
",",
")",
")",
"return",
"column_cardinality"
] | [
479,
4
] | [
513,
33
] | python | en | ['en', 'error', 'th'] | False |
UserConfigurableProfiler._get_column_cardinality | (self, profile_dataset, column) |
Determines the cardinality of a column using the get_basic_column_cardinality method from
OrderedProfilerCardinality
Args:
profile_dataset: A GE Dataset
column: The column for which to get cardinality
Returns:
The cardinality of the specified column
|
Determines the cardinality of a column using the get_basic_column_cardinality method from
OrderedProfilerCardinality
Args:
profile_dataset: A GE Dataset
column: The column for which to get cardinality | def _get_column_cardinality(self, profile_dataset, column):
"""
Determines the cardinality of a column using the get_basic_column_cardinality method from
OrderedProfilerCardinality
Args:
profile_dataset: A GE Dataset
column: The column for which to get cardinality
Returns:
The cardinality of the specified column
"""
num_unique = None
pct_unique = None
try:
num_unique = profile_dataset.expect_column_unique_value_count_to_be_between(
column, None, None
).result["observed_value"]
pct_unique = (
profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
column, None, None
).result["observed_value"]
)
except KeyError: # if observed_value is not set
logger.error(
"Failed to get cardinality of column {:s} - continuing...".format(
column
)
)
# Previously, if we had 25 possible categories out of 1000 rows, this would come up as many, because of its
# percentage, so it was tweaked here, but is still experimental.
cardinality = OrderedProfilerCardinality.get_basic_column_cardinality(
num_unique, pct_unique
)
return cardinality.name | [
"def",
"_get_column_cardinality",
"(",
"self",
",",
"profile_dataset",
",",
"column",
")",
":",
"num_unique",
"=",
"None",
"pct_unique",
"=",
"None",
"try",
":",
"num_unique",
"=",
"profile_dataset",
".",
"expect_column_unique_value_count_to_be_between",
"(",
"column",
",",
"None",
",",
"None",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"pct_unique",
"=",
"(",
"profile_dataset",
".",
"expect_column_proportion_of_unique_values_to_be_between",
"(",
"column",
",",
"None",
",",
"None",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
")",
"except",
"KeyError",
":",
"# if observed_value value is not set",
"logger",
".",
"error",
"(",
"\"Failed to get cardinality of column {:s} - continuing...\"",
".",
"format",
"(",
"column",
")",
")",
"# Previously, if we had 25 possible categories out of 1000 rows, this would comes up as many, because of its",
"# percentage, so it was tweaked here, but is still experimental.",
"cardinality",
"=",
"OrderedProfilerCardinality",
".",
"get_basic_column_cardinality",
"(",
"num_unique",
",",
"pct_unique",
")",
"return",
"cardinality",
".",
"name"
] | [
515,
4
] | [
550,
31
] | python | en | ['en', 'error', 'th'] | False |
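The cardinality bucket is derived from two observed metrics: the distinct-value count and the proportion of unique values. The sketch below shows the general shape of such a bucketing; the bucket names echo the profiler's vocabulary, but the threshold values are illustrative assumptions, not the ones used by OrderedProfilerCardinality.

def basic_column_cardinality(num_unique, pct_unique):
    # Illustrative bucketing only -- the cutoffs below are assumptions.
    if not num_unique:
        return "NONE"
    if num_unique == 1:
        return "ONE"
    if pct_unique == 1.0:
        return "UNIQUE"
    if num_unique <= 10:
        return "VERY_FEW"
    if num_unique <= 100:
        return "FEW"
    return "MANY"

# 25 distinct values out of 1000 rows buckets on the absolute count here,
# rather than on the low unique percentage.
print(basic_column_cardinality(25, 0.025))  # FEW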
UserConfigurableProfiler._add_semantic_types_by_column_from_config_to_column_info | (self, column_name) |
Adds the semantic type of a column to the column_info dict on self, for display purposes after suite creation
Args:
column_name: The name of the column
Returns:
A list of semantic_types for a given column
|
Adds the semantic type of a column to the column_info dict on self, for display purposes after suite creation
Args:
column_name: The name of the column | def _add_semantic_types_by_column_from_config_to_column_info(self, column_name):
"""
Adds the semantic type of a column to the column_info dict on self, for display purposes after suite creation
Args:
column_name: The name of the column
Returns:
A list of semantic_types for a given column
"""
column_info_entry = self.column_info.get(column_name)
if not column_info_entry:
column_info_entry = {}
self.column_info[column_name] = column_info_entry
semantic_types = column_info_entry.get("semantic_types")
if not semantic_types:
assert isinstance(
self.semantic_types_dict, dict
), f"The semantic_types dict in the config must be a dictionary, but is currently a {type(self.semantic_types_dict)}. Please reformat."
semantic_types = []
for semantic_type, column_list in self.semantic_types_dict.items():
if column_name in column_list:
semantic_types.append(semantic_type.upper())
column_info_entry["semantic_types"] = semantic_types
if all(
i in column_info_entry.get("semantic_types")
for i in ["BOOLEAN", "VALUE_SET"]
):
logger.info(
f"Column {column_name} has both 'BOOLEAN' and 'VALUE_SET' specified as semantic_types."
f"As these are currently the same in function, the 'VALUE_SET' type will be removed."
)
column_info_entry["semantic_types"].remove("VALUE_SET")
self.column_info[column_name] = column_info_entry
return semantic_types | [
"def",
"_add_semantic_types_by_column_from_config_to_column_info",
"(",
"self",
",",
"column_name",
")",
":",
"column_info_entry",
"=",
"self",
".",
"column_info",
".",
"get",
"(",
"column_name",
")",
"if",
"not",
"column_info_entry",
":",
"column_info_entry",
"=",
"{",
"}",
"self",
".",
"column_info",
"[",
"column_name",
"]",
"=",
"column_info_entry",
"semantic_types",
"=",
"column_info_entry",
".",
"get",
"(",
"\"semantic_types\"",
")",
"if",
"not",
"semantic_types",
":",
"assert",
"isinstance",
"(",
"self",
".",
"semantic_types_dict",
",",
"dict",
")",
",",
"f\"The semantic_types dict in the config must be a dictionary, but is currently a {type(self.semantic_types_dict)}. Please reformat.\"",
"semantic_types",
"=",
"[",
"]",
"for",
"semantic_type",
",",
"column_list",
"in",
"self",
".",
"semantic_types_dict",
".",
"items",
"(",
")",
":",
"if",
"column_name",
"in",
"column_list",
":",
"semantic_types",
".",
"append",
"(",
"semantic_type",
".",
"upper",
"(",
")",
")",
"column_info_entry",
"[",
"\"semantic_types\"",
"]",
"=",
"semantic_types",
"if",
"all",
"(",
"i",
"in",
"column_info_entry",
".",
"get",
"(",
"\"semantic_types\"",
")",
"for",
"i",
"in",
"[",
"\"BOOLEAN\"",
",",
"\"VALUE_SET\"",
"]",
")",
":",
"logger",
".",
"info",
"(",
"f\"Column {column_name} has both 'BOOLEAN' and 'VALUE_SET' specified as semantic_types.\"",
"f\"As these are currently the same in function, the 'VALUE_SET' type will be removed.\"",
")",
"column_info_entry",
"[",
"\"semantic_types\"",
"]",
".",
"remove",
"(",
"\"VALUE_SET\"",
")",
"self",
".",
"column_info",
"[",
"column_name",
"]",
"=",
"column_info_entry",
"return",
"semantic_types"
] | [
552,
4
] | [
589,
29
] | python | en | ['en', 'error', 'th'] | False |
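For reference, the semantic_types_dict that the loop above iterates over maps semantic-type names to lists of column names (the column names below are hypothetical). The lookup then reduces to collecting the keys whose lists mention the column:

semantic_types_dict = {
    "numeric": ["fare_amount", "trip_distance"],
    "value_set": ["payment_type"],
    "datetime": ["pickup_datetime"],
}

def semantic_types_for(column_name, semantic_types_dict):
    # Same logic as the loop above, collapsed into a comprehension.
    return [
        semantic_type.upper()
        for semantic_type, column_list in semantic_types_dict.items()
        if column_name in column_list
    ]

print(semantic_types_for("payment_type", semantic_types_dict))  # ['VALUE_SET']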
UserConfigurableProfiler._build_column_description_metadata | (self, profile_dataset) |
Adds column description metadata to the suite on a Dataset object
Args:
profile_dataset: A GE Dataset
Returns:
An expectation suite with column description metadata
|
Adds column description metadata to the suite on a Dataset object
Args:
profile_dataset: A GE Dataset | def _build_column_description_metadata(self, profile_dataset):
"""
Adds column description metadata to the suite on a Dataset object
Args:
profile_dataset: A GE Dataset
Returns:
An expectation suite with column description metadata
"""
columns = self.all_table_columns
expectation_suite = profile_dataset.get_expectation_suite(
suppress_warnings=True, discard_failed_expectations=False
)
meta_columns = {}
for column in columns:
meta_columns[column] = {"description": ""}
if not expectation_suite.meta:
expectation_suite.meta = {"columns": meta_columns, "notes": {""}}
else:
expectation_suite.meta["columns"] = meta_columns
return expectation_suite | [
"def",
"_build_column_description_metadata",
"(",
"self",
",",
"profile_dataset",
")",
":",
"columns",
"=",
"self",
".",
"all_table_columns",
"expectation_suite",
"=",
"profile_dataset",
".",
"get_expectation_suite",
"(",
"suppress_warnings",
"=",
"True",
",",
"discard_failed_expectations",
"=",
"False",
")",
"meta_columns",
"=",
"{",
"}",
"for",
"column",
"in",
"columns",
":",
"meta_columns",
"[",
"column",
"]",
"=",
"{",
"\"description\"",
":",
"\"\"",
"}",
"if",
"not",
"expectation_suite",
".",
"meta",
":",
"expectation_suite",
".",
"meta",
"=",
"{",
"\"columns\"",
":",
"meta_columns",
",",
"\"notes\"",
":",
"{",
"\"\"",
"}",
"}",
"else",
":",
"expectation_suite",
".",
"meta",
"[",
"\"columns\"",
"]",
"=",
"meta_columns",
"return",
"expectation_suite"
] | [
591,
4
] | [
613,
32
] | python | en | ['en', 'error', 'th'] | False |
UserConfigurableProfiler._display_suite_by_column | (self, suite) |
Displays the expectations of a suite by column, along with the column cardinality, and semantic or data type so
that a user can easily see which expectations were created for which columns
Args:
suite: An ExpectationSuite
Returns:
The ExpectationSuite
|
Displays the expectations of a suite by column, along with the column cardinality, and semantic or data type so
that a user can easily see which expectations were created for which columns
Args:
suite: An ExpectationSuite | def _display_suite_by_column(self, suite):
"""
Displays the expectations of a suite by column, along with the column cardinality, and semantic or data type so
that a user can easily see which expectations were created for which columns
Args:
suite: An ExpectationSuite
Returns:
The ExpectationSuite
"""
expectations = suite.expectations
expectations_by_column = {}
for expectation in expectations:
domain = expectation["kwargs"].get("column") or "table_level_expectations"
if expectations_by_column.get(domain) is None:
expectations_by_column[domain] = [expectation]
else:
expectations_by_column[domain].append(expectation)
if not expectations_by_column:
print("No expectations included in suite.")
else:
print("Creating an expectation suite with the following expectations:\n")
if "table_level_expectations" in expectations_by_column:
table_level_expectations = expectations_by_column.pop(
"table_level_expectations"
)
print("Table-Level Expectations")
for expectation in sorted(
table_level_expectations, key=lambda x: x.expectation_type
):
print(expectation.expectation_type)
if expectations_by_column:
print("\nExpectations by Column")
contains_semantic_types = [
v for v in self.column_info.values() if v.get("semantic_types")
]
for column in sorted(expectations_by_column):
info_column = self.column_info.get(column) or {}
semantic_types = info_column.get("semantic_types") or "not_specified"
type_ = info_column.get("type")
cardinality = info_column.get("cardinality")
if len(contains_semantic_types) > 0:
type_string = f" | Semantic Type: {semantic_types[0] if len(semantic_types)==1 else semantic_types}"
elif type_:
type_string = f" | Column Data Type: {type_}"
else:
type_string = ""
if cardinality:
cardinality_string = f" | Cardinality: {cardinality}"
else:
cardinality_string = ""
column_string = (
f"Column Name: {column}{type_string or ''}{cardinality_string or ''}"
)
print(column_string)
for expectation in sorted(
expectations_by_column.get(column), key=lambda x: x.expectation_type
):
print(expectation.expectation_type)
print("\n")
return True | [
"def",
"_display_suite_by_column",
"(",
"self",
",",
"suite",
")",
":",
"expectations",
"=",
"suite",
".",
"expectations",
"expectations_by_column",
"=",
"{",
"}",
"for",
"expectation",
"in",
"expectations",
":",
"domain",
"=",
"expectation",
"[",
"\"kwargs\"",
"]",
".",
"get",
"(",
"\"column\"",
")",
"or",
"\"table_level_expectations\"",
"if",
"expectations_by_column",
".",
"get",
"(",
"domain",
")",
"is",
"None",
":",
"expectations_by_column",
"[",
"domain",
"]",
"=",
"[",
"expectation",
"]",
"else",
":",
"expectations_by_column",
"[",
"domain",
"]",
".",
"append",
"(",
"expectation",
")",
"if",
"not",
"expectations_by_column",
":",
"print",
"(",
"\"No expectations included in suite.\"",
")",
"else",
":",
"print",
"(",
"\"Creating an expectation suite with the following expectations:\\n\"",
")",
"if",
"\"table_level_expectations\"",
"in",
"expectations_by_column",
":",
"table_level_expectations",
"=",
"expectations_by_column",
".",
"pop",
"(",
"\"table_level_expectations\"",
")",
"print",
"(",
"\"Table-Level Expectations\"",
")",
"for",
"expectation",
"in",
"sorted",
"(",
"table_level_expectations",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"expectation_type",
")",
":",
"print",
"(",
"expectation",
".",
"expectation_type",
")",
"if",
"expectations_by_column",
":",
"print",
"(",
"\"\\nExpectations by Column\"",
")",
"contains_semantic_types",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"column_info",
".",
"values",
"(",
")",
"if",
"v",
".",
"get",
"(",
"\"semantic_types\"",
")",
"]",
"for",
"column",
"in",
"sorted",
"(",
"expectations_by_column",
")",
":",
"info_column",
"=",
"self",
".",
"column_info",
".",
"get",
"(",
"column",
")",
"or",
"{",
"}",
"semantic_types",
"=",
"info_column",
".",
"get",
"(",
"\"semantic_types\"",
")",
"or",
"\"not_specified\"",
"type_",
"=",
"info_column",
".",
"get",
"(",
"\"type\"",
")",
"cardinality",
"=",
"info_column",
".",
"get",
"(",
"\"cardinality\"",
")",
"if",
"len",
"(",
"contains_semantic_types",
")",
">",
"0",
":",
"type_string",
"=",
"f\" | Semantic Type: {semantic_types[0] if len(semantic_types)==1 else semantic_types}\"",
"elif",
"type_",
":",
"type_string",
"=",
"f\" | Column Data Type: {type_}\"",
"else",
":",
"type_string",
"=",
"\"\"",
"if",
"cardinality",
":",
"cardinality_string",
"=",
"f\" | Cardinality: {cardinality}\"",
"else",
":",
"cardinality_string",
"=",
"\"\"",
"column_string",
"=",
"(",
"f\"Column Name: {column}{type_string or ''}{cardinality_string or ''}\"",
")",
"print",
"(",
"column_string",
")",
"for",
"expectation",
"in",
"sorted",
"(",
"expectations_by_column",
".",
"get",
"(",
"column",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"expectation_type",
")",
":",
"print",
"(",
"expectation",
".",
"expectation_type",
")",
"print",
"(",
"\"\\n\"",
")",
"return",
"True"
] | [
615,
4
] | [
685,
19
] | python | en | ['en', 'error', 'th'] | False |
UserConfigurableProfiler._build_expectations_value_set | (self, profile_dataset, column, **kwargs) |
Adds a value_set expectation for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add an expectation
**kwargs:
Returns:
The GE Dataset
|
Adds a value_set expectation for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add an expectation
**kwargs: | def _build_expectations_value_set(self, profile_dataset, column, **kwargs):
"""
Adds a value_set expectation for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add an expectation
**kwargs:
Returns:
The GE Dataset
"""
if "expect_column_values_to_be_in_set" not in self.excluded_expectations:
value_set = profile_dataset.expect_column_distinct_values_to_be_in_set(
column, value_set=None, result_format="SUMMARY"
).result["observed_value"]
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_distinct_values_to_be_in_set",
kwargs={"column": column},
),
match_type="domain",
)
profile_dataset.expect_column_values_to_be_in_set(
column, value_set=value_set
)
return profile_dataset | [
"def",
"_build_expectations_value_set",
"(",
"self",
",",
"profile_dataset",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"expect_column_values_to_be_in_set\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"value_set",
"=",
"profile_dataset",
".",
"expect_column_distinct_values_to_be_in_set",
"(",
"column",
",",
"value_set",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_distinct_values_to_be_in_set\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"profile_dataset",
".",
"expect_column_values_to_be_in_set",
"(",
"column",
",",
"value_set",
"=",
"value_set",
")",
"return",
"profile_dataset"
] | [
687,
4
] | [
714,
30
] | python | en | ['en', 'error', 'th'] | False |
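The method above follows the profiler's recurring "observe, then pin" pattern: run a summary expectation to learn the distinct values, drop that scaffolding expectation, and assert membership in the observed set. Below is a pandas-only sketch of the same idea; the returned dict mimics an expectation configuration in spirit only and is not the library's object.

import pandas as pd

def build_value_set_assertion(series: pd.Series) -> dict:
    observed = sorted(series.dropna().unique().tolist())
    return {
        "expectation_type": "expect_column_values_to_be_in_set",
        "kwargs": {"column": series.name, "value_set": observed},
    }

s = pd.Series(["credit", "cash", "credit"], name="payment_type")
print(build_value_set_assertion(s))  # value_set == ['cash', 'credit']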
UserConfigurableProfiler._build_expectations_numeric | (self, profile_dataset, column, **kwargs) |
Adds a set of numeric expectations for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
|
Adds a set of numeric expectations for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs: | def _build_expectations_numeric(self, profile_dataset, column, **kwargs):
"""
Adds a set of numeric expectations for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
"""
# min
if "expect_column_min_to_be_between" not in self.excluded_expectations:
observed_min = profile_dataset.expect_column_min_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_min):
profile_dataset.expect_column_min_to_be_between(
column,
min_value=observed_min,
max_value=observed_min,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_min_to_be_between because observed value is nan: {observed_min}"
)
# max
if "expect_column_max_to_be_between" not in self.excluded_expectations:
observed_max = profile_dataset.expect_column_max_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_max):
profile_dataset.expect_column_max_to_be_between(
column,
min_value=observed_max,
max_value=observed_max,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_max_to_be_between because observed value is nan: {observed_max}"
)
# mean
if "expect_column_mean_to_be_between" not in self.excluded_expectations:
observed_mean = profile_dataset.expect_column_mean_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_mean):
profile_dataset.expect_column_mean_to_be_between(
column,
min_value=observed_mean,
max_value=observed_mean,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_mean_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_mean_to_be_between because observed value is nan: {observed_mean}"
)
# median
if "expect_column_median_to_be_between" not in self.excluded_expectations:
observed_median = profile_dataset.expect_column_median_to_be_between(
column, min_value=None, max_value=None, result_format="SUMMARY"
).result["observed_value"]
if not self._is_nan(observed_median):
profile_dataset.expect_column_median_to_be_between(
column,
min_value=observed_median,
max_value=observed_median,
)
else:
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_median_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_median_to_be_between because observed value is nan: {observed_median}"
)
if (
"expect_column_quantile_values_to_be_between"
not in self.excluded_expectations
):
if isinstance(profile_dataset, Dataset):
if isinstance(profile_dataset, PandasDataset):
allow_relative_error = "lower"
else:
allow_relative_error = (
profile_dataset.attempt_allowing_relative_error()
)
elif isinstance(profile_dataset, Validator):
if isinstance(profile_dataset.execution_engine, PandasExecutionEngine):
allow_relative_error = "lower"
if isinstance(profile_dataset.execution_engine, SparkDFExecutionEngine):
allow_relative_error = 0.0
if isinstance(
profile_dataset.execution_engine, SqlAlchemyExecutionEngine
):
allow_relative_error = attempt_allowing_relative_error(
profile_dataset.execution_engine.engine.dialect
)
quantile_result = profile_dataset.expect_column_quantile_values_to_be_between(
column,
quantile_ranges={
"quantiles": [0.05, 0.25, 0.5, 0.75, 0.95],
"value_ranges": [
[None, None],
[None, None],
[None, None],
[None, None],
[None, None],
],
},
# TODO: <Alex>ALEX -- Tal, could you please fix the issue in the next line?</Alex>
allow_relative_error=allow_relative_error,
result_format="SUMMARY",
)
if quantile_result.exception_info and (
quantile_result.exception_info["exception_traceback"]
or quantile_result.exception_info["exception_message"]
):
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_quantile_values_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(quantile_result.exception_info["exception_traceback"])
logger.debug(quantile_result.exception_info["exception_message"])
else:
profile_dataset.expect_column_quantile_values_to_be_between(
column,
quantile_ranges={
"quantiles": quantile_result.result["observed_value"][
"quantiles"
],
"value_ranges": [
[v, v]
for v in quantile_result.result["observed_value"]["values"]
],
},
allow_relative_error=allow_relative_error,
)
return profile_dataset | [
"def",
"_build_expectations_numeric",
"(",
"self",
",",
"profile_dataset",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"# min",
"if",
"\"expect_column_min_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"observed_min",
"=",
"profile_dataset",
".",
"expect_column_min_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"not",
"self",
".",
"_is_nan",
"(",
"observed_min",
")",
":",
"profile_dataset",
".",
"expect_column_min_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"observed_min",
",",
"max_value",
"=",
"observed_min",
",",
")",
"else",
":",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_min_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"f\"Skipping expect_column_min_to_be_between because observed value is nan: {observed_min}\"",
")",
"# max",
"if",
"\"expect_column_max_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"observed_max",
"=",
"profile_dataset",
".",
"expect_column_max_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"not",
"self",
".",
"_is_nan",
"(",
"observed_max",
")",
":",
"profile_dataset",
".",
"expect_column_max_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"observed_max",
",",
"max_value",
"=",
"observed_max",
",",
")",
"else",
":",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_max_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"f\"Skipping expect_column_max_to_be_between because observed value is nan: {observed_max}\"",
")",
"# mean",
"if",
"\"expect_column_mean_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"observed_mean",
"=",
"profile_dataset",
".",
"expect_column_mean_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"not",
"self",
".",
"_is_nan",
"(",
"observed_mean",
")",
":",
"profile_dataset",
".",
"expect_column_mean_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"observed_mean",
",",
"max_value",
"=",
"observed_mean",
",",
")",
"else",
":",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_mean_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"f\"Skipping expect_column_mean_to_be_between because observed value is nan: {observed_mean}\"",
")",
"# median",
"if",
"\"expect_column_median_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"observed_median",
"=",
"profile_dataset",
".",
"expect_column_median_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"not",
"self",
".",
"_is_nan",
"(",
"observed_median",
")",
":",
"profile_dataset",
".",
"expect_column_median_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"observed_median",
",",
"max_value",
"=",
"observed_median",
",",
")",
"else",
":",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_median_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"f\"Skipping expect_column_median_to_be_between because observed value is nan: {observed_median}\"",
")",
"if",
"(",
"\"expect_column_quantile_values_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"if",
"isinstance",
"(",
"profile_dataset",
",",
"Dataset",
")",
":",
"if",
"isinstance",
"(",
"profile_dataset",
",",
"PandasDataset",
")",
":",
"allow_relative_error",
"=",
"\"lower\"",
"else",
":",
"allow_relative_error",
"=",
"(",
"profile_dataset",
".",
"attempt_allowing_relative_error",
"(",
")",
")",
"elif",
"isinstance",
"(",
"profile_dataset",
",",
"Validator",
")",
":",
"if",
"isinstance",
"(",
"profile_dataset",
".",
"execution_engine",
",",
"PandasExecutionEngine",
")",
":",
"allow_relative_error",
"=",
"\"lower\"",
"if",
"isinstance",
"(",
"profile_dataset",
".",
"execution_engine",
",",
"SparkDFExecutionEngine",
")",
":",
"allow_relative_error",
"=",
"0.0",
"if",
"isinstance",
"(",
"profile_dataset",
".",
"execution_engine",
",",
"SqlAlchemyExecutionEngine",
")",
":",
"allow_relative_error",
"=",
"attempt_allowing_relative_error",
"(",
"profile_dataset",
".",
"execution_engine",
".",
"engine",
".",
"dialect",
")",
"quantile_result",
"=",
"profile_dataset",
".",
"expect_column_quantile_values_to_be_between",
"(",
"column",
",",
"quantile_ranges",
"=",
"{",
"\"quantiles\"",
":",
"[",
"0.05",
",",
"0.25",
",",
"0.5",
",",
"0.75",
",",
"0.95",
"]",
",",
"\"value_ranges\"",
":",
"[",
"[",
"None",
",",
"None",
"]",
",",
"[",
"None",
",",
"None",
"]",
",",
"[",
"None",
",",
"None",
"]",
",",
"[",
"None",
",",
"None",
"]",
",",
"[",
"None",
",",
"None",
"]",
",",
"]",
",",
"}",
",",
"# TODO: <Alex>ALEX -- Tal, could you please fix the issue in the next line?</Alex>",
"allow_relative_error",
"=",
"allow_relative_error",
",",
"result_format",
"=",
"\"SUMMARY\"",
",",
")",
"if",
"quantile_result",
".",
"exception_info",
"and",
"(",
"quantile_result",
".",
"exception_info",
"[",
"\"exception_traceback\"",
"]",
"or",
"quantile_result",
".",
"exception_info",
"[",
"\"exception_message\"",
"]",
")",
":",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_quantile_values_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"quantile_result",
".",
"exception_info",
"[",
"\"exception_traceback\"",
"]",
")",
"logger",
".",
"debug",
"(",
"quantile_result",
".",
"exception_info",
"[",
"\"exception_message\"",
"]",
")",
"else",
":",
"profile_dataset",
".",
"expect_column_quantile_values_to_be_between",
"(",
"column",
",",
"quantile_ranges",
"=",
"{",
"\"quantiles\"",
":",
"quantile_result",
".",
"result",
"[",
"\"observed_value\"",
"]",
"[",
"\"quantiles\"",
"]",
",",
"\"value_ranges\"",
":",
"[",
"[",
"v",
",",
"v",
"]",
"for",
"v",
"in",
"quantile_result",
".",
"result",
"[",
"\"observed_value\"",
"]",
"[",
"\"values\"",
"]",
"]",
",",
"}",
",",
"allow_relative_error",
"=",
"allow_relative_error",
",",
")",
"return",
"profile_dataset"
] | [
716,
4
] | [
893,
30
] | python | en | ['en', 'error', 'th'] | False |
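The quantile branch above pins each observed quantile as a degenerate [v, v] range, mirroring the way min, max, mean, and median are pinned to their observed values. A numpy-only sketch of how that payload is shaped (illustrative; not the Great Expectations backend):

import numpy as np

def pinned_quantile_ranges(values, quantiles=(0.05, 0.25, 0.5, 0.75, 0.95)):
    observed = np.quantile(np.asarray(values, dtype=float), quantiles)
    return {
        "quantiles": list(quantiles),
        # Each observed quantile becomes a [v, v] range, i.e. min == max.
        "value_ranges": [[float(v), float(v)] for v in observed],
    }

print(pinned_quantile_ranges(range(1, 101)))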
UserConfigurableProfiler._build_expectations_primary_or_compound_key | (
self, profile_dataset, column_list, **kwargs
) |
Adds a uniqueness expectation for a given column or set of columns
Args:
profile_dataset: A GE Dataset
column_list: A list containing one or more columns for which to add a uniqueness expectation
**kwargs:
Returns:
The GE Dataset
|
Adds a uniqueness expectation for a given column or set of columns
Args:
profile_dataset: A GE Dataset
column_list: A list containing one or more columns for which to add a uniqueness expectation
**kwargs: | def _build_expectations_primary_or_compound_key(
self, profile_dataset, column_list, **kwargs
):
"""
Adds a uniqueness expectation for a given column or set of columns
Args:
profile_dataset: A GE Dataset
column_list: A list containing one or more columns for which to add a uniqueness expectation
**kwargs:
Returns:
The GE Dataset
"""
# uniqueness
if (
len(column_list) > 1
and "expect_compound_columns_to_be_unique" not in self.excluded_expectations
):
if isinstance(profile_dataset, Validator) and not hasattr(
profile_dataset, "expect_compound_columns_to_be_unique"
):
# TODO: Remove this upon implementation of this expectation for V3
logger.warning(
"expect_compound_columns_to_be_unique is not currently available in the V3 (Batch Request) API. Specifying a compound key will not add any expectations. This will be updated when that expectation becomes available."
)
return profile_dataset
else:
profile_dataset.expect_compound_columns_to_be_unique(column_list)
elif len(column_list) < 1:
raise ValueError(
"When specifying a primary or compound key, column_list must not be empty"
)
else:
[column] = column_list
if "expect_column_values_to_be_unique" not in self.excluded_expectations:
profile_dataset.expect_column_values_to_be_unique(column)
return profile_dataset | [
"def",
"_build_expectations_primary_or_compound_key",
"(",
"self",
",",
"profile_dataset",
",",
"column_list",
",",
"*",
"*",
"kwargs",
")",
":",
"# uniqueness",
"if",
"(",
"len",
"(",
"column_list",
")",
">",
"1",
"and",
"\"expect_compound_columns_to_be_unique\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"if",
"isinstance",
"(",
"profile_dataset",
",",
"Validator",
")",
"and",
"not",
"hasattr",
"(",
"profile_dataset",
",",
"\"expect_compound_columns_to_be_unique\"",
")",
":",
"# TODO: Remove this upon implementation of this expectation for V3",
"logger",
".",
"warning",
"(",
"\"expect_compound_columns_to_be_unique is not currently available in the V3 (Batch Request) API. Specifying a compound key will not add any expectations. This will be updated when that expectation becomes available.\"",
")",
"return",
"profile_dataset",
"else",
":",
"profile_dataset",
".",
"expect_compound_columns_to_be_unique",
"(",
"column_list",
")",
"elif",
"len",
"(",
"column_list",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"When specifying a primary or compound key, column_list must not be empty\"",
")",
"else",
":",
"[",
"column",
"]",
"=",
"column_list",
"if",
"\"expect_column_values_to_be_unique\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"profile_dataset",
".",
"expect_column_values_to_be_unique",
"(",
"column",
")",
"return",
"profile_dataset"
] | [
895,
4
] | [
931,
30
] | python | en | ['en', 'error', 'th'] | False |
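A pandas-only sketch of the branch logic above: a single-column key maps to per-column uniqueness, while a multi-column key maps to compound uniqueness (checked here with DataFrame.duplicated). The DataFrame and column names are hypothetical.

import pandas as pd

def key_is_unique(df: pd.DataFrame, column_list) -> bool:
    if len(column_list) == 0:
        raise ValueError("column_list must not be empty")
    if len(column_list) == 1:
        [column] = column_list
        return not df[column].duplicated().any()
    return not df.duplicated(subset=list(column_list)).any()

df = pd.DataFrame({"user_id": [1, 1, 2], "event_id": [10, 11, 10]})
print(key_is_unique(df, ["user_id"]))              # False
print(key_is_unique(df, ["user_id", "event_id"]))  # True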
UserConfigurableProfiler._build_expectations_string | (self, profile_dataset, column, **kwargs) |
Adds a set of string expectations for a given column. Currently does not do anything.
With the 0.12 API there isn't a quick way to introspect for value_lengths - if we did that, we could build a
potentially useful value_lengths expectation here.
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
|
Adds a set of string expectations for a given column. Currently does not do anything.
With the 0.12 API there isn't a quick way to introspect for value_lengths - if we did that, we could build a
potentially useful value_lengths expectation here.
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs: | def _build_expectations_string(self, profile_dataset, column, **kwargs):
"""
Adds a set of string expectations for a given column. Currently does not do anything.
With the 0.12 API there isn't a quick way to introspect for value_lengths - if we did that, we could build a
potentially useful value_lengths expectation here.
Args:
profile_dataset: A GE Dataset
column: The column for which to add expectations
**kwargs:
Returns:
The GE Dataset
"""
if (
"expect_column_value_lengths_to_be_between"
not in self.excluded_expectations
):
pass
return profile_dataset | [
"def",
"_build_expectations_string",
"(",
"self",
",",
"profile_dataset",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"\"expect_column_value_lengths_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"pass",
"return",
"profile_dataset"
] | [
933,
4
] | [
953,
30
] | python | en | ['en', 'error', 'th'] | False |
UserConfigurableProfiler._build_expectations_datetime | (self, profile_dataset, column, **kwargs) |
Adds `expect_column_values_to_be_between` for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectation
**kwargs:
Returns:
The GE Dataset
|
Adds `expect_column_values_to_be_between` for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectation
**kwargs: | def _build_expectations_datetime(self, profile_dataset, column, **kwargs):
"""
Adds `expect_column_values_to_be_between` for a given column
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectation
**kwargs:
Returns:
The GE Dataset
"""
if "expect_column_values_to_be_between" not in self.excluded_expectations:
min_value = profile_dataset.expect_column_min_to_be_between(
column,
min_value=None,
max_value=None,
result_format="SUMMARY",
parse_strings_as_datetimes=True,
).result["observed_value"]
if min_value is not None:
try:
min_value = parse(min_value)
except TypeError:
pass
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_min_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
max_value = profile_dataset.expect_column_max_to_be_between(
column,
min_value=None,
max_value=None,
result_format="SUMMARY",
parse_strings_as_datetimes=True,
).result["observed_value"]
if max_value is not None:
try:
max_value = parse(max_value)
except TypeError:
pass
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
if min_value is not None or max_value is not None:
profile_dataset.expect_column_values_to_be_between(
column,
min_value=min_value,
max_value=max_value,
parse_strings_as_datetimes=True,
)
return profile_dataset | [
"def",
"_build_expectations_datetime",
"(",
"self",
",",
"profile_dataset",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"expect_column_values_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"min_value",
"=",
"profile_dataset",
".",
"expect_column_min_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
",",
"parse_strings_as_datetimes",
"=",
"True",
",",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"min_value",
"is",
"not",
"None",
":",
"try",
":",
"min_value",
"=",
"parse",
"(",
"min_value",
")",
"except",
"TypeError",
":",
"pass",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_min_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"max_value",
"=",
"profile_dataset",
".",
"expect_column_max_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"None",
",",
"max_value",
"=",
"None",
",",
"result_format",
"=",
"\"SUMMARY\"",
",",
"parse_strings_as_datetimes",
"=",
"True",
",",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"if",
"max_value",
"is",
"not",
"None",
":",
"try",
":",
"max_value",
"=",
"parse",
"(",
"max_value",
")",
"except",
"TypeError",
":",
"pass",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_max_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"if",
"min_value",
"is",
"not",
"None",
"or",
"max_value",
"is",
"not",
"None",
":",
"profile_dataset",
".",
"expect_column_values_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"min_value",
",",
"max_value",
"=",
"max_value",
",",
"parse_strings_as_datetimes",
"=",
"True",
",",
")",
"return",
"profile_dataset"
] | [
955,
4
] | [
1017,
30
] | python | en | ['en', 'error', 'th'] | False |
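A small sketch of the datetime-bounds handling above: the observed min and max may come back as strings, so each is parsed when possible (a TypeError means the value is already a datetime and is kept as-is), and the parsed pair then bounds the between expectation. This assumes python-dateutil is available, which is what the parse call above relies on.

from dateutil.parser import parse

def datetime_bounds(observed_min, observed_max):
    bounds = []
    for value in (observed_min, observed_max):
        if value is not None:
            try:
                value = parse(value)
            except TypeError:
                pass  # already a datetime-like object; keep as-is
        bounds.append(value)
    return tuple(bounds)

print(datetime_bounds("2021-01-01", "2021-12-31"))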
UserConfigurableProfiler._build_expectations_for_all_column_types | (
self, profile_dataset, column, **kwargs
) |
Adds these expectations for all included columns irrespective of type. Includes:
- `expect_column_values_to_not_be_null` (or `expect_column_values_to_be_null`)
- `expect_column_proportion_of_unique_values_to_be_between`
- `expect_column_values_to_be_in_type_list`
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectations
**kwargs:
Returns:
The GE Dataset
|
Adds these expectations for all included columns irrespective of type. Includes:
- `expect_column_values_to_not_be_null` (or `expect_column_values_to_be_null`)
- `expect_column_proportion_of_unique_values_to_be_between`
- `expect_column_values_to_be_in_type_list`
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectations
**kwargs: | def _build_expectations_for_all_column_types(
self, profile_dataset, column, **kwargs
):
"""
Adds these expectations for all included columns irrespective of type. Includes:
- `expect_column_values_to_not_be_null` (or `expect_column_values_to_be_null`)
- `expect_column_proportion_of_unique_values_to_be_between`
- `expect_column_values_to_be_in_type_list`
Args:
profile_dataset: A GE Dataset
column: The column for which to add the expectations
**kwargs:
Returns:
The GE Dataset
"""
if "expect_column_values_to_not_be_null" not in self.excluded_expectations:
not_null_result = profile_dataset.expect_column_values_to_not_be_null(
column
)
if not not_null_result.success:
unexpected_percent = float(not_null_result.result["unexpected_percent"])
if unexpected_percent >= 50 and not self.not_null_only:
potential_mostly_value = math.floor(unexpected_percent) / 100.0
# A safe_mostly_value of 0.001 gives us a rough way of ensuring that we don't wind up with a mostly
# value of 0 when we round
safe_mostly_value = max(0.001, potential_mostly_value)
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": column},
),
match_type="domain",
)
if (
"expect_column_values_to_be_null"
not in self.excluded_expectations
):
profile_dataset.expect_column_values_to_be_null(
column, mostly=safe_mostly_value
)
else:
potential_mostly_value = (
100.0 - math.ceil(unexpected_percent)
) / 100.0
# A safe_mostly_value of 0.001 gives us a rough way of ensuring that we don't wind up with a mostly
# value of 0 when we round
safe_mostly_value = max(0.001, potential_mostly_value)
profile_dataset.expect_column_values_to_not_be_null(
column, mostly=safe_mostly_value
)
if (
"expect_column_proportion_of_unique_values_to_be_between"
not in self.excluded_expectations
):
pct_unique = (
profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
column, None, None
).result["observed_value"]
)
if not self._is_nan(pct_unique):
profile_dataset.expect_column_proportion_of_unique_values_to_be_between(
column, min_value=pct_unique, max_value=pct_unique
)
else:
# noinspection PyProtectedMember
profile_dataset._expectation_suite.remove_expectation(
ExpectationConfiguration(
expectation_type="expect_column_proportion_of_unique_values_to_be_between",
kwargs={"column": column},
),
match_type="domain",
)
logger.debug(
f"Skipping expect_column_proportion_of_unique_values_to_be_between because observed value is nan: {pct_unique}"
)
if "expect_column_values_to_be_in_type_list" not in self.excluded_expectations:
col_type = self.column_info.get(column).get("type")
if col_type != "UNKNOWN":
type_list = profiler_data_types_with_mapping.get(col_type)
profile_dataset.expect_column_values_to_be_in_type_list(
column, type_list=type_list
)
else:
logger.info(
f"Column type for column {column} is unknown. "
f"Skipping expect_column_values_to_be_in_type_list for this column."
) | [
"def",
"_build_expectations_for_all_column_types",
"(",
"self",
",",
"profile_dataset",
",",
"column",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"\"expect_column_values_to_not_be_null\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"not_null_result",
"=",
"profile_dataset",
".",
"expect_column_values_to_not_be_null",
"(",
"column",
")",
"if",
"not",
"not_null_result",
".",
"success",
":",
"unexpected_percent",
"=",
"float",
"(",
"not_null_result",
".",
"result",
"[",
"\"unexpected_percent\"",
"]",
")",
"if",
"unexpected_percent",
">=",
"50",
"and",
"not",
"self",
".",
"not_null_only",
":",
"potential_mostly_value",
"=",
"math",
".",
"floor",
"(",
"unexpected_percent",
")",
"/",
"100.0",
"# A safe_mostly_value of 0.001 gives us a rough way of ensuring that we don't wind up with a mostly",
"# value of 0 when we round",
"safe_mostly_value",
"=",
"max",
"(",
"0.001",
",",
"potential_mostly_value",
")",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_values_to_not_be_null\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"if",
"(",
"\"expect_column_values_to_be_null\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"profile_dataset",
".",
"expect_column_values_to_be_null",
"(",
"column",
",",
"mostly",
"=",
"safe_mostly_value",
")",
"else",
":",
"potential_mostly_value",
"=",
"(",
"100.0",
"-",
"math",
".",
"ceil",
"(",
"unexpected_percent",
")",
")",
"/",
"100.0",
"# A safe_mostly_value of 0.001 gives us a rough way of ensuring that we don't wind up with a mostly",
"# value of 0 when we round",
"safe_mostly_value",
"=",
"max",
"(",
"0.001",
",",
"potential_mostly_value",
")",
"profile_dataset",
".",
"expect_column_values_to_not_be_null",
"(",
"column",
",",
"mostly",
"=",
"safe_mostly_value",
")",
"if",
"(",
"\"expect_column_proportion_of_unique_values_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"pct_unique",
"=",
"(",
"profile_dataset",
".",
"expect_column_proportion_of_unique_values_to_be_between",
"(",
"column",
",",
"None",
",",
"None",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
")",
"if",
"not",
"self",
".",
"_is_nan",
"(",
"pct_unique",
")",
":",
"profile_dataset",
".",
"expect_column_proportion_of_unique_values_to_be_between",
"(",
"column",
",",
"min_value",
"=",
"pct_unique",
",",
"max_value",
"=",
"pct_unique",
")",
"else",
":",
"# noinspection PyProtectedMember",
"profile_dataset",
".",
"_expectation_suite",
".",
"remove_expectation",
"(",
"ExpectationConfiguration",
"(",
"expectation_type",
"=",
"\"expect_column_proportion_of_unique_values_to_be_between\"",
",",
"kwargs",
"=",
"{",
"\"column\"",
":",
"column",
"}",
",",
")",
",",
"match_type",
"=",
"\"domain\"",
",",
")",
"logger",
".",
"debug",
"(",
"f\"Skipping expect_column_proportion_of_unique_values_to_be_between because observed value is nan: {pct_unique}\"",
")",
"if",
"\"expect_column_values_to_be_in_type_list\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"col_type",
"=",
"self",
".",
"column_info",
".",
"get",
"(",
"column",
")",
".",
"get",
"(",
"\"type\"",
")",
"if",
"col_type",
"!=",
"\"UNKNOWN\"",
":",
"type_list",
"=",
"profiler_data_types_with_mapping",
".",
"get",
"(",
"col_type",
")",
"profile_dataset",
".",
"expect_column_values_to_be_in_type_list",
"(",
"column",
",",
"type_list",
"=",
"type_list",
")",
"else",
":",
"logger",
".",
"info",
"(",
"f\"Column type for column {column} is unknown. \"",
"f\"Skipping expect_column_values_to_be_in_type_list for this column.\"",
")"
] | [
1019,
4
] | [
1110,
17
] | python | en | ['en', 'error', 'th'] | False |
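A worked sketch of the "mostly" arithmetic in the failure branch above: the unexpected (null) percent is floored or ceiled so the generated expectation tolerates the nulls seen at profiling time, and the result is clamped at 0.001 so rounding can never produce a mostly of 0.

import math

def mostly_for_null_expectation(unexpected_percent, not_null_only=False):
    if unexpected_percent >= 50 and not not_null_only:
        # Mostly-null column: expect_column_values_to_be_null with this mostly.
        expectation = "expect_column_values_to_be_null"
        potential = math.floor(unexpected_percent) / 100.0
    else:
        # Mostly-populated column: expect_column_values_to_not_be_null.
        expectation = "expect_column_values_to_not_be_null"
        potential = (100.0 - math.ceil(unexpected_percent)) / 100.0
    return expectation, max(0.001, potential)

print(mostly_for_null_expectation(3.2))   # ('expect_column_values_to_not_be_null', 0.96)
print(mostly_for_null_expectation(97.4))  # ('expect_column_values_to_be_null', 0.97)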
UserConfigurableProfiler._build_expectations_table | (self, profile_dataset, **kwargs) |
Adds two table level expectations to the dataset
Args:
profile_dataset: A GE Dataset
**kwargs:
Returns:
The GE Dataset
|
Adds two table level expectations to the dataset
Args:
profile_dataset: A GE Dataset
**kwargs: | def _build_expectations_table(self, profile_dataset, **kwargs):
"""
Adds two table level expectations to the dataset
Args:
profile_dataset: A GE Dataset
**kwargs:
Returns:
The GE Dataset
"""
if (
"expect_table_columns_to_match_ordered_list"
not in self.excluded_expectations
):
columns = self.all_table_columns
profile_dataset.expect_table_columns_to_match_ordered_list(columns)
if "expect_table_row_count_to_be_between" not in self.excluded_expectations:
row_count = profile_dataset.expect_table_row_count_to_be_between(
min_value=0, max_value=None
).result["observed_value"]
min_value = max(0, int(row_count))
max_value = int(row_count)
profile_dataset.expect_table_row_count_to_be_between(
min_value=min_value, max_value=max_value
) | [
"def",
"_build_expectations_table",
"(",
"self",
",",
"profile_dataset",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"(",
"\"expect_table_columns_to_match_ordered_list\"",
"not",
"in",
"self",
".",
"excluded_expectations",
")",
":",
"columns",
"=",
"self",
".",
"all_table_columns",
"profile_dataset",
".",
"expect_table_columns_to_match_ordered_list",
"(",
"columns",
")",
"if",
"\"expect_table_row_count_to_be_between\"",
"not",
"in",
"self",
".",
"excluded_expectations",
":",
"row_count",
"=",
"profile_dataset",
".",
"expect_table_row_count_to_be_between",
"(",
"min_value",
"=",
"0",
",",
"max_value",
"=",
"None",
")",
".",
"result",
"[",
"\"observed_value\"",
"]",
"min_value",
"=",
"max",
"(",
"0",
",",
"int",
"(",
"row_count",
")",
")",
"max_value",
"=",
"int",
"(",
"row_count",
")",
"profile_dataset",
".",
"expect_table_row_count_to_be_between",
"(",
"min_value",
"=",
"min_value",
",",
"max_value",
"=",
"max_value",
")"
] | [
1112,
4
] | [
1139,
13
] | python | en | ['en', 'error', 'th'] | False |
UserConfigurableProfiler._is_nan | (self, value) |
If value is an array, test element-wise for NaN and return result as a boolean array.
If value is a scalar, return boolean.
Args:
value: The value to test
Returns:
The results of the test
|
If value is an array, test element-wise for NaN and return result as a boolean array.
If value is a scalar, return boolean.
Args:
value: The value to test | def _is_nan(self, value):
"""
If value is an array, test element-wise for NaN and return result as a boolean array.
If value is a scalar, return boolean.
Args:
value: The value to test
Returns:
The results of the test
"""
try:
return np.isnan(value)
except TypeError:
return True | [
"def",
"_is_nan",
"(",
"self",
",",
"value",
")",
":",
"try",
":",
"return",
"np",
".",
"isnan",
"(",
"value",
")",
"except",
"TypeError",
":",
"return",
"True"
] | [
1141,
4
] | [
1154,
23
] | python | en | ['en', 'error', 'th'] | False |
ExpectTableColumnCountToBeBetween.validate_configuration | (self, configuration: Optional[ExpectationConfiguration]) |
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
|
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation. | def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
True if the configuration has been validated successfully. Otherwise, raises an exception
"""
super().validate_configuration(configuration)
self.validate_metric_value_between_configuration(configuration=configuration) | [
"def",
"validate_configuration",
"(",
"self",
",",
"configuration",
":",
"Optional",
"[",
"ExpectationConfiguration",
"]",
")",
":",
"super",
"(",
")",
".",
"validate_configuration",
"(",
"configuration",
")",
"self",
".",
"validate_metric_value_between_configuration",
"(",
"configuration",
"=",
"configuration",
")"
] | [
94,
4
] | [
106,
85
] | python | en | ['en', 'error', 'th'] | False |
is_valid_partition_object | (partition_object) | Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| def is_valid_partition_object(partition_object):
"""Tests whether a given object is a valid continuous or categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
return is_valid_continuous_partition_object(
partition_object
) or is_valid_categorical_partition_object(partition_object) | [
"def",
"is_valid_partition_object",
"(",
"partition_object",
")",
":",
"return",
"is_valid_continuous_partition_object",
"(",
"partition_object",
")",
"or",
"is_valid_categorical_partition_object",
"(",
"partition_object",
")"
] | [
21,
0
] | [
28,
64
] | python | en | ['en', 'en', 'en'] | True |
is_valid_categorical_partition_object | (partition_object) | Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
| def is_valid_categorical_partition_object(partition_object):
"""Tests whether a given object is a valid categorical partition object.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (
partition_object is None
or ("weights" not in partition_object)
or ("values" not in partition_object)
):
return False
# Expect the same number of values as weights; weights should sum to one
return len(partition_object["values"]) == len(
partition_object["weights"]
) and np.allclose(np.sum(partition_object["weights"]), 1) | [
"def",
"is_valid_categorical_partition_object",
"(",
"partition_object",
")",
":",
"if",
"(",
"partition_object",
"is",
"None",
"or",
"(",
"\"weights\"",
"not",
"in",
"partition_object",
")",
"or",
"(",
"\"values\"",
"not",
"in",
"partition_object",
")",
")",
":",
"return",
"False",
"# Expect the same number of values as weights; weights should sum to one",
"return",
"len",
"(",
"partition_object",
"[",
"\"values\"",
"]",
")",
"==",
"len",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
"and",
"np",
".",
"allclose",
"(",
"np",
".",
"sum",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
",",
"1",
")"
] | [
31,
0
] | [
45,
61
] | python | en | ['en', 'en', 'en'] | True |
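To make the checks above concrete, here is a hedged sketch of a categorical partition object that would pass them; the dict shape follows the docstring, and the numbers are purely illustrative.

import numpy as np

categorical_partition = {
    "values": ["red", "green", "blue"],
    "weights": [0.5, 0.3, 0.2],   # one weight per value, summing to 1
}

same_length = len(categorical_partition["values"]) == len(categorical_partition["weights"])
sums_to_one = np.allclose(np.sum(categorical_partition["weights"]), 1)
print(same_length and sums_to_one)  # True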
is_valid_continuous_partition_object | (partition_object) | Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
| Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`. | def is_valid_continuous_partition_object(partition_object):
"""Tests whether a given object is a valid continuous partition object. See :ref:`partition_object`.
:param partition_object: The partition_object to evaluate
:return: Boolean
"""
if (
(partition_object is None)
or ("weights" not in partition_object)
or ("bins" not in partition_object)
):
return False
if "tail_weights" in partition_object:
if len(partition_object["tail_weights"]) != 2:
return False
comb_weights = partition_object["tail_weights"] + partition_object["weights"]
else:
comb_weights = partition_object["weights"]
## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects
# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):
# return False
# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one
return (
(len(partition_object["bins"]) == (len(partition_object["weights"]) + 1))
and np.all(np.diff(partition_object["bins"]) > 0)
and np.allclose(np.sum(comb_weights), 1.0)
) | [
"def",
"is_valid_continuous_partition_object",
"(",
"partition_object",
")",
":",
"if",
"(",
"(",
"partition_object",
"is",
"None",
")",
"or",
"(",
"\"weights\"",
"not",
"in",
"partition_object",
")",
"or",
"(",
"\"bins\"",
"not",
"in",
"partition_object",
")",
")",
":",
"return",
"False",
"if",
"\"tail_weights\"",
"in",
"partition_object",
":",
"if",
"len",
"(",
"partition_object",
"[",
"\"tail_weights\"",
"]",
")",
"!=",
"2",
":",
"return",
"False",
"comb_weights",
"=",
"partition_object",
"[",
"\"tail_weights\"",
"]",
"+",
"partition_object",
"[",
"\"weights\"",
"]",
"else",
":",
"comb_weights",
"=",
"partition_object",
"[",
"\"weights\"",
"]",
"## TODO: Consider adding this check to migrate to the tail_weights structure of partition objects",
"# if (partition_object['bins'][0] == -np.inf) or (partition_object['bins'][-1] == np.inf):",
"# return False",
"# Expect one more bin edge than weight; all bin edges should be monotonically increasing; weights should sum to one",
"return",
"(",
"(",
"len",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
"==",
"(",
"len",
"(",
"partition_object",
"[",
"\"weights\"",
"]",
")",
"+",
"1",
")",
")",
"and",
"np",
".",
"all",
"(",
"np",
".",
"diff",
"(",
"partition_object",
"[",
"\"bins\"",
"]",
")",
">",
"0",
")",
"and",
"np",
".",
"allclose",
"(",
"np",
".",
"sum",
"(",
"comb_weights",
")",
",",
"1.0",
")",
")"
] | [
48,
0
] | [
77,
5
] | python | en | ['en', 'en', 'en'] | True |
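For contrast, a continuous partition object that would satisfy the rules above: one more bin edge than weights, strictly increasing bins, and weights plus optional tail_weights summing to one. Again a sketch with made-up numbers.

import numpy as np

continuous_partition = {
    "bins": [0.0, 10.0, 20.0, 30.0],   # 4 edges -> 3 bins
    "weights": [0.3, 0.4, 0.2],        # density per bin
    "tail_weights": [0.05, 0.05],      # mass below/above the outer edges
}

edge_count_ok = len(continuous_partition["bins"]) == len(continuous_partition["weights"]) + 1
monotone = np.all(np.diff(continuous_partition["bins"]) > 0)
total = np.sum(continuous_partition["weights"] + continuous_partition["tail_weights"])
print(edge_count_ok and monotone and np.allclose(total, 1.0))  # True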
build_continuous_partition_object | (
execution_engine, domain_kwargs, bins="auto", n_bins=10, allow_relative_error=False
) | Convenience method for building a partition object on continuous data from a dataset and column
Args:
execution_engine (ExecutionEngine): the execution engine with which to compute the partition
domain_kwargs (dict): The domain kwargs describing the domain for which to compute the partition
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition object on continuous data from a dataset and column | def build_continuous_partition_object(
execution_engine, domain_kwargs, bins="auto", n_bins=10, allow_relative_error=False
):
"""Convenience method for building a partition object on continuous data from a dataset and column
Args:
execution_engine (ExecutionEngine): the execution engine with which to compute the partition
domain_kwargs (dict): The domain kwargs describing the domain for which to compute the partition
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto'
(for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
allow_relative_error: passed to get_column_quantiles, set to False for only precise
values, True to allow approximate values on systems with only binary choice (e.g. Redshift), and to a
value between zero and one for systems that allow specification of relative error (e.g.
SparkDFDataset).
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
See :ref:`partition_object`.
"""
partition_metric_configuration = MetricConfiguration(
"column.partition",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"bins": bins,
"n_bins": n_bins,
"allow_relative_error": allow_relative_error,
},
)
bins = execution_engine.resolve_metrics([partition_metric_configuration])[
partition_metric_configuration.id
]
if isinstance(bins, np.ndarray):
bins = bins.tolist()
else:
bins = list(bins)
hist_metric_configuration = MetricConfiguration(
"column.histogram",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"bins": tuple(bins),
},
)
nonnull_configuration = MetricConfiguration(
"column_values.nonnull.count",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"bins": tuple(bins),
},
)
metrics = execution_engine.resolve_metrics(
(hist_metric_configuration, nonnull_configuration)
)
weights = list(
np.array(metrics[hist_metric_configuration.id])
/ metrics[nonnull_configuration.id]
)
tail_weights = (1 - sum(weights)) / 2
partition_object = {
"bins": bins,
"weights": weights,
"tail_weights": [tail_weights, tail_weights],
}
return partition_object | [
"def",
"build_continuous_partition_object",
"(",
"execution_engine",
",",
"domain_kwargs",
",",
"bins",
"=",
"\"auto\"",
",",
"n_bins",
"=",
"10",
",",
"allow_relative_error",
"=",
"False",
")",
":",
"partition_metric_configuration",
"=",
"MetricConfiguration",
"(",
"\"column.partition\"",
",",
"metric_domain_kwargs",
"=",
"domain_kwargs",
",",
"metric_value_kwargs",
"=",
"{",
"\"bins\"",
":",
"bins",
",",
"\"n_bins\"",
":",
"n_bins",
",",
"\"allow_relative_error\"",
":",
"allow_relative_error",
",",
"}",
",",
")",
"bins",
"=",
"execution_engine",
".",
"resolve_metrics",
"(",
"[",
"partition_metric_configuration",
"]",
")",
"[",
"partition_metric_configuration",
".",
"id",
"]",
"if",
"isinstance",
"(",
"bins",
",",
"np",
".",
"ndarray",
")",
":",
"bins",
"=",
"bins",
".",
"tolist",
"(",
")",
"else",
":",
"bins",
"=",
"list",
"(",
"bins",
")",
"hist_metric_configuration",
"=",
"MetricConfiguration",
"(",
"\"column.histogram\"",
",",
"metric_domain_kwargs",
"=",
"domain_kwargs",
",",
"metric_value_kwargs",
"=",
"{",
"\"bins\"",
":",
"tuple",
"(",
"bins",
")",
",",
"}",
",",
")",
"nonnull_configuration",
"=",
"MetricConfiguration",
"(",
"\"column_values.nonnull.count\"",
",",
"metric_domain_kwargs",
"=",
"domain_kwargs",
",",
"metric_value_kwargs",
"=",
"{",
"\"bins\"",
":",
"tuple",
"(",
"bins",
")",
",",
"}",
",",
")",
"metrics",
"=",
"execution_engine",
".",
"resolve_metrics",
"(",
"(",
"hist_metric_configuration",
",",
"nonnull_configuration",
")",
")",
"weights",
"=",
"list",
"(",
"np",
".",
"array",
"(",
"metrics",
"[",
"hist_metric_configuration",
".",
"id",
"]",
")",
"/",
"metrics",
"[",
"nonnull_configuration",
".",
"id",
"]",
")",
"tail_weights",
"=",
"(",
"1",
"-",
"sum",
"(",
"weights",
")",
")",
"/",
"2",
"partition_object",
"=",
"{",
"\"bins\"",
":",
"bins",
",",
"\"weights\"",
":",
"weights",
",",
"\"tail_weights\"",
":",
"[",
"tail_weights",
",",
"tail_weights",
"]",
",",
"}",
"return",
"partition_object"
] | [
80,
0
] | [
150,
27
] | python | en | ['en', 'en', 'en'] | True |
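The helper above delegates binning and counting to metrics resolved by the execution engine. Outside that machinery, a partition object of the same shape can be sketched with plain numpy; this is an illustrative stand-in, not the Great Expectations implementation.

import numpy as np

data = np.random.default_rng(0).normal(size=1_000)

counts, bin_edges = np.histogram(data, bins="auto")
weights = counts / counts.sum()
tail_weight = (1 - weights.sum()) / 2   # zero here; kept for shape parity with the helper

partition_object = {
    "bins": bin_edges.tolist(),
    "weights": weights.tolist(),
    "tail_weights": [tail_weight, tail_weight],
}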
build_categorical_partition_object | (execution_engine, domain_kwargs, sort="value") | Convenience method for building a partition object on categorical data from a dataset and column
Args:
execution_engine (ExecutionEngine): the execution engine with which to compute the partition
domain_kwargs (dict): The domain kwargs describing the domain for which to compute the partition
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
Returns:
A new partition_object::
{
"values": (list) the categorical values for which each weight applies,
"weights": (list) The densities of the values implied by the partition.
}
See :ref:`partition_object`.
| Convenience method for building a partition object on categorical data from a dataset and column | def build_categorical_partition_object(execution_engine, domain_kwargs, sort="value"):
"""Convenience method for building a partition object on categorical data from a dataset and column
Args:
execution_engine (ExecutionEngine): the execution engine with which to compute the partition
domain_kwargs (dict): The domain kwargs describing the domain for which to compute the partition
sort (string): must be one of "value", "count", or "none".
- if "value" then values in the resulting partition object will be sorted lexigraphically
- if "count" then values will be sorted according to descending count (frequency)
- if "none" then values will not be sorted
Returns:
A new partition_object::
{
"values": (list) the categorical values for which each weight applies,
"weights": (list) The densities of the values implied by the partition.
}
See :ref:`partition_object`.
"""
counts_configuration = MetricConfiguration(
"column.partition",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"sort": sort,
},
)
nonnull_configuration = MetricConfiguration(
"column_values.nonnull.count",
metric_domain_kwargs=domain_kwargs,
)
metrics = execution_engine.resolve_metrics(
(counts_configuration, nonnull_configuration)
)
return {
"values": list(metrics[counts_configuration.id].index),
"weights": list(
np.array(metrics[counts_configuration.id])
/ metrics[nonnull_configuration.id]
),
} | [
"def",
"build_categorical_partition_object",
"(",
"execution_engine",
",",
"domain_kwargs",
",",
"sort",
"=",
"\"value\"",
")",
":",
"counts_configuration",
"=",
"MetricConfiguration",
"(",
"\"column.partition\"",
",",
"metric_domain_kwargs",
"=",
"domain_kwargs",
",",
"metric_value_kwargs",
"=",
"{",
"\"sort\"",
":",
"sort",
",",
"}",
",",
")",
"nonnull_configuration",
"=",
"MetricConfiguration",
"(",
"\"column_values.nonnull.count\"",
",",
"metric_domain_kwargs",
"=",
"domain_kwargs",
",",
")",
"metrics",
"=",
"execution_engine",
".",
"resolve_metrics",
"(",
"(",
"counts_configuration",
",",
"nonnull_configuration",
")",
")",
"return",
"{",
"\"values\"",
":",
"list",
"(",
"metrics",
"[",
"counts_configuration",
".",
"id",
"]",
".",
"index",
")",
",",
"\"weights\"",
":",
"list",
"(",
"np",
".",
"array",
"(",
"metrics",
"[",
"counts_configuration",
".",
"id",
"]",
")",
"/",
"metrics",
"[",
"nonnull_configuration",
".",
"id",
"]",
")",
",",
"}"
] | [
153,
0
] | [
194,
5
] | python | en | ['en', 'en', 'en'] | True |
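Likewise, the categorical analogue can be approximated with pandas alone; a rough sketch in which sort="value" is emulated by sorting the index lexicographically.

import pandas as pd

column = pd.Series(["a", "b", "a", "c", "a", "b"])

counts = column.value_counts().sort_index()   # sort="value": order by the value itself
nonnull = column.notnull().sum()

partition_object = {
    "values": list(counts.index),
    "weights": list(counts / nonnull),
}
print(partition_object)  # values ['a', 'b', 'c'] with weights roughly [0.5, 0.33, 0.17]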
infer_distribution_parameters | (data, distribution, params=None) | Convenience method for determining the shape parameters of a given distribution
Args:
data (list-like): The data to build shape parameters from.
distribution (string): Scipy distribution, determines which parameters to build.
params (dict or None): The known parameters. Parameters given here will not be altered. \
Keep as None to infer all necessary parameters from the data.
Returns:
A dictionary of named parameters::
{
"mean": (float),
"std_dev": (float),
"loc": (float),
"scale": (float),
"alpha": (float),
"beta": (float),
"min": (float),
"max": (float),
"df": (float)
}
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest
| Convenience method for determining the shape parameters of a given distribution | def infer_distribution_parameters(data, distribution, params=None):
"""Convenience method for determining the shape parameters of a given distribution
Args:
data (list-like): The data to build shape parameters from.
distribution (string): Scipy distribution, determines which parameters to build.
params (dict or None): The known parameters. Parameters given here will not be altered. \
Keep as None to infer all necessary parameters from the data.
Returns:
A dictionary of named parameters::
{
"mean": (float),
"std_dev": (float),
"loc": (float),
"scale": (float),
"alpha": (float),
"beta": (float),
"min": (float),
"max": (float),
"df": (float)
}
See: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html#scipy.stats.kstest
"""
if params is None:
params = dict()
elif not isinstance(params, dict):
raise TypeError(
"params must be a dictionary object, see great_expectations documentation"
)
if "mean" not in params.keys():
params["mean"] = data.mean()
if "std_dev" not in params.keys():
params["std_dev"] = data.std()
if distribution == "beta":
# scipy cdf(x, a, b, loc=0, scale=1)
if "alpha" not in params.keys():
# from https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance
params["alpha"] = (params["mean"] ** 2) * (
((1 - params["mean"]) / params["std_dev"] ** 2) - (1 / params["mean"])
)
if "beta" not in params.keys():
params["beta"] = params["alpha"] * ((1 / params["mean"]) - 1)
elif distribution == "gamma":
# scipy cdf(x, a, loc=0, scale=1)
if "alpha" not in params.keys():
# Using https://en.wikipedia.org/wiki/Gamma_distribution
params["alpha"] = params["mean"] / params.get("scale", 1)
# elif distribution == 'poisson':
# if 'lambda' not in params.keys():
# params['lambda'] = params['mean']
elif distribution == "uniform":
# scipy cdf(x, loc=0, scale=1)
if "min" not in params.keys():
if "loc" in params.keys():
params["min"] = params["loc"]
else:
params["min"] = min(data)
if "max" not in params.keys():
if "scale" in params.keys():
params["max"] = params["scale"]
else:
params["max"] = max(data) - params["min"]
elif distribution == "chi2":
# scipy cdf(x, df, loc=0, scale=1)
if "df" not in params.keys():
# from https://en.wikipedia.org/wiki/Chi-squared_distribution
params["df"] = params["mean"]
# Expon only uses loc and scale, use default
# elif distribution == 'expon':
# scipy cdf(x, loc=0, scale=1)
# if 'lambda' in params.keys():
# Lambda is optional
# params['scale'] = 1 / params['lambda']
elif distribution != "norm":
raise AttributeError(
"Unsupported distribution type. Please refer to Great Expectations Documentation"
)
params["loc"] = params.get("loc", 0)
params["scale"] = params.get("scale", 1)
return params | [
"def",
"infer_distribution_parameters",
"(",
"data",
",",
"distribution",
",",
"params",
"=",
"None",
")",
":",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"dict",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"\"params must be a dictionary object, see great_expectations documentation\"",
")",
"if",
"\"mean\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"mean\"",
"]",
"=",
"data",
".",
"mean",
"(",
")",
"if",
"\"std_dev\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"std_dev\"",
"]",
"=",
"data",
".",
"std",
"(",
")",
"if",
"distribution",
"==",
"\"beta\"",
":",
"# scipy cdf(x, a, b, loc=0, scale=1)",
"if",
"\"alpha\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# from https://stats.stackexchange.com/questions/12232/calculating-the-parameters-of-a-beta-distribution-using-the-mean-and-variance",
"params",
"[",
"\"alpha\"",
"]",
"=",
"(",
"params",
"[",
"\"mean\"",
"]",
"**",
"2",
")",
"*",
"(",
"(",
"(",
"1",
"-",
"params",
"[",
"\"mean\"",
"]",
")",
"/",
"params",
"[",
"\"std_dev\"",
"]",
"**",
"2",
")",
"-",
"(",
"1",
"/",
"params",
"[",
"\"mean\"",
"]",
")",
")",
"if",
"\"beta\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"beta\"",
"]",
"=",
"params",
"[",
"\"alpha\"",
"]",
"*",
"(",
"(",
"1",
"/",
"params",
"[",
"\"mean\"",
"]",
")",
"-",
"1",
")",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"# scipy cdf(x, a, loc=0, scale=1)",
"if",
"\"alpha\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# Using https://en.wikipedia.org/wiki/Gamma_distribution",
"params",
"[",
"\"alpha\"",
"]",
"=",
"params",
"[",
"\"mean\"",
"]",
"/",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"# elif distribution == 'poisson':",
"# if 'lambda' not in params.keys():",
"# params['lambda'] = params['mean']",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"# scipy cdf(x, loc=0, scale=1)",
"if",
"\"min\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"if",
"\"loc\"",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"min\"",
"]",
"=",
"params",
"[",
"\"loc\"",
"]",
"else",
":",
"params",
"[",
"\"min\"",
"]",
"=",
"min",
"(",
"data",
")",
"if",
"\"max\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"if",
"\"scale\"",
"in",
"params",
".",
"keys",
"(",
")",
":",
"params",
"[",
"\"max\"",
"]",
"=",
"params",
"[",
"\"scale\"",
"]",
"else",
":",
"params",
"[",
"\"max\"",
"]",
"=",
"max",
"(",
"data",
")",
"-",
"params",
"[",
"\"min\"",
"]",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"# scipy cdf(x, df, loc=0, scale=1)",
"if",
"\"df\"",
"not",
"in",
"params",
".",
"keys",
"(",
")",
":",
"# from https://en.wikipedia.org/wiki/Chi-squared_distribution",
"params",
"[",
"\"df\"",
"]",
"=",
"params",
"[",
"\"mean\"",
"]",
"# Expon only uses loc and scale, use default",
"# elif distribution == 'expon':",
"# scipy cdf(x, loc=0, scale=1)",
"# if 'lambda' in params.keys():",
"# Lambda is optional",
"# params['scale'] = 1 / params['lambda']",
"elif",
"distribution",
"!=",
"\"norm\"",
":",
"raise",
"AttributeError",
"(",
"\"Unsupported distribution type. Please refer to Great Expectations Documentation\"",
")",
"params",
"[",
"\"loc\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"loc\"",
",",
"0",
")",
"params",
"[",
"\"scale\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"return",
"params"
] | [
197,
0
] | [
290,
17
] | python | en | ['en', 'en', 'en'] | True |
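A small numeric illustration of the inference rules above for the 'norm' and 'uniform' branches, mirroring the formulas with plain numpy rather than importing the helper.

import numpy as np

data = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])

norm_params = {"mean": data.mean(), "std_dev": data.std(), "loc": 0, "scale": 1}
uniform_params = {"min": data.min(), "max": data.max() - data.min(), "loc": 0, "scale": 1}

print(norm_params["mean"], norm_params["std_dev"])   # 5.0 2.0
print(uniform_params["min"], uniform_params["max"])  # 2.0 7.0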
_scipy_distribution_positional_args_from_dict | (distribution, params) | Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stats.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
| Helper function that returns positional arguments for a scipy distribution using a dict of parameters. | def _scipy_distribution_positional_args_from_dict(distribution, params):
"""Helper function that returns positional arguments for a scipy distribution using a dict of parameters.
See the `cdf()` function here https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html#Methods\
to see an example of scipy's positional arguments. This function returns the arguments specified by the \
scipy.stats.distribution.cdf() for that distribution.
Args:
distribution (string): \
The scipy distribution name.
params (dict): \
A dict of named parameters.
Raises:
AttributeError: \
If an unsupported distribution is provided.
"""
params["loc"] = params.get("loc", 0)
if "scale" not in params:
params["scale"] = 1
if distribution == "norm":
return params["mean"], params["std_dev"]
elif distribution == "beta":
return params["alpha"], params["beta"], params["loc"], params["scale"]
elif distribution == "gamma":
return params["alpha"], params["loc"], params["scale"]
# elif distribution == 'poisson':
# return params['lambda'], params['loc']
elif distribution == "uniform":
return params["min"], params["max"]
elif distribution == "chi2":
return params["df"], params["loc"], params["scale"]
elif distribution == "expon":
return params["loc"], params["scale"] | [
"def",
"_scipy_distribution_positional_args_from_dict",
"(",
"distribution",
",",
"params",
")",
":",
"params",
"[",
"\"loc\"",
"]",
"=",
"params",
".",
"get",
"(",
"\"loc\"",
",",
"0",
")",
"if",
"\"scale\"",
"not",
"in",
"params",
":",
"params",
"[",
"\"scale\"",
"]",
"=",
"1",
"if",
"distribution",
"==",
"\"norm\"",
":",
"return",
"params",
"[",
"\"mean\"",
"]",
",",
"params",
"[",
"\"std_dev\"",
"]",
"elif",
"distribution",
"==",
"\"beta\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"beta\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"return",
"params",
"[",
"\"alpha\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"# elif distribution == 'poisson':",
"# return params['lambda'], params['loc']",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"return",
"params",
"[",
"\"min\"",
"]",
",",
"params",
"[",
"\"max\"",
"]",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"return",
"params",
"[",
"\"df\"",
"]",
",",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"return",
"params",
"[",
"\"loc\"",
"]",
",",
"params",
"[",
"\"scale\"",
"]"
] | [
293,
0
] | [
328,
45
] | python | en | ['en', 'en', 'en'] | True |
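The tuples returned above line up with scipy's cdf signatures; for example, passing them positionally would look like this (scipy.stats calls only, shown as a sketch).

from scipy import stats

# "norm" maps to (mean, std_dev), consumed as stats.norm.cdf(x, loc, scale)
print(stats.norm.cdf(0.0, 0.0, 1.0))        # 0.5

# "gamma" maps to (alpha, loc, scale), consumed as stats.gamma.cdf(x, a, loc, scale)
print(stats.gamma.cdf(1.0, 2.0, 0.0, 1.0))  # ~0.264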
validate_distribution_parameters | (distribution, params) | Ensures that necessary parameters for a distribution are present and that all parameters are sensical.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Exceptions:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
| Ensures that necessary parameters for a distribution are present and that all parameters are sensical. | def validate_distribution_parameters(distribution, params):
"""Ensures that necessary parameters for a distribution are present and that all parameters are sensical.
If parameters necessary to construct a distribution are missing or invalid, this function raises ValueError\
with an informative description. Note that 'loc' and 'scale' are optional arguments, and that 'scale'\
must be positive.
Args:
distribution (string): \
The scipy distribution name, e.g. normal distribution is 'norm'.
params (dict or list): \
The distribution shape parameters in a named dictionary or positional list form following the scipy \
cdf argument scheme.
params={'mean': 40, 'std_dev': 5} or params=[40, 5]
Exceptions:
ValueError: \
With an informative description, usually when necessary parameters are omitted or are invalid.
"""
norm_msg = (
"norm distributions require 0 parameters and optionally 'mean', 'std_dev'."
)
beta_msg = "beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'."
gamma_msg = "gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'."
# poisson_msg = "poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'."
uniform_msg = (
"uniform distributions require 0 parameters and optionally 'loc', 'scale'."
)
chi2_msg = "chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'."
expon_msg = (
"expon distributions require 0 parameters and optionally 'loc', 'scale'."
)
if distribution not in [
"norm",
"beta",
"gamma",
"poisson",
"uniform",
"chi2",
"expon",
]:
raise AttributeError("Unsupported distribution provided: %s" % distribution)
if isinstance(params, dict):
# `params` is a dictionary
if params.get("std_dev", 1) <= 0 or params.get("scale", 1) <= 0:
raise ValueError("std_dev and scale must be positive.")
# alpha and beta are required and positive
if distribution == "beta" and (
params.get("alpha", -1) <= 0 or params.get("beta", -1) <= 0
):
raise ValueError("Invalid parameters: %s" % beta_msg)
# alpha is required and positive
elif distribution == "gamma" and params.get("alpha", -1) <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# lambda is a required and positive
# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
# df is necessary and required to be positive
elif distribution == "chi2" and params.get("df", -1) <= 0:
raise ValueError("Invalid parameters: %s:" % chi2_msg)
elif isinstance(params, tuple) or isinstance(params, list):
scale = None
# `params` is a tuple or a list
if distribution == "beta":
if len(params) < 2:
raise ValueError("Missing required parameters: %s" % beta_msg)
if params[0] <= 0 or params[1] <= 0:
raise ValueError("Invalid parameters: %s" % beta_msg)
if len(params) == 4:
scale = params[3]
elif len(params) > 4:
raise ValueError("Too many parameters provided: %s" % beta_msg)
elif distribution == "norm":
if len(params) > 2:
raise ValueError("Too many parameters provided: %s" % norm_msg)
if len(params) == 2:
scale = params[1]
elif distribution == "gamma":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % gamma_msg)
if len(params) == 3:
scale = params[2]
if len(params) > 3:
raise ValueError("Too many parameters provided: %s" % gamma_msg)
elif params[0] <= 0:
raise ValueError("Invalid parameters: %s" % gamma_msg)
# elif distribution == 'poisson':
# if len(params) < 1:
# raise ValueError("Missing required parameters: %s" %poisson_msg)
# if len(params) > 2:
# raise ValueError("Too many parameters provided: %s" %poisson_msg)
# elif params[0] <= 0:
# raise ValueError("Invalid parameters: %s" %poisson_msg)
elif distribution == "uniform":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % uniform_msg)
elif distribution == "chi2":
if len(params) < 1:
raise ValueError("Missing required parameters: %s" % chi2_msg)
elif len(params) == 3:
scale = params[2]
elif len(params) > 3:
raise ValueError("Too many arguments provided: %s" % chi2_msg)
if params[0] <= 0:
raise ValueError("Invalid parameters: %s" % chi2_msg)
elif distribution == "expon":
if len(params) == 2:
scale = params[1]
if len(params) > 2:
raise ValueError("Too many arguments provided: %s" % expon_msg)
if scale is not None and scale <= 0:
raise ValueError("std_dev and scale must be positive.")
else:
raise ValueError(
"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)"
)
return | [
"def",
"validate_distribution_parameters",
"(",
"distribution",
",",
"params",
")",
":",
"norm_msg",
"=",
"(",
"\"norm distributions require 0 parameters and optionally 'mean', 'std_dev'.\"",
")",
"beta_msg",
"=",
"\"beta distributions require 2 positive parameters 'alpha', 'beta' and optionally 'loc', 'scale'.\"",
"gamma_msg",
"=",
"\"gamma distributions require 1 positive parameter 'alpha' and optionally 'loc','scale'.\"",
"# poisson_msg = \"poisson distributions require 1 positive parameter 'lambda' and optionally 'loc'.\"",
"uniform_msg",
"=",
"(",
"\"uniform distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"chi2_msg",
"=",
"\"chi2 distributions require 1 positive parameter 'df' and optionally 'loc', 'scale'.\"",
"expon_msg",
"=",
"(",
"\"expon distributions require 0 parameters and optionally 'loc', 'scale'.\"",
")",
"if",
"distribution",
"not",
"in",
"[",
"\"norm\"",
",",
"\"beta\"",
",",
"\"gamma\"",
",",
"\"poisson\"",
",",
"\"uniform\"",
",",
"\"chi2\"",
",",
"\"expon\"",
",",
"]",
":",
"raise",
"AttributeError",
"(",
"\"Unsupported distribution provided: %s\"",
"%",
"distribution",
")",
"if",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"# `params` is a dictionary",
"if",
"params",
".",
"get",
"(",
"\"std_dev\"",
",",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"scale\"",
",",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"# alpha and beta are required and positive",
"if",
"distribution",
"==",
"\"beta\"",
"and",
"(",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
"or",
"params",
".",
"get",
"(",
"\"beta\"",
",",
"-",
"1",
")",
"<=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"# alpha is required and positive",
"elif",
"distribution",
"==",
"\"gamma\"",
"and",
"params",
".",
"get",
"(",
"\"alpha\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# lambda is a required and positive",
"# elif distribution == 'poisson' and params.get('lambda', -1) <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"# df is necessary and required to be positive",
"elif",
"distribution",
"==",
"\"chi2\"",
"and",
"params",
".",
"get",
"(",
"\"df\"",
",",
"-",
"1",
")",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s:\"",
"%",
"chi2_msg",
")",
"elif",
"isinstance",
"(",
"params",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"params",
",",
"list",
")",
":",
"scale",
"=",
"None",
"# `params` is a tuple or a list",
"if",
"distribution",
"==",
"\"beta\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
"or",
"params",
"[",
"1",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"beta_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"4",
":",
"scale",
"=",
"params",
"[",
"3",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"4",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"beta_msg",
")",
"elif",
"distribution",
"==",
"\"norm\"",
":",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"norm_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"elif",
"distribution",
"==",
"\"gamma\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"gamma_msg",
")",
"if",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"if",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many parameters provided: %s\"",
"%",
"gamma_msg",
")",
"elif",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"gamma_msg",
")",
"# elif distribution == 'poisson':",
"# if len(params) < 1:",
"# raise ValueError(\"Missing required parameters: %s\" %poisson_msg)",
"# if len(params) > 2:",
"# raise ValueError(\"Too many parameters provided: %s\" %poisson_msg)",
"# elif params[0] <= 0:",
"# raise ValueError(\"Invalid parameters: %s\" %poisson_msg)",
"elif",
"distribution",
"==",
"\"uniform\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"uniform_msg",
")",
"elif",
"distribution",
"==",
"\"chi2\"",
":",
"if",
"len",
"(",
"params",
")",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"Missing required parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"len",
"(",
"params",
")",
"==",
"3",
":",
"scale",
"=",
"params",
"[",
"2",
"]",
"elif",
"len",
"(",
"params",
")",
">",
"3",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"chi2_msg",
")",
"if",
"params",
"[",
"0",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"Invalid parameters: %s\"",
"%",
"chi2_msg",
")",
"elif",
"distribution",
"==",
"\"expon\"",
":",
"if",
"len",
"(",
"params",
")",
"==",
"2",
":",
"scale",
"=",
"params",
"[",
"1",
"]",
"if",
"len",
"(",
"params",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"\"Too many arguments provided: %s\"",
"%",
"expon_msg",
")",
"if",
"scale",
"is",
"not",
"None",
"and",
"scale",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"std_dev and scale must be positive.\"",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"params must be a dict or list, or use ge.dataset.util.infer_distribution_parameters(data, distribution)\"",
")",
"return"
] | [
331,
0
] | [
470,
10
] | python | en | ['en', 'en', 'en'] | True |
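To make the accepted parameter shapes concrete, a couple of example inputs and how the rules above would classify them; this restates the checks standalone rather than calling into the library.

# Named form for 'beta': both shape parameters present and positive.
valid_beta = {"alpha": 2.0, "beta": 5.0, "loc": 0, "scale": 1}

# Positional form for 'norm': [mean, std_dev]; a non-positive std_dev is rejected.
for params in ([40, 5], [40, -1]):
    scale = params[1] if len(params) == 2 else None
    ok = scale is None or scale > 0
    print("ok" if ok else "invalid: std_dev and scale must be positive.")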
create_multiple_expectations | (df, columns, expectation_type, *args, **kwargs) | Creates an identical expectation for each of the given columns with the specified arguments, if any.
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results.
| Creates an identical expectation for each of the given columns with the specified arguments, if any. | def create_multiple_expectations(df, columns, expectation_type, *args, **kwargs):
"""Creates an identical expectation for each of the given columns with the specified arguments, if any.
Args:
df (great_expectations.dataset): A great expectations dataset object.
columns (list): A list of column names represented as strings.
expectation_type (string): The expectation type.
Raises:
KeyError if the provided column does not exist.
AttributeError if the provided expectation type does not exist or df is not a valid great expectations dataset.
Returns:
A list of expectation results.
"""
expectation = getattr(df, expectation_type)
results = list()
for column in columns:
results.append(expectation(column, *args, **kwargs))
return results | [
"def",
"create_multiple_expectations",
"(",
"df",
",",
"columns",
",",
"expectation_type",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"expectation",
"=",
"getattr",
"(",
"df",
",",
"expectation_type",
")",
"results",
"=",
"list",
"(",
")",
"for",
"column",
"in",
"columns",
":",
"results",
".",
"append",
"(",
"expectation",
"(",
"column",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")",
"return",
"results"
] | [
473,
0
] | [
496,
18
] | python | en | ['en', 'en', 'en'] | True |
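Usage would look roughly like the following, assuming a classic Great Expectations dataset and that the helper is importable from the dataset util module (both assumptions, shown only as a sketch); the file name and columns are placeholders.

import great_expectations as ge
from great_expectations.dataset.util import create_multiple_expectations  # assumed location

df = ge.read_csv("events.csv")  # hypothetical file; yields a PandasDataset

# Apply the same not-null expectation to several columns at once.
results = create_multiple_expectations(
    df, ["user_id", "event_ts"], "expect_column_values_to_not_be_null"
)
print(results)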
escape_html | (text, table=_escape_html_table) | Escape &, <, > as well as single and double quotes for HTML. | Escape &, <, > as well as single and double quotes for HTML. | def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table) | [
"def",
"escape_html",
"(",
"text",
",",
"table",
"=",
"_escape_html_table",
")",
":",
"return",
"text",
".",
"translate",
"(",
"table",
")"
] | [
39,
0
] | [
41,
32
] | python | en | ['en', 'en', 'en'] | True |
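The translation-table approach above is just str.translate over a small ordinal-to-entity mapping; an equivalent standalone sketch:

_escape_html_table = {
    ord("&"): "&amp;",
    ord("<"): "&lt;",
    ord(">"): "&gt;",
    ord('"'): "&quot;",
    ord("'"): "&#39;",
}

def escape_html(text, table=_escape_html_table):
    return text.translate(table)

print(escape_html('<a href="x">&</a>'))  # &lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;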
HtmlFormatter._get_css_class | (self, ttype) | Return the css class of this token type prefixed with
the classprefix option. | Return the css class of this token type prefixed with
the classprefix option. | def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return '' | [
"def",
"_get_css_class",
"(",
"self",
",",
"ttype",
")",
":",
"ttypeclass",
"=",
"_get_ttype_class",
"(",
"ttype",
")",
"if",
"ttypeclass",
":",
"return",
"self",
".",
"classprefix",
"+",
"ttypeclass",
"return",
"''"
] | [
429,
4
] | [
435,
17
] | python | en | ['en', 'en', 'en'] | True |
HtmlFormatter._get_css_classes | (self, ttype) | Return the css classes of this token type prefixed with
the classprefix option. | Return the css classes of this token type prefixed with
the classprefix option. | def _get_css_classes(self, ttype):
"""Return the css classes of this token type prefixed with
the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls | [
"def",
"_get_css_classes",
"(",
"self",
",",
"ttype",
")",
":",
"cls",
"=",
"self",
".",
"_get_css_class",
"(",
"ttype",
")",
"while",
"ttype",
"not",
"in",
"STANDARD_TYPES",
":",
"ttype",
"=",
"ttype",
".",
"parent",
"cls",
"=",
"self",
".",
"_get_css_class",
"(",
"ttype",
")",
"+",
"' '",
"+",
"cls",
"return",
"cls"
] | [
437,
4
] | [
444,
18
] | python | en | ['en', 'en', 'en'] | True |
HtmlFormatter.get_style_defs | (self, arg=None) |
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
|
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
| def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines) | [
"def",
"get_style_defs",
"(",
"self",
",",
"arg",
"=",
"None",
")",
":",
"if",
"arg",
"is",
"None",
":",
"arg",
"=",
"(",
"'cssclass'",
"in",
"self",
".",
"options",
"and",
"'.'",
"+",
"self",
".",
"cssclass",
"or",
"''",
")",
"if",
"isinstance",
"(",
"arg",
",",
"string_types",
")",
":",
"args",
"=",
"[",
"arg",
"]",
"else",
":",
"args",
"=",
"list",
"(",
"arg",
")",
"def",
"prefix",
"(",
"cls",
")",
":",
"if",
"cls",
":",
"cls",
"=",
"'.'",
"+",
"cls",
"tmp",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"tmp",
".",
"append",
"(",
"(",
"arg",
"and",
"arg",
"+",
"' '",
"or",
"''",
")",
"+",
"cls",
")",
"return",
"', '",
".",
"join",
"(",
"tmp",
")",
"styles",
"=",
"[",
"(",
"level",
",",
"ttype",
",",
"cls",
",",
"style",
")",
"for",
"cls",
",",
"(",
"style",
",",
"ttype",
",",
"level",
")",
"in",
"iteritems",
"(",
"self",
".",
"class2style",
")",
"if",
"cls",
"and",
"style",
"]",
"styles",
".",
"sort",
"(",
")",
"lines",
"=",
"[",
"'%s { %s } /* %s */'",
"%",
"(",
"prefix",
"(",
"cls",
")",
",",
"style",
",",
"repr",
"(",
"ttype",
")",
"[",
"6",
":",
"]",
")",
"for",
"(",
"level",
",",
"ttype",
",",
"cls",
",",
"style",
")",
"in",
"styles",
"]",
"if",
"arg",
"and",
"not",
"self",
".",
"nobackground",
"and",
"self",
".",
"style",
".",
"background_color",
"is",
"not",
"None",
":",
"text_style",
"=",
"''",
"if",
"Text",
"in",
"self",
".",
"ttype2class",
":",
"text_style",
"=",
"' '",
"+",
"self",
".",
"class2style",
"[",
"self",
".",
"ttype2class",
"[",
"Text",
"]",
"]",
"[",
"0",
"]",
"lines",
".",
"insert",
"(",
"0",
",",
"'%s { background: %s;%s }'",
"%",
"(",
"prefix",
"(",
"''",
")",
",",
"self",
".",
"style",
".",
"background_color",
",",
"text_style",
")",
")",
"if",
"self",
".",
"style",
".",
"highlight_color",
"is",
"not",
"None",
":",
"lines",
".",
"insert",
"(",
"0",
",",
"'%s.hll { background-color: %s }'",
"%",
"(",
"prefix",
"(",
"''",
")",
",",
"self",
".",
"style",
".",
"highlight_color",
")",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | [
470,
4
] | [
507,
31
] | python | en | ['en', 'error', 'th'] | False |
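In everyday Pygments usage the CSS produced by this method is emitted once per page and scoped to the wrapper class, e.g.:

from pygments.formatters import HtmlFormatter

formatter = HtmlFormatter(style="default", cssclass="highlight")
css = formatter.get_style_defs(".highlight")   # prefix every rule with the wrapper selector
print(css.splitlines()[0])                     # e.g. a ".highlight .hll { ... }" rule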
HtmlFormatter._format_lines | (self, tokensource) |
Just format the tokens, without any wrapping tags.
Yield individual lines.
|
Just format the tokens, without any wrapping tags.
Yield individual lines.
| def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line) | [
"def",
"_format_lines",
"(",
"self",
",",
"tokensource",
")",
":",
"nocls",
"=",
"self",
".",
"noclasses",
"lsep",
"=",
"self",
".",
"lineseparator",
"# for <span style=\"\"> lookup only",
"getcls",
"=",
"self",
".",
"ttype2class",
".",
"get",
"c2s",
"=",
"self",
".",
"class2style",
"escape_table",
"=",
"_escape_html_table",
"tagsfile",
"=",
"self",
".",
"tagsfile",
"lspan",
"=",
"''",
"line",
"=",
"[",
"]",
"for",
"ttype",
",",
"value",
"in",
"tokensource",
":",
"if",
"nocls",
":",
"cclass",
"=",
"getcls",
"(",
"ttype",
")",
"while",
"cclass",
"is",
"None",
":",
"ttype",
"=",
"ttype",
".",
"parent",
"cclass",
"=",
"getcls",
"(",
"ttype",
")",
"cspan",
"=",
"cclass",
"and",
"'<span style=\"%s\">'",
"%",
"c2s",
"[",
"cclass",
"]",
"[",
"0",
"]",
"or",
"''",
"else",
":",
"cls",
"=",
"self",
".",
"_get_css_classes",
"(",
"ttype",
")",
"cspan",
"=",
"cls",
"and",
"'<span class=\"%s\">'",
"%",
"cls",
"or",
"''",
"parts",
"=",
"value",
".",
"translate",
"(",
"escape_table",
")",
".",
"split",
"(",
"'\\n'",
")",
"if",
"tagsfile",
"and",
"ttype",
"in",
"Token",
".",
"Name",
":",
"filename",
",",
"linenumber",
"=",
"self",
".",
"_lookup_ctag",
"(",
"value",
")",
"if",
"linenumber",
":",
"base",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"if",
"base",
":",
"base",
"+=",
"'/'",
"filename",
",",
"extension",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"url",
"=",
"self",
".",
"tagurlformat",
"%",
"{",
"'path'",
":",
"base",
",",
"'fname'",
":",
"filename",
",",
"'fext'",
":",
"extension",
"}",
"parts",
"[",
"0",
"]",
"=",
"\"<a href=\\\"%s#%s-%d\\\">%s\"",
"%",
"(",
"url",
",",
"self",
".",
"lineanchors",
",",
"linenumber",
",",
"parts",
"[",
"0",
"]",
")",
"parts",
"[",
"-",
"1",
"]",
"=",
"parts",
"[",
"-",
"1",
"]",
"+",
"\"</a>\"",
"# for all but the last line",
"for",
"part",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"if",
"line",
":",
"if",
"lspan",
"!=",
"cspan",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"cspan",
",",
"part",
",",
"(",
"cspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"else",
":",
"# both are the same",
"line",
".",
"extend",
"(",
"(",
"part",
",",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"line",
")",
"line",
"=",
"[",
"]",
"elif",
"part",
":",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"(",
"cspan",
",",
"part",
",",
"(",
"cspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"else",
":",
"yield",
"1",
",",
"lsep",
"# for the last line",
"if",
"line",
"and",
"parts",
"[",
"-",
"1",
"]",
":",
"if",
"lspan",
"!=",
"cspan",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"cspan",
",",
"parts",
"[",
"-",
"1",
"]",
")",
")",
"lspan",
"=",
"cspan",
"else",
":",
"line",
".",
"append",
"(",
"parts",
"[",
"-",
"1",
"]",
")",
"elif",
"parts",
"[",
"-",
"1",
"]",
":",
"line",
"=",
"[",
"cspan",
",",
"parts",
"[",
"-",
"1",
"]",
"]",
"lspan",
"=",
"cspan",
"# else we neither have to open a new span nor set lspan",
"if",
"line",
":",
"line",
".",
"extend",
"(",
"(",
"(",
"lspan",
"and",
"'</span>'",
")",
",",
"lsep",
")",
")",
"yield",
"1",
",",
"''",
".",
"join",
"(",
"line",
")"
] | [
711,
4
] | [
780,
34
] | python | en | ['en', 'error', 'th'] | False |
HtmlFormatter._highlight_lines | (self, tokensource) |
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
|
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
| def _highlight_lines(self, tokensource):
"""
Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value | [
"def",
"_highlight_lines",
"(",
"self",
",",
"tokensource",
")",
":",
"hls",
"=",
"self",
".",
"hl_lines",
"for",
"i",
",",
"(",
"t",
",",
"value",
")",
"in",
"enumerate",
"(",
"tokensource",
")",
":",
"if",
"t",
"!=",
"1",
":",
"yield",
"t",
",",
"value",
"if",
"i",
"+",
"1",
"in",
"hls",
":",
"# i + 1 because Python indexes start at 0",
"if",
"self",
".",
"noclasses",
":",
"style",
"=",
"''",
"if",
"self",
".",
"style",
".",
"highlight_color",
"is",
"not",
"None",
":",
"style",
"=",
"(",
"' style=\"background-color: %s\"'",
"%",
"(",
"self",
".",
"style",
".",
"highlight_color",
",",
")",
")",
"yield",
"1",
",",
"'<span%s>%s</span>'",
"%",
"(",
"style",
",",
"value",
")",
"else",
":",
"yield",
"1",
",",
"'<span class=\"hll\">%s</span>'",
"%",
"value",
"else",
":",
"yield",
"1",
",",
"value"
] | [
789,
4
] | [
809,
30
] | python | en | ['en', 'error', 'th'] | False |
HtmlFormatter.wrap | (self, source, outfile) |
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
|
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
| def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source)) | [
"def",
"wrap",
"(",
"self",
",",
"source",
",",
"outfile",
")",
":",
"return",
"self",
".",
"_wrap_div",
"(",
"self",
".",
"_wrap_pre",
"(",
"source",
")",
")"
] | [
811,
4
] | [
817,
53
] | python | en | ['en', 'error', 'th'] | False |
HtmlFormatter.format_unencoded | (self, tokensource, outfile) |
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
|
The formatting process uses several nested generators; which of
them are used is determined by the user's options. | def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece) | [
"def",
"format_unencoded",
"(",
"self",
",",
"tokensource",
",",
"outfile",
")",
":",
"source",
"=",
"self",
".",
"_format_lines",
"(",
"tokensource",
")",
"if",
"self",
".",
"hl_lines",
":",
"source",
"=",
"self",
".",
"_highlight_lines",
"(",
"source",
")",
"if",
"not",
"self",
".",
"nowrap",
":",
"if",
"self",
".",
"linenos",
"==",
"2",
":",
"source",
"=",
"self",
".",
"_wrap_inlinelinenos",
"(",
"source",
")",
"if",
"self",
".",
"lineanchors",
":",
"source",
"=",
"self",
".",
"_wrap_lineanchors",
"(",
"source",
")",
"if",
"self",
".",
"linespans",
":",
"source",
"=",
"self",
".",
"_wrap_linespans",
"(",
"source",
")",
"source",
"=",
"self",
".",
"wrap",
"(",
"source",
",",
"outfile",
")",
"if",
"self",
".",
"linenos",
"==",
"1",
":",
"source",
"=",
"self",
".",
"_wrap_tablelinenos",
"(",
"source",
")",
"if",
"self",
".",
"full",
":",
"source",
"=",
"self",
".",
"_wrap_full",
"(",
"source",
",",
"outfile",
")",
"for",
"t",
",",
"piece",
"in",
"source",
":",
"outfile",
".",
"write",
"(",
"piece",
")"
] | [
819,
4
] | [
850,
32
] | python | en | ['en', 'error', 'th'] | False |
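End to end, this pipeline is normally driven through pygments.highlight, which wires a lexer and this formatter together:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

code = "print('hello')\n"
html = highlight(code, PythonLexer(), HtmlFormatter(linenos=True, hl_lines=[1]))
print(html[:40])  # the table-based wrapper produced when linenos is enabled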
alice_columnar_table_single_batch | () |
About the "Alice" User Workflow Fixture
Alice has a single table of columnar data called user_events (DataAsset) that she wants to check periodically as new data is added.
- She knows what some of the columns mean, but not all - and there are MANY of them (only a subset currently shown in examples and fixtures).
- She has organized other tables similarly so that for example column name suffixes indicate which are for user ids (_id) and which timestamps are for versioning (_ts).
She wants to use a configurable profiler to generate a description (ExpectationSuite) about the table so that she can:
1. use it to validate the user_events table periodically and set up alerts for when things change
2. have a place to add her domain knowledge of the data (that can also be validated against new data)
3. if all goes well, generalize some of the Profiler to use on her other tables
Alice configures her Profiler using the yaml configurations and data file locations captured in this fixture.
|
About the "Alice" User Workflow Fixture | def alice_columnar_table_single_batch():
"""
About the "Alice" User Workflow Fixture
Alice has a single table of columnar data called user_events (DataAsset) that she wants to check periodically as new data is added.
- She knows what some of the columns mean, but not all - and there are MANY of them (only a subset currently shown in examples and fixtures).
- She has organized other tables similarly so that for example column name suffixes indicate which are for user ids (_id) and which timestamps are for versioning (_ts).
She wants to use a configurable profiler to generate a description (ExpectationSuite) about the table so that she can:
1. use it to validate the user_events table periodically and set up alerts for when things change
2. have a place to add her domain knowledge of the data (that can also be validated against new data)
3. if all goes well, generalize some of the Profiler to use on her other tables
Alice configures her Profiler using the yaml configurations and data file locations captured in this fixture.
"""
verbose_profiler_config_file_path: str = file_relative_path(
__file__, "alice_user_workflow_verbose_profiler_config.yml"
)
verbose_profiler_config: str
with open(verbose_profiler_config_file_path) as f:
verbose_profiler_config = f.read()
my_rule_for_user_ids_expectation_configurations: List[ExpectationConfiguration] = [
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_of_type",
"kwargs": {
"column": "user_id",
"type_": "INTEGER",
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_between",
"kwargs": {
"min_value": 397433, # From the data
"max_value": 999999999999,
"column": "user_id",
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_not_be_null",
"kwargs": {
"column": "user_id",
},
"meta": {},
}
),
]
event_ts_column_data: Dict[str, str] = {
"column_name": "event_ts",
"observed_max_time_str": "2004-10-19 11:05:20",
}
my_rule_for_timestamps_column_data: List[Dict[str, str]] = [
event_ts_column_data,
{
"column_name": "server_ts",
"observed_max_time_str": "2004-10-19 11:05:20",
},
{
"column_name": "device_ts",
"observed_max_time_str": "2004-10-19 11:05:22",
},
]
my_rule_for_timestamps_expectation_configurations: List[
ExpectationConfiguration
] = []
column_data: Dict[str, str]
for column_data in my_rule_for_timestamps_column_data:
my_rule_for_timestamps_expectation_configurations.extend(
[
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_of_type",
"kwargs": {
"column": column_data["column_name"],
"type_": "TIMESTAMP",
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_increasing",
"kwargs": {
"column": column_data["column_name"],
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_values_to_be_dateutil_parseable",
"kwargs": {
"column": column_data["column_name"],
},
"meta": {},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_min_to_be_between",
"kwargs": {
"column": column_data["column_name"],
"min_value": "2004-10-19T10:23:54", # From variables
"max_value": "2004-10-19T10:23:54", # From variables
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms no events occur before tracking started **2004-10-19 10:23:54**"
],
}
},
}
),
ExpectationConfiguration(
**{
"expectation_type": "expect_column_max_to_be_between",
"kwargs": {
"column": column_data["column_name"],
"min_value": "2004-10-19T10:23:54", # From variables
"max_value": event_ts_column_data[
"observed_max_time_str"
], # Pin to event_ts column
},
"meta": {
"notes": {
"format": "markdown",
"content": [
"### This expectation confirms that the event_ts contains the latest timestamp of all domains"
],
}
},
}
),
]
)
expectation_configurations: List[ExpectationConfiguration] = []
expectation_configurations.extend(my_rule_for_user_ids_expectation_configurations)
expectation_configurations.extend(my_rule_for_timestamps_expectation_configurations)
expectation_suite_name: str = "alice_columnar_table_single_batch"
expected_expectation_suite: ExpectationSuite = ExpectationSuite(
expectation_suite_name=expectation_suite_name
)
expectation_configuration: ExpectationConfiguration
for expectation_configuration in expectation_configurations:
expected_expectation_suite.add_expectation(expectation_configuration)
# NOTE that this expectation suite should fail when validated on the data in "sample_data_relative_path"
# because the device_ts is ahead of the event_ts for the latest event
sample_data_relative_path: str = "alice_columnar_table_single_batch_data.csv"
yaml = YAML()
profiler_config: dict = yaml.load(verbose_profiler_config)
expected_expectation_suite.add_citation(
comment="Suite created by Rule-Based Profiler with the configuration included.",
profiler_config=profiler_config,
)
return {
"profiler_config": verbose_profiler_config,
"expected_expectation_suite_name": expectation_suite_name,
"expected_expectation_suite": expected_expectation_suite,
"sample_data_relative_path": sample_data_relative_path,
} | [
"def",
"alice_columnar_table_single_batch",
"(",
")",
":",
"verbose_profiler_config_file_path",
":",
"str",
"=",
"file_relative_path",
"(",
"__file__",
",",
"\"alice_user_workflow_verbose_profiler_config.yml\"",
")",
"verbose_profiler_config",
":",
"str",
"with",
"open",
"(",
"verbose_profiler_config_file_path",
")",
"as",
"f",
":",
"verbose_profiler_config",
"=",
"f",
".",
"read",
"(",
")",
"my_rule_for_user_ids_expectation_configurations",
":",
"List",
"[",
"ExpectationConfiguration",
"]",
"=",
"[",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_of_type\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"user_id\"",
",",
"\"type_\"",
":",
"\"INTEGER\"",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"min_value\"",
":",
"397433",
",",
"# From the data",
"\"max_value\"",
":",
"999999999999",
",",
"\"column\"",
":",
"\"user_id\"",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_not_be_null\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"\"user_id\"",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"]",
"event_ts_column_data",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"=",
"{",
"\"column_name\"",
":",
"\"event_ts\"",
",",
"\"observed_max_time_str\"",
":",
"\"2004-10-19 11:05:20\"",
",",
"}",
"my_rule_for_timestamps_column_data",
":",
"List",
"[",
"Dict",
"[",
"str",
",",
"str",
"]",
"]",
"=",
"[",
"event_ts_column_data",
",",
"{",
"\"column_name\"",
":",
"\"server_ts\"",
",",
"\"observed_max_time_str\"",
":",
"\"2004-10-19 11:05:20\"",
",",
"}",
",",
"{",
"\"column_name\"",
":",
"\"device_ts\"",
",",
"\"observed_max_time_str\"",
":",
"\"2004-10-19 11:05:22\"",
",",
"}",
",",
"]",
"my_rule_for_timestamps_expectation_configurations",
":",
"List",
"[",
"ExpectationConfiguration",
"]",
"=",
"[",
"]",
"column_data",
":",
"Dict",
"[",
"str",
",",
"str",
"]",
"for",
"column_data",
"in",
"my_rule_for_timestamps_column_data",
":",
"my_rule_for_timestamps_expectation_configurations",
".",
"extend",
"(",
"[",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_of_type\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"column_data",
"[",
"\"column_name\"",
"]",
",",
"\"type_\"",
":",
"\"TIMESTAMP\"",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_increasing\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"column_data",
"[",
"\"column_name\"",
"]",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_values_to_be_dateutil_parseable\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"column_data",
"[",
"\"column_name\"",
"]",
",",
"}",
",",
"\"meta\"",
":",
"{",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_min_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"column_data",
"[",
"\"column_name\"",
"]",
",",
"\"min_value\"",
":",
"\"2004-10-19T10:23:54\"",
",",
"# From variables",
"\"max_value\"",
":",
"\"2004-10-19T10:23:54\"",
",",
"# From variables",
"}",
",",
"\"meta\"",
":",
"{",
"\"notes\"",
":",
"{",
"\"format\"",
":",
"\"markdown\"",
",",
"\"content\"",
":",
"[",
"\"### This expectation confirms no events occur before tracking started **2004-10-19 10:23:54**\"",
"]",
",",
"}",
"}",
",",
"}",
")",
",",
"ExpectationConfiguration",
"(",
"*",
"*",
"{",
"\"expectation_type\"",
":",
"\"expect_column_max_to_be_between\"",
",",
"\"kwargs\"",
":",
"{",
"\"column\"",
":",
"column_data",
"[",
"\"column_name\"",
"]",
",",
"\"min_value\"",
":",
"\"2004-10-19T10:23:54\"",
",",
"# From variables",
"\"max_value\"",
":",
"event_ts_column_data",
"[",
"\"observed_max_time_str\"",
"]",
",",
"# Pin to event_ts column",
"}",
",",
"\"meta\"",
":",
"{",
"\"notes\"",
":",
"{",
"\"format\"",
":",
"\"markdown\"",
",",
"\"content\"",
":",
"[",
"\"### This expectation confirms that the event_ts contains the latest timestamp of all domains\"",
"]",
",",
"}",
"}",
",",
"}",
")",
",",
"]",
")",
"expectation_configurations",
":",
"List",
"[",
"ExpectationConfiguration",
"]",
"=",
"[",
"]",
"expectation_configurations",
".",
"extend",
"(",
"my_rule_for_user_ids_expectation_configurations",
")",
"expectation_configurations",
".",
"extend",
"(",
"my_rule_for_timestamps_expectation_configurations",
")",
"expectation_suite_name",
":",
"str",
"=",
"\"alice_columnar_table_single_batch\"",
"expected_expectation_suite",
":",
"ExpectationSuite",
"=",
"ExpectationSuite",
"(",
"expectation_suite_name",
"=",
"expectation_suite_name",
")",
"expectation_configuration",
":",
"ExpectationConfiguration",
"for",
"expectation_configuration",
"in",
"expectation_configurations",
":",
"expected_expectation_suite",
".",
"add_expectation",
"(",
"expectation_configuration",
")",
"# NOTE that this expectation suite should fail when validated on the data in \"sample_data_relative_path\"",
"# because the device_ts is ahead of the event_ts for the latest event",
"sample_data_relative_path",
":",
"str",
"=",
"\"alice_columnar_table_single_batch_data.csv\"",
"yaml",
"=",
"YAML",
"(",
")",
"profiler_config",
":",
"dict",
"=",
"yaml",
".",
"load",
"(",
"verbose_profiler_config",
")",
"expected_expectation_suite",
".",
"add_citation",
"(",
"comment",
"=",
"\"Suite created by Rule-Based Profiler with the configuration included.\"",
",",
"profiler_config",
"=",
"profiler_config",
",",
")",
"return",
"{",
"\"profiler_config\"",
":",
"verbose_profiler_config",
",",
"\"expected_expectation_suite_name\"",
":",
"expectation_suite_name",
",",
"\"expected_expectation_suite\"",
":",
"expected_expectation_suite",
",",
"\"sample_data_relative_path\"",
":",
"sample_data_relative_path",
",",
"}"
] | [
14,
0
] | [
196,
5
] | python | en | ['en', 'error', 'th'] | False |
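A minimal sketch of how a test could consume the fixture record above, assuming the function is registered as a pytest fixture (the decorator is not shown in this record); the dict keys and the expectation_suite_name attribute come from the code above, everything else is illustrative:

def test_alice_fixture_is_internally_consistent(alice_columnar_table_single_batch):
    # The fixture returns a plain dict; these keys come from its return statement.
    fixture = alice_columnar_table_single_batch
    suite = fixture["expected_expectation_suite"]
    assert suite.expectation_suite_name == fixture["expected_expectation_suite_name"]
    assert fixture["sample_data_relative_path"].endswith(".csv")
    # profiler_config is the raw YAML string the fixture read from disk.
    assert isinstance(fixture["profiler_config"], str)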
get_document_store | (document_store_type, similarity='dot_product') | TODO This method is taken from test/conftest.py but maybe should be within Haystack.
Perhaps a class method of DocStore that just takes string for type of DocStore | TODO This method is taken from test/conftest.py but maybe should be within Haystack.
Perhaps a class method of DocStore that just takes string for type of DocStore | def get_document_store(document_store_type, similarity='dot_product'):
""" TODO This method is taken from test/conftest.py but maybe should be within Haystack.
Perhaps a class method of DocStore that just takes string for type of DocStore"""
if document_store_type == "sql":
if os.path.exists("haystack_test.db"):
os.remove("haystack_test.db")
document_store = SQLDocumentStore(url="sqlite:///haystack_test.db")
elif document_store_type == "memory":
document_store = InMemoryDocumentStore()
elif document_store_type == "elasticsearch":
# make sure we start from a fresh index
client = Elasticsearch()
client.indices.delete(index='haystack_test*', ignore=[404])
document_store = ElasticsearchDocumentStore(index="eval_document", similarity=similarity, timeout=3000)
elif document_store_type in("faiss_flat", "faiss_hnsw"):
if document_store_type == "faiss_flat":
index_type = "Flat"
elif document_store_type == "faiss_hnsw":
index_type = "HNSW"
status = subprocess.run(
['docker rm -f haystack-postgres'],
shell=True)
time.sleep(1)
status = subprocess.run(
['docker run --name haystack-postgres -p 5432:5432 -e POSTGRES_PASSWORD=password -d postgres'],
shell=True)
time.sleep(6)
status = subprocess.run(
['docker exec -it haystack-postgres psql -U postgres -c "CREATE DATABASE haystack;"'], shell=True)
time.sleep(1)
document_store = FAISSDocumentStore(sql_url="postgresql://postgres:password@localhost:5432/haystack",
faiss_index_factory_str=index_type,
similarity=similarity)
else:
raise Exception(f"No document store fixture for '{document_store_type}'")
assert document_store.get_document_count() == 0
return document_store | [
"def",
"get_document_store",
"(",
"document_store_type",
",",
"similarity",
"=",
"'dot_product'",
")",
":",
"if",
"document_store_type",
"==",
"\"sql\"",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"\"haystack_test.db\"",
")",
":",
"os",
".",
"remove",
"(",
"\"haystack_test.db\"",
")",
"document_store",
"=",
"SQLDocumentStore",
"(",
"url",
"=",
"\"sqlite:///haystack_test.db\"",
")",
"elif",
"document_store_type",
"==",
"\"memory\"",
":",
"document_store",
"=",
"InMemoryDocumentStore",
"(",
")",
"elif",
"document_store_type",
"==",
"\"elasticsearch\"",
":",
"# make sure we start from a fresh index",
"client",
"=",
"Elasticsearch",
"(",
")",
"client",
".",
"indices",
".",
"delete",
"(",
"index",
"=",
"'haystack_test*'",
",",
"ignore",
"=",
"[",
"404",
"]",
")",
"document_store",
"=",
"ElasticsearchDocumentStore",
"(",
"index",
"=",
"\"eval_document\"",
",",
"similarity",
"=",
"similarity",
",",
"timeout",
"=",
"3000",
")",
"elif",
"document_store_type",
"in",
"(",
"\"faiss_flat\"",
",",
"\"faiss_hnsw\"",
")",
":",
"if",
"document_store_type",
"==",
"\"faiss_flat\"",
":",
"index_type",
"=",
"\"Flat\"",
"elif",
"document_store_type",
"==",
"\"faiss_hnsw\"",
":",
"index_type",
"=",
"\"HNSW\"",
"status",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'docker rm -f haystack-postgres'",
"]",
",",
"shell",
"=",
"True",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"status",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'docker run --name haystack-postgres -p 5432:5432 -e POSTGRES_PASSWORD=password -d postgres'",
"]",
",",
"shell",
"=",
"True",
")",
"time",
".",
"sleep",
"(",
"6",
")",
"status",
"=",
"subprocess",
".",
"run",
"(",
"[",
"'docker exec -it haystack-postgres psql -U postgres -c \"CREATE DATABASE haystack;\"'",
"]",
",",
"shell",
"=",
"True",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"document_store",
"=",
"FAISSDocumentStore",
"(",
"sql_url",
"=",
"\"postgresql://postgres:password@localhost:5432/haystack\"",
",",
"faiss_index_factory_str",
"=",
"index_type",
",",
"similarity",
"=",
"similarity",
")",
"else",
":",
"raise",
"Exception",
"(",
"f\"No document store fixture for '{document_store_type}'\"",
")",
"assert",
"document_store",
".",
"get_document_count",
"(",
")",
"==",
"0",
"return",
"document_store"
] | [
24,
0
] | [
61,
25
] | python | en | ['en', 'en', 'en'] | True |
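A minimal usage sketch for the helper above; "memory" avoids any external services. The write_documents call is an assumption about the Haystack document-store API and is not shown in this record:

# Fresh in-memory store, then confirm the count moves off zero after writing.
document_store = get_document_store("memory", similarity="dot_product")
document_store.write_documents(                      # assumed Haystack API call
    [{"text": "Haystack is a framework for building search systems.", "meta": {"name": "doc1"}}]
)
assert document_store.get_document_count() == 1      # get_document_count is used in the helper itself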
SelectPort.__init__ | (self, parent) | if sys.platform == "win32":
self.iconbitmap("data/Floppy_icon.ico")
elif sys.platform == "linux":
self.icon = Image("photo", file="data/Floppy_icon.png")
self.tk.call("wm", "iconphoto", self._w, self.icon) | if sys.platform == "win32":
self.iconbitmap("data/Floppy_icon.ico")
elif sys.platform == "linux":
self.icon = Image("photo", file="data/Floppy_icon.png")
self.tk.call("wm", "iconphoto", self._w, self.icon) | def __init__(self, parent):
super(SelectPort, self).__init__()
self.parent = parent
self.transient(self.parent)
width = 300
height = 100
pos_x = self.parent.winfo_x() + (self.parent.winfo_width() // 2) - (width // 2)
pos_y = self.parent.winfo_y() + (self.parent.winfo_height() // 2) - (height // 2)
self.geometry(f"{width}x{height}+{pos_x}+{pos_y}")
self.resizable(False, False)
self.title("Select Floppy port")
"""if sys.platform == "win32":
self.iconbitmap("data/Floppy_icon.ico")
elif sys.platform == "linux":
self.icon = Image("photo", file="data/Floppy_icon.png")
self.tk.call("wm", "iconphoto", self._w, self.icon)"""
# region Port
port = Frame(self, highlightthickness=1, highlightbackground="black")
Label(port, text="Select port: ", anchor=W).grid(column=0, row=0, sticky=E, pady=5, padx=5)
self.combo_box = ttk.Combobox(port, state="readonly", takefocus=0, width=10)
self.serial_ports()
self.combo_box.bind("<FocusIn>", self.connect_button)
self.combo_box.grid(column=1, row=0, sticky=W)
port.columnconfigure(0, weight=1)
port.columnconfigure(1, weight=1)
port.rowconfigure(0, weight=1)
port.grid(column=0, row=0, columnspan=2, sticky=N+S+E+W, padx=10, pady=10)
# endregion
ttk.Button(self, text="Refresh", takefocus=False, command=self.serial_ports).grid(column=0, row=1, sticky=N+S+E+W, padx=10, pady=5)
self.connect_button = ttk.Button(self, text="Connect", takefocus=False, state=DISABLED, command=self.connect)
self.connect_button.grid(column=1, row=1, sticky=N+S+E+W, padx=10, pady=5)
self.bind("<Return>", lambda e: self.connect())
for i in range(2):
self.columnconfigure(i, weight=1)
self.rowconfigure(0, weight=1)
self.protocol("WM_DELETE_WINDOW", self.close)
self.focus_set()
self.grab_set() | [
"def",
"__init__",
"(",
"self",
",",
"parent",
")",
":",
"super",
"(",
"SelectPort",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"parent",
"=",
"parent",
"self",
".",
"transient",
"(",
"self",
".",
"parent",
")",
"width",
"=",
"300",
"height",
"=",
"100",
"pos_x",
"=",
"self",
".",
"parent",
".",
"winfo_x",
"(",
")",
"+",
"(",
"self",
".",
"parent",
".",
"winfo_width",
"(",
")",
"//",
"2",
")",
"-",
"(",
"width",
"//",
"2",
")",
"pos_y",
"=",
"self",
".",
"parent",
".",
"winfo_y",
"(",
")",
"+",
"(",
"self",
".",
"parent",
".",
"winfo_height",
"(",
")",
"//",
"2",
")",
"-",
"(",
"height",
"//",
"2",
")",
"self",
".",
"geometry",
"(",
"f\"{width}x{height}+{pos_x}+{pos_y}\"",
")",
"self",
".",
"resizable",
"(",
"False",
",",
"False",
")",
"self",
".",
"title",
"(",
"\"Select Floppy port\"",
")",
"# region Port",
"port",
"=",
"Frame",
"(",
"self",
",",
"highlightthickness",
"=",
"1",
",",
"highlightbackground",
"=",
"\"black\"",
")",
"Label",
"(",
"port",
",",
"text",
"=",
"\"Select port: \"",
",",
"anchor",
"=",
"W",
")",
".",
"grid",
"(",
"column",
"=",
"0",
",",
"row",
"=",
"0",
",",
"sticky",
"=",
"E",
",",
"pady",
"=",
"5",
",",
"padx",
"=",
"5",
")",
"self",
".",
"combo_box",
"=",
"ttk",
".",
"Combobox",
"(",
"port",
",",
"state",
"=",
"\"readonly\"",
",",
"takefocus",
"=",
"0",
",",
"width",
"=",
"10",
")",
"self",
".",
"serial_ports",
"(",
")",
"self",
".",
"combo_box",
".",
"bind",
"(",
"\"<FocusIn>\"",
",",
"self",
".",
"connect_button",
")",
"self",
".",
"combo_box",
".",
"grid",
"(",
"column",
"=",
"1",
",",
"row",
"=",
"0",
",",
"sticky",
"=",
"W",
")",
"port",
".",
"columnconfigure",
"(",
"0",
",",
"weight",
"=",
"1",
")",
"port",
".",
"columnconfigure",
"(",
"1",
",",
"weight",
"=",
"1",
")",
"port",
".",
"rowconfigure",
"(",
"0",
",",
"weight",
"=",
"1",
")",
"port",
".",
"grid",
"(",
"column",
"=",
"0",
",",
"row",
"=",
"0",
",",
"columnspan",
"=",
"2",
",",
"sticky",
"=",
"N",
"+",
"S",
"+",
"E",
"+",
"W",
",",
"padx",
"=",
"10",
",",
"pady",
"=",
"10",
")",
"# endregion",
"ttk",
".",
"Button",
"(",
"self",
",",
"text",
"=",
"\"Refresh\"",
",",
"takefocus",
"=",
"False",
",",
"command",
"=",
"self",
".",
"serial_ports",
")",
".",
"grid",
"(",
"column",
"=",
"0",
",",
"row",
"=",
"1",
",",
"sticky",
"=",
"N",
"+",
"S",
"+",
"E",
"+",
"W",
",",
"padx",
"=",
"10",
",",
"pady",
"=",
"5",
")",
"self",
".",
"connect_button",
"=",
"ttk",
".",
"Button",
"(",
"self",
",",
"text",
"=",
"\"Connect\"",
",",
"takefocus",
"=",
"False",
",",
"state",
"=",
"DISABLED",
",",
"command",
"=",
"self",
".",
"connect",
")",
"self",
".",
"connect_button",
".",
"grid",
"(",
"column",
"=",
"1",
",",
"row",
"=",
"1",
",",
"sticky",
"=",
"N",
"+",
"S",
"+",
"E",
"+",
"W",
",",
"padx",
"=",
"10",
",",
"pady",
"=",
"5",
")",
"self",
".",
"bind",
"(",
"\"<Return>\"",
",",
"lambda",
"e",
":",
"self",
".",
"connect",
"(",
")",
")",
"for",
"i",
"in",
"range",
"(",
"2",
")",
":",
"self",
".",
"columnconfigure",
"(",
"i",
",",
"weight",
"=",
"1",
")",
"self",
".",
"rowconfigure",
"(",
"0",
",",
"weight",
"=",
"1",
")",
"self",
".",
"protocol",
"(",
"\"WM_DELETE_WINDOW\"",
",",
"self",
".",
"close",
")",
"self",
".",
"focus_set",
"(",
")",
"self",
".",
"grab_set",
"(",
")"
] | [
11,
4
] | [
66,
23
] | python | en | ['en', 'en', 'en'] | True |
SelectPort.connect_button | (self, event=None) | Enable connect button if port selected. | Enable connect button if port selected. | def connect_button(self, event=None):
"""Enable connect button if port selected."""
# Force focus to avoid blue selection on combobox.
self.focus_set()
if self.combo_box.get() != "":
self.connect_button.config(state=NORMAL) | [
"def",
"connect_button",
"(",
"self",
",",
"event",
"=",
"None",
")",
":",
"# Force focus to avoid blue selection on combobox.",
"self",
".",
"focus_set",
"(",
")",
"if",
"self",
".",
"combo_box",
".",
"get",
"(",
")",
"!=",
"\"\"",
":",
"self",
".",
"connect_button",
".",
"config",
"(",
"state",
"=",
"NORMAL",
")"
] | [
68,
4
] | [
76,
52
] | python | en | ['en', 'en', 'en'] | True |
SelectPort.serial_ports | (self) | Lists available serial port names. | Lists available serial port names. | def serial_ports(self):
"""Lists available serial port names."""
if sys.platform == "win32":
ports = ["COM%s" % (i + 1) for i in range(256)]
elif sys.platform == "linux":
ports = glob.glob("/dev/tty[A-Za-z]*")
else:
raise EnvironmentError("Unsupported platform")
result = list()
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
self.combo_box.config(values=result) | [
"def",
"serial_ports",
"(",
"self",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"\"win32\"",
":",
"ports",
"=",
"[",
"\"COM%s\"",
"%",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"256",
")",
"]",
"elif",
"sys",
".",
"platform",
"==",
"\"linux\"",
":",
"ports",
"=",
"glob",
".",
"glob",
"(",
"\"/dev/tty[A-Za-z]*\"",
")",
"else",
":",
"raise",
"EnvironmentError",
"(",
"\"Unsupported platform\"",
")",
"result",
"=",
"list",
"(",
")",
"for",
"port",
"in",
"ports",
":",
"try",
":",
"s",
"=",
"serial",
".",
"Serial",
"(",
"port",
")",
"s",
".",
"close",
"(",
")",
"result",
".",
"append",
"(",
"port",
")",
"except",
"(",
"OSError",
",",
"serial",
".",
"SerialException",
")",
":",
"pass",
"self",
".",
"combo_box",
".",
"config",
"(",
"values",
"=",
"result",
")"
] | [
78,
4
] | [
98,
44
] | python | en | ['fr', 'en', 'en'] | True |
SelectPort.connect | (self) | If a port is selected, close this window and connect to the board. | If a port is selected, close this window and connect to the board. | def connect(self):
"""If a port is selected, close this window and connect to the board."""
if self.combo_box.get() == "":
messagebox.showwarning("Warning", "No port selected. Please select one.")
else:
self.parent.connect(self.combo_box.get())
self.destroy() | [
"def",
"connect",
"(",
"self",
")",
":",
"if",
"self",
".",
"combo_box",
".",
"get",
"(",
")",
"==",
"\"\"",
":",
"messagebox",
".",
"showwarning",
"(",
"\"Warning\"",
",",
"\"No port selected. Please select one.\"",
")",
"else",
":",
"self",
".",
"parent",
".",
"connect",
"(",
"self",
".",
"combo_box",
".",
"get",
"(",
")",
")",
"self",
".",
"destroy",
"(",
")"
] | [
100,
4
] | [
108,
26
] | python | en | ['en', 'en', 'en'] | True |
SelectPort.close | (self) | If close button is pressed, destroy parent window | If close button is pressed, destroy parent window | def close(self):
"""If close button is pressed, destroy parent window"""
self.parent.destroy() | [
"def",
"close",
"(",
"self",
")",
":",
"self",
".",
"parent",
".",
"destroy",
"(",
")"
] | [
110,
4
] | [
114,
29
] | python | en | ['en', 'en', 'en'] | True |
FilePathDataConnector.__init__ | (
self,
name: str,
datasource_name: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
sorters: Optional[list] = None,
batch_spec_passthrough: Optional[dict] = None,
) |
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references.
Args:
name (str): name of FilePathDataConnector
datasource_name (str): Name of datasource that this DataConnector is connected to
execution_engine (ExecutionEngine): Execution Engine object to actually read the data
default_regex (dict): Optional dict used to filter and organize the data_references.
sorters (list): Optional list if you want to sort the data_references
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
|
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references. | def __init__(
self,
name: str,
datasource_name: str,
execution_engine: Optional[ExecutionEngine] = None,
default_regex: Optional[dict] = None,
sorters: Optional[list] = None,
batch_spec_passthrough: Optional[dict] = None,
):
"""
Base class for DataConnectors that connect to filesystem-like data. This class supports the configuration of default_regex
and sorters for filtering and sorting data_references.
Args:
name (str): name of FilePathDataConnector
datasource_name (str): Name of datasource that this DataConnector is connected to
execution_engine (ExecutionEngine): Execution Engine object to actually read the data
default_regex (dict): Optional dict used to filter and organize the data_references.
sorters (list): Optional list if you want to sort the data_references
batch_spec_passthrough (dict): dictionary with keys that will be added directly to batch_spec
"""
logger.debug(f'Constructing FilePathDataConnector "{name}".')
super().__init__(
name=name,
datasource_name=datasource_name,
execution_engine=execution_engine,
batch_spec_passthrough=batch_spec_passthrough,
)
if default_regex is None:
default_regex = {}
self._default_regex = default_regex
self._sorters = build_sorters_from_config(config_list=sorters)
self._validate_sorters_configuration() | [
"def",
"__init__",
"(",
"self",
",",
"name",
":",
"str",
",",
"datasource_name",
":",
"str",
",",
"execution_engine",
":",
"Optional",
"[",
"ExecutionEngine",
"]",
"=",
"None",
",",
"default_regex",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"sorters",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"batch_spec_passthrough",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
")",
":",
"logger",
".",
"debug",
"(",
"f'Constructing FilePathDataConnector \"{name}\".'",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"name",
"=",
"name",
",",
"datasource_name",
"=",
"datasource_name",
",",
"execution_engine",
"=",
"execution_engine",
",",
"batch_spec_passthrough",
"=",
"batch_spec_passthrough",
",",
")",
"if",
"default_regex",
"is",
"None",
":",
"default_regex",
"=",
"{",
"}",
"self",
".",
"_default_regex",
"=",
"default_regex",
"self",
".",
"_sorters",
"=",
"build_sorters_from_config",
"(",
"config_list",
"=",
"sorters",
")",
"self",
".",
"_validate_sorters_configuration",
"(",
")"
] | [
38,
4
] | [
73,
46
] | python | en | ['en', 'error', 'th'] | False |
FilePathDataConnector._get_data_reference_list_from_cache_by_data_asset_name | (
self, data_asset_name: str
) |
Fetch data_references corresponding to data_asset_name from the cache.
|
Fetch data_references corresponding to data_asset_name from the cache.
| def _get_data_reference_list_from_cache_by_data_asset_name(
self, data_asset_name: str
) -> List[str]:
"""
Fetch data_references corresponding to data_asset_name from the cache.
"""
regex_config: dict = self._get_regex_config(data_asset_name=data_asset_name)
pattern: str = regex_config["pattern"]
group_names: List[str] = regex_config["group_names"]
batch_definition_list = self._get_batch_definition_list_from_batch_request(
batch_request=BatchRequestBase(
datasource_name=self.datasource_name,
data_connector_name=self.name,
data_asset_name=data_asset_name,
)
)
if len(self.sorters) > 0:
batch_definition_list = self._sort_batch_definition_list(
batch_definition_list=batch_definition_list
)
path_list: List[str] = [
map_batch_definition_to_data_reference_string_using_regex(
batch_definition=batch_definition,
regex_pattern=pattern,
group_names=group_names,
)
for batch_definition in batch_definition_list
]
return path_list | [
"def",
"_get_data_reference_list_from_cache_by_data_asset_name",
"(",
"self",
",",
"data_asset_name",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"regex_config",
":",
"dict",
"=",
"self",
".",
"_get_regex_config",
"(",
"data_asset_name",
"=",
"data_asset_name",
")",
"pattern",
":",
"str",
"=",
"regex_config",
"[",
"\"pattern\"",
"]",
"group_names",
":",
"List",
"[",
"str",
"]",
"=",
"regex_config",
"[",
"\"group_names\"",
"]",
"batch_definition_list",
"=",
"self",
".",
"_get_batch_definition_list_from_batch_request",
"(",
"batch_request",
"=",
"BatchRequestBase",
"(",
"datasource_name",
"=",
"self",
".",
"datasource_name",
",",
"data_connector_name",
"=",
"self",
".",
"name",
",",
"data_asset_name",
"=",
"data_asset_name",
",",
")",
")",
"if",
"len",
"(",
"self",
".",
"sorters",
")",
">",
"0",
":",
"batch_definition_list",
"=",
"self",
".",
"_sort_batch_definition_list",
"(",
"batch_definition_list",
"=",
"batch_definition_list",
")",
"path_list",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"map_batch_definition_to_data_reference_string_using_regex",
"(",
"batch_definition",
"=",
"batch_definition",
",",
"regex_pattern",
"=",
"pattern",
",",
"group_names",
"=",
"group_names",
",",
")",
"for",
"batch_definition",
"in",
"batch_definition_list",
"]",
"return",
"path_list"
] | [
79,
4
] | [
111,
24
] | python | en | ['en', 'error', 'th'] | False |
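The method above reads a `pattern` and a `group_names` list out of each regex config, so the `default_regex` block passed to the constructor needs that shape; a small illustrative sketch (the specific pattern and group names are assumptions, not taken from these records):

# Hypothetical default_regex for files named like "user_events_20040101_001.csv".
default_regex = {
    "pattern": r"(.+)_(\d{8})_(\d+)\.csv",
    "group_names": ["data_asset_name", "run_date", "batch_number"],
}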
FilePathDataConnector.get_batch_definition_list_from_batch_request | (
self,
batch_request: BatchRequest,
) |
Retrieve batch_definitions that match batch_request.
First retrieves all batch_definitions that match batch_request
- if batch_request also has a batch_filter, then select batch_definitions that match batch_filter.
- if data_connector has sorters configured, then sort the batch_definition list before returning.
Args:
batch_request (BatchRequest): BatchRequest (containing previously validated attributes) to process
Returns:
A list of BatchDefinition objects that match BatchRequest
|
Retrieve batch_definitions that match batch_request. | def get_batch_definition_list_from_batch_request(
self,
batch_request: BatchRequest,
) -> List[BatchDefinition]:
"""
Retrieve batch_definitions that match batch_request.
First retrieves all batch_definitions that match batch_request
- if batch_request also has a batch_filter, then select batch_definitions that match batch_filter.
- if data_connector has sorters configured, then sort the batch_definition list before returning.
Args:
batch_request (BatchRequest): BatchRequest (containing previously validated attributes) to process
Returns:
A list of BatchDefinition objects that match BatchRequest
"""
batch_request_base: BatchRequestBase = cast(BatchRequestBase, batch_request)
return self._get_batch_definition_list_from_batch_request(
batch_request=batch_request_base
) | [
"def",
"get_batch_definition_list_from_batch_request",
"(",
"self",
",",
"batch_request",
":",
"BatchRequest",
",",
")",
"->",
"List",
"[",
"BatchDefinition",
"]",
":",
"batch_request_base",
":",
"BatchRequestBase",
"=",
"cast",
"(",
"BatchRequestBase",
",",
"batch_request",
")",
"return",
"self",
".",
"_get_batch_definition_list_from_batch_request",
"(",
"batch_request",
"=",
"batch_request_base",
")"
] | [
113,
4
] | [
134,
9
] | python | en | ['en', 'error', 'th'] | False |
FilePathDataConnector._get_batch_definition_list_from_batch_request | (
self,
batch_request: BatchRequestBase,
) |
Retrieve batch_definitions that match batch_request.
First retrieves all batch_definitions that match batch_request
- if batch_request also has a batch_filter, then select batch_definitions that match batch_filter.
- if data_connector has sorters configured, then sort the batch_definition list before returning.
Args:
batch_request (BatchRequestBase): BatchRequestBase (BatchRequest without attribute validation) to process
Returns:
A list of BatchDefinition objects that match BatchRequest
|
Retrieve batch_definitions that match batch_request. | def _get_batch_definition_list_from_batch_request(
self,
batch_request: BatchRequestBase,
) -> List[BatchDefinition]:
"""
Retrieve batch_definitions that match batch_request.
First retrieves all batch_definitions that match batch_request
- if batch_request also has a batch_filter, then select batch_definitions that match batch_filter.
- if data_connector has sorters configured, then sort the batch_definition list before returning.
Args:
batch_request (BatchRequestBase): BatchRequestBase (BatchRequest without attribute validation) to process
Returns:
A list of BatchDefinition objects that match BatchRequest
"""
self._validate_batch_request(batch_request=batch_request)
if len(self._data_references_cache) == 0:
self._refresh_data_references_cache()
batch_definition_list: List[BatchDefinition] = list(
filter(
lambda batch_definition: batch_definition_matches_batch_request(
batch_definition=batch_definition, batch_request=batch_request
),
self._get_batch_definition_list_from_cache(),
)
)
if len(self.sorters) > 0:
batch_definition_list = self._sort_batch_definition_list(
batch_definition_list=batch_definition_list
)
if batch_request.data_connector_query is not None:
batch_filter_obj: BatchFilter = build_batch_filter(
data_connector_query_dict=batch_request.data_connector_query
)
batch_definition_list = batch_filter_obj.select_from_data_connector_query(
batch_definition_list=batch_definition_list
)
return batch_definition_list | [
"def",
"_get_batch_definition_list_from_batch_request",
"(",
"self",
",",
"batch_request",
":",
"BatchRequestBase",
",",
")",
"->",
"List",
"[",
"BatchDefinition",
"]",
":",
"self",
".",
"_validate_batch_request",
"(",
"batch_request",
"=",
"batch_request",
")",
"if",
"len",
"(",
"self",
".",
"_data_references_cache",
")",
"==",
"0",
":",
"self",
".",
"_refresh_data_references_cache",
"(",
")",
"batch_definition_list",
":",
"List",
"[",
"BatchDefinition",
"]",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"batch_definition",
":",
"batch_definition_matches_batch_request",
"(",
"batch_definition",
"=",
"batch_definition",
",",
"batch_request",
"=",
"batch_request",
")",
",",
"self",
".",
"_get_batch_definition_list_from_cache",
"(",
")",
",",
")",
")",
"if",
"len",
"(",
"self",
".",
"sorters",
")",
">",
"0",
":",
"batch_definition_list",
"=",
"self",
".",
"_sort_batch_definition_list",
"(",
"batch_definition_list",
"=",
"batch_definition_list",
")",
"if",
"batch_request",
".",
"data_connector_query",
"is",
"not",
"None",
":",
"batch_filter_obj",
":",
"BatchFilter",
"=",
"build_batch_filter",
"(",
"data_connector_query_dict",
"=",
"batch_request",
".",
"data_connector_query",
")",
"batch_definition_list",
"=",
"batch_filter_obj",
".",
"select_from_data_connector_query",
"(",
"batch_definition_list",
"=",
"batch_definition_list",
")",
"return",
"batch_definition_list"
] | [
136,
4
] | [
180,
36
] | python | en | ['en', 'error', 'th'] | False |
FilePathDataConnector._sort_batch_definition_list | (
self, batch_definition_list: List[BatchDefinition]
) |
Use configured sorters to sort batch_definition
Args:
batch_definition_list (list): list of batch_definitions to sort
Returns:
sorted list of batch_definitions
|
Use configured sorters to sort batch_definition | def _sort_batch_definition_list(
self, batch_definition_list: List[BatchDefinition]
) -> List[BatchDefinition]:
"""
Use configured sorters to sort batch_definition
Args:
batch_definition_list (list): list of batch_definitions to sort
Returns:
sorted list of batch_definitions
"""
sorters: Iterator[Sorter] = reversed(list(self.sorters.values()))
for sorter in sorters:
batch_definition_list = sorter.get_sorted_batch_definitions(
batch_definitions=batch_definition_list
)
return batch_definition_list | [
"def",
"_sort_batch_definition_list",
"(",
"self",
",",
"batch_definition_list",
":",
"List",
"[",
"BatchDefinition",
"]",
")",
"->",
"List",
"[",
"BatchDefinition",
"]",
":",
"sorters",
":",
"Iterator",
"[",
"Sorter",
"]",
"=",
"reversed",
"(",
"list",
"(",
"self",
".",
"sorters",
".",
"values",
"(",
")",
")",
")",
"for",
"sorter",
"in",
"sorters",
":",
"batch_definition_list",
"=",
"sorter",
".",
"get_sorted_batch_definitions",
"(",
"batch_definitions",
"=",
"batch_definition_list",
")",
"return",
"batch_definition_list"
] | [
182,
4
] | [
200,
36
] | python | en | ['en', 'error', 'th'] | False |
FilePathDataConnector.build_batch_spec | (self, batch_definition: BatchDefinition) |
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
|
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function. | def build_batch_spec(self, batch_definition: BatchDefinition) -> PathBatchSpec:
"""
Build BatchSpec from batch_definition by calling DataConnector's build_batch_spec function.
Args:
batch_definition (BatchDefinition): to be used to build batch_spec
Returns:
BatchSpec built from batch_definition
"""
batch_spec: BatchSpec = super().build_batch_spec(
batch_definition=batch_definition
)
return PathBatchSpec(batch_spec) | [
"def",
"build_batch_spec",
"(",
"self",
",",
"batch_definition",
":",
"BatchDefinition",
")",
"->",
"PathBatchSpec",
":",
"batch_spec",
":",
"BatchSpec",
"=",
"super",
"(",
")",
".",
"build_batch_spec",
"(",
"batch_definition",
"=",
"batch_definition",
")",
"return",
"PathBatchSpec",
"(",
"batch_spec",
")"
] | [
230,
4
] | [
243,
40
] | python | en | ['en', 'error', 'th'] | False |
score5 | (fa, matrix=None) |
Calculate 5' splice site strength
(exon)XXX|XXXXXX(intron)
**
>>> round(score5('cagGTAAGT'), 2)
10.86
>>> round(score5('gagGTAAGT'), 2)
11.08
>>> round(score5('taaATAAGT'), 2)
-0.12
>>> matrix = load_matrix5()
>>> round(score5('cagGTAAGT', matrix=matrix), 2)
10.86
|
Calculate 5' splice site strength
(exon)XXX|XXXXXX(intron)
**
>>> round(score5('cagGTAAGT'), 2)
10.86
>>> round(score5('gagGTAAGT'), 2)
11.08
>>> round(score5('taaATAAGT'), 2)
-0.12
>>> matrix = load_matrix5()
>>> round(score5('cagGTAAGT', matrix=matrix), 2)
10.86
| def score5(fa, matrix=None):
'''
Calculate 5' splice site strength
(exon)XXX|XXXXXX(intron)
**
>>> round(score5('cagGTAAGT'), 2)
10.86
>>> round(score5('gagGTAAGT'), 2)
11.08
>>> round(score5('taaATAAGT'), 2)
-0.12
>>> matrix = load_matrix5()
>>> round(score5('cagGTAAGT', matrix=matrix), 2)
10.86
'''
# check length of fa
if len(fa) != 9:
sys.exit('Wrong length of fa!')
# check matrix
if not matrix:
matrix = load_matrix5()
# for key elements
key = fa[3:5].upper()
score = cons1_5[key[0]] * cons2_5[key[1]] / (bgd_5[key[0]] * bgd_5[key[1]])
# for rest elements
rest = (fa[:3] + fa[5:]).upper()
rest_score = matrix[rest]
# final score
return math.log(score * rest_score, 2) | [
"def",
"score5",
"(",
"fa",
",",
"matrix",
"=",
"None",
")",
":",
"# check length of fa",
"if",
"len",
"(",
"fa",
")",
"!=",
"9",
":",
"sys",
".",
"exit",
"(",
"'Wrong length of fa!'",
")",
"# check matrix",
"if",
"not",
"matrix",
":",
"matrix",
"=",
"load_matrix5",
"(",
")",
"# for key elements",
"key",
"=",
"fa",
"[",
"3",
":",
"5",
"]",
".",
"upper",
"(",
")",
"score",
"=",
"cons1_5",
"[",
"key",
"[",
"0",
"]",
"]",
"*",
"cons2_5",
"[",
"key",
"[",
"1",
"]",
"]",
"/",
"(",
"bgd_5",
"[",
"key",
"[",
"0",
"]",
"]",
"*",
"bgd_5",
"[",
"key",
"[",
"1",
"]",
"]",
")",
"# for rest elements",
"rest",
"=",
"(",
"fa",
"[",
":",
"3",
"]",
"+",
"fa",
"[",
"5",
":",
"]",
")",
".",
"upper",
"(",
")",
"rest_score",
"=",
"matrix",
"[",
"rest",
"]",
"# final score",
"return",
"math",
".",
"log",
"(",
"score",
"*",
"rest_score",
",",
"2",
")"
] | [
33,
0
] | [
61,
42
] | python | en | ['en', 'error', 'th'] | False |
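Putting the record above to work: the returned value is log2 of the consensus ratio for the two intron bases at fa[3:5] multiplied by the table score of the remaining seven bases, and loading the matrix once makes batch scoring cheap. A short sketch using only names and doctest values from the record:

matrix5 = load_matrix5()                      # load the 5' table once, reuse for every site
sites = ["cagGTAAGT", "gagGTAAGT", "taaATAAGT"]
scores = {s: round(score5(s, matrix=matrix5), 2) for s in sites}
# Expected from the doctests above: {'cagGTAAGT': 10.86, 'gagGTAAGT': 11.08, 'taaATAAGT': -0.12}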
score3 | (fa, matrix=None) |
Calculate 3' splice site strength
(intron)XXXXXXXXXXXXXXXXXXXX|XXX(exon)
**
>>> round(score3('ttccaaacgaacttttgtAGgga'), 2)
2.89
>>> round(score3('tgtctttttctgtgtggcAGtgg'), 2)
8.19
>>> round(score3('ttctctcttcagacttatAGcaa'), 2)
-0.08
>>> matrix = load_matrix3()
>>> round(score3('ttccaaacgaacttttgtAGgga', matrix=matrix), 2)
2.89
|
Calculate 3' splice site strength
(intron)XXXXXXXXXXXXXXXXXXXX|XXX(exon)
**
>>> round(score3('ttccaaacgaacttttgtAGgga'), 2)
2.89
>>> round(score3('tgtctttttctgtgtggcAGtgg'), 2)
8.19
>>> round(score3('ttctctcttcagacttatAGcaa'), 2)
-0.08
>>> matrix = load_matrix3()
>>> round(score3('ttccaaacgaacttttgtAGgga', matrix=matrix), 2)
2.89
| def score3(fa, matrix=None):
'''
Calculate 3' splice site strength
(intron)XXXXXXXXXXXXXXXXXXXX|XXX(exon)
**
>>> round(score3('ttccaaacgaacttttgtAGgga'), 2)
2.89
>>> round(score3('tgtctttttctgtgtggcAGtgg'), 2)
8.19
>>> round(score3('ttctctcttcagacttatAGcaa'), 2)
-0.08
>>> matrix = load_matrix3()
>>> round(score3('ttccaaacgaacttttgtAGgga', matrix=matrix), 2)
2.89
'''
# check length of fa
if len(fa) != 23:
sys.exit('Wrong length of fa!')
# check matrix
if not matrix:
matrix = load_matrix3()
# for key elements
key = fa[18:20].upper()
score = cons1_3[key[0]] * cons2_3[key[1]] / (bgd_3[key[0]] * bgd_3[key[1]])
# for rest elements
rest = (fa[:18] + fa[20:]).upper()
rest_score = 1
rest_score *= matrix[0][hashseq(rest[:7])]
rest_score *= matrix[1][hashseq(rest[7:14])]
rest_score *= matrix[2][hashseq(rest[14:])]
rest_score *= matrix[3][hashseq(rest[4:11])]
rest_score *= matrix[4][hashseq(rest[11:18])]
rest_score /= matrix[5][hashseq(rest[4:7])]
rest_score /= matrix[6][hashseq(rest[7:11])]
rest_score /= matrix[7][hashseq(rest[11:14])]
rest_score /= matrix[8][hashseq(rest[14:18])]
# final score
return math.log(score * rest_score, 2) | [
"def",
"score3",
"(",
"fa",
",",
"matrix",
"=",
"None",
")",
":",
"# check length of fa",
"if",
"len",
"(",
"fa",
")",
"!=",
"23",
":",
"sys",
".",
"exit",
"(",
"'Wrong length of fa!'",
")",
"# check matrix",
"if",
"not",
"matrix",
":",
"matrix",
"=",
"load_matrix3",
"(",
")",
"# for key elements",
"key",
"=",
"fa",
"[",
"18",
":",
"20",
"]",
".",
"upper",
"(",
")",
"score",
"=",
"cons1_3",
"[",
"key",
"[",
"0",
"]",
"]",
"*",
"cons2_3",
"[",
"key",
"[",
"1",
"]",
"]",
"/",
"(",
"bgd_3",
"[",
"key",
"[",
"0",
"]",
"]",
"*",
"bgd_3",
"[",
"key",
"[",
"1",
"]",
"]",
")",
"# for rest elements",
"rest",
"=",
"(",
"fa",
"[",
":",
"18",
"]",
"+",
"fa",
"[",
"20",
":",
"]",
")",
".",
"upper",
"(",
")",
"rest_score",
"=",
"1",
"rest_score",
"*=",
"matrix",
"[",
"0",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
":",
"7",
"]",
")",
"]",
"rest_score",
"*=",
"matrix",
"[",
"1",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"7",
":",
"14",
"]",
")",
"]",
"rest_score",
"*=",
"matrix",
"[",
"2",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"14",
":",
"]",
")",
"]",
"rest_score",
"*=",
"matrix",
"[",
"3",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"4",
":",
"11",
"]",
")",
"]",
"rest_score",
"*=",
"matrix",
"[",
"4",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"11",
":",
"18",
"]",
")",
"]",
"rest_score",
"/=",
"matrix",
"[",
"5",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"4",
":",
"7",
"]",
")",
"]",
"rest_score",
"/=",
"matrix",
"[",
"6",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"7",
":",
"11",
"]",
")",
"]",
"rest_score",
"/=",
"matrix",
"[",
"7",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"11",
":",
"14",
"]",
")",
"]",
"rest_score",
"/=",
"matrix",
"[",
"8",
"]",
"[",
"hashseq",
"(",
"rest",
"[",
"14",
":",
"18",
"]",
")",
"]",
"# final score",
"return",
"math",
".",
"log",
"(",
"score",
"*",
"rest_score",
",",
"2",
")"
] | [
74,
0
] | [
111,
42
] | python | en | ['en', 'error', 'th'] | False |
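The chain of multiplications and divisions over matrix[0]..matrix[8] in the record above is the maximum-entropy approximation of the 21-base `rest` sequence built from overlapping 7-mers; read directly off the slice indices (1-based positions inside `rest`), it is

P(\mathrm{rest}) \approx \frac{p(x_{1..7})\,p(x_{8..14})\,p(x_{15..21})\,p(x_{5..11})\,p(x_{12..18})}{p(x_{5..7})\,p(x_{8..11})\,p(x_{12..14})\,p(x_{15..18})},
\qquad \mathrm{score3} = \log_2\!\big(\text{consensus ratio for the AG at } fa[18{:}20] \times P(\mathrm{rest})\big)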
checkpoint | () |
Checkpoint operations
A checkpoint is a bundle of one or more batches of data with one or more
Expectation Suites.
A checkpoint can be as simple as one batch of data paired with one
Expectation Suite.
A checkpoint can be as complex as many batches of data across different
datasources paired with one or more Expectation Suites each.
|
Checkpoint operations | def checkpoint():
"""
Checkpoint operations
A checkpoint is a bundle of one or more batches of data with one or more
Expectation Suites.
A checkpoint can be as simple as one batch of data paired with one
Expectation Suite.
A checkpoint can be as complex as many batches of data across different
datasources paired with one or more Expectation Suites each.
"""
pass | [
"def",
"checkpoint",
"(",
")",
":",
"pass"
] | [
59,
0
] | [
72,
8
] | python | en | ['en', 'error', 'th'] | False |
checkpoint_new | (checkpoint, suite, directory, datasource) | Create a new checkpoint for easy deployments. (Experimental) | Create a new checkpoint for easy deployments. (Experimental) | def checkpoint_new(checkpoint, suite, directory, datasource):
"""Create a new checkpoint for easy deployments. (Experimental)"""
suite_name = suite
usage_event = "cli.checkpoint.new"
context = toolkit.load_data_context_with_error_handling(directory)
_verify_checkpoint_does_not_exist(context, checkpoint, usage_event)
suite: ExpectationSuite = toolkit.load_expectation_suite(
context, suite_name, usage_event
)
datasource = toolkit.select_datasource(context, datasource_name=datasource)
if datasource is None:
toolkit.send_usage_message(context, usage_event, success=False)
sys.exit(1)
_, _, _, batch_kwargs = toolkit.get_batch_kwargs(context, datasource.name)
_ = context.add_checkpoint(
name=checkpoint,
**{
"class_name": "LegacyCheckpoint",
"batches": [
{
"batch_kwargs": dict(batch_kwargs),
"expectation_suite_names": [suite.expectation_suite_name],
}
],
},
)
cli_message(
f"""<green>A Checkpoint named `{checkpoint}` was added to your project!</green>
- To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`"""
)
toolkit.send_usage_message(context, usage_event, success=True) | [
"def",
"checkpoint_new",
"(",
"checkpoint",
",",
"suite",
",",
"directory",
",",
"datasource",
")",
":",
"suite_name",
"=",
"suite",
"usage_event",
"=",
"\"cli.checkpoint.new\"",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"_verify_checkpoint_does_not_exist",
"(",
"context",
",",
"checkpoint",
",",
"usage_event",
")",
"suite",
":",
"ExpectationSuite",
"=",
"toolkit",
".",
"load_expectation_suite",
"(",
"context",
",",
"suite_name",
",",
"usage_event",
")",
"datasource",
"=",
"toolkit",
".",
"select_datasource",
"(",
"context",
",",
"datasource_name",
"=",
"datasource",
")",
"if",
"datasource",
"is",
"None",
":",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"usage_event",
",",
"success",
"=",
"False",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"_",
",",
"_",
",",
"_",
",",
"batch_kwargs",
"=",
"toolkit",
".",
"get_batch_kwargs",
"(",
"context",
",",
"datasource",
".",
"name",
")",
"_",
"=",
"context",
".",
"add_checkpoint",
"(",
"name",
"=",
"checkpoint",
",",
"*",
"*",
"{",
"\"class_name\"",
":",
"\"LegacyCheckpoint\"",
",",
"\"batches\"",
":",
"[",
"{",
"\"batch_kwargs\"",
":",
"dict",
"(",
"batch_kwargs",
")",
",",
"\"expectation_suite_names\"",
":",
"[",
"suite",
".",
"expectation_suite_name",
"]",
",",
"}",
"]",
",",
"}",
",",
")",
"cli_message",
"(",
"f\"\"\"<green>A Checkpoint named `{checkpoint}` was added to your project!</green>\n - To run this Checkpoint, run `great_expectations checkpoint run {checkpoint}`\"\"\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"usage_event",
",",
"success",
"=",
"True",
")"
] | [
86,
0
] | [
119,
66
] | python | en | ['en', 'en', 'en'] | True |
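Under the hood the command above reduces to a single add_checkpoint call; a minimal sketch of the equivalent programmatic form, using only the keyword structure visible in the record (the batch_kwargs contents are placeholders that the CLI normally fills in through its interactive prompt):

context.add_checkpoint(
    name="my_checkpoint",
    class_name="LegacyCheckpoint",
    batches=[
        {
            "batch_kwargs": {"datasource": "my_datasource", "path": "data/my_table.csv"},  # placeholder values
            "expectation_suite_names": ["my_suite"],
        }
    ],
)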
checkpoint_list | (directory) | List configured checkpoints. (Experimental) | List configured checkpoints. (Experimental) | def checkpoint_list(directory):
"""List configured checkpoints. (Experimental)"""
context = toolkit.load_data_context_with_error_handling(directory)
checkpoints = context.list_checkpoints()
if not checkpoints:
cli_message(
"No checkpoints found.\n"
" - Use the command `great_expectations checkpoint new` to create one."
)
toolkit.send_usage_message(context, event="cli.checkpoint.list", success=True)
sys.exit(0)
number_found = len(checkpoints)
plural = "s" if number_found > 1 else ""
message = f"Found {number_found} checkpoint{plural}."
pretty_list = [f" - <cyan>{cp}</cyan>" for cp in checkpoints]
cli_message_list(pretty_list, list_intro_string=message)
toolkit.send_usage_message(context, event="cli.checkpoint.list", success=True) | [
"def",
"checkpoint_list",
"(",
"directory",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"checkpoints",
"=",
"context",
".",
"list_checkpoints",
"(",
")",
"if",
"not",
"checkpoints",
":",
"cli_message",
"(",
"\"No checkpoints found.\\n\"",
"\" - Use the command `great_expectations checkpoint new` to create one.\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"\"cli.checkpoint.list\"",
",",
"success",
"=",
"True",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"number_found",
"=",
"len",
"(",
"checkpoints",
")",
"plural",
"=",
"\"s\"",
"if",
"number_found",
">",
"1",
"else",
"\"\"",
"message",
"=",
"f\"Found {number_found} checkpoint{plural}.\"",
"pretty_list",
"=",
"[",
"f\" - <cyan>{cp}</cyan>\"",
"for",
"cp",
"in",
"checkpoints",
"]",
"cli_message_list",
"(",
"pretty_list",
",",
"list_intro_string",
"=",
"message",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"\"cli.checkpoint.list\"",
",",
"success",
"=",
"True",
")"
] | [
171,
0
] | [
188,
82
] | python | en | ['en', 'en', 'en'] | True |
checkpoint_run | (checkpoint, directory) | Run a checkpoint. (Experimental) | Run a checkpoint. (Experimental) | def checkpoint_run(checkpoint, directory):
"""Run a checkpoint. (Experimental)"""
usage_event = "cli.checkpoint.run"
context = toolkit.load_data_context_with_error_handling(
directory=directory, from_cli_upgrade_command=False
)
checkpoint: Checkpoint = toolkit.load_checkpoint(
context,
checkpoint,
usage_event,
)
try:
results = checkpoint.run()
except Exception as e:
toolkit.exit_with_failure_message_and_stats(
context, usage_event, f"<red>{e}</red>"
)
if not results["success"]:
cli_message("Validation failed!")
toolkit.send_usage_message(context, event=usage_event, success=True)
print_validation_operator_results_details(results)
sys.exit(1)
cli_message("Validation succeeded!")
toolkit.send_usage_message(context, event=usage_event, success=True)
print_validation_operator_results_details(results)
sys.exit(0) | [
"def",
"checkpoint_run",
"(",
"checkpoint",
",",
"directory",
")",
":",
"usage_event",
"=",
"\"cli.checkpoint.run\"",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
"=",
"directory",
",",
"from_cli_upgrade_command",
"=",
"False",
")",
"checkpoint",
":",
"Checkpoint",
"=",
"toolkit",
".",
"load_checkpoint",
"(",
"context",
",",
"checkpoint",
",",
"usage_event",
",",
")",
"try",
":",
"results",
"=",
"checkpoint",
".",
"run",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
",",
"usage_event",
",",
"f\"<red>{e}</red>\"",
")",
"if",
"not",
"results",
"[",
"\"success\"",
"]",
":",
"cli_message",
"(",
"\"Validation failed!\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event",
",",
"success",
"=",
"True",
")",
"print_validation_operator_results_details",
"(",
"results",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"cli_message",
"(",
"\"Validation succeeded!\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event",
",",
"success",
"=",
"True",
")",
"print_validation_operator_results_details",
"(",
"results",
")",
"sys",
".",
"exit",
"(",
"0",
")"
] | [
200,
0
] | [
229,
15
] | python | en | ['it', 'gd', 'en'] | False |
checkpoint_script | (checkpoint, directory) |
Create a python script to run a checkpoint. (Experimental)
Checkpoints can be run directly without this script using the
`great_expectations checkpoint run` command.
This script is provided for those who wish to run checkpoints via python.
|
Create a python script to run a checkpoint. (Experimental) | def checkpoint_script(checkpoint, directory):
"""
Create a python script to run a checkpoint. (Experimental)
Checkpoints can be run directly without this script using the
`great_expectations checkpoint run` command.
This script is provided for those who wish to run checkpoints via python.
"""
context = toolkit.load_data_context_with_error_handling(directory)
usage_event = "cli.checkpoint.script"
# Attempt to load the checkpoint and deal with errors
_ = toolkit.load_checkpoint(context, checkpoint, usage_event)
script_name = f"run_{checkpoint}.py"
script_path = os.path.join(
context.root_directory, context.GE_UNCOMMITTED_DIR, script_name
)
if os.path.isfile(script_path):
toolkit.exit_with_failure_message_and_stats(
context,
usage_event,
f"""<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>
- Existing file path: {script_path}""",
)
_write_checkpoint_script_to_disk(context.root_directory, checkpoint, script_path)
cli_message(
f"""<green>A python script was created that runs the checkpoint named: `{checkpoint}`</green>
- The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`
- The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`"""
)
toolkit.send_usage_message(context, event=usage_event, success=True) | [
"def",
"checkpoint_script",
"(",
"checkpoint",
",",
"directory",
")",
":",
"context",
"=",
"toolkit",
".",
"load_data_context_with_error_handling",
"(",
"directory",
")",
"usage_event",
"=",
"\"cli.checkpoint.script\"",
"# Attempt to load the checkpoint and deal with errors",
"_",
"=",
"toolkit",
".",
"load_checkpoint",
"(",
"context",
",",
"checkpoint",
",",
"usage_event",
")",
"script_name",
"=",
"f\"run_{checkpoint}.py\"",
"script_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"context",
".",
"root_directory",
",",
"context",
".",
"GE_UNCOMMITTED_DIR",
",",
"script_name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"script_path",
")",
":",
"toolkit",
".",
"exit_with_failure_message_and_stats",
"(",
"context",
",",
"usage_event",
",",
"f\"\"\"<red>Warning! A script named {script_name} already exists and this command will not overwrite it.</red>\n - Existing file path: {script_path}\"\"\"",
",",
")",
"_write_checkpoint_script_to_disk",
"(",
"context",
".",
"root_directory",
",",
"checkpoint",
",",
"script_path",
")",
"cli_message",
"(",
"f\"\"\"<green>A python script was created that runs the checkpoint named: `{checkpoint}`</green>\n - The script is located in `great_expectations/uncommitted/run_{checkpoint}.py`\n - The script can be run with `python great_expectations/uncommitted/run_{checkpoint}.py`\"\"\"",
")",
"toolkit",
".",
"send_usage_message",
"(",
"context",
",",
"event",
"=",
"usage_event",
",",
"success",
"=",
"True",
")"
] | [
270,
0
] | [
304,
72
] | python | en | ['en', 'error', 'th'] | False |
DataAccessApi.get_dataset_by_extent | (self,
product,
product_type=None,
platform=None,
time=None,
longitude=None,
latitude=None,
measurements=None,
output_crs=None,
resolution=None,
dask_chunks=None,
**kwargs) |
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
crs (string): CRS lat/lon bounds are specified in, defaults to WGS84.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it is returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
dask_chunks (dict): Lazy loaded array block sizes, not lazy loaded by default.
Returns:
data (xarray): dataset with the desired data.
|
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.) | def get_dataset_by_extent(self,
product,
product_type=None,
platform=None,
time=None,
longitude=None,
latitude=None,
measurements=None,
output_crs=None,
resolution=None,
dask_chunks=None,
**kwargs):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
product (string): The name of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platform (string): The platform associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
crs (string): CRS lat/lon bounds are specified in, defaults to WGS84.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it is returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
dask_chunks (dict): Lazy loaded array block sizes, not lazy loaded by default.
Returns:
data (xarray): dataset with the desired data.
"""
# there is probably a better way to do this but I'm not aware of it.
query = {}
if product_type is not None:
query['product_type'] = product_type
if platform is not None:
query['platform'] = platform
if time is not None:
query['time'] = time
if longitude is not None and latitude is not None:
query['longitude'] = longitude
query['latitude'] = latitude
data = self.dc.load(
product=product,
measurements=measurements,
output_crs=output_crs,
resolution=resolution,
dask_chunks=dask_chunks,
**query)
return data | [function_tokens omitted: tokenized duplicate of the function text] | [44, 4] | [97, 19] | python | en | ['en', 'error', 'th'] | False
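A minimal usage sketch for the get_dataset_by_extent record above. It assumes a working Open Data Cube installation whose Datacube instance is wrapped by DataAccessApi; the module path, product name, bounds, dates and measurement names below are illustrative assumptions, not values taken from this dataset.

    from data_access_api import DataAccessApi  # hypothetical module path

    api = DataAccessApi()  # assumes the default datacube configuration is reachable
    ds = api.get_dataset_by_extent(
        product='ls8_example_product',          # illustrative product name
        latitude=(0.0, 1.0),
        longitude=(-74.5, -73.5),
        time=('2017-01-01', '2017-12-31'),
        measurements=['red', 'green', 'blue', 'nir'])
    print(ds)  # xarray.Dataset restricted to the requested extents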
DataAccessApi.get_stacked_datasets_by_extent | (self,
products,
product_type=None,
platforms=None,
time=None,
longitude=None,
latitude=None,
measurements=None,
output_crs=None,
resolution=None,
dask_chunks=None,
**kwargs) |
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
products (array of strings): The names of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platforms (array of strings): The platforms associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it's returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data.
|
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.) | def get_stacked_datasets_by_extent(self,
products,
product_type=None,
platforms=None,
time=None,
longitude=None,
latitude=None,
measurements=None,
output_crs=None,
resolution=None,
dask_chunks=None,
**kwargs):
"""
Gets and returns data based on lat/long bounding box inputs.
All params are optional. Leaving one out will just query the dc without it, (eg leaving out
lat/lng but giving product returns dataset containing entire product.)
Args:
products (array of strings): The names of the product associated with the desired dataset.
product_type (string): The type of product associated with the desired dataset.
platforms (array of strings): The platforms associated with the desired dataset.
time (tuple): A tuple consisting of the start time and end time for the dataset.
longitude (tuple): A tuple of floats specifying the min,max longitude bounds.
latitude (tuple): A tuple of floats specifying the min,max latitude bounds.
measurements (list): A list of strings that represents all measurements.
output_crs (string): Determines reprojection of the data before it's returned
resolution (tuple): A tuple of min,max ints to determine the resolution of the data.
Returns:
data (xarray): dataset with the desired data.
"""
data_array = []
for index, product in enumerate(products):
product_data = self.get_dataset_by_extent(
product,
product_type=product_type,
platform=platforms[index] if platforms is not None else None,
time=time,
longitude=longitude,
latitude=latitude,
measurements=measurements,
output_crs=output_crs,
resolution=resolution,
dask_chunks=dask_chunks)
if 'time' in product_data:
product_data['satellite'] = xr.DataArray(
np.full(product_data[list(product_data.data_vars)[0]].values.shape, index, dtype="int16"),
dims=('time', 'latitude', 'longitude'),
coords={
'latitude': product_data.latitude,
'longitude': product_data.longitude,
'time': product_data.time
})
data_array.append(product_data.copy(deep=True))
data = None
if len(data_array) > 0:
combined_data = xr.concat(data_array, 'time')
data = combined_data.reindex({'time': sorted(combined_data.time.values)})
return data | [function_tokens omitted: tokenized duplicate of the function text] | [99, 4] | [161, 19] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.get_query_metadata | (self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs) |
Gets a descriptor based on a request.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
**kwargs (dict): Keyword arguments for `self.get_dataset_by_extent()`.
Returns:
scene_metadata (dict): Dictionary containing a variety of data that can later be
accessed.
|
Gets a descriptor based on a request. | def get_query_metadata(self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs):
"""
Gets a descriptor based on a request.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
**kwargs (dict): Keyword arguments for `self.get_dataset_by_extent()`.
Returns:
scene_metadata (dict): Dictionary containing a variety of data that can later be
accessed.
"""
dataset = self.get_dataset_by_extent(
platform=platform, product=product, longitude=longitude, latitude=latitude, time=time, **kwargs)
if self.is_dataset_empty(dataset) == True:
return {
'lat_extents': (None, None),
'lon_extents': (None, None),
'time_extents': (None, None),
'scene_count': 0,
'pixel_count': 0,
'tile_count': 0,
'storage_units': {}
}
lon_min, lat_min, lon_max, lat_max = dataset.geobox.extent.envelope
return {
'lat_extents': (lat_min, lat_max),
'lon_extents': (lon_min, lon_max),
'time_extents': (dataset.time[0].values.astype('M8[ms]').tolist(),
dataset.time[-1].values.astype('M8[ms]').tolist()),
'tile_count':
dataset.time.size,
'pixel_count':
dataset.geobox.shape[0] * dataset.geobox.shape[1],
} | [function_tokens omitted: tokenized duplicate of the function text] | [167, 4] | [208, 9] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.list_acquisition_dates | (self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs) |
Get a list of all acquisition dates for a query.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
|
Get a list of all acquisition dates for a query. | def list_acquisition_dates(self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs):
"""
Get a list of all acquisition dates for a query.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
"""
dataset = self.get_dataset_by_extent(
product=product, platform=platform, longitude=longitude, latitude=latitude, time=time, dask_chunks={})
if not dataset:
return []
return dataset.time.values.astype('M8[ms]').tolist() | [function_tokens omitted: tokenized duplicate of the function text] | [210, 4] | [230, 60] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.list_combined_acquisition_dates | (self,
products,
platforms=None,
longitude=None,
latitude=None,
time=None,
**kwargs) |
Get a list of all acquisition dates for a query.
Args:
platforms (list): Platforms for which data is requested
products (list): The name of the products associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
|
Get a list of all acquisition dates for a query. | def list_combined_acquisition_dates(self,
products,
platforms=None,
longitude=None,
latitude=None,
time=None,
**kwargs):
"""
Get a list of all acquisition dates for a query.
Args:
platforms (list): Platforms for which data is requested
products (list): The name of the products associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
times (list): Python list of dates that can be used to query the dc for single time
sliced data.
"""
dates = []
for index, product in enumerate(products):
dataset = self.get_dataset_by_extent(
product,
platform=platforms[index] if platforms is not None else None,
time=time,
longitude=longitude,
latitude=latitude,
dask_chunks={})
if not dataset:
continue
dates += dataset.time.values.astype('M8[ms]').tolist()
return dates | [function_tokens omitted: tokenized duplicate of the function text] | [232, 4] | [268, 20] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.get_full_dataset_extent | (self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs) |
Get a list of all dimensions for a query.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
dict containing time, latitude, and longitude, each containing the respective xarray dataarray
|
Get a list of all dimensions for a query. | def get_full_dataset_extent(self, product, platform=None, longitude=None, latitude=None, time=None, **kwargs):
"""
Get a list of all dimensions for a query.
Args:
platform (string): Platform for which data is requested
product (string): The name of the product associated with the desired dataset.
longitude (tuple): Tuple of min,max floats for longitude
latitude (tuple): Tuple of min,max floats for latitude
time (tuple): Tuple of start and end datetimes for requested data
Returns:
dict containing time, latitude, and longitude, each containing the respective xarray dataarray
"""
dataset = self.get_dataset_by_extent(
product=product, platform=platform, longitude=longitude, latitude=latitude, time=time, dask_chunks={})
if not dataset:
return []
return {'time': dataset.time, 'latitude': dataset.latitude, 'longitude': dataset.longitude} | [function_tokens omitted: tokenized duplicate of the function text] | [270, 4] | [289, 99] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.get_datacube_metadata | (self, product, platform=None) |
Gets some details on the cube and its contents.
Args:
platform (string): Desired platform for requested data.
product (string): Desired product for requested data.
Returns:
datacube_metadata (dict): a dict with multiple keys containing relevant metadata.
|
Gets some details on the cube and its contents. | def get_datacube_metadata(self, product, platform=None):
"""
Gets some details on the cube and its contents.
Args:
platform (string): Desired platform for requested data.
product (string): Desired product for requested data.
Returns:
datacube_metadata (dict): a dict with multiple keys containing relevant metadata.
"""
return self.get_query_metadata(product, platform=platform) | [function_tokens omitted: tokenized duplicate of the function text] | [291, 4] | [303, 66] | python | en | ['en', 'error', 'th'] | False
DataAccessApi.validate_measurements | (self, product, measurements, **kwargs) | Ensure that your measurements exist for the product before loading.
| Ensure that your measurements exist for the product before loading.
| def validate_measurements(self, product, measurements, **kwargs):
"""Ensure that your measurements exist for the product before loading.
"""
measurement_list = self.dc.list_measurements(with_pandas=False)
measurements_for_product = filter(lambda x: x['product'] == product, measurement_list)
valid_measurements_name_array = map(lambda x: x['name'], measurements_for_product)
return set(measurements).issubset(set(valid_measurements_name_array)) | [function_tokens omitted: tokenized duplicate of the function text] | [305, 4] | [312, 77] | python | en | ['en', 'en', 'en'] | True
smd | (feature, treatment) | Calculate the standard mean difference (SMD) of a feature between the
treatment and control groups.
The definition is available at
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3144483/#s11title
Args:
feature (pandas.Series): a column of a feature to calculate SMD for
treatment (pandas.Series): a column that indicates whether a row is in
the treatment group or not
Returns:
(float): The SMD of the feature
| Calculate the standard mean difference (SMD) of a feature between the
treatment and control groups. | def smd(feature, treatment):
"""Calculate the standard mean difference (SMD) of a feature between the
treatment and control groups.
The definition is available at
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3144483/#s11title
Args:
feature (pandas.Series): a column of a feature to calculate SMD for
treatment (pandas.Series): a column that indicates whether a row is in
the treatment group or not
Returns:
(float): The SMD of the feature
"""
t = feature[treatment == 1]
c = feature[treatment == 0]
return (t.mean() - c.mean()) / np.sqrt(.5 * (t.var() + c.var())) | [function_tokens omitted: tokenized duplicate of the function text] | [13, 0] | [30, 68] | python | en | ['en', 'en', 'en'] | True
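A hand-checkable toy example for the smd record above; the numbers are invented for illustration, and smd is assumed to be importable from the module this record was extracted from.

    import numpy as np
    import pandas as pd

    feature = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    treatment = pd.Series([1, 1, 1, 0, 0, 0])

    # treated mean 2.0, control mean 5.0, both sample variances 1.0,
    # so (2.0 - 5.0) / sqrt(0.5 * (1.0 + 1.0)) = -3.0
    print(smd(feature, treatment))  # -3.0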
create_table_one | (data, treatment_col, features) | Report balance in input features between the treatment and control groups.
References:
R's tableone at CRAN: https://github.com/kaz-yos/tableone
Python's tableone at PyPi: https://github.com/tompollard/tableone
Args:
data (pandas.DataFrame): total or matched sample data
treatment_col (str): the column name for the treatment
features (list of str): the column names of features
Returns:
(pandas.DataFrame): A table with the means and standard deviations in
the treatment and control groups, and the SMD between two groups
for the features.
| Report balance in input features between the treatment and control groups. | def create_table_one(data, treatment_col, features):
"""Report balance in input features between the treatment and control groups.
References:
R's tableone at CRAN: https://github.com/kaz-yos/tableone
Python's tableone at PyPi: https://github.com/tompollard/tableone
Args:
data (pandas.DataFrame): total or matched sample data
treatment_col (str): the column name for the treatment
features (list of str): the column names of features
Returns:
(pandas.DataFrame): A table with the means and standard deviations in
the treatment and control groups, and the SMD between two groups
for the features.
"""
t1 = pd.pivot_table(data[features + [treatment_col]],
columns=treatment_col,
aggfunc=[lambda x: '{:.2f} ({:.2f})'.format(x.mean(),
x.std())])
t1.columns = t1.columns.droplevel(level=0)
t1['SMD'] = data[features].apply(
lambda x: smd(x, data[treatment_col])
).round(4)
n_row = pd.pivot_table(data[[features[0], treatment_col]],
columns=treatment_col,
aggfunc=['count'])
n_row.columns = n_row.columns.droplevel(level=0)
n_row['SMD'] = ''
n_row.index = ['n']
t1 = pd.concat([n_row, t1], axis=0)
t1.columns.name = ''
t1.columns = ['Control', 'Treatment', 'SMD']
t1.index.name = 'Variable'
return t1 | [function_tokens omitted: tokenized duplicate of the function text] | [33, 0] | [71, 13] | python | en | ['en', 'en', 'en'] | True
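An illustrative call of create_table_one on synthetic data; it assumes the function (and its pandas/numpy imports) is in scope, and the column names are arbitrary.

    import numpy as np
    import pandas as pd

    rng = np.random.RandomState(42)
    df = pd.DataFrame({
        'treatment': rng.binomial(1, 0.5, size=500),
        'age': rng.normal(40, 10, size=500),
        'income': rng.normal(50, 15, size=500),
    })
    # prints n, 'mean (std)' per group and the SMD for each feature
    print(create_table_one(df, treatment_col='treatment', features=['age', 'income']))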
NearestNeighborMatch.__init__ | (self, caliper=.2, replace=False, ratio=1, shuffle=True,
random_state=None, n_jobs=-1) | Initialize a propensity score matching model.
Args:
caliper (float): threshold to be considered as a match.
replace (bool): whether to match with replacement or not
shuffle (bool): whether to shuffle the treatment group data before
matching or not
random_state (numpy.random.RandomState or int): RandomState or an
int seed
n_jobs (int): The number of parallel jobs to run for neighbors search.
None means 1 unless in a joblib.parallel_backend context. -1 means using all processors
| Initialize a propensity score matching model. | def __init__(self, caliper=.2, replace=False, ratio=1, shuffle=True,
random_state=None, n_jobs=-1):
"""Initialize a propensity score matching model.
Args:
caliper (float): threshold to be considered as a match.
replace (bool): whether to match with replacement or not
shuffle (bool): whether to shuffle the treatment group data before
matching or not
random_state (numpy.random.RandomState or int): RandomState or an
int seed
n_jobs (int): The number of parallel jobs to run for neighbors search.
None means 1 unless in a joblib.parallel_backend context. -1 means using all processors
"""
self.caliper = caliper
self.replace = replace
self.ratio = ratio
self.shuffle = shuffle
self.random_state = check_random_state(random_state)
self.n_jobs = n_jobs | [function_tokens omitted: tokenized duplicate of the function text] | [91, 4] | [110, 28] | python | en | ['en', 'en', 'en'] | True
NearestNeighborMatch.match | (self, data, treatment_col, score_cols) | Find matches from the control group by matching on specified columns
(propensity preferred).
Args:
data (pandas.DataFrame): total input data
treatment_col (str): the column name for the treatment
score_cols (list): list of column names for matching (propensity
column should be included)
Returns:
(pandas.DataFrame): The subset of data consisting of matched
treatment and control group data.
| Find matches from the control group by matching on specified columns
(propensity preferred). | def match(self, data, treatment_col, score_cols):
"""Find matches from the control group by matching on specified columns
(propensity preferred).
Args:
data (pandas.DataFrame): total input data
treatment_col (str): the column name for the treatment
score_cols (list): list of column names for matching (propensity
column should be included)
Returns:
(pandas.DataFrame): The subset of data consisting of matched
treatment and control group data.
"""
assert type(score_cols) is list, 'score_cols must be a list'
treatment = data.loc[data[treatment_col] == 1, score_cols]
control = data.loc[data[treatment_col] == 0, score_cols]
sdcal = self.caliper * np.std(data[score_cols].values)
if self.replace:
scaler = StandardScaler()
scaler.fit(data[score_cols])
treatment_scaled = pd.DataFrame(scaler.transform(treatment),
index=treatment.index)
control_scaled = pd.DataFrame(scaler.transform(control),
index=control.index)
# SD is the same as caliper because we use a StandardScaler above
sdcal = self.caliper
matching_model = NearestNeighbors(n_neighbors=self.ratio, n_jobs=self.n_jobs)
matching_model.fit(control_scaled)
distances, indices = matching_model.kneighbors(treatment_scaled)
# distances and indices are (n_obs, self.ratio) matrices.
# To index easily, reshape distances, indices and treatment into
# the (n_obs * self.ratio, 1) matrices and data frame.
distances = distances.T.flatten()
indices = indices.T.flatten()
treatment_scaled = pd.concat([treatment_scaled] * self.ratio,
axis=0)
cond = (distances / np.sqrt(len(score_cols)) ) < sdcal
# Deduplicate the indices of the treatment group
t_idx_matched = np.unique(treatment_scaled.loc[cond].index)
# XXX: Should we deduplicate the indices of the control group too?
c_idx_matched = np.array(control_scaled.iloc[indices[cond]].index)
else:
assert len(score_cols) == 1, (
'Matching on multiple columns is only supported using the '
'replacement method (if matching on multiple columns, set '
'replace=True).'
)
# unpack score_cols for the single-variable matching case
score_col = score_cols[0]
if self.shuffle:
t_indices = self.random_state.permutation(treatment.index)
else:
t_indices = treatment.index
t_idx_matched = []
c_idx_matched = []
control['unmatched'] = True
for t_idx in t_indices:
dist = np.abs(control.loc[control.unmatched, score_col]
- treatment.loc[t_idx, score_col])
c_idx_min = dist.idxmin()
if dist[c_idx_min] <= sdcal:
t_idx_matched.append(t_idx)
c_idx_matched.append(c_idx_min)
control.loc[c_idx_min, 'unmatched'] = False
return data.loc[np.concatenate([np.array(t_idx_matched),
np.array(c_idx_matched)])] | [function_tokens omitted: tokenized duplicate of the function text] | [112, 4] | [188, 66] | python | en | ['en', 'en', 'en'] | True
NearestNeighborMatch.match_by_group | (self, data, treatment_col, score_cols, groupby_col) | Find matches from the control group stratified by groupby_col, by
matching on specified columns (propensity preferred).
Args:
data (pandas.DataFrame): total sample data
treatment_col (str): the column name for the treatment
score_cols (list): list of column names for matching (propensity
column should be included)
groupby_col (str): the column name to be used for stratification
Returns:
(pandas.DataFrame): The subset of data consisting of matched
treatment and control group data.
| Find matches from the control group stratified by groupby_col, by
matching on specified columns (propensity preferred). | def match_by_group(self, data, treatment_col, score_cols, groupby_col):
"""Find matches from the control group stratified by groupby_col, by
matching on specified columns (propensity preferred).
Args:
data (pandas.DataFrame): total sample data
treatment_col (str): the column name for the treatment
score_cols (list): list of column names for matching (propensity
column should be included)
groupby_col (str): the column name to be used for stratification
Returns:
(pandas.DataFrame): The subset of data consisting of matched
treatment and control group data.
"""
matched = data.groupby(groupby_col).apply(
lambda x: self.match(data=x, treatment_col=treatment_col,
score_cols=score_cols)
)
return matched.reset_index(level=0, drop=True) | [function_tokens omitted: tokenized duplicate of the function text] | [190, 4] | [209, 54] | python | en | ['en', 'en', 'en'] | True
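A sketch of nearest-neighbour matching with the NearestNeighborMatch class documented in the two records above, on synthetic data. The 'pihat' column stands in for fitted propensity scores (here it is just uniform noise), and the class and its scikit-learn dependencies are assumed to be importable.

    import numpy as np
    import pandas as pd

    rng = np.random.RandomState(0)
    df = pd.DataFrame({
        'is_treatment': rng.binomial(1, 0.3, size=1000),
        'pihat': rng.uniform(0.05, 0.95, size=1000),  # placeholder propensity scores
    })
    matcher = NearestNeighborMatch(caliper=0.2, replace=True, ratio=1, random_state=0)
    matched = matcher.match(data=df, treatment_col='is_treatment', score_cols=['pihat'])
    print(matched['is_treatment'].value_counts())  # sizes of the matched groups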
MatchOptimizer.__init__ | (self, treatment_col='is_treatment', ps_col='pihat',
user_col=None, matching_covariates=['pihat'], max_smd=0.1,
max_deviation=0.1, caliper_range=(0.01, 0.5),
max_pihat_range=(0.95, 0.999), max_iter_per_param=5,
min_users_per_group=1000, smd_cols=['pihat'],
dev_cols_transformations={'pihat': np.mean},
dev_factor=1., verbose=True) | Finds the set of parameters that gives the best matching result.
Score = (number of features with SMD > max_smd)
+ (sum of deviations for important variables
* deviation factor)
The logic behind the scoring is that we are most concerned with
minimizing the number of features where SMD is higher than a certain
threshold (max_smd). However, we would also like the matched dataset
not deviate too much from the original dataset, in terms of key
variable(s), so that we still retain a similar userbase.
Args:
- treatment_col (str): name of the treatment column
- ps_col (str): name of the propensity score column
- max_smd (float): maximum acceptable SMD
- max_deviation (float): maximum acceptable deviation for
important variables
- caliper_range (tuple): low and high bounds for caliper search
range
- max_pihat_range (tuple): low and high bounds for max pihat
search range
- max_iter_per_param (int): maximum number of search values per
parameters
- min_users_per_group (int): minimum number of users per group in
matched set
- smd_cols (list): score is more sensitive to these features
exceeding max_smd
- dev_factor (float): importance weight factor for dev_cols
(e.g. dev_factor=1 means a 10% deviation leads to penalty of 1
in score)
- dev_cols_transformations (dict): dict of transformations to be
made on dev_cols
- verbose (bool): boolean flag for printing statements
Returns:
The best matched dataset (pd.DataFrame)
| Finds the set of parameters that gives the best matching result. | def __init__(self, treatment_col='is_treatment', ps_col='pihat',
user_col=None, matching_covariates=['pihat'], max_smd=0.1,
max_deviation=0.1, caliper_range=(0.01, 0.5),
max_pihat_range=(0.95, 0.999), max_iter_per_param=5,
min_users_per_group=1000, smd_cols=['pihat'],
dev_cols_transformations={'pihat': np.mean},
dev_factor=1., verbose=True):
"""Finds the set of parameters that gives the best matching result.
Score = (number of features with SMD > max_smd)
+ (sum of deviations for important variables
* deviation factor)
The logic behind the scoring is that we are most concerned with
minimizing the number of features where SMD is higher than a certain
threshold (max_smd). However, we would also like the matched dataset
not deviate too much from the original dataset, in terms of key
variable(s), so that we still retain a similar userbase.
Args:
- treatment_col (str): name of the treatment column
- ps_col (str): name of the propensity score column
- max_smd (float): maximum acceptable SMD
- max_deviation (float): maximum acceptable deviation for
important variables
- caliper_range (tuple): low and high bounds for caliper search
range
- max_pihat_range (tuple): low and high bounds for max pihat
search range
- max_iter_per_param (int): maximum number of search values per
parameters
- min_users_per_group (int): minimum number of users per group in
matched set
- smd_cols (list): score is more sensitive to these features
exceeding max_smd
- dev_factor (float): importance weight factor for dev_cols
(e.g. dev_factor=1 means a 10% deviation leads to penalty of 1
in score)
- dev_cols_transformations (dict): dict of transformations to be
made on dev_cols
- verbose (bool): boolean flag for printing statements
Returns:
The best matched dataset (pd.DataFrame)
"""
self.treatment_col = treatment_col
self.ps_col = ps_col
self.user_col = user_col
self.matching_covariates = matching_covariates
self.max_smd = max_smd
self.max_deviation = max_deviation
self.caliper_range = np.linspace(*caliper_range,
num=max_iter_per_param)
self.max_pihat_range = np.linspace(*max_pihat_range,
num=max_iter_per_param)
self.max_iter_per_param = max_iter_per_param
self.min_users_per_group = min_users_per_group
self.smd_cols = smd_cols
self.dev_factor = dev_factor
self.dev_cols_transformations = dev_cols_transformations
self.best_params = {}
self.best_score = 1e7 # ideal score is 0
self.verbose = verbose
self.pass_all = False | [function_tokens omitted: tokenized duplicate of the function text] | [213, 4] | [276, 29] | python | en | ['en', 'en', 'en'] | True
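Construction only, to show how the parameters described above fit together; the covariate names are illustrative, and the optimisation itself runs through methods of MatchOptimizer that are not part of this record.

    import numpy as np

    optimizer = MatchOptimizer(
        treatment_col='is_treatment',
        ps_col='pihat',
        matching_covariates=['pihat', 'age', 'income'],  # illustrative covariates
        max_smd=0.1,
        max_deviation=0.1,
        min_users_per_group=500,
        dev_cols_transformations={'pihat': np.mean},
    )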
generate_messages_with_defaults | (
defaults: Dict[str, Any], message_stubs: List[Dict[str, Any]]
) |
Create a list of messages by overriding defaults with message_stubs
Args:
defaults: Dict of default message items
message_stubs: Unique parts of message
Returns:
List of messages same len(message_stubs) combining defaults overridden by message stubs
|
Create a list of messages by overriding defaults with message_stubs
Args:
defaults: Dict of default message items
message_stubs: Unique parts of message | def generate_messages_with_defaults(
defaults: Dict[str, Any], message_stubs: List[Dict[str, Any]]
) -> List[Dict[str, Any]]:
"""
Create a list of messages by overriding defaults with message_stubs
Args:
defaults: Dict of default message items
message_stubs: Unique parts of message
Returns:
List of messages same len(message_stubs) combining defaults overridden by message stubs
"""
output_list = []
for message_stub in message_stubs:
defaults_copy = copy.deepcopy(defaults)
defaults_copy.update(message_stub)
output_list.append(defaults_copy)
return output_list | [function_tokens omitted: tokenized duplicate of the function text] | [14, 0] | [32, 22] | python | en | ['en', 'error', 'th'] | False
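A quick illustration of generate_messages_with_defaults with made-up message fields (the event names are arbitrary, and the function is assumed to be in scope):

    defaults = {"event": "example.default_event", "success": True, "version": "1.0.0"}
    stubs = [{"event": "example.override_event"}, {"success": False}]
    print(generate_messages_with_defaults(defaults, stubs))
    # [{'event': 'example.override_event', 'success': True, 'version': '1.0.0'},
    #  {'event': 'example.default_event', 'success': False, 'version': '1.0.0'}]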
test_usage_statistics_message | (message) | known message formats should be valid | known message formats should be valid | def test_usage_statistics_message(message):
"""known message formats should be valid"""
res = requests.post(USAGE_STATISTICS_QA_URL, json=message, timeout=2)
assert res.status_code == 201
assert res.json() == {"event_count": 1} | [function_tokens omitted: tokenized duplicate of the function text] | [1595, 0] | [1599, 43] | python | en | ['en', 'en', 'en'] | True
r | (xyz, src_loc) |
Distance from source to points on an xyz grid
|
Distance from source to points on an xyz grid
| def r(xyz, src_loc):
"""
Distance from source to points on an xyz grid
"""
return (
np.sqrt(
(xyz[:, 0] - src_loc[0]) ** 2
+ (xyz[:, 1] - src_loc[1]) ** 2
+ (xyz[:, 2] - src_loc[2]) ** 2
)
+ eps
) | [function_tokens omitted: tokenized duplicate of the function text] | [34, 0] | [45, 5] | python | en | ['en', 'error', 'th'] | False
layer_potentials | (rho1, rho2, h, A, B, xyz) |
Compute analytic solution of surface potential for 2-layered Earth
(Ref Telford 1990, section 8.3.4)
|
Compute analytic solution of surface potential for 2-layered Earth
(Ref Telford 1990, section 8.3.4)s
| def layer_potentials(rho1, rho2, h, A, B, xyz):
"""
Compute analytic solution of surface potential for 2-layered Earth
(Ref Telford 1990, section 8.3.4)
"""
def V(I, src_loc):
return (I * rho1 / (2.0 * np.pi * r(xyz, src_loc))) * (
1 + 2 * sum_term(rho1, rho2, h, r(xyz, src_loc))
)
pot = V(1.0, A)
if B is not None:
pot += V(-1.0, B)
return pot | [function_tokens omitted: tokenized duplicate of the function text] | [69, 0] | [85, 14] | python | en | ['en', 'error', 'th'] | False
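sum_term is defined elsewhere in the source module and is not part of this record. For reference, the classical image-series solution for the surface potential over a two-layer earth (Telford 1990, section 8.3.4), which the expression above appears to evaluate, is

    V(r) = \frac{I \rho_1}{2 \pi r} \left[ 1 + 2 \sum_{n=1}^{\infty} \frac{k^{n} r}{\sqrt{r^{2} + (2 n h)^{2}}} \right],
    \qquad k = \frac{\rho_2 - \rho_1}{\rho_2 + \rho_1},

so sum_term(rho1, rho2, h, r) would correspond to the (truncated) infinite sum.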
G | (A, B, M, N) |
Geometric factor
|
Geometric factor
| def G(A, B, M, N):
"""
Geometric factor
"""
bot = 1.0 / (np.abs(A[0] - M) + eps)
if B is not None:
bot -= 1.0 / (np.abs(M - B[0]) + eps)
if N is not None:
bot -= 1.0 / (np.abs(N - A[0]) + eps)
if B is not None:
bot += 1.0 / (np.abs(N - B[0]) + eps)
return 1.0 / bot | [function_tokens omitted: tokenized duplicate of the function text] | [140, 0] | [151, 20] | python | en | ['en', 'error', 'th'] | False
rho_a | (VM, VN, A, B, M, N) |
Apparent Resistivity
|
Apparent Resistivity
| def rho_a(VM, VN, A, B, M, N):
"""
Apparent Resistivity
"""
if VN is None:
return VM * 2.0 * np.pi * G(A, B, M, None)
else:
return (VM - VN) * 2.0 * np.pi * G(A, B, M, N) | [function_tokens omitted: tokenized duplicate of the function text] | [154, 0] | [161, 54] | python | en | ['en', 'error', 'th'] | False
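Reading G and rho_a together: with the unit source current used by layer_potentials above, the apparent resistivity of a general four-electrode array reduces to the usual expression (pole terms are dropped when B or N is None):

    \rho_a = 2 \pi \, G \, (V_M - V_N),
    \qquad \frac{1}{G} = \frac{1}{AM} - \frac{1}{MB} - \frac{1}{AN} + \frac{1}{NB}.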
solve_2D_potentials | (rho1, rho2, h, A, B) |
Here we solve the 2D DC problem for potentials (using SimPEG Mesh Class)
|
Here we solve the 2D DC problem for potentials (using SimPEG Mesh Class)
| def solve_2D_potentials(rho1, rho2, h, A, B):
"""
Here we solve the 2D DC problem for potentials (using SimPEG Mesh Class)
"""
sigma = 1.0 / rho2 * np.ones(mesh.nC)
sigma[mesh.gridCC[:, 1] >= -h] = 1.0 / rho1 # since the model is 2D
q = np.zeros(mesh.nC)
a = utils.closestPoints(mesh, A[:2])
q[a] = 1.0 / mesh.vol[a]
if B is not None:
b = utils.closestPoints(mesh, B[:2])
q[b] = -1.0 / mesh.vol[b]
# Use a Neumann Boundary Condition (so pole source is reasonable)
fxm, fxp, fym, fyp = mesh.faceBoundaryInd
n_xm = fxm.sum()
n_xp = fxp.sum()
n_ym = fym.sum()
n_yp = fyp.sum()
xBC_xm = np.zeros(n_xm) # 0.5*a_xm
xBC_xp = np.zeros(n_xp) # 0.5*a_xp/b_xp
yBC_xm = np.ones(n_xm) # 0.5*(1.-b_xm)
yBC_xp = np.ones(n_xp) # 0.5*(1.-1./b_xp)
xBC_ym = np.zeros(n_ym) # 0.5*a_ym
xBC_yp = np.zeros(n_yp) # 0.5*a_yp/b_yp
yBC_ym = np.ones(n_ym) # 0.5*(1.-b_ym)
yBC_yp = np.ones(n_yp) # 0.5*(1.-1./b_yp)
sortindsfx = np.argsort(np.r_[np.arange(mesh.nFx)[fxm],
np.arange(mesh.nFx)[fxp]])
sortindsfy = np.argsort(np.r_[np.arange(mesh.nFy)[fym],
np.arange(mesh.nFy)[fyp]])
xBC_x = np.r_[xBC_xm, xBC_xp][sortindsfx]
xBC_y = np.r_[xBC_ym, xBC_yp][sortindsfy]
yBC_x = np.r_[yBC_xm, yBC_xp][sortindsfx]
yBC_y = np.r_[yBC_ym, yBC_yp][sortindsfy]
x_BC = np.r_[xBC_x, xBC_y]
y_BC = np.r_[yBC_x, yBC_y]
V = utils.sdiag(mesh.vol)
Div = V * mesh.faceDiv
P_BC, B = mesh.getBCProjWF_simple()
M = B*mesh.aveCC2F
Grad = Div.T - P_BC*utils.sdiag(y_BC)*M
A = (
Div
* utils.sdiag(1.0 / (mesh.dim * mesh.aveF2CC.T * (1.0 / sigma)))
* Grad
)
A[0, 0] = A[0, 0] + 1. # Because Neumann
Ainv = Pardiso(A)
V = Ainv * q
return V | [function_tokens omitted: tokenized duplicate of the function text] | [164, 0] | [223, 12] | python | en | ['en', 'error', 'th'] | False
solve_2D_E | (rho1, rho2, h, A, B) |
solve the 2D DC resistivity problem for electric fields
|
solve the 2D DC resistivity problem for electric fields
| def solve_2D_E(rho1, rho2, h, A, B):
"""
solve the 2D DC resistivity problem for electric fields
"""
V = solve_2D_potentials(rho1, rho2, h, A, B)
E = -mesh.cellGrad * V
E = mesh.aveF2CCV * E
ex = E[: mesh.nC]
ez = E[mesh.nC :]
return ex, ez, V | [function_tokens omitted: tokenized duplicate of the function text] | [226, 0] | [236, 20] | python | en | ['en', 'error', 'th'] | False
random_string | (min_length=5, max_length=10) |
Get a random string.
Args:
min_length: Minimal length of string
max_length: Maximal length of string
Returns:
Random string of ascii characters
|
Get a random string. | def random_string(min_length=5, max_length=10) -> str:
"""
Get a random string.
Args:
min_length: Minimal length of string
max_length: Maximal length of string
Returns:
Random string of ascii characters
"""
length = random.randint(min_length, max_length)
return "".join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(length)
) | [function_tokens omitted: tokenized duplicate of the function text] | [9, 0] | [24, 5] | python | en | ['en', 'error', 'th'] | False
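A self-checking example for random_string (assumed to be in scope); the exact string depends on the random seed, so only its length and alphabet are asserted.

    import random
    import string

    random.seed(0)
    s = random_string(min_length=5, max_length=10)
    assert 5 <= len(s) <= 10
    assert set(s) <= set(string.ascii_uppercase + string.digits)
    print(s)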
iterative_tree | (
basedir: Union[str, PurePath],
nfolders_func: Callable,
nfiles_func: Callable,
repeat=1,
maxdepth=None,
filename=random_string,
payload: Optional[Callable[[Path], Generator[Path, None, None]]] = None,
) |
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen by evaluating a depth dependent function).
Args:
basedir: Directory to create files and folders in
nfolders_func: Function(depth) that returns the number of folders to be
created in a folder of that depth.
nfiles_func: Function(depth) that returns the number of files to be
created in a folder of that depth.
repeat: Walk this often through the directory tree to create new
subdirectories and files
maxdepth: Maximum depth to descend into current file tree. If None,
infinity.
filename: Callable to generate filename. Default returns short
random string
payload: Use this argument to generate files with content: Specify a
function that takes a directory ``dir`` (``Path`` object) as
argument, picks a name ``name``, creates the corresponding file
``dir/name`` and yields ``name``. Overrides ``filename`` argument
if both are passed. Takes Path object as catalog where to create
file and yields Path of created file.
If this option is not specified, all created files will be empty.
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
|
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen by evaluating a depth dependent function). | def iterative_tree(
basedir: Union[str, PurePath],
nfolders_func: Callable,
nfiles_func: Callable,
repeat=1,
maxdepth=None,
filename=random_string,
payload: Optional[Callable[[Path], Generator[Path, None, None]]] = None,
) -> Tuple[List[Path], List[Path]]:
"""
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen by evaluating a depth dependent function).
Args:
basedir: Directory to create files and folders in
nfolders_func: Function(depth) that returns the number of folders to be
created in a folder of that depth.
nfiles_func: Function(depth) that returns the number of files to be
created in a folder of that depth.
repeat: Walk this often through the directory tree to create new
subdirectories and files
maxdepth: Maximum depth to descend into current file tree. If None,
infinity.
filename: Callable to generate filename. Default returns short
random string
payload: Use this argument to generate files with content: Specify a
function that takes a directory ``dir`` (``Path`` object) as
argument, picks a name ``name``, creates the corresponding file
``dir/name`` and yields ``name``. Overrides ``filename`` argument
if both are passed. Takes the target directory as a Path object and
yields the Path of each created file.
If this option is not specified, all created files will be empty.
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
"""
alldirs = []
allfiles = []
basedir = Path(basedir)
basedir.mkdir(parents=True, exist_ok=True)
for i in range(repeat):
for root, dirs, files in os.walk(str(basedir)):
depth = os.path.relpath(root, str(basedir)).count(os.sep)
if maxdepth and depth >= maxdepth - 1:
del dirs[:]
n_folders = nfolders_func(depth)
n_files = nfiles_func(depth)
for _ in range(n_folders):
p = Path(root) / random_string()
p.mkdir(exist_ok=True)
alldirs.append(p)
if not payload:
for _ in range(n_files):
p = Path(root) / filename()
p.touch(exist_ok=True)
allfiles.append(p)
else:
payload_generator = payload(Path(root))
for _ in range(n_files):
p = next(payload_generator)
allfiles.append(p)
alldirs = list(set(alldirs))
allfiles = list(set(allfiles))
return alldirs, allfiles | [
"def",
"iterative_tree",
"(",
"basedir",
":",
"Union",
"[",
"str",
",",
"PurePath",
"]",
",",
"nfolders_func",
":",
"Callable",
",",
"nfiles_func",
":",
"Callable",
",",
"repeat",
"=",
"1",
",",
"maxdepth",
"=",
"None",
",",
"filename",
"=",
"random_string",
",",
"payload",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"Path",
"]",
",",
"Generator",
"[",
"Path",
",",
"None",
",",
"None",
"]",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Tuple",
"[",
"List",
"[",
"Path",
"]",
",",
"List",
"[",
"Path",
"]",
"]",
":",
"alldirs",
"=",
"[",
"]",
"allfiles",
"=",
"[",
"]",
"basedir",
"=",
"Path",
"(",
"basedir",
")",
"basedir",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"for",
"i",
"in",
"range",
"(",
"repeat",
")",
":",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"str",
"(",
"basedir",
")",
")",
":",
"depth",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"root",
",",
"str",
"(",
"basedir",
")",
")",
".",
"count",
"(",
"os",
".",
"sep",
")",
"if",
"maxdepth",
"and",
"depth",
">=",
"maxdepth",
"-",
"1",
":",
"del",
"dirs",
"[",
":",
"]",
"n_folders",
"=",
"nfolders_func",
"(",
"depth",
")",
"n_files",
"=",
"nfiles_func",
"(",
"depth",
")",
"for",
"_",
"in",
"range",
"(",
"n_folders",
")",
":",
"p",
"=",
"Path",
"(",
"root",
")",
"/",
"random_string",
"(",
")",
"p",
".",
"mkdir",
"(",
"exist_ok",
"=",
"True",
")",
"alldirs",
".",
"append",
"(",
"p",
")",
"if",
"not",
"payload",
":",
"for",
"_",
"in",
"range",
"(",
"n_files",
")",
":",
"p",
"=",
"Path",
"(",
"root",
")",
"/",
"filename",
"(",
")",
"p",
".",
"touch",
"(",
"exist_ok",
"=",
"True",
")",
"allfiles",
".",
"append",
"(",
"p",
")",
"else",
":",
"payload_generator",
"=",
"payload",
"(",
"Path",
"(",
"root",
")",
")",
"for",
"_",
"in",
"range",
"(",
"n_files",
")",
":",
"p",
"=",
"next",
"(",
"payload_generator",
")",
"allfiles",
".",
"append",
"(",
"p",
")",
"alldirs",
"=",
"list",
"(",
"set",
"(",
"alldirs",
")",
")",
"allfiles",
"=",
"list",
"(",
"set",
"(",
"allfiles",
")",
")",
"return",
"alldirs",
",",
"allfiles"
] | [
27,
0
] | [
92,
28
] | python | en | ['en', 'error', 'th'] | False |
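A hedged usage sketch for the iterative_tree row above. The base directory and count functions are made-up examples; the payload callback follows the protocol described in the docstring (create a file in the given directory, then yield its Path) and reuses random_string from the row further up.

from pathlib import Path

def txt_payload(directory: Path):
    # Create small text files in `directory` and yield their paths, as the payload contract requires
    while True:
        target = directory / (random_string() + ".txt")
        target.write_text("example content")
        yield target

dirs, files = iterative_tree(
    "/tmp/demo_tree",                              # hypothetical base directory
    nfolders_func=lambda depth: 2,                 # two subfolders per visited folder
    nfiles_func=lambda depth: max(0, 3 - depth),   # fewer files at deeper levels
    repeat=2,
    maxdepth=3,
    payload=txt_payload,
)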
iterative_gaussian_tree | (
basedir: Union[str, PurePath],
nfiles=2,
nfolders=1,
repeat=1,
maxdepth=None,
sigma_folders=1,
sigma_files=1,
min_folders=0,
min_files=0,
filename=random_string,
payload: Optional[Callable[[Path], Generator[Path, None, None]]] = None,
) |
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen from a Gaussian distribution).
Args:
basedir: Directory to create files and folders in
nfiles: Average number of files to create
nfolders: Average number of folders to create
repeat: Walk this often through the directory tree to create new
subdirectories and files
maxdepth: Maximum depth to descend into current file tree. If None,
infinity.
sigma_folders: Spread of number of folders
sigma_files: Spread of number of files
min_folders: Minimal number of folders to create. Default 0.
min_files: Minimal number of files to create. Default 0.
filename: Callable to generate filename. Default returns short
random string
payload: Use this argument to generate files with content: Specify a
function that takes a directory ``dir`` (``Path`` object) as
argument, picks a name ``name``, creates the corresponding file
``dir/name`` and yields ``name``. Overrides ``filename`` argument
if both are passed. Takes the target directory as a Path object and
yields the Path of each created file.
If this option is not specified, all created files will be empty.
Returns:
(List of dirs, List of files), all as :class:`pathlib.Path` objects.
|
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen from a Gaussian distribution). | def iterative_gaussian_tree(
basedir: Union[str, PurePath],
nfiles=2,
nfolders=1,
repeat=1,
maxdepth=None,
sigma_folders=1,
sigma_files=1,
min_folders=0,
min_files=0,
filename=random_string,
payload: Optional[Callable[[Path], Generator[Path, None, None]]] = None,
):
"""
Create a random set of files and folders by repeatedly walking through the
current tree and creating random files or subfolders (the number of files
and folders created is chosen from a Gaussian distribution).
Args:
basedir: Directory to create files and folders in
nfiles: Average number of files to create
nfolders: Average number of folders to create
repeat: Walk this often through the directory tree to create new
subdirectories and files
maxdepth: Maximum depth to descend into current file tree. If None,
infinity.
sigma_folders: Spread of number of folders
sigma_files: Spread of number of files
min_folders: Minimal number of folders to create. Default 0.
min_files: Minimal number of files to create. Default 0.
filename: Callable to generate filename. Default returns short
random string
payload: Use this argument to generate files with content: Specify a
function that takes a directory ``dir`` (``Path`` object) as
argument, picks a name ``name``, creates the corresponding file
``dir/name`` and yields ``name``. Overrides ``filename`` argument
if both are passed. Takes the target directory as a Path object and
yields the Path of each created file.
If this option is not specified, all created files will be empty.
Returns:
(List of dirs, List of files), all as :class:`pathlib.Path` objects.
"""
# noinspection PyUnusedLocal
def nfolders_func(*args):
return max(min_folders, int(random.gauss(nfolders, sigma_folders)))
# noinspection PyUnusedLocal
def nfiles_func(*args):
return max(min_files, int(random.gauss(nfiles, sigma_files)))
return iterative_tree(
basedir=basedir,
nfiles_func=nfiles_func,
nfolders_func=nfolders_func,
repeat=repeat,
maxdepth=maxdepth,
filename=filename,
payload=payload,
) | [
"def",
"iterative_gaussian_tree",
"(",
"basedir",
":",
"Union",
"[",
"str",
",",
"PurePath",
"]",
",",
"nfiles",
"=",
"2",
",",
"nfolders",
"=",
"1",
",",
"repeat",
"=",
"1",
",",
"maxdepth",
"=",
"None",
",",
"sigma_folders",
"=",
"1",
",",
"sigma_files",
"=",
"1",
",",
"min_folders",
"=",
"0",
",",
"min_files",
"=",
"0",
",",
"filename",
"=",
"random_string",
",",
"payload",
":",
"Optional",
"[",
"Callable",
"[",
"[",
"Path",
"]",
",",
"Generator",
"[",
"Path",
",",
"None",
",",
"None",
"]",
"]",
"]",
"=",
"None",
",",
")",
":",
"# noinspection PyUnusedLocal",
"def",
"nfolders_func",
"(",
"*",
"args",
")",
":",
"return",
"max",
"(",
"min_folders",
",",
"int",
"(",
"random",
".",
"gauss",
"(",
"nfolders",
",",
"sigma_folders",
")",
")",
")",
"# noinspection PyUnusedLocal",
"def",
"nfiles_func",
"(",
"*",
"args",
")",
":",
"return",
"max",
"(",
"min_files",
",",
"int",
"(",
"random",
".",
"gauss",
"(",
"nfiles",
",",
"sigma_files",
")",
")",
")",
"return",
"iterative_tree",
"(",
"basedir",
"=",
"basedir",
",",
"nfiles_func",
"=",
"nfiles_func",
",",
"nfolders_func",
"=",
"nfolders_func",
",",
"repeat",
"=",
"repeat",
",",
"maxdepth",
"=",
"maxdepth",
",",
"filename",
"=",
"filename",
",",
"payload",
"=",
"payload",
",",
")"
] | [
95,
0
] | [
154,
5
] | python | en | ['en', 'error', 'th'] | False |
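A short call sketch for the iterative_gaussian_tree row above; the target directory and the Gaussian parameters are illustrative.

dirs, files = iterative_gaussian_tree(
    "/tmp/demo_tree",
    nfiles=3,          # mean number of files created per visited folder
    nfolders=2,        # mean number of subfolders created per visited folder
    sigma_files=1,
    sigma_folders=1,
    min_files=1,
    repeat=2,
    maxdepth=4,
)
print(len(dirs), "directories and", len(files), "files created")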
choose_random_elements | (basedir, n_dirs, n_files, onfail="raise") |
Select random files and directories. If all directories and files must be
unique, use sample_random_elements instead.
Args:
basedir: Directory to scan
n_dirs: Number of directories to pick
n_files: Number of files to pick
onfail: What to do if there are no files or folders to pick from?
Either 'raise' (raise ValueError) or 'ignore' (return empty list)
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
|
Select random files and directories. If all directories and files must be
unique, use sample_random_elements instead. | def choose_random_elements(basedir, n_dirs, n_files, onfail="raise"):
"""
Select random files and directories. If all directories and files must be
unique, use sample_random_elements instead.
Args:
basedir: Directory to scan
n_dirs: Number of directories to pick
n_files: Number of files to pick
onfail: What to do if there are no files or folders to pick from?
Either 'raise' (raise ValueError) or 'ignore' (return empty list)
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
"""
alldirs = []
allfiles = []
for root, dirs, files in os.walk(str(basedir)):
for d in dirs:
alldirs.append(Path(root) / d)
for file in files:
allfiles.append(Path(root) / file)
if n_dirs and not alldirs:
if onfail == "raise":
raise ValueError(
"{} does not have subfolders, so cannot select directories."
)
else:
selected_dirs = []
else:
selected_dirs = [random.choice(alldirs) for _ in range(n_dirs)]
if n_files and not allfiles:
if onfail == "raise":
raise ValueError(
"{} does not contain any files, so cannot select random files."
)
elif onfail == "ignore":
selected_files = []
else:
raise ValueError("Unknown value for 'onfail' parameter.")
else:
selected_files = [random.choice(allfiles) for _ in range(n_files)]
return selected_dirs, selected_files | [
"def",
"choose_random_elements",
"(",
"basedir",
",",
"n_dirs",
",",
"n_files",
",",
"onfail",
"=",
"\"raise\"",
")",
":",
"alldirs",
"=",
"[",
"]",
"allfiles",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"str",
"(",
"basedir",
")",
")",
":",
"for",
"d",
"in",
"dirs",
":",
"alldirs",
".",
"append",
"(",
"Path",
"(",
"root",
")",
"/",
"d",
")",
"for",
"file",
"in",
"files",
":",
"allfiles",
".",
"append",
"(",
"Path",
"(",
"root",
")",
"/",
"file",
")",
"if",
"n_dirs",
"and",
"not",
"alldirs",
":",
"if",
"onfail",
"==",
"\"raise\"",
":",
"raise",
"ValueError",
"(",
"\"{} does not have subfolders, so cannot select directories.\"",
")",
"else",
":",
"selected_dirs",
"=",
"[",
"]",
"else",
":",
"selected_dirs",
"=",
"[",
"random",
".",
"choice",
"(",
"alldirs",
")",
"for",
"_",
"in",
"range",
"(",
"n_dirs",
")",
"]",
"if",
"n_files",
"and",
"not",
"allfiles",
":",
"if",
"onfail",
"==",
"\"raise\"",
":",
"raise",
"ValueError",
"(",
"\"{} does not contain any files, so cannot select random files.\"",
")",
"elif",
"onfail",
"==",
"\"ignore\"",
":",
"selected_files",
"=",
"[",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown value for 'onfail' parameter.\"",
")",
"else",
":",
"selected_files",
"=",
"[",
"random",
".",
"choice",
"(",
"allfiles",
")",
"for",
"_",
"in",
"range",
"(",
"n_files",
")",
"]",
"return",
"selected_dirs",
",",
"selected_files"
] | [
157,
0
] | [
198,
40
] | python | en | ['en', 'error', 'th'] | False |
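A usage sketch for the choose_random_elements row above. Because the picks are drawn with replacement, the same path can appear more than once; the scanned directory is a placeholder.

picked_dirs, picked_files = choose_random_elements(
    "/tmp/demo_tree", n_dirs=2, n_files=5, onfail="ignore"
)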
sample_random_elements | (basedir, n_dirs, n_files, onfail="raise") |
Select random distinct files and directories. If the directories and files
do not have to be distinct, use choose_random_elements instead.
Args:
basedir: Directory to scan
n_dirs: Number of directories to pick
n_files: Number of files to pick
onfail: What to do if there are no files or folders to pick from?
Either 'raise' (raise ValueError) or 'ignore' (return list with
fewer elements)
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
|
Select random distinct files and directories. If the directories and files
do not have to be distinct, use choose_random_elements instead. | def sample_random_elements(basedir, n_dirs, n_files, onfail="raise"):
"""
Select random distinct files and directories. If the directories and files
do not have to be distinct, use choose_random_elements instead.
Args:
basedir: Directory to scan
n_dirs: Number of directories to pick
n_files: Number of files to pick
onfail: What to do if there are no files or folders to pick from?
Either 'raise' (raise ValueError) or 'ignore' (return list with
fewer elements)
Returns:
(List of dirs, List of files), all as pathlib.Path objects.
"""
alldirs = []
allfiles = []
for root, dirs, files in os.walk(str(basedir)):
for d in dirs:
alldirs.append(Path(root) / d)
for file in files:
allfiles.append(Path(root) / file)
if n_dirs and len(alldirs) < n_dirs:
if onfail == "raise":
raise ValueError(
"{} does not have enough subfolders, so cannot select "
"enough directories."
)
elif onfail == "ignore":
selected_dirs = random.sample(alldirs, len(alldirs))
else:
raise ValueError("Unknown value for 'onfail' parameter.")
else:
selected_dirs = random.sample(alldirs, n_dirs)
if n_files and len(allfiles) < n_files:
if onfail == "raise":
raise ValueError(
"{} does not contain enough files, so cannot select "
"enough random files."
)
elif onfail == "ignore":
selected_files = random.sample(allfiles, len(allfiles))
else:
raise ValueError("Unknown value for 'onfail' parameter.")
else:
selected_files = random.sample(allfiles, n_files)
return selected_dirs, selected_files | [
"def",
"sample_random_elements",
"(",
"basedir",
",",
"n_dirs",
",",
"n_files",
",",
"onfail",
"=",
"\"raise\"",
")",
":",
"alldirs",
"=",
"[",
"]",
"allfiles",
"=",
"[",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"str",
"(",
"basedir",
")",
")",
":",
"for",
"d",
"in",
"dirs",
":",
"alldirs",
".",
"append",
"(",
"Path",
"(",
"root",
")",
"/",
"d",
")",
"for",
"file",
"in",
"files",
":",
"allfiles",
".",
"append",
"(",
"Path",
"(",
"root",
")",
"/",
"file",
")",
"if",
"n_dirs",
"and",
"len",
"(",
"alldirs",
")",
"<",
"n_dirs",
":",
"if",
"onfail",
"==",
"\"raise\"",
":",
"raise",
"ValueError",
"(",
"\"{} does not have enough subfolders, so cannot select \"",
"\"enough directories.\"",
")",
"elif",
"onfail",
"==",
"\"ignore\"",
":",
"selected_dirs",
"=",
"random",
".",
"sample",
"(",
"alldirs",
",",
"len",
"(",
"alldirs",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown value for 'onfail' parameter.\"",
")",
"else",
":",
"selected_dirs",
"=",
"random",
".",
"sample",
"(",
"alldirs",
",",
"n_dirs",
")",
"if",
"n_files",
"and",
"len",
"(",
"allfiles",
")",
"<",
"n_files",
":",
"if",
"onfail",
"==",
"\"raise\"",
":",
"raise",
"ValueError",
"(",
"\"{} does not contain enough files, so cannot select \"",
"\"enough random files.\"",
")",
"elif",
"onfail",
"==",
"\"ignore\"",
":",
"selected_files",
"=",
"random",
".",
"sample",
"(",
"allfiles",
",",
"len",
"(",
"allfiles",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown value for 'onfail' parameter.\"",
")",
"else",
":",
"selected_files",
"=",
"random",
".",
"sample",
"(",
"allfiles",
",",
"n_files",
")",
"return",
"selected_dirs",
",",
"selected_files"
] | [
201,
0
] | [
247,
40
] | python | en | ['en', 'error', 'th'] | False |
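A usage sketch for the sample_random_elements row above; unlike choose_random_elements, the returned paths are distinct. The directory and counts are placeholders.

unique_dirs, unique_files = sample_random_elements(
    "/tmp/demo_tree", n_dirs=2, n_files=5, onfail="raise"
)
assert len(set(unique_files)) == len(unique_files)   # sampled without replacement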
ElasticsearchDocumentStore.__init__ | (
self,
host: Union[str, List[str]] = "localhost",
port: Union[int, List[int]] = 9200,
username: str = "",
password: str = "",
api_key_id: Optional[str] = None,
api_key: Optional[str] = None,
index: str = "document",
label_index: str = "label",
search_fields: Union[str, list] = "text",
text_field: str = "text",
name_field: str = "name",
embedding_field: str = "embedding",
embedding_dim: int = 768,
custom_mapping: Optional[dict] = None,
excluded_meta_data: Optional[list] = None,
faq_question_field: Optional[str] = None,
analyzer: str = "standard",
scheme: str = "http",
ca_certs: Optional[str] = None,
verify_certs: bool = True,
create_index: bool = True,
update_existing_documents: bool = False,
refresh_type: str = "wait_for",
similarity="dot_product",
timeout=30,
return_embedding: bool = False,
) |
A DocumentStore using Elasticsearch to store and query the documents for our search.
* Keeps all the logic to store and query documents from Elastic, incl. mapping of fields, adding filters or boosts to your queries, and storing embeddings
* You can either use an existing Elasticsearch index or create a new one via haystack
* Retrievers operate on top of this DocumentStore to find the relevant documents for a query
:param host: url(s) of elasticsearch nodes
:param port: port(s) of elasticsearch nodes
:param username: username (standard authentication via http_auth)
:param password: password (standard authentication via http_auth)
:param api_key_id: ID of the API key (alternative authentication mode to the above http_auth)
:param api_key: Secret value of the API key (alternative authentication mode to the above http_auth)
:param index: Name of index in elasticsearch to use for storing the documents that we want to search. If not existing yet, we will create one.
:param label_index: Name of index in elasticsearch to use for storing labels. If not existing yet, we will create one.
:param search_fields: Name of fields used by ElasticsearchRetriever to find matches in the docs to our incoming query (using elastic's multi_match query), e.g. ["title", "full_text"]
:param text_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. "full_text").
If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.
:param name_field: Name of field that contains the title of the doc
:param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
:param embedding_dim: Dimensionality of embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
:param custom_mapping: If you want to use your own custom mapping for creating a new index in Elasticsearch, you can supply it here as a dictionary.
:param analyzer: Specify the default analyzer from one of the built-ins when creating a new Elasticsearch Index.
Elasticsearch also has built-in analyzers for different languages (e.g. impacting tokenization). More info at:
https://www.elastic.co/guide/en/elasticsearch/reference/7.9/analysis-analyzers.html
:param excluded_meta_data: Name of fields in Elasticsearch that should not be returned (e.g. [field_one, field_two]).
Helpful if you have fields with long, irrelevant content that you don't want to display in results (e.g. embedding vectors).
:param scheme: 'https' or 'http', protocol used to connect to your elasticsearch instance
:param ca_certs: Root certificates for SSL: it is a path to certificate authority (CA) certs on disk. You can use certifi package with certifi.where() to find where the CA certs file is located in your machine.
:param verify_certs: Whether to be strict about ca certificates
:param create_index: Whether to try creating a new index (If the index of that name is already existing, we will just continue in any case)
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists.
:param refresh_type: Type of ES refresh used to control when changes made by a request (e.g. bulk) are made visible to search.
If set to 'wait_for', continue only after changes are visible (slow, but safe).
If set to 'false', continue directly (fast, but sometimes unintuitive behaviour when docs are not immediately available after ingestion).
More info at https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docs-refresh.html
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
:param timeout: Number of seconds after which an ElasticSearch request times out.
:param return_embedding: To return document embedding
|
A DocumentStore using Elasticsearch to store and query the documents for our search. | def __init__(
self,
host: Union[str, List[str]] = "localhost",
port: Union[int, List[int]] = 9200,
username: str = "",
password: str = "",
api_key_id: Optional[str] = None,
api_key: Optional[str] = None,
index: str = "document",
label_index: str = "label",
search_fields: Union[str, list] = "text",
text_field: str = "text",
name_field: str = "name",
embedding_field: str = "embedding",
embedding_dim: int = 768,
custom_mapping: Optional[dict] = None,
excluded_meta_data: Optional[list] = None,
faq_question_field: Optional[str] = None,
analyzer: str = "standard",
scheme: str = "http",
ca_certs: Optional[str] = None,
verify_certs: bool = True,
create_index: bool = True,
update_existing_documents: bool = False,
refresh_type: str = "wait_for",
similarity="dot_product",
timeout=30,
return_embedding: bool = False,
):
"""
A DocumentStore using Elasticsearch to store and query the documents for our search.
* Keeps all the logic to store and query documents from Elastic, incl. mapping of fields, adding filters or boosts to your queries, and storing embeddings
* You can either use an existing Elasticsearch index or create a new one via haystack
* Retrievers operate on top of this DocumentStore to find the relevant documents for a query
:param host: url(s) of elasticsearch nodes
:param port: port(s) of elasticsearch nodes
:param username: username (standard authentication via http_auth)
:param password: password (standard authentication via http_auth)
:param api_key_id: ID of the API key (alternative authentication mode to the above http_auth)
:param api_key: Secret value of the API key (alternative authentication mode to the above http_auth)
:param index: Name of index in elasticsearch to use for storing the documents that we want to search. If not existing yet, we will create one.
:param label_index: Name of index in elasticsearch to use for storing labels. If not existing yet, we will create one.
:param search_fields: Name of fields used by ElasticsearchRetriever to find matches in the docs to our incoming query (using elastic's multi_match query), e.g. ["title", "full_text"]
:param text_field: Name of field that might contain the answer and will therefore be passed to the Reader Model (e.g. "full_text").
If no Reader is used (e.g. in FAQ-Style QA) the plain content of this field will just be returned.
:param name_field: Name of field that contains the title of the doc
:param embedding_field: Name of field containing an embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
:param embedding_dim: Dimensionality of embedding vector (Only needed when using a dense retriever (e.g. DensePassageRetriever, EmbeddingRetriever) on top)
:param custom_mapping: If you want to use your own custom mapping for creating a new index in Elasticsearch, you can supply it here as a dictionary.
:param analyzer: Specify the default analyzer from one of the built-ins when creating a new Elasticsearch Index.
Elasticsearch also has built-in analyzers for different languages (e.g. impacting tokenization). More info at:
https://www.elastic.co/guide/en/elasticsearch/reference/7.9/analysis-analyzers.html
:param excluded_meta_data: Name of fields in Elasticsearch that should not be returned (e.g. [field_one, field_two]).
Helpful if you have fields with long, irrelevant content that you don't want to display in results (e.g. embedding vectors).
:param scheme: 'https' or 'http', protocol used to connect to your elasticsearch instance
:param ca_certs: Root certificates for SSL: it is a path to certificate authority (CA) certs on disk. You can use certifi package with certifi.where() to find where the CA certs file is located in your machine.
:param verify_certs: Whether to be strict about ca certificates
:param create_index: Whether to try creating a new index (If the index of that name is already existing, we will just continue in any case)
:param update_existing_documents: Whether to update any existing documents with the same ID when adding
documents. When set as True, any document with an existing ID gets updated.
If set to False, an error is raised if the document ID of the document being
added already exists.
:param refresh_type: Type of ES refresh used to control when changes made by a request (e.g. bulk) are made visible to search.
If set to 'wait_for', continue only after changes are visible (slow, but safe).
If set to 'false', continue directly (fast, but sometimes unintuitive behaviour when docs are not immediately available after ingestion).
More info at https://www.elastic.co/guide/en/elasticsearch/reference/6.8/docs-refresh.html
:param similarity: The similarity function used to compare document vectors. 'dot_product' is the default since it is
more performant with DPR embeddings. 'cosine' is recommended if you are using a Sentence BERT model.
:param timeout: Number of seconds after which an ElasticSearch request times out.
:param return_embedding: To return document embedding
"""
self.client = self._init_elastic_client(host=host, port=port, username=username, password=password,
api_key=api_key, api_key_id=api_key_id, scheme=scheme,
ca_certs=ca_certs, verify_certs=verify_certs,timeout=timeout)
# configure mappings to ES fields that will be used for querying / displaying results
if type(search_fields) == str:
search_fields = [search_fields]
#TODO we should implement a more flexible interal mapping here that simplifies the usage of additional,
# custom fields (e.g. meta data you want to return)
self.search_fields = search_fields
self.text_field = text_field
self.name_field = name_field
self.embedding_field = embedding_field
self.embedding_dim = embedding_dim
self.excluded_meta_data = excluded_meta_data
self.faq_question_field = faq_question_field
self.analyzer = analyzer
self.return_embedding = return_embedding
self.custom_mapping = custom_mapping
self.index: str = index
self.label_index: str = label_index
if similarity in ["cosine", "dot_product"]:
self.similarity = similarity
else:
raise Exception("Invalid value for similarity in ElasticSearchDocumentStore constructor. Choose between 'cosine' and 'dot_product'")
if create_index:
self._create_document_index(index)
self._create_label_index(label_index)
self.update_existing_documents = update_existing_documents
self.refresh_type = refresh_type | [
"def",
"__init__",
"(",
"self",
",",
"host",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
"=",
"\"localhost\"",
",",
"port",
":",
"Union",
"[",
"int",
",",
"List",
"[",
"int",
"]",
"]",
"=",
"9200",
",",
"username",
":",
"str",
"=",
"\"\"",
",",
"password",
":",
"str",
"=",
"\"\"",
",",
"api_key_id",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"api_key",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"index",
":",
"str",
"=",
"\"document\"",
",",
"label_index",
":",
"str",
"=",
"\"label\"",
",",
"search_fields",
":",
"Union",
"[",
"str",
",",
"list",
"]",
"=",
"\"text\"",
",",
"text_field",
":",
"str",
"=",
"\"text\"",
",",
"name_field",
":",
"str",
"=",
"\"name\"",
",",
"embedding_field",
":",
"str",
"=",
"\"embedding\"",
",",
"embedding_dim",
":",
"int",
"=",
"768",
",",
"custom_mapping",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
",",
"excluded_meta_data",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"faq_question_field",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"analyzer",
":",
"str",
"=",
"\"standard\"",
",",
"scheme",
":",
"str",
"=",
"\"http\"",
",",
"ca_certs",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
",",
"verify_certs",
":",
"bool",
"=",
"True",
",",
"create_index",
":",
"bool",
"=",
"True",
",",
"update_existing_documents",
":",
"bool",
"=",
"False",
",",
"refresh_type",
":",
"str",
"=",
"\"wait_for\"",
",",
"similarity",
"=",
"\"dot_product\"",
",",
"timeout",
"=",
"30",
",",
"return_embedding",
":",
"bool",
"=",
"False",
",",
")",
":",
"self",
".",
"client",
"=",
"self",
".",
"_init_elastic_client",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"api_key",
"=",
"api_key",
",",
"api_key_id",
"=",
"api_key_id",
",",
"scheme",
"=",
"scheme",
",",
"ca_certs",
"=",
"ca_certs",
",",
"verify_certs",
"=",
"verify_certs",
",",
"timeout",
"=",
"timeout",
")",
"# configure mappings to ES fields that will be used for querying / displaying results",
"if",
"type",
"(",
"search_fields",
")",
"==",
"str",
":",
"search_fields",
"=",
"[",
"search_fields",
"]",
"#TODO we should implement a more flexible interal mapping here that simplifies the usage of additional,",
"# custom fields (e.g. meta data you want to return)",
"self",
".",
"search_fields",
"=",
"search_fields",
"self",
".",
"text_field",
"=",
"text_field",
"self",
".",
"name_field",
"=",
"name_field",
"self",
".",
"embedding_field",
"=",
"embedding_field",
"self",
".",
"embedding_dim",
"=",
"embedding_dim",
"self",
".",
"excluded_meta_data",
"=",
"excluded_meta_data",
"self",
".",
"faq_question_field",
"=",
"faq_question_field",
"self",
".",
"analyzer",
"=",
"analyzer",
"self",
".",
"return_embedding",
"=",
"return_embedding",
"self",
".",
"custom_mapping",
"=",
"custom_mapping",
"self",
".",
"index",
":",
"str",
"=",
"index",
"self",
".",
"label_index",
":",
"str",
"=",
"label_index",
"if",
"similarity",
"in",
"[",
"\"cosine\"",
",",
"\"dot_product\"",
"]",
":",
"self",
".",
"similarity",
"=",
"similarity",
"else",
":",
"raise",
"Exception",
"(",
"\"Invalid value for similarity in ElasticSearchDocumentStore constructor. Choose between 'cosine' and 'dot_product'\"",
")",
"if",
"create_index",
":",
"self",
".",
"_create_document_index",
"(",
"index",
")",
"self",
".",
"_create_label_index",
"(",
"label_index",
")",
"self",
".",
"update_existing_documents",
"=",
"update_existing_documents",
"self",
".",
"refresh_type",
"=",
"refresh_type"
] | [
20,
4
] | [
127,
40
] | python | en | ['en', 'error', 'th'] | False |
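A construction sketch based on the __init__ signature in the row above. The import path differs between Haystack releases and is an assumption here, as are the host, index name, and embedding settings.

# Assumed import path; adjust to the Haystack version this snapshot was taken from
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

document_store = ElasticsearchDocumentStore(
    host="localhost",
    port=9200,
    index="document",
    embedding_field="embedding",
    embedding_dim=768,
    similarity="dot_product",        # or "cosine" for Sentence-BERT style embeddings
    update_existing_documents=True,
)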
ElasticsearchDocumentStore._create_document_index | (self, index_name: str) |
Create a new index for storing documents. If an index with that name already exists, it ensures that
the embedding_field is present.
|
Create a new index for storing documents. If an index with that name already exists, it ensures that
the embedding_field is present.
| def _create_document_index(self, index_name: str):
"""
Create a new index for storing documents. If an index with that name already exists, it ensures that
the embedding_field is present.
"""
# check if the existing index has the embedding field; if not create it
if self.client.indices.exists(index=index_name):
if self.embedding_field:
mapping = self.client.indices.get(index_name)[index_name]["mappings"]
if self.embedding_field in mapping["properties"] and mapping["properties"][self.embedding_field]["type"] != "dense_vector":
raise Exception(f"The '{index_name}' index in Elasticsearch already has a field called '{self.embedding_field}'"
f" with the type '{mapping['properties'][self.embedding_field]['type']}'. Please update the "
f"document_store to use a different name for the embedding_field parameter.")
mapping["properties"][self.embedding_field] = {"type": "dense_vector", "dims": self.embedding_dim}
self.client.indices.put_mapping(index=index_name, body=mapping)
return
if self.custom_mapping:
mapping = self.custom_mapping
else:
mapping = {
"mappings": {
"properties": {
self.name_field: {"type": "keyword"},
self.text_field: {"type": "text"},
},
"dynamic_templates": [
{
"strings": {
"path_match": "*",
"match_mapping_type": "string",
"mapping": {"type": "keyword"}}}
],
},
"settings": {
"analysis": {
"analyzer": {
"default": {
"type": self.analyzer,
}
}
}
}
}
if self.embedding_field:
mapping["mappings"]["properties"][self.embedding_field] = {"type": "dense_vector", "dims": self.embedding_dim}
try:
self.client.indices.create(index=index_name, body=mapping)
except RequestError as e:
# With multiple workers we need to avoid race conditions, where:
# - there's no index in the beginning
# - both want to create one
# - one fails as the other one already created it
if not self.client.indices.exists(index=index_name):
raise e | [
"def",
"_create_document_index",
"(",
"self",
",",
"index_name",
":",
"str",
")",
":",
"# check if the existing index has the embedding field; if not create it",
"if",
"self",
".",
"client",
".",
"indices",
".",
"exists",
"(",
"index",
"=",
"index_name",
")",
":",
"if",
"self",
".",
"embedding_field",
":",
"mapping",
"=",
"self",
".",
"client",
".",
"indices",
".",
"get",
"(",
"index_name",
")",
"[",
"index_name",
"]",
"[",
"\"mappings\"",
"]",
"if",
"self",
".",
"embedding_field",
"in",
"mapping",
"[",
"\"properties\"",
"]",
"and",
"mapping",
"[",
"\"properties\"",
"]",
"[",
"self",
".",
"embedding_field",
"]",
"[",
"\"type\"",
"]",
"!=",
"\"dense_vector\"",
":",
"raise",
"Exception",
"(",
"f\"The '{index_name}' index in Elasticsearch already has a field called '{self.embedding_field}'\"",
"f\" with the type '{mapping['properties'][self.embedding_field]['type']}'. Please update the \"",
"f\"document_store to use a different name for the embedding_field parameter.\"",
")",
"mapping",
"[",
"\"properties\"",
"]",
"[",
"self",
".",
"embedding_field",
"]",
"=",
"{",
"\"type\"",
":",
"\"dense_vector\"",
",",
"\"dims\"",
":",
"self",
".",
"embedding_dim",
"}",
"self",
".",
"client",
".",
"indices",
".",
"put_mapping",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"mapping",
")",
"return",
"if",
"self",
".",
"custom_mapping",
":",
"mapping",
"=",
"self",
".",
"custom_mapping",
"else",
":",
"mapping",
"=",
"{",
"\"mappings\"",
":",
"{",
"\"properties\"",
":",
"{",
"self",
".",
"name_field",
":",
"{",
"\"type\"",
":",
"\"keyword\"",
"}",
",",
"self",
".",
"text_field",
":",
"{",
"\"type\"",
":",
"\"text\"",
"}",
",",
"}",
",",
"\"dynamic_templates\"",
":",
"[",
"{",
"\"strings\"",
":",
"{",
"\"path_match\"",
":",
"\"*\"",
",",
"\"match_mapping_type\"",
":",
"\"string\"",
",",
"\"mapping\"",
":",
"{",
"\"type\"",
":",
"\"keyword\"",
"}",
"}",
"}",
"]",
",",
"}",
",",
"\"settings\"",
":",
"{",
"\"analysis\"",
":",
"{",
"\"analyzer\"",
":",
"{",
"\"default\"",
":",
"{",
"\"type\"",
":",
"self",
".",
"analyzer",
",",
"}",
"}",
"}",
"}",
"}",
"if",
"self",
".",
"embedding_field",
":",
"mapping",
"[",
"\"mappings\"",
"]",
"[",
"\"properties\"",
"]",
"[",
"self",
".",
"embedding_field",
"]",
"=",
"{",
"\"type\"",
":",
"\"dense_vector\"",
",",
"\"dims\"",
":",
"self",
".",
"embedding_dim",
"}",
"try",
":",
"self",
".",
"client",
".",
"indices",
".",
"create",
"(",
"index",
"=",
"index_name",
",",
"body",
"=",
"mapping",
")",
"except",
"RequestError",
"as",
"e",
":",
"# With multiple workers we need to avoid race conditions, where:",
"# - there's no index in the beginning",
"# - both want to create one",
"# - one fails as the other one already created it",
"if",
"not",
"self",
".",
"client",
".",
"indices",
".",
"exists",
"(",
"index",
"=",
"index_name",
")",
":",
"raise",
"e"
] | [
174,
4
] | [
229,
23
] | python | en | ['en', 'error', 'th'] | False |
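The method above normally runs through the constructor when create_index=True; the direct calls below are only a sketch showing that it takes an index name and is safe to call when the index already exists.

document_store._create_document_index("document")       # no-op apart from ensuring the embedding field exists
document_store._create_document_index("another_index")  # creates a fresh index with the default or custom mapping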
ElasticsearchDocumentStore.get_document_by_id | (self, id: str, index: Optional[str] = None) | Fetch a document by specifying its text id string | Fetch a document by specifying its text id string | def get_document_by_id(self, id: str, index: Optional[str] = None) -> Optional[Document]:
"""Fetch a document by specifying its text id string"""
index = index or self.index
documents = self.get_documents_by_id([id], index=index)
if documents:
return documents[0]
else:
return None | [
"def",
"get_document_by_id",
"(",
"self",
",",
"id",
":",
"str",
",",
"index",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Optional",
"[",
"Document",
"]",
":",
"index",
"=",
"index",
"or",
"self",
".",
"index",
"documents",
"=",
"self",
".",
"get_documents_by_id",
"(",
"[",
"id",
"]",
",",
"index",
"=",
"index",
")",
"if",
"documents",
":",
"return",
"documents",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] | [
271,
4
] | [
278,
23
] | python | en | ['en', 'en', 'en'] | True |
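A retrieval sketch for the get_document_by_id row above; the id string is a placeholder.

doc = document_store.get_document_by_id("some-document-id")
if doc is None:
    print("No document found for that id")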