Dataset columns:

identifier                       string     lengths 1 to 155
parameters                       string     lengths 2 to 6.09k
docstring                        string     lengths 11 to 63.4k
docstring_summary                string     lengths 0 to 63.4k
function                         string     lengths 29 to 99.8k
function_tokens                  sequence
start_point                      sequence
end_point                        sequence
language                         string     1 distinct value
docstring_language               string     lengths 2 to 7
docstring_language_predictions   string     lengths 18 to 23
is_langid_reliable               string     2 distinct values
MsvsSettings._GetLdManifestFlags
(self, config, name, gyp_to_build_path, allow_isolation, build_dir)
Returns a 3-tuple: - the set of flags that need to be added to the link to generate a default manifest - the intermediate manifest that the linker will generate that should be used to assert it doesn't add anything to the merged one. - the list of all the manifest files to be merged by the manifest tool and included into the link.
Returns a 3-tuple: - the set of flags that need to be added to the link to generate a default manifest - the intermediate manifest that the linker will generate that should be used to assert it doesn't add anything to the merged one. - the list of all the manifest files to be merged by the manifest tool and included into the link.
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
                        allow_isolation, build_dir):
  """Returns a 3-tuple:
  - the set of flags that need to be added to the link to generate
    a default manifest
  - the intermediate manifest that the linker will generate that should be
    used to assert it doesn't add anything to the merged one.
  - the list of all the manifest files to be merged by the manifest tool and
    included into the link."""
  generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
                                    config, default='true')
  if generate_manifest != 'true':
    # This means not only that the linker should not generate the intermediate
    # manifest but also that the manifest tool should do nothing even when
    # additional manifests are specified.
    return ['/MANIFEST:NO'], [], []

  output_name = name + '.intermediate.manifest'
  flags = [
    '/MANIFEST',
    '/ManifestFile:' + output_name,
  ]

  # Instead of using the MANIFESTUAC flags, we generate a .manifest to
  # include into the list of manifests. This allows us to avoid the need to
  # do two passes during linking. The /MANIFEST flag and /ManifestFile are
  # still used, and the intermediate manifest is used to assert that the
  # final manifest we get from merging all the additional manifest files
  # (plus the one we generate here) isn't modified by merging the
  # intermediate into it.

  # Always NO, because we generate a manifest file that has what we want.
  flags.append('/MANIFESTUAC:NO')

  config = self._TargetConfig(config)
  enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
                             default='true')
  manifest_files = []
  generated_manifest_outer = \
      "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
      "<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
      "</assembly>"
  if enable_uac == 'true':
    execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
                                    config, default='0')
    execution_level_map = {
      '0': 'asInvoker',
      '1': 'highestAvailable',
      '2': 'requireAdministrator'
    }

    ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
                              default='false')

    inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
  else:
    inner = ''

  generated_manifest_contents = generated_manifest_outer % inner
  generated_name = name + '.generated.manifest'
  # Need to join with the build_dir here as we're writing it during
  # generation time, but we return the un-joined version because the build
  # will occur in that directory. We only write the file if the contents
  # have changed so that simply regenerating the project files doesn't
  # cause a relink.
  build_dir_generated_name = os.path.join(build_dir, generated_name)
  gyp.common.EnsureDirExists(build_dir_generated_name)
  f = gyp.common.WriteOnDiff(build_dir_generated_name)
  f.write(generated_manifest_contents)
  f.close()
  manifest_files = [generated_name]

  if allow_isolation:
    flags.append('/ALLOWISOLATION')

  manifest_files += self._GetAdditionalManifestFiles(config,
                                                     gyp_to_build_path)
  return flags, output_name, manifest_files
[ "def", "_GetLdManifestFlags", "(", "self", ",", "config", ",", "name", ",", "gyp_to_build_path", ",", "allow_isolation", ",", "build_dir", ")", ":", "generate_manifest", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'GenerateManifest'", ")", ",", "config", ",", "default", "=", "'true'", ")", "if", "generate_manifest", "!=", "'true'", ":", "# This means not only that the linker should not generate the intermediate", "# manifest but also that the manifest tool should do nothing even when", "# additional manifests are specified.", "return", "[", "'/MANIFEST:NO'", "]", ",", "[", "]", ",", "[", "]", "output_name", "=", "name", "+", "'.intermediate.manifest'", "flags", "=", "[", "'/MANIFEST'", ",", "'/ManifestFile:'", "+", "output_name", ",", "]", "# Instead of using the MANIFESTUAC flags, we generate a .manifest to", "# include into the list of manifests. This allows us to avoid the need to", "# do two passes during linking. The /MANIFEST flag and /ManifestFile are", "# still used, and the intermediate manifest is used to assert that the", "# final manifest we get from merging all the additional manifest files", "# (plus the one we generate here) isn't modified by merging the", "# intermediate into it.", "# Always NO, because we generate a manifest file that has what we want.", "flags", ".", "append", "(", "'/MANIFESTUAC:NO'", ")", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "enable_uac", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'EnableUAC'", ")", ",", "config", ",", "default", "=", "'true'", ")", "manifest_files", "=", "[", "]", "generated_manifest_outer", "=", "\"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\"", "\"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s\"", "\"</assembly>\"", "if", "enable_uac", "==", "'true'", ":", "execution_level", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'UACExecutionLevel'", ")", ",", "config", ",", "default", "=", "'0'", ")", "execution_level_map", "=", "{", "'0'", ":", "'asInvoker'", ",", "'1'", ":", "'highestAvailable'", ",", "'2'", ":", "'requireAdministrator'", "}", "ui_access", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'UACUIAccess'", ")", ",", "config", ",", "default", "=", "'false'", ")", "inner", "=", "'''\n<trustInfo xmlns=\"urn:schemas-microsoft-com:asm.v3\">\n <security>\n <requestedPrivileges>\n <requestedExecutionLevel level='%s' uiAccess='%s' />\n </requestedPrivileges>\n </security>\n</trustInfo>'''", "%", "(", "execution_level_map", "[", "execution_level", "]", ",", "ui_access", ")", "else", ":", "inner", "=", "''", "generated_manifest_contents", "=", "generated_manifest_outer", "%", "inner", "generated_name", "=", "name", "+", "'.generated.manifest'", "# Need to join with the build_dir here as we're writing it during", "# generation time, but we return the un-joined version because the build", "# will occur in that directory. 
We only write the file if the contents", "# have changed so that simply regenerating the project files doesn't", "# cause a relink.", "build_dir_generated_name", "=", "os", ".", "path", ".", "join", "(", "build_dir", ",", "generated_name", ")", "gyp", ".", "common", ".", "EnsureDirExists", "(", "build_dir_generated_name", ")", "f", "=", "gyp", ".", "common", ".", "WriteOnDiff", "(", "build_dir_generated_name", ")", "f", ".", "write", "(", "generated_manifest_contents", ")", "f", ".", "close", "(", ")", "manifest_files", "=", "[", "generated_name", "]", "if", "allow_isolation", ":", "flags", ".", "append", "(", "'/ALLOWISOLATION'", ")", "manifest_files", "+=", "self", ".", "_GetAdditionalManifestFiles", "(", "config", ",", "gyp_to_build_path", ")", "return", "flags", ",", "output_name", ",", "manifest_files" ]
[ 658, 2 ]
[ 743, 45 ]
python
en
['en', 'haw', 'en']
True
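A minimal standalone sketch of the manifest text that _GetLdManifestFlags interpolates. The XML skeleton and the execution-level map are copied from the function above; the chosen settings (UAC enabled, level '2', uiAccess 'false') are hypothetical example values.

# Standalone sketch of the generated-manifest interpolation above.
execution_level_map = {
    '0': 'asInvoker',
    '1': 'highestAvailable',
    '2': 'requireAdministrator',
}

outer = (
    "<?xml version='1.0' encoding='UTF-8' standalone='yes'?>"
    "<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s"
    "</assembly>"
)
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
  <security>
    <requestedPrivileges>
      <requestedExecutionLevel level='%s' uiAccess='%s' />
    </requestedPrivileges>
  </security>
</trustInfo>''' % (execution_level_map['2'], 'false')  # hypothetical settings

print(outer % inner)  # a requireAdministrator manifest with UAC enabled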
MsvsSettings._GetAdditionalManifestFiles
(self, config, gyp_to_build_path)
Gets additional manifest files that are added to the default one generated by the linker.
Gets additional manifest files that are added to the default one generated by the linker.
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
  """Gets additional manifest files that are added to the default one
  generated by the linker."""
  files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
                        default=[])
  if isinstance(files, str):
    files = files.split(';')
  return [os.path.normpath(
              gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
          for f in files]
[ "def", "_GetAdditionalManifestFiles", "(", "self", ",", "config", ",", "gyp_to_build_path", ")", ":", "files", "=", "self", ".", "_Setting", "(", "(", "'VCManifestTool'", ",", "'AdditionalManifestFiles'", ")", ",", "config", ",", "default", "=", "[", "]", ")", "if", "isinstance", "(", "files", ",", "str", ")", ":", "files", "=", "files", ".", "split", "(", "';'", ")", "return", "[", "os", ".", "path", ".", "normpath", "(", "gyp_to_build_path", "(", "self", ".", "ConvertVSMacros", "(", "f", ",", "config", "=", "config", ")", ")", ")", "for", "f", "in", "files", "]" ]
[ 745, 2 ]
[ 754, 27 ]
python
en
['en', 'en', 'en']
True
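A small sketch of the normalization step in _GetAdditionalManifestFiles: MSVS stores the file list as a single ';'-joined string, which is split before each path is macro-expanded and rebased. The identity lambdas here are hypothetical stand-ins for ConvertVSMacros and gyp_to_build_path.

import os

# ';'-joined setting as it appears in a project file (example value).
files = 'a.manifest;conf/extra.manifest'
convert_vs_macros = lambda f: f   # stand-in for self.ConvertVSMacros
gyp_to_build_path = lambda f: f   # stand-in for the generator's rebase hook
if isinstance(files, str):
    files = files.split(';')
print([os.path.normpath(gyp_to_build_path(convert_vs_macros(f))) for f in files])
# ['a.manifest', 'conf/extra.manifest'] (separators normalized per platform)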
MsvsSettings.IsUseLibraryDependencyInputs
(self, config)
Returns whether the target should be linked via Use Library Dependency Inputs (using component .objs of a given .lib).
Returns whether the target should be linked via Use Library Dependency Inputs (using component .objs of a given .lib).
def IsUseLibraryDependencyInputs(self, config):
  """Returns whether the target should be linked via Use Library Dependency
  Inputs (using component .objs of a given .lib)."""
  config = self._TargetConfig(config)
  uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
  return uldi == 'true'
[ "def", "IsUseLibraryDependencyInputs", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "uldi", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'UseLibraryDependencyInputs'", ")", ",", "config", ")", "return", "uldi", "==", "'true'" ]
[ 756, 2 ]
[ 761, 25 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.IsEmbedManifest
(self, config)
Returns whether manifest should be linked into binary.
Returns whether manifest should be linked into binary.
def IsEmbedManifest(self, config):
  """Returns whether manifest should be linked into binary."""
  config = self._TargetConfig(config)
  embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
                        default='true')
  return embed == 'true'
[ "def", "IsEmbedManifest", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "embed", "=", "self", ".", "_Setting", "(", "(", "'VCManifestTool'", ",", "'EmbedManifest'", ")", ",", "config", ",", "default", "=", "'true'", ")", "return", "embed", "==", "'true'" ]
[ 763, 2 ]
[ 768, 26 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.IsLinkIncremental
(self, config)
Returns whether the target should be linked incrementally.
Returns whether the target should be linked incrementally.
def IsLinkIncremental(self, config):
  """Returns whether the target should be linked incrementally."""
  config = self._TargetConfig(config)
  link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
  return link_inc != '1'
[ "def", "IsLinkIncremental", "(", "self", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "link_inc", "=", "self", ".", "_Setting", "(", "(", "'VCLinkerTool'", ",", "'LinkIncremental'", ")", ",", "config", ")", "return", "link_inc", "!=", "'1'" ]
[ 770, 2 ]
[ 774, 26 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetRcflags
(self, config, gyp_to_ninja_path)
Returns the flags that need to be added to invocations of the resource compiler.
Returns the flags that need to be added to invocations of the resource compiler.
def GetRcflags(self, config, gyp_to_ninja_path):
  """Returns the flags that need to be added to invocations of the resource
  compiler."""
  config = self._TargetConfig(config)
  rcflags = []
  rc = self._GetWrapper(self, self.msvs_settings[config],
                        'VCResourceCompilerTool', append=rcflags)
  rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
  rcflags.append('/I' + gyp_to_ninja_path('.'))
  rc('PreprocessorDefinitions', prefix='/d')
  # /l arg must be in hex without leading '0x'
  rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
  return rcflags
[ "def", "GetRcflags", "(", "self", ",", "config", ",", "gyp_to_ninja_path", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "rcflags", "=", "[", "]", "rc", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCResourceCompilerTool'", ",", "append", "=", "rcflags", ")", "rc", "(", "'AdditionalIncludeDirectories'", ",", "map", "=", "gyp_to_ninja_path", ",", "prefix", "=", "'/I'", ")", "rcflags", ".", "append", "(", "'/I'", "+", "gyp_to_ninja_path", "(", "'.'", ")", ")", "rc", "(", "'PreprocessorDefinitions'", ",", "prefix", "=", "'/d'", ")", "# /l arg must be in hex without leading '0x'", "rc", "(", "'Culture'", ",", "prefix", "=", "'/l'", ",", "map", "=", "lambda", "x", ":", "hex", "(", "int", "(", "x", ")", ")", "[", "2", ":", "]", ")", "return", "rcflags" ]
[ 776, 2 ]
[ 788, 18 ]
python
en
['en', 'en', 'en']
True
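The Culture mapping above is compact enough to miss: the project file stores a decimal locale ID, while rc.exe's /l flag wants bare hex with no '0x' prefix. A quick sketch for US English (LCID 1033):

# hex(int(x))[2:] strips Python's '0x' prefix from the hex form.
culture = '1033'                     # decimal LCID as stored in the project
print('/l' + hex(int(culture))[2:])  # /l409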
MsvsSettings.BuildCygwinBashCommandLine
(self, args, path_to_base)
Build a command line that runs args via cygwin bash. We assume that all incoming paths are in Windows normpath'd form, so they need to be converted to posix style for the part of the command line that's passed to bash. We also have to do some Visual Studio macro emulation here because various rules use magic VS names for things. Also note that rules that contain ninja variables cannot be fixed here (for example ${source}), so the outer generator needs to make sure that the paths that are written out are in posix style, if the command line will be used here.
Build a command line that runs args via cygwin bash. We assume that all incoming paths are in Windows normpath'd form, so they need to be converted to posix style for the part of the command line that's passed to bash. We also have to do some Visual Studio macro emulation here because various rules use magic VS names for things. Also note that rules that contain ninja variables cannot be fixed here (for example ${source}), so the outer generator needs to make sure that the paths that are written out are in posix style, if the command line will be used here.
def BuildCygwinBashCommandLine(self, args, path_to_base):
  """Build a command line that runs args via cygwin bash. We assume that all
  incoming paths are in Windows normpath'd form, so they need to be converted
  to posix style for the part of the command line that's passed to bash. We
  also have to do some Visual Studio macro emulation here because various
  rules use magic VS names for things. Also note that rules that contain
  ninja variables cannot be fixed here (for example ${source}), so the outer
  generator needs to make sure that the paths that are written out are in
  posix style, if the command line will be used here."""
  cygwin_dir = os.path.normpath(
      os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
  cd = ('cd %s' % path_to_base).replace('\\', '/')
  args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
  args = ["'%s'" % a.replace("'", "'\\''") for a in args]
  bash_cmd = ' '.join(args)
  cmd = (
      'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
      'bash -c "%s ; %s"' % (cd, bash_cmd))
  return cmd
[ "def", "BuildCygwinBashCommandLine", "(", "self", ",", "args", ",", "path_to_base", ")", ":", "cygwin_dir", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "path_to_base", ",", "self", ".", "msvs_cygwin_dirs", "[", "0", "]", ")", ")", "cd", "=", "(", "'cd %s'", "%", "path_to_base", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "args", "=", "[", "a", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ".", "replace", "(", "'\"'", ",", "'\\\\\"'", ")", "for", "a", "in", "args", "]", "args", "=", "[", "\"'%s'\"", "%", "a", ".", "replace", "(", "\"'\"", ",", "\"'\\\\''\"", ")", "for", "a", "in", "args", "]", "bash_cmd", "=", "' '", ".", "join", "(", "args", ")", "cmd", "=", "(", "'call \"%s\\\\setup_env.bat\" && set CYGWIN=nontsec && '", "%", "cygwin_dir", "+", "'bash -c \"%s ; %s\"'", "%", "(", "cd", ",", "bash_cmd", ")", ")", "return", "cmd" ]
[ 790, 2 ]
[ 808, 14 ]
python
en
['en', 'en', 'en']
True
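A standalone sketch of the quoting pipeline in BuildCygwinBashCommandLine, using a hypothetical argument list: backslashes become forward slashes, double quotes are escaped for the outer bash -c string, and each argument is single-quoted with any embedded single quotes spliced out.

# Hypothetical args; the transforms are the ones from the method above.
args = ['copy', 'src\\file "x".txt', 'dest']
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
print(' '.join(args))
# 'copy' 'src/file \"x\".txt' 'dest'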
MsvsSettings.IsRuleRunUnderCygwin
(self, rule)
Determine if an action should be run under cygwin. If the variable is unset, or set to 1 we use cygwin.
Determine if an action should be run under cygwin. If the variable is unset, or set to 1 we use cygwin.
def IsRuleRunUnderCygwin(self, rule):
  """Determine if an action should be run under cygwin. If the variable is
  unset, or set to 1 we use cygwin."""
  return int(rule.get('msvs_cygwin_shell',
                      self.spec.get('msvs_cygwin_shell', 1))) != 0
[ "def", "IsRuleRunUnderCygwin", "(", "self", ",", "rule", ")", ":", "return", "int", "(", "rule", ".", "get", "(", "'msvs_cygwin_shell'", ",", "self", ".", "spec", ".", "get", "(", "'msvs_cygwin_shell'", ",", "1", ")", ")", ")", "!=", "0" ]
[ 810, 2 ]
[ 814, 68 ]
python
en
['en', 'en', 'en']
True
MsvsSettings._HasExplicitRuleForExtension
(self, spec, extension)
Determine if there's an explicit rule for a particular extension.
Determine if there's an explicit rule for a particular extension.
def _HasExplicitRuleForExtension(self, spec, extension):
  """Determine if there's an explicit rule for a particular extension."""
  for rule in spec.get('rules', []):
    if rule['extension'] == extension:
      return True
  return False
[ "def", "_HasExplicitRuleForExtension", "(", "self", ",", "spec", ",", "extension", ")", ":", "for", "rule", "in", "spec", ".", "get", "(", "'rules'", ",", "[", "]", ")", ":", "if", "rule", "[", "'extension'", "]", "==", "extension", ":", "return", "True", "return", "False" ]
[ 816, 2 ]
[ 821, 16 ]
python
en
['en', 'en', 'en']
True
MsvsSettings._HasExplicitIdlActions
(self, spec)
Determine if an action should not run midl for .idl files.
Determine if an action should not run midl for .idl files.
def _HasExplicitIdlActions(self, spec):
  """Determine if an action should not run midl for .idl files."""
  return any([action.get('explicit_idl_action', 0)
              for action in spec.get('actions', [])])
[ "def", "_HasExplicitIdlActions", "(", "self", ",", "spec", ")", ":", "return", "any", "(", "[", "action", ".", "get", "(", "'explicit_idl_action'", ",", "0", ")", "for", "action", "in", "spec", ".", "get", "(", "'actions'", ",", "[", "]", ")", "]", ")" ]
[ 823, 2 ]
[ 826, 55 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.HasExplicitIdlRulesOrActions
(self, spec)
Determine if there's an explicit rule or action for idl files. When there isn't we need to generate implicit rules to build MIDL .idl files.
Determine if there's an explicit rule or action for idl files. When there isn't we need to generate implicit rules to build MIDL .idl files.
def HasExplicitIdlRulesOrActions(self, spec):
  """Determine if there's an explicit rule or action for idl files. When
  there isn't we need to generate implicit rules to build MIDL .idl files."""
  return (self._HasExplicitRuleForExtension(spec, 'idl') or
          self._HasExplicitIdlActions(spec))
[ "def", "HasExplicitIdlRulesOrActions", "(", "self", ",", "spec", ")", ":", "return", "(", "self", ".", "_HasExplicitRuleForExtension", "(", "spec", ",", "'idl'", ")", "or", "self", ".", "_HasExplicitIdlActions", "(", "spec", ")", ")" ]
[ 828, 2 ]
[ 832, 46 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.HasExplicitAsmRules
(self, spec)
Determine if there's an explicit rule for asm files. When there isn't we need to generate implicit rules to assemble .asm files.
Determine if there's an explicit rule for asm files. When there isn't we need to generate implicit rules to assemble .asm files.
def HasExplicitAsmRules(self, spec):
  """Determine if there's an explicit rule for asm files. When there isn't we
  need to generate implicit rules to assemble .asm files."""
  return self._HasExplicitRuleForExtension(spec, 'asm')
[ "def", "HasExplicitAsmRules", "(", "self", ",", "spec", ")", ":", "return", "self", ".", "_HasExplicitRuleForExtension", "(", "spec", ",", "'asm'", ")" ]
[ 834, 2 ]
[ 837, 57 ]
python
en
['en', 'en', 'en']
True
MsvsSettings.GetIdlBuildData
(self, source, config)
Determine the implicit outputs for an idl file. Returns output directory, outputs, and variables and flags that are required.
Determine the implicit outputs for an idl file. Returns output directory, outputs, and variables and flags that are required.
def GetIdlBuildData(self, source, config):
  """Determine the implicit outputs for an idl file. Returns output
  directory, outputs, and variables and flags that are required."""
  config = self._TargetConfig(config)
  midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
  def midl(name, default=None):
    return self.ConvertVSMacros(midl_get(name, default=default),
                                config=config)
  tlb = midl('TypeLibraryName', default='${root}.tlb')
  header = midl('HeaderFileName', default='${root}.h')
  dlldata = midl('DLLDataFileName', default='dlldata.c')
  iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
  proxy = midl('ProxyFileName', default='${root}_p.c')
  # Note that .tlb is not included in the outputs as it is not always
  # generated depending on the content of the input idl file.
  outdir = midl('OutputDirectory', default='')
  output = [header, dlldata, iid, proxy]
  variables = [('tlb', tlb),
               ('h', header),
               ('dlldata', dlldata),
               ('iid', iid),
               ('proxy', proxy)]
  # TODO(scottmg): Are there configuration settings to set these flags?
  target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
  flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
  return outdir, output, variables, flags
[ "def", "GetIdlBuildData", "(", "self", ",", "source", ",", "config", ")", ":", "config", "=", "self", ".", "_TargetConfig", "(", "config", ")", "midl_get", "=", "self", ".", "_GetWrapper", "(", "self", ",", "self", ".", "msvs_settings", "[", "config", "]", ",", "'VCMIDLTool'", ")", "def", "midl", "(", "name", ",", "default", "=", "None", ")", ":", "return", "self", ".", "ConvertVSMacros", "(", "midl_get", "(", "name", ",", "default", "=", "default", ")", ",", "config", "=", "config", ")", "tlb", "=", "midl", "(", "'TypeLibraryName'", ",", "default", "=", "'${root}.tlb'", ")", "header", "=", "midl", "(", "'HeaderFileName'", ",", "default", "=", "'${root}.h'", ")", "dlldata", "=", "midl", "(", "'DLLDataFileName'", ",", "default", "=", "'dlldata.c'", ")", "iid", "=", "midl", "(", "'InterfaceIdentifierFileName'", ",", "default", "=", "'${root}_i.c'", ")", "proxy", "=", "midl", "(", "'ProxyFileName'", ",", "default", "=", "'${root}_p.c'", ")", "# Note that .tlb is not included in the outputs as it is not always", "# generated depending on the content of the input idl file.", "outdir", "=", "midl", "(", "'OutputDirectory'", ",", "default", "=", "''", ")", "output", "=", "[", "header", ",", "dlldata", ",", "iid", ",", "proxy", "]", "variables", "=", "[", "(", "'tlb'", ",", "tlb", ")", ",", "(", "'h'", ",", "header", ")", ",", "(", "'dlldata'", ",", "dlldata", ")", ",", "(", "'iid'", ",", "iid", ")", ",", "(", "'proxy'", ",", "proxy", ")", "]", "# TODO(scottmg): Are there configuration settings to set these flags?", "target_platform", "=", "'win32'", "if", "self", ".", "GetArch", "(", "config", ")", "==", "'x86'", "else", "'x64'", "flags", "=", "[", "'/char'", ",", "'signed'", ",", "'/env'", ",", "target_platform", ",", "'/Oicf'", "]", "return", "outdir", ",", "output", ",", "variables", ",", "flags" ]
[ 839, 2 ]
[ 864, 43 ]
python
en
['en', 'en', 'en']
True
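For orientation, a sketch of the fallback values GetIdlBuildData uses when no VCMIDLTool settings are present. ${root} is a VS macro that ConvertVSMacros later replaces with the idl file's basename (that expansion is not shown here), and the example arch value is hypothetical.

# Defaults used when the project file specifies nothing for VCMIDLTool.
defaults = {
    'TypeLibraryName': '${root}.tlb',   # not in outputs: only produced when
                                        # the idl file declares a library
    'HeaderFileName': '${root}.h',
    'DLLDataFileName': 'dlldata.c',
    'InterfaceIdentifierFileName': '${root}_i.c',
    'ProxyFileName': '${root}_p.c',
}
arch = 'x86'  # hypothetical; comes from self.GetArch(config) above
flags = ['/char', 'signed', '/env', 'win32' if arch == 'x86' else 'x64', '/Oicf']
print(flags)  # ['/char', 'signed', '/env', 'win32', '/Oicf']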
PrecompiledHeader._PchHeader
(self)
Get the header that will appear in an #include line for all source files.
Get the header that will appear in an #include line for all source files.
def _PchHeader(self):
  """Get the header that will appear in an #include line for all source
  files."""
  return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
[ "def", "_PchHeader", "(", "self", ")", ":", "return", "os", ".", "path", ".", "split", "(", "self", ".", "settings", ".", "msvs_precompiled_header", "[", "self", ".", "config", "]", ")", "[", "1", "]" ]
[ 887, 2 ]
[ 890, 79 ]
python
en
['en', 'en', 'en']
True
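The basename extraction is the whole trick here: os.path.split keeps only the final path component, so a project-relative setting such as 'src/precompiled.h' (a hypothetical example) becomes the bare name used on the #include line.

import os

# [1] of the (head, tail) pair is the basename.
print(os.path.split('src/precompiled.h')[1])  # precompiled.h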
PrecompiledHeader.GetObjDependencies
(self, sources, objs, arch)
Given a list of source files and the corresponding object files, returns a list of the pch files that should be depended upon. The additional wrapping in the return value is for interface compatibility with make.py on Mac, and xcode_emulation.py.
Given a list of source files and the corresponding object files, returns a list of the pch files that should be depended upon. The additional wrapping in the return value is for interface compatibility with make.py on Mac, and xcode_emulation.py.
def GetObjDependencies(self, sources, objs, arch):
  """Given a list of source files and the corresponding object files, returns
  a list of the pch files that should be depended upon. The additional
  wrapping in the return value is for interface compatibility with make.py
  on Mac, and xcode_emulation.py."""
  assert arch is None
  if not self._PchHeader():
    return []
  pch_ext = os.path.splitext(self.pch_source)[1]
  for source in sources:
    if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
      return [(None, None, self.output_obj)]
  return []
[ "def", "GetObjDependencies", "(", "self", ",", "sources", ",", "objs", ",", "arch", ")", ":", "assert", "arch", "is", "None", "if", "not", "self", ".", "_PchHeader", "(", ")", ":", "return", "[", "]", "pch_ext", "=", "os", ".", "path", ".", "splitext", "(", "self", ".", "pch_source", ")", "[", "1", "]", "for", "source", "in", "sources", ":", "if", "_LanguageMatchesForPch", "(", "os", ".", "path", ".", "splitext", "(", "source", ")", "[", "1", "]", ",", "pch_ext", ")", ":", "return", "[", "(", "None", ",", "None", ",", "self", ".", "output_obj", ")", "]", "return", "[", "]" ]
[ 892, 2 ]
[ 904, 13 ]
python
en
['en', 'en', 'en']
True
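GetObjDependencies calls _LanguageMatchesForPch, a module-level helper that is not part of this excerpt. Below is a sketch consistent with how it is used here, though the real gyp helper may differ in detail: a pch built from a C++ source should only cover C++ translation units, and likewise for C.

# Sketch of the helper assumed by GetObjDependencies (not from this excerpt).
def _LanguageMatchesForPch(source_ext, pch_source_ext):
    c_exts = ('.c',)
    cc_exts = ('.cc', '.cxx', '.cpp')
    return ((source_ext in c_exts and pch_source_ext in c_exts) or
            (source_ext in cc_exts and pch_source_ext in cc_exts))

print(_LanguageMatchesForPch('.cpp', '.cc'))  # True: both C++
print(_LanguageMatchesForPch('.c', '.cc'))    # False: C source, C++ pch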
PrecompiledHeader.GetPchBuildCommands
(self, arch)
Not used on Windows as there are no additional build steps required (instead, existing steps are modified in GetFlagsModifications below).
Not used on Windows as there are no additional build steps required (instead, existing steps are modified in GetFlagsModifications below).
def GetPchBuildCommands(self, arch):
  """Not used on Windows as there are no additional build steps required
  (instead, existing steps are modified in GetFlagsModifications below)."""
  return []
[ "def", "GetPchBuildCommands", "(", "self", ",", "arch", ")", ":", "return", "[", "]" ]
[ 906, 2 ]
[ 909, 13 ]
python
en
['en', 'en', 'en']
True
PrecompiledHeader.GetFlagsModifications
(self, input, output, implicit, command, cflags_c, cflags_cc, expand_special)
Get the modified cflags and implicit dependencies that should be used for the pch compilation step.
Get the modified cflags and implicit dependencies that should be used for the pch compilation step.
def GetFlagsModifications(self, input, output, implicit, command,
                          cflags_c, cflags_cc, expand_special):
  """Get the modified cflags and implicit dependencies that should be used
  for the pch compilation step."""
  if input == self.pch_source:
    pch_output = ['/Yc' + self._PchHeader()]
    if command == 'cxx':
      return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
              self.output_obj, [])
    elif command == 'cc':
      return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
              self.output_obj, [])
  return [], output, implicit
[ "def", "GetFlagsModifications", "(", "self", ",", "input", ",", "output", ",", "implicit", ",", "command", ",", "cflags_c", ",", "cflags_cc", ",", "expand_special", ")", ":", "if", "input", "==", "self", ".", "pch_source", ":", "pch_output", "=", "[", "'/Yc'", "+", "self", ".", "_PchHeader", "(", ")", "]", "if", "command", "==", "'cxx'", ":", "return", "(", "[", "(", "'cflags_cc'", ",", "map", "(", "expand_special", ",", "cflags_cc", "+", "pch_output", ")", ")", "]", ",", "self", ".", "output_obj", ",", "[", "]", ")", "elif", "command", "==", "'cc'", ":", "return", "(", "[", "(", "'cflags_c'", ",", "map", "(", "expand_special", ",", "cflags_c", "+", "pch_output", ")", ")", "]", ",", "self", ".", "output_obj", ",", "[", "]", ")", "return", "[", "]", ",", "output", ",", "implicit" ]
[ 911, 2 ]
[ 923, 31 ]
python
en
['en', 'en', 'en']
True
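A sketch with hypothetical data of the flag rewrite GetFlagsModifications performs for the pch source under the 'cxx' rule; the identity lambda stands in for the generator's expand_special hook.

# When the input is the designated pch source, /Yc<header> is appended to
# the C++ flags and the pch object becomes the build step's output.
expand_special = lambda f: f          # stand-in for the generator hook
cflags_cc = ['/W3', '/EHsc']          # hypothetical baseline flags
pch_output = ['/Yc' + 'precompiled.h']
print([('cflags_cc', list(map(expand_special, cflags_cc + pch_output)))])
# [('cflags_cc', ['/W3', '/EHsc', '/Ycprecompiled.h'])]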
HttpLexer.get_tokens_unprocessed
(self, text, stack=('root',))
Reset the content-type state.
Reset the content-type state.
def get_tokens_unprocessed(self, text, stack=('root',)):
    """Reset the content-type state."""
    self.content_type = None
    return RegexLexer.get_tokens_unprocessed(self, text, stack)
[ "def", "get_tokens_unprocessed", "(", "self", ",", "text", ",", "stack", "=", "(", "'root'", ",", ")", ")", ":", "self", ".", "content_type", "=", "None", "return", "RegexLexer", ".", "get_tokens_unprocessed", "(", "self", ",", "text", ",", "stack", ")" ]
[ 124, 4 ]
[ 127, 67 ]
python
en
['en', 'en', 'en']
True
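A minimal usage sketch, assuming Pygments is installed: because get_tokens_unprocessed resets content_type on every call, a single HttpLexer instance can tokenize unrelated messages back to back.

from pygments.lexers.textfmts import HttpLexer

lexer = HttpLexer()
request = 'GET /index.html HTTP/1.1\nHost: example.com\n\n'
for token_type, value in lexer.get_tokens(request):
    print(token_type, repr(value))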
test_suite_edit_without_suite_name_raises_error
( mock_emit, monkeypatch, empty_data_context_stats_enabled, )
This is really only testing click missing arguments
This is really only testing click missing arguments
def test_suite_edit_without_suite_name_raises_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(cli, "--v3-api suite edit", catch_exceptions=False)
    assert result.exit_code == 2
    assert (
        'Error: Missing argument "EXPECTATION_SUITE".' in result.stderr
        or "Error: Missing argument 'EXPECTATION_SUITE'." in result.stderr
    )

    assert mock_emit.call_count == 2
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    ]
[ "def", "test_suite_edit_without_suite_name_raises_error", "(", "mock_emit", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "\"--v3-api suite edit\"", ",", "catch_exceptions", "=", "False", ")", "assert", "result", ".", "exit_code", "==", "2", "assert", "(", "'Error: Missing argument \"EXPECTATION_SUITE\".'", "in", "result", ".", "stderr", "or", "\"Error: Missing argument 'EXPECTATION_SUITE'.\"", "in", "result", ".", "stderr", ")", "assert", "mock_emit", ".", "call_count", "==", "2", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]" ]
[ 786, 0 ]
[ 816, 5 ]
python
en
['en', 'en', 'en']
True
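A toy reconstruction of the behavior this test exercises, using a hypothetical command rather than the great_expectations CLI: Click exits with status 2 and writes a "Missing argument" usage error to stderr when a required argument is absent. mix_stderr=False (available in older Click releases, removed in Click 8.2) keeps stderr separate so it can be asserted on directly.

import click
from click.testing import CliRunner

# Hypothetical command standing in for `great_expectations suite edit`.
@click.command()
@click.argument("expectation_suite")
def edit(expectation_suite):
    click.echo(f"editing {expectation_suite}")

runner = CliRunner(mix_stderr=False)
result = runner.invoke(edit, [], catch_exceptions=False)
assert result.exit_code == 2
assert "Missing argument" in result.stderr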
test_suite_edit_datasource_and_batch_request_error
( mock_emit, monkeypatch, empty_data_context_stats_enabled, )
This is really only testing click missing arguments
This is really only testing click missing arguments
def test_suite_edit_datasource_and_batch_request_error(
    mock_emit,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """This is really only testing click missing arguments"""
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert (
        "Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used."
        in stdout
    )

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]
[ "def", "test_suite_edit_datasource_and_batch_request_error", "(", "mock_emit", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "# noinspection PyUnusedLocal", "suite", ":", "ExpectationSuite", "=", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "(", "context", ".", "list_expectation_suites", "(", ")", "[", "0", "]", ".", "expectation_suite_name", "==", "expectation_suite_name", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api suite edit {expectation_suite_name} --datasource-name some_datasource_name --batch-request some_file.json --interactive\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "1", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "(", "\"Only one of --datasource-name DATASOURCE_NAME and --batch-request <path to JSON file> options can be used.\"", "in", "stdout", ")", "assert", "mock_emit", ".", "call_count", "==", "3", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]" ]
[ 822, 0 ]
[ 875, 5 ]
python
en
['en', 'en', 'en']
True
test_suite_edit_with_non_existent_suite_name_raises_error
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, )
The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter
The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter
def test_suite_edit_with_non_existent_suite_name_raises_error(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    assert not context.list_expectation_suites()

    monkeypatch.chdir(os.path.dirname(context.root_directory))

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit not_a_real_suite",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert "Could not find a suite named `not_a_real_suite`." in stdout
    assert "by running `great_expectations suite list`" in stdout

    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
[ "def", "test_suite_edit_with_non_existent_suite_name_raises_error", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "assert", "not", "context", ".", "list_expectation_suites", "(", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api suite edit not_a_real_suite\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "1", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"Could not find a suite named `not_a_real_suite`.\"", "in", "stdout", "assert", "\"by running `great_expectations suite list`\"", "in", "stdout", "assert", "mock_subprocess", ".", "call_count", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "3", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 883, 0 ]
[ 943, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_edit_with_non_existent_datasource_shows_helpful_error_message
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, )
The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter
The command should: - exit with a clear error message - NOT open Data Docs - NOT open jupyter
def test_suite_edit_with_non_existent_datasource_shows_helpful_error_message(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    The command should:
    - exit with a clear error message
    - NOT open Data Docs
    - NOT open jupyter
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    expectation_suite_name: str = "test_suite_name"

    # noinspection PyUnusedLocal
    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        f"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.stdout
    assert (
        "Unable to load datasource `not_real` -- no configuration found or invalid configuration."
        in stdout
    )

    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {"api_version": "v3"},
                "success": False,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
[ "def", "test_suite_edit_with_non_existent_datasource_shows_helpful_error_message", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "# noinspection PyUnusedLocal", "suite", ":", "ExpectationSuite", "=", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "(", "context", ".", "list_expectation_suites", "(", ")", "[", "0", "]", ".", "expectation_suite_name", "==", "expectation_suite_name", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "f\"--v3-api suite edit {expectation_suite_name} --interactive --datasource-name not_real\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "1", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "(", "\"Unable to load datasource `not_real` -- no configuration found or invalid configuration.\"", "in", "stdout", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "3", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 951, 0 ]
[ 1021, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is just a setup. We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. The command should: - NOT open Data Docs - open jupyter
Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch.
def test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify
    batch_request when it is called without the optional command-line
    arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite
    our test will edit -- this step is just a setup.

    We then call the "suite edit" command without any optional command-line
    arguments. This means that the command will help us specify batch_request
    interactively.

    The data context has two datasources -- we choose one of them. After
    that, we select a data connector and, finally, select a data asset from
    the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))

    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")

    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }

    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "new",
            "--expectation-suite",
            f"{expectation_suite_name}",
            "--interactive",
            "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        [
            "--v3-api",
            "suite",
            "edit",
            f"{expectation_suite_name}",
            "--interactive",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.new.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.new.end",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event": "cli.suite.edit.begin",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
[ "def", "test_suite_edit_multiple_datasources_with_no_additional_args_without_citations_runs_notebook_opens_jupyter", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1911\"", ",", "\"limit\"", ":", "1000", ",", "}", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"2\\n1\\n1\\n\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", "assert", "\"Select a datasource\"", "in", "stdout", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "mock_webbrowser", ".", "reset_mock", "(", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "mock_subprocess", ".", "reset_mock", "(", ")", "# remove the citations from the suite", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "isinstance", "(", "suite", ",", "ExpectationSuite", ")", "suite", ".", "meta", ".", "pop", "(", "\"citations\"", ",", "None", ")", "context", ".", "save_expectation_suite", "(", "expectation_suite", "=", "suite", ")", "# Actual testing really starts here", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"edit\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "]", ",", "input", "=", "\"2\\n1\\n1\\n\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", "assert", "\"Select a datasource\"", 
"in", "stdout", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_notebook_path", ")", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "assert", "mock_subprocess", ".", "call_count", "==", "1", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "10", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", 
"\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", ",", "\"api_version\"", ":", "\"v3\"", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 1029, 0 ]
[ 1249, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup. We then call the "suite edit" command without any optional command-line-arguments. The command should: - NOT open Data Docs - NOT open jupyter
Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch.
def test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is just a setup.

    We then call the "suite edit" command without any optional command-line
    arguments.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
        "limit": 1000,
    }
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    # Setup: create the suite (and its stored citations) with "suite new".
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--expectation-suite", f"{expectation_suite_name}",
            "--interactive", "--no-jupyter",
        ],
        input="2\n1\n1\n\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        ["--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive"],
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
[ "def", "test_suite_edit_multiple_datasources_with_no_additional_args_with_citations_runs_notebook_opens_jupyter", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_pandas_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1911\"", ",", "\"limit\"", ":", "1000", ",", "}", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"2\\n1\\n1\\n\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", "assert", "\"Select a datasource\"", "in", "stdout", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "mock_webbrowser", ".", "reset_mock", "(", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "mock_subprocess", ".", "reset_mock", "(", ")", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "isinstance", "(", "suite", ",", "ExpectationSuite", ")", "# Actual testing really starts here", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"edit\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "]", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "not", "in", "stdout", "assert", "\"Select a datasource\"", "not", "in", "stdout", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", 
"expected_notebook_path", ")", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "assert", "mock_subprocess", ".", "call_count", "==", "1", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "8", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", ",", "\"api_version\"", ":", "\"v3\"", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", 
"assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 1257, 0 ]
[ 1451, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line arguments. This means that the command will help us specify batch_request interactively. The data context has two datasources -- we choose one of them. After that, we select a data connector and, finally, select a data asset from the list. The command should: - NOT open Data Docs - open jupyter
Here we verify that the "suite edit" command helps the user to specify batch_request when it is called without the optional command-line arguments that specify the batch.
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command helps the user to specify
    batch_request when it is called without the optional command-line arguments
    that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is just a setup (we use an SQL datasource for
    this test).

    We then call the "suite edit" command without any optional command-line
    arguments. This means that the command will help us specify batch_request
    interactively. The data context has two datasources -- we choose one of
    them. After that, we select a data connector and, finally, select a data
    asset from the list.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--expectation-suite", f"{expectation_suite_name}",
            "--interactive", "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    # remove the citations from the suite
    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)
    suite.meta.pop("citations", None)
    context.save_expectation_suite(expectation_suite=suite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        ["--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive"],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 10
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
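The interactive answers for the SQL flow are piped in as input="3\n2\ny\n2\n". The prompt-by-prompt reading below is inferred from the prompts asserted in stdout and from the docstring, so treat the mapping as an assumption rather than a transcript of the CLI:

# Assumed mapping of each piped stdin line to an interactive prompt:
interactive_answers: str = "\n".join(
    [
        "3",  # "Select a datasource": pick the SQLite datasource from the menu
        "2",  # pick a data connector (here, "whole_table")
        "y",  # confirm listing the available data assets
        "2",  # pick the "titanic" table as the data asset
    ]
) + "\n"
assert interactive_answers == "3\n2\ny\n2\n"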
[ "def", "test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_without_citations_runs_notebook_opens_jupyter", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_sqlite_db_datasource\"", ",", "\"data_connector_name\"", ":", "\"whole_table\"", ",", "\"data_asset_name\"", ":", "\"titanic\"", ",", "\"limit\"", ":", "1000", ",", "}", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"3\\n2\\ny\\n2\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", "assert", "\"Select a datasource\"", "in", "stdout", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "mock_webbrowser", ".", "reset_mock", "(", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "mock_subprocess", ".", "reset_mock", "(", ")", "# remove the citations from the suite", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "isinstance", "(", "suite", ",", "ExpectationSuite", ")", "suite", ".", "meta", ".", "pop", "(", "\"citations\"", ",", "None", ")", "context", ".", "save_expectation_suite", "(", "expectation_suite", "=", "suite", ")", "# Actual testing really starts here", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"edit\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "]", ",", "input", "=", "\"3\\n2\\ny\\n2\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", 
"assert", "\"Select a datasource\"", "in", "stdout", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_notebook_path", ")", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "assert", "mock_subprocess", ".", "call_count", "==", "1", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "10", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", 
"call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", ",", "\"api_version\"", ":", "\"v3\"", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 1459, 0 ]
[ 1679, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled, )
Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch. First, we call the "suite new" command to create the expectation suite our test will edit -- this step is a just a setup (we use an SQL datasource for this test). We then call the "suite edit" command without any optional command-line-arguments. The command should: - NOT open Data Docs - NOT open jupyter
Here we verify that the "suite edit" command uses the batch_request found in citations in the existing suite when it is called without the optional command-line arguments that specify the batch.
def test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    Here we verify that the "suite edit" command uses the batch_request found in
    citations in the existing suite when it is called without the optional
    command-line arguments that specify the batch.

    First, we call the "suite new" command to create the expectation suite our
    test will edit -- this step is just a setup (we use an SQL datasource for
    this test).

    We then call the "suite edit" command without any optional command-line
    arguments.

    The command should:
    - NOT open Data Docs
    - open jupyter
    """
    context: DataContext = titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"
    batch_request: dict = {
        "datasource_name": "my_sqlite_db_datasource",
        "data_connector_name": "whole_table",
        "data_asset_name": "titanic",
        "limit": 1000,
    }
    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--expectation-suite", f"{expectation_suite_name}",
            "--interactive", "--no-jupyter",
        ],
        input="3\n2\ny\n2\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout: str = result.stdout
    assert "A batch of data is required to edit the suite" in stdout
    assert "Select a datasource" in stdout
    assert mock_webbrowser.call_count == 0
    mock_webbrowser.reset_mock()
    assert mock_subprocess.call_count == 0
    mock_subprocess.reset_mock()

    context = DataContext(context_root_dir=project_dir)
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert isinstance(suite, ExpectationSuite)

    # Actual testing really starts here
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        ["--v3-api", "suite", "edit", f"{expectation_suite_name}", "--interactive"],
        catch_exceptions=False,
    )
    assert result.exit_code == 0
    stdout = result.stdout
    assert "A batch of data is required to edit the suite" not in stdout
    assert "Select a datasource" not in stdout

    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)
    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)

    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )
    assert mock_subprocess.call_count == 1
    assert mock_webbrowser.call_count == 0

    assert mock_emit.call_count == 8
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.edit.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event": "cli.suite.edit.end",
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe",
                    "api_version": "v3",
                },
                "success": True,
            }
        ),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
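The only setup difference between this test and the "without citations" variant above is whether the stored citations survive until the edit. The without-citations tests strip them with the two lines below (copied from that test), which is what forces the interactive batch prompts to reappear there:

# Dropping the stored citations before editing forces "suite edit" back into
# the interactive batch-selection flow; leaving them in place (as in this
# test) lets the command run without any piped input.
suite.meta.pop("citations", None)
context.save_expectation_suite(expectation_suite=suite)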
[ "def", "test_suite_edit_multiple_datasources_with_sql_with_no_additional_args_with_citations_runs_notebook_opens_jupyter", "(", "mock_webbrowser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_v013_multi_datasource_multi_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_sqlite_db_datasource\"", ",", "\"data_connector_name\"", ":", "\"whole_table\"", ",", "\"data_asset_name\"", ":", "\"titanic\"", ",", "\"limit\"", ":", "1000", ",", "}", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"3\\n2\\ny\\n2\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "in", "stdout", "assert", "\"Select a datasource\"", "in", "stdout", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "mock_webbrowser", ".", "reset_mock", "(", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "mock_subprocess", ".", "reset_mock", "(", ")", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "isinstance", "(", "suite", ",", "ExpectationSuite", ")", "# Actual testing really starts here", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"edit\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "]", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", "=", "result", ".", "stdout", "assert", "\"A batch of data is required to edit the suite\"", "not", "in", "stdout", "assert", "\"Select a datasource\"", "not", "in", "stdout", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", 
".", "isfile", "(", "expected_notebook_path", ")", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "assert", "mock_subprocess", ".", "call_count", "==", "1", "assert", "mock_webbrowser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "8", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.edit.end\"", ",", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", ",", "\"api_version\"", ":", "\"v3\"", ",", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "]", 
"assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 1687, 0 ]
[ 1881, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_new_profile_on_context_with_no_datasource_raises_error
( mock_subprocess, mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled, )
We call the "suite new --profile" command on a context with no datasource The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message
We call the "suite new --profile" command on a context with no datasource
def test_suite_new_profile_on_context_with_no_datasource_raises_error(
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    empty_data_context_stats_enabled,
):
    """
    We call the "suite new --profile" command on a context with no datasource.

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a "suite new" failure message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    expectation_suite_name: str = "test_suite_name"

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--interactive", "--profile",
            "--expectation-suite", f"{expectation_suite_name}",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        "No datasources found in the context. To add a datasource, run `great_expectations datasource new`"
        in stdout
    )

    assert mock_subprocess.call_count == 0
    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False}),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
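The error path this test pins down can be paraphrased as a simple guard. The sketch below is an assumed reconstruction, not the CLI's actual source: the message text is copied from the assertion above, while the guard shape and the require_datasource name are invented for illustration.

import sys

def require_datasource(context) -> None:
    # Assumed paraphrase of the check "suite new --profile" trips over when the
    # project has no datasources: print the hint and exit with code 1.
    if not context.list_datasources():
        print(
            "No datasources found in the context. To add a datasource, run "
            "`great_expectations datasource new`"
        )
        sys.exit(1)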
[ "def", "test_suite_new_profile_on_context_with_no_datasource_raises_error", "(", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--interactive\"", ",", "\"--profile\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "]", ",", "input", "=", "\"\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "1", "stdout", ":", "str", "=", "result", ".", "output", "assert", "(", "\"No datasources found in the context. To add a datasource, run `great_expectations datasource new`\"", "in", "stdout", ")", "assert", "mock_subprocess", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "3", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 2502, 0 ]
[ 2571, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_new_profile_on_existing_suite_raises_error
( mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled )
We call the "suite new --profile" command with an existing suite The command should: - exit with a clear error message - send a DataContext init success message - send a new fail message
We call the "suite new --profile" command with an existing suite
def test_suite_new_profile_on_existing_suite_raises_error(
    mock_emit, caplog, monkeypatch, empty_data_context_stats_enabled
):
    """
    We call the "suite new --profile" command with an existing suite.

    The command should:
    - exit with a clear error message
    - send a DataContext init success message
    - send a "suite new" failure message
    """
    context: DataContext = empty_data_context_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"

    suite: ExpectationSuite = context.create_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    context.save_expectation_suite(expectation_suite=suite)
    assert (
        context.list_expectation_suites()[0].expectation_suite_name
        == expectation_suite_name
    )

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    batch_request_file_path: str = os.path.join(uncommitted_dir, "batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--expectation-suite", f"{expectation_suite_name}",
            "--interactive",
            "--batch-request", f"{batch_request_file_path}",
            "--profile", "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 1

    stdout: str = result.output
    assert (
        f"An expectation suite named `{expectation_suite_name}` already exists."
        in stdout
    )
    assert (
        f"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`."
        in stdout
    )

    assert mock_emit.call_count == 3
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": False}),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
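Both --profile tests supply the batch non-interactively through a JSON file handed to --batch-request. The round trip is plain json.dump plus the flag; the standalone sketch below mirrors the setup in the test (the relative paths here are illustrative):

import json
import os

# Write the batch_request to disk, then point the CLI at it, e.g.:
#   great_expectations --v3-api suite new --expectation-suite test_suite_name \
#       --interactive --batch-request uncommitted/batch_request.json --profile --no-jupyter
batch_request: dict = {
    "datasource_name": "my_datasource",
    "data_connector_name": "my_basic_data_connector",
    "data_asset_name": "Titanic_1911",
}
os.makedirs("uncommitted", exist_ok=True)
with open(os.path.join("uncommitted", "batch_request.json"), "w") as json_file:
    json.dump(batch_request, json_file)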
[ "def", "test_suite_new_profile_on_existing_suite_raises_error", "(", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "empty_data_context_stats_enabled", ")", ":", "context", ":", "DataContext", "=", "empty_data_context_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "suite", ":", "ExpectationSuite", "=", "context", ".", "create_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "context", ".", "save_expectation_suite", "(", "expectation_suite", "=", "suite", ")", "assert", "(", "context", ".", "list_expectation_suites", "(", ")", "[", "0", "]", ".", "expectation_suite_name", "==", "expectation_suite_name", ")", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1911\"", ",", "}", "batch_request_file_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"batch_request.json\"", ")", "with", "open", "(", "batch_request_file_path", ",", "\"w\"", ")", "as", "json_file", ":", "json", ".", "dump", "(", "batch_request", ",", "json_file", ")", "mock_emit", ".", "reset_mock", "(", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--batch-request\"", ",", "f\"{batch_request_file_path}\"", ",", "\"--profile\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "1", "stdout", ":", "str", "=", "result", ".", "output", "assert", "(", "f\"An expectation suite named `{expectation_suite_name}` already exists.\"", "in", "stdout", ")", "assert", "(", "f\"If you intend to edit the suite please use `great_expectations suite edit {expectation_suite_name}`.\"", "in", "stdout", ")", "assert", "mock_emit", ".", "call_count", "==", "3", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "False", ",", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 2577, 0 ]
[ 2671, 5 ]
python
en
['en', 'error', 'th']
False
test_suite_new_profile_runs_notebook_no_jupyter
( mock_webbrowser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, )
We call the "suite new --profile" command The command should: - create a new notebook - send a DataContext init success message - send a new success message
We call the "suite new --profile" command
def test_suite_new_profile_runs_notebook_no_jupyter(
    mock_webbrowser,
    mock_subprocess,
    mock_emit,
    caplog,
    monkeypatch,
    titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """
    We call the "suite new --profile" command.

    The command should:
    - create a new notebook
    - send a DataContext init success message
    - send a "suite new" success message
    """
    context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    project_dir: str = context.root_directory
    uncommitted_dir: str = os.path.join(project_dir, "uncommitted")
    expectation_suite_name: str = "test_suite_name"

    batch_request: dict = {
        "datasource_name": "my_datasource",
        "data_connector_name": "my_basic_data_connector",
        "data_asset_name": "Titanic_1911",
    }
    batch_request_file_path: str = os.path.join(uncommitted_dir, "batch_request.json")
    with open(batch_request_file_path, "w") as json_file:
        json.dump(batch_request, json_file)
    mock_emit.reset_mock()

    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api", "suite", "new",
            "--expectation-suite", f"{expectation_suite_name}",
            "--interactive",
            "--batch-request", f"{batch_request_file_path}",
            "--profile", "--no-jupyter",
        ],
        input="\n",
        catch_exceptions=False,
    )
    assert result.exit_code == 0

    stdout: str = result.stdout
    assert "Select a datasource" not in stdout
    assert (
        "Opening a notebook for you now to edit your expectation suite!" not in stdout
    )
    assert (
        "If you wish to avoid this you can add the `--no-jupyter` flag." not in stdout
    )

    expected_suite_path: str = os.path.join(
        project_dir, "expectations", f"{expectation_suite_name}.json"
    )
    assert os.path.isfile(expected_suite_path)
    expected_notebook_path: str = os.path.join(
        uncommitted_dir, f"edit_{expectation_suite_name}.ipynb"
    )
    assert os.path.isfile(expected_notebook_path)

    batch_request_string: str = (
        str(BatchRequest(**batch_request))
        .replace("{\n", "{\n ")
        .replace(",\n", ",\n ")
        .replace("\n}", ",\n}")
    )
    batch_request_string = fr"batch_request = {batch_request_string}"
    cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=batch_request_string,
    )
    assert len(cells_of_interest_dict) == 1
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=suite_identifier)",
    )
    assert not cells_of_interest_dict
    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string="context.open_data_docs(resource_identifier=validation_result_identifier)",
    )
    assert len(cells_of_interest_dict) == 1

    profiler_code_cell: str = f"""\
profiler = UserConfigurableProfiler(
    profile_dataset=validator,
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()"""
    profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n")

    cells_of_interest_dict = find_code_in_notebook(
        nb=load_notebook_from_path(notebook_path=expected_notebook_path),
        search_string=profiler_code_cell,
    )
    assert len(cells_of_interest_dict) == 1

    run_notebook(
        notebook_path=expected_notebook_path,
        notebook_dir=uncommitted_dir,
        string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)",
        replacement_string="",
    )

    context = DataContext(context_root_dir=project_dir)
    assert expectation_suite_name in context.list_expectation_suite_names()
    suite: ExpectationSuite = context.get_expectation_suite(
        expectation_suite_name=expectation_suite_name
    )
    assert suite.expectations == [
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_columns_to_match_ordered_list",
                "kwargs": {
                    "column_list": [
                        "Unnamed: 0",
                        "Name",
                        "PClass",
                        "Age",
                        "Sex",
                        "Survived",
                        "SexCode",
                    ]
                },
                "meta": {},
            }
        ),
        ExpectationConfiguration(
            **{
                "expectation_type": "expect_table_row_count_to_be_between",
                "kwargs": {"max_value": 1313, "min_value": 1313},
                "meta": {},
            }
        ),
    ]

    assert mock_subprocess.call_count == 0
    assert mock_webbrowser.call_count == 0
    assert mock_emit.call_count == 5
    assert mock_emit.call_args_list == [
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
        mock.call({"event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call(
            {
                "event_payload": {
                    "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe"
                },
                "event": "data_context.save_expectation_suite",
                "success": True,
            }
        ),
        mock.call({"event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True}),
        mock.call({"event_payload": {}, "event": "data_context.__init__", "success": True}),
    ]

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
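Outside the generated notebook, the profiler cell the test searches for corresponds to direct use of UserConfigurableProfiler. The sketch below copies the call from the asserted cell; the validator and ignored_columns bindings are assumed stand-ins for the earlier notebook cells that normally define them, so treat everything but the profiler call itself as illustrative:

from great_expectations.profile.user_configurable_profiler import (
    UserConfigurableProfiler,
)

# "validator" and "ignored_columns" come from earlier cells of the generated
# notebook (e.g. a context.get_validator(...) call); they are assumed here.
ignored_columns: list = []
profiler = UserConfigurableProfiler(
    profile_dataset=validator,  # a Validator for the chosen batch (assumed defined)
    excluded_expectations=None,
    ignored_columns=ignored_columns,
    not_null_only=False,
    primary_or_compound_key=False,
    semantic_types_dict=None,
    table_expectations_only=False,
    value_set_threshold="MANY",
)
suite = profiler.build_suite()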
[ "def", "test_suite_new_profile_runs_notebook_no_jupyter", "(", "mock_webbroser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1911\"", ",", "}", "batch_request_file_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"batch_request.json\"", ")", "with", "open", "(", "batch_request_file_path", ",", "\"w\"", ")", "as", "json_file", ":", "json", ".", "dump", "(", "batch_request", ",", "json_file", ")", "mock_emit", ".", "reset_mock", "(", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--batch-request\"", ",", "f\"{batch_request_file_path}\"", ",", "\"--profile\"", ",", "\"--no-jupyter\"", ",", "]", ",", "input", "=", "\"\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"Select a datasource\"", "not", "in", "stdout", "assert", "(", "\"Opening a notebook for you now to edit your expectation suite!\"", "not", "in", "stdout", ")", "assert", "(", "\"If you wish to avoid this you can add the `--no-jupyter` flag.\"", "not", "in", "stdout", ")", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_notebook_path", ")", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", 
"notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "profiler_code_cell", ":", "str", "=", "f\"\"\"\\\nprofiler = UserConfigurableProfiler(\n profile_dataset=validator,\n excluded_expectations=None,\n ignored_columns=ignored_columns,\n not_null_only=False,\n primary_or_compound_key=False,\n semantic_types_dict=None,\n table_expectations_only=False,\n value_set_threshold=\"MANY\",\n)\nsuite = profiler.build_suite()\"\"\"", "profiler_code_cell", "=", "lint_code", "(", "code", "=", "profiler_code_cell", ")", ".", "rstrip", "(", "\"\\n\"", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "profiler_code_cell", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "assert", "expectation_suite_name", "in", "context", ".", "list_expectation_suite_names", "(", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "suite", ".", "expectations", "==", "[", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_columns_to_match_ordered_list\"", ",", "\"kwargs\"", ":", "{", "\"column_list\"", ":", "[", "\"Unnamed: 0\"", ",", "\"Name\"", ",", "\"PClass\"", ",", "\"Age\"", ",", "\"Sex\"", ",", "\"Survived\"", ",", "\"SexCode\"", ",", "]", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_row_count_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"max_value\"", ":", "1313", ",", "\"min_value\"", ":", "1313", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "]", "assert", "mock_subprocess", ".", "call_count", "==", "0", "assert", "mock_webbroser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "5", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", 
":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 2679, 0 ]
[ 2880, 5 ]
python
en
['en', 'error', 'th']
False
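The notebook assertions in the test above go through repo test helpers (load_notebook_from_path, find_code_in_notebook). A standalone sketch of the same cell search using nbformat directly; the helper name find_cells_containing is ours, not the repo's:

import nbformat

def find_cells_containing(notebook_path: str, search_string: str) -> dict:
    # Read the notebook and return {cell_index: cell} for every code cell
    # whose source contains the search string, mirroring what the test
    # helpers above check for.
    nb = nbformat.read(notebook_path, as_version=4)
    return {
        idx: cell
        for idx, cell in enumerate(nb.cells)
        if cell.cell_type == "code" and search_string in cell.source
    }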
test_suite_new_profile_runs_notebook_opens_jupyter
( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, )
We call the "suite new --profile" command The command should: - create a new notebook - open the notebook in jupyter - send a DataContext init success message - send a new success message
We call the "suite new --profile" command
def test_suite_new_profile_runs_notebook_opens_jupyter( mock_webbroser, mock_subprocess, mock_emit, caplog, monkeypatch, titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled, ): """ We call the "suite new --profile" command The command should: - create a new notebook - open the notebook in jupyter - send a DataContext init success message - send a new success message """ context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled monkeypatch.chdir(os.path.dirname(context.root_directory)) project_dir: str = context.root_directory uncommitted_dir: str = os.path.join(project_dir, "uncommitted") expectation_suite_name: str = "test_suite_name" batch_request: dict = { "datasource_name": "my_datasource", "data_connector_name": "my_basic_data_connector", "data_asset_name": "Titanic_1911", } batch_request_file_path: str = os.path.join(uncommitted_dir, f"batch_request.json") with open(batch_request_file_path, "w") as json_file: json.dump(batch_request, json_file) mock_emit.reset_mock() runner: CliRunner = CliRunner(mix_stderr=False) result: Result = runner.invoke( cli, [ "--v3-api", "suite", "new", "--expectation-suite", f"{expectation_suite_name}", "--interactive", "--batch-request", f"{batch_request_file_path}", "--profile", ], input="\n", catch_exceptions=False, ) assert result.exit_code == 0 stdout: str = result.stdout assert "Select a datasource" not in stdout assert "Opening a notebook for you now to edit your expectation suite!" in stdout assert "If you wish to avoid this you can add the `--no-jupyter` flag." in stdout expected_suite_path: str = os.path.join( project_dir, "expectations", f"{expectation_suite_name}.json" ) assert os.path.isfile(expected_suite_path) expected_notebook_path: str = os.path.join( uncommitted_dir, f"edit_{expectation_suite_name}.ipynb" ) assert os.path.isfile(expected_notebook_path) batch_request_string: str = ( str(BatchRequest(**batch_request)) .replace("{\n", "{\n ") .replace(",\n", ",\n ") .replace("\n}", ",\n}") ) batch_request_string = fr"batch_request = {batch_request_string}" cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=batch_request_string, ) assert len(cells_of_interest_dict) == 1 cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=suite_identifier)", ) assert not cells_of_interest_dict cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string="context.open_data_docs(resource_identifier=validation_result_identifier)", ) assert len(cells_of_interest_dict) == 1 profiler_code_cell: str = f"""\ profiler = UserConfigurableProfiler( profile_dataset=validator, excluded_expectations=None, ignored_columns=ignored_columns, not_null_only=False, primary_or_compound_key=False, semantic_types_dict=None, table_expectations_only=False, value_set_threshold="MANY", ) suite = profiler.build_suite()""" profiler_code_cell = lint_code(code=profiler_code_cell).rstrip("\n") cells_of_interest_dict: Dict[int, dict] = find_code_in_notebook( nb=load_notebook_from_path(notebook_path=expected_notebook_path), search_string=profiler_code_cell, ) assert len(cells_of_interest_dict) == 1 run_notebook( notebook_path=expected_notebook_path, 
notebook_dir=uncommitted_dir, string_to_be_replaced="context.open_data_docs(resource_identifier=validation_result_identifier)", replacement_string="", ) context = DataContext(context_root_dir=project_dir) assert expectation_suite_name in context.list_expectation_suite_names() suite: ExpectationSuite = context.get_expectation_suite( expectation_suite_name=expectation_suite_name ) assert suite.expectations == [ ExpectationConfiguration( **{ "expectation_type": "expect_table_columns_to_match_ordered_list", "kwargs": { "column_list": [ "Unnamed: 0", "Name", "PClass", "Age", "Sex", "Survived", "SexCode", ] }, "meta": {}, } ), ExpectationConfiguration( **{ "expectation_type": "expect_table_row_count_to_be_between", "kwargs": {"max_value": 1313, "min_value": 1313}, "meta": {}, } ), ] assert mock_subprocess.call_count == 1 call_args: List[str] = mock_subprocess.call_args[0][0] assert call_args[0] == "jupyter" assert call_args[1] == "notebook" assert expected_notebook_path in call_args[2] assert mock_webbroser.call_count == 0 assert mock_emit.call_count == 5 assert mock_emit.call_args_list == [ mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), mock.call( { "event": "cli.suite.new.begin", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( { "event_payload": { "anonymized_expectation_suite_name": "9df638a13b727807e51b13ec1839bcbe" }, "event": "data_context.save_expectation_suite", "success": True, } ), mock.call( { "event": "cli.suite.new.end", "event_payload": {"api_version": "v3"}, "success": True, } ), mock.call( {"event_payload": {}, "event": "data_context.__init__", "success": True} ), ] assert_no_logging_messages_or_tracebacks( my_caplog=caplog, click_result=result, )
[ "def", "test_suite_new_profile_runs_notebook_opens_jupyter", "(", "mock_webbroser", ",", "mock_subprocess", ",", "mock_emit", ",", "caplog", ",", "monkeypatch", ",", "titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled", ",", ")", ":", "context", ":", "DataContext", "=", "titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled", "monkeypatch", ".", "chdir", "(", "os", ".", "path", ".", "dirname", "(", "context", ".", "root_directory", ")", ")", "project_dir", ":", "str", "=", "context", ".", "root_directory", "uncommitted_dir", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"uncommitted\"", ")", "expectation_suite_name", ":", "str", "=", "\"test_suite_name\"", "batch_request", ":", "dict", "=", "{", "\"datasource_name\"", ":", "\"my_datasource\"", ",", "\"data_connector_name\"", ":", "\"my_basic_data_connector\"", ",", "\"data_asset_name\"", ":", "\"Titanic_1911\"", ",", "}", "batch_request_file_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"batch_request.json\"", ")", "with", "open", "(", "batch_request_file_path", ",", "\"w\"", ")", "as", "json_file", ":", "json", ".", "dump", "(", "batch_request", ",", "json_file", ")", "mock_emit", ".", "reset_mock", "(", ")", "runner", ":", "CliRunner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "result", ":", "Result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"suite\"", ",", "\"new\"", ",", "\"--expectation-suite\"", ",", "f\"{expectation_suite_name}\"", ",", "\"--interactive\"", ",", "\"--batch-request\"", ",", "f\"{batch_request_file_path}\"", ",", "\"--profile\"", ",", "]", ",", "input", "=", "\"\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "stdout", ":", "str", "=", "result", ".", "stdout", "assert", "\"Select a datasource\"", "not", "in", "stdout", "assert", "\"Opening a notebook for you now to edit your expectation suite!\"", "in", "stdout", "assert", "\"If you wish to avoid this you can add the `--no-jupyter` flag.\"", "in", "stdout", "expected_suite_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_dir", ",", "\"expectations\"", ",", "f\"{expectation_suite_name}.json\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_suite_path", ")", "expected_notebook_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "uncommitted_dir", ",", "f\"edit_{expectation_suite_name}.ipynb\"", ")", "assert", "os", ".", "path", ".", "isfile", "(", "expected_notebook_path", ")", "batch_request_string", ":", "str", "=", "(", "str", "(", "BatchRequest", "(", "*", "*", "batch_request", ")", ")", ".", "replace", "(", "\"{\\n\"", ",", "\"{\\n \"", ")", ".", "replace", "(", "\",\\n\"", ",", "\",\\n \"", ")", ".", "replace", "(", "\"\\n}\"", ",", "\",\\n}\"", ")", ")", "batch_request_string", "=", "fr\"batch_request = {batch_request_string}\"", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "batch_request_string", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", 
",", "search_string", "=", "\"context.open_data_docs(resource_identifier=suite_identifier)\"", ",", ")", "assert", "not", "cells_of_interest_dict", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "profiler_code_cell", ":", "str", "=", "f\"\"\"\\\nprofiler = UserConfigurableProfiler(\n profile_dataset=validator,\n excluded_expectations=None,\n ignored_columns=ignored_columns,\n not_null_only=False,\n primary_or_compound_key=False,\n semantic_types_dict=None,\n table_expectations_only=False,\n value_set_threshold=\"MANY\",\n)\nsuite = profiler.build_suite()\"\"\"", "profiler_code_cell", "=", "lint_code", "(", "code", "=", "profiler_code_cell", ")", ".", "rstrip", "(", "\"\\n\"", ")", "cells_of_interest_dict", ":", "Dict", "[", "int", ",", "dict", "]", "=", "find_code_in_notebook", "(", "nb", "=", "load_notebook_from_path", "(", "notebook_path", "=", "expected_notebook_path", ")", ",", "search_string", "=", "profiler_code_cell", ",", ")", "assert", "len", "(", "cells_of_interest_dict", ")", "==", "1", "run_notebook", "(", "notebook_path", "=", "expected_notebook_path", ",", "notebook_dir", "=", "uncommitted_dir", ",", "string_to_be_replaced", "=", "\"context.open_data_docs(resource_identifier=validation_result_identifier)\"", ",", "replacement_string", "=", "\"\"", ",", ")", "context", "=", "DataContext", "(", "context_root_dir", "=", "project_dir", ")", "assert", "expectation_suite_name", "in", "context", ".", "list_expectation_suite_names", "(", ")", "suite", ":", "ExpectationSuite", "=", "context", ".", "get_expectation_suite", "(", "expectation_suite_name", "=", "expectation_suite_name", ")", "assert", "suite", ".", "expectations", "==", "[", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_columns_to_match_ordered_list\"", ",", "\"kwargs\"", ":", "{", "\"column_list\"", ":", "[", "\"Unnamed: 0\"", ",", "\"Name\"", ",", "\"PClass\"", ",", "\"Age\"", ",", "\"Sex\"", ",", "\"Survived\"", ",", "\"SexCode\"", ",", "]", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "ExpectationConfiguration", "(", "*", "*", "{", "\"expectation_type\"", ":", "\"expect_table_row_count_to_be_between\"", ",", "\"kwargs\"", ":", "{", "\"max_value\"", ":", "1313", ",", "\"min_value\"", ":", "1313", "}", ",", "\"meta\"", ":", "{", "}", ",", "}", ")", ",", "]", "assert", "mock_subprocess", ".", "call_count", "==", "1", "call_args", ":", "List", "[", "str", "]", "=", "mock_subprocess", ".", "call_args", "[", "0", "]", "[", "0", "]", "assert", "call_args", "[", "0", "]", "==", "\"jupyter\"", "assert", "call_args", "[", "1", "]", "==", "\"notebook\"", "assert", "expected_notebook_path", "in", "call_args", "[", "2", "]", "assert", "mock_webbroser", ".", "call_count", "==", "0", "assert", "mock_emit", ".", "call_count", "==", "5", "assert", "mock_emit", ".", "call_args_list", "==", "[", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.begin\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", 
")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "\"anonymized_expectation_suite_name\"", ":", "\"9df638a13b727807e51b13ec1839bcbe\"", "}", ",", "\"event\"", ":", "\"data_context.save_expectation_suite\"", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event\"", ":", "\"cli.suite.new.end\"", ",", "\"event_payload\"", ":", "{", "\"api_version\"", ":", "\"v3\"", "}", ",", "\"success\"", ":", "True", ",", "}", ")", ",", "mock", ".", "call", "(", "{", "\"event_payload\"", ":", "{", "}", ",", "\"event\"", ":", "\"data_context.__init__\"", ",", "\"success\"", ":", "True", "}", ")", ",", "]", "assert_no_logging_messages_or_tracebacks", "(", "my_caplog", "=", "caplog", ",", "click_result", "=", "result", ",", ")" ]
[ 2888, 0 ]
[ 3089, 5 ]
python
en
['en', 'error', 'th']
False
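The subprocess assertions above index mock_subprocess.call_args[0][0]; a tiny self-contained illustration of that indexing with unittest.mock (the notebook path below is made up):

from unittest import mock

m = mock.Mock()
m(["jupyter", "notebook", "/tmp/edit_test_suite_name.ipynb"])
# call_args[0] is the tuple of positional arguments; a second [0] picks
# the first positional argument, here the full command list.
call_args = m.call_args[0][0]
assert call_args[0] == "jupyter" and call_args[1] == "notebook"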
CIDRRange.__init__
(self, spec: str)
Initialize a CIDRRange from a spec, which can look like any of: 127.0.0.1 -- an exact IPv4 match ::1 -- an exact IPv6 match 192.168.0.0/16 -- an IPv4 range 2001:2000::/64 -- an IPv6 range If the prefix is not a valid IP address, or if the prefix length isn't a valid length for the class of IP address, the CIDRRange object will evaluate False, with information about the error in self.error. :param spec: string specifying the CIDR block in question
Initialize a CIDRRange from a spec, which can look like any of:
def __init__(self, spec: str) -> None: """ Initialize a CIDRRange from a spec, which can look like any of: 127.0.0.1 -- an exact IPv4 match ::1 -- an exact IPv6 match 192.168.0.0/16 -- an IPv4 range 2001:2000::/64 -- an IPv6 range If the prefix is not a valid IP address, or if the prefix length isn't a valid length for the class of IP address, the CIDRRange object will evaluate False, with information about the error in self.error. :param spec: string specifying the CIDR block in question """ self.error: Optional[str] = None self.address: Optional[str] = None self.prefix_len: Optional[int] = None prefix: Optional[str] = None pfx_len: Optional[int] = None addr: Optional[Union[IPv4Address, IPv6Address]] = None if '/' in spec: # CIDR range! Try to separate the address and its length. address, lenstr = spec.split('/', 1) try: pfx_len = int(lenstr) except ValueError: self.error = f"CIDR range {spec} has an invalid length, ignoring" return else: address = spec try: addr = ip_address(address) except ValueError: pass if addr is None: self.error = f"Invalid IP address {address}" return if pfx_len is None: pfx_len = addr.max_prefixlen elif pfx_len > addr.max_prefixlen: self.error = f"Invalid prefix length for IPv{addr.version} address {address}/{pfx_len}" return # Convert the parsed address to a string, so that any normalization # appropriate to the IP version can happen. self.address = str(addr) self.prefix_len = pfx_len
[ "def", "__init__", "(", "self", ",", "spec", ":", "str", ")", "->", "None", ":", "self", ".", "error", ":", "Optional", "[", "str", "]", "=", "None", "self", ".", "address", ":", "Optional", "[", "str", "]", "=", "None", "self", ".", "prefix_len", ":", "Optional", "[", "int", "]", "=", "None", "prefix", ":", "Optional", "[", "str", "]", "=", "None", "pfx_len", ":", "Optional", "[", "int", "]", "=", "None", "addr", ":", "Optional", "[", "Union", "[", "IPv4Address", ",", "IPv6Address", "]", "]", "=", "None", "if", "'/'", "in", "spec", ":", "# CIDR range! Try to separate the address and its length.", "address", ",", "lenstr", "=", "spec", ".", "split", "(", "'/'", ",", "1", ")", "try", ":", "pfx_len", "=", "int", "(", "lenstr", ")", "except", "ValueError", ":", "self", ".", "error", "=", "f\"CIDR range {spec} has an invalid length, ignoring\"", "return", "else", ":", "address", "=", "spec", "try", ":", "addr", "=", "ip_address", "(", "address", ")", "except", "ValueError", ":", "pass", "if", "addr", "is", "None", ":", "self", ".", "error", "=", "f\"Invalid IP address {address}\"", "return", "if", "pfx_len", "is", "None", ":", "pfx_len", "=", "addr", ".", "max_prefixlen", "elif", "pfx_len", ">", "addr", ".", "max_prefixlen", ":", "self", ".", "error", "=", "f\"Invalid prefix length for IPv{addr.version} address {address}/{pfx_len}\"", "return", "# Convert the parsed address to a string, so that any normalization", "# appropriate to the IP version can happen.", "self", ".", "address", "=", "str", "(", "addr", ")", "self", ".", "prefix_len", "=", "pfx_len" ]
[ 10, 4 ]
[ 65, 33 ]
python
en
['en', 'error', 'th']
False
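A minimal standalone sketch of the same spec parsing using only the standard library; parse_cidr_spec is our illustrative name, and invalid specs yield None rather than an error attribute:

from ipaddress import ip_address

def parse_cidr_spec(spec: str):
    # Return (normalized_address, prefix_len), or None if the spec is bad.
    address, _, lenstr = spec.partition('/')
    try:
        addr = ip_address(address)
    except ValueError:
        return None  # not a valid IPv4/IPv6 address
    if lenstr:
        try:
            pfx_len = int(lenstr)
        except ValueError:
            return None  # non-numeric prefix length
        if pfx_len > addr.max_prefixlen:
            return None  # e.g. /64 on an IPv4 address
    else:
        pfx_len = addr.max_prefixlen  # exact match: /32 or /128
    return str(addr), pfx_len

assert parse_cidr_spec("127.0.0.1") == ("127.0.0.1", 32)
assert parse_cidr_spec("2001:2000::/64") == ("2001:2000::", 64)
assert parse_cidr_spec("192.168.0.0/99") is None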
CIDRRange.__bool__
(self)
A CIDRRange will evaluate as True IFF there is no error, the address is not None, and the prefix_len is not None.
A CIDRRange will evaluate as True IFF there is no error, the address is not None, and the prefix_len is not None.
def __bool__(self) -> bool: """ A CIDRRange will evaluate as True IFF there is no error, the address is not None, and the prefix_len is not None. """ return ((not self.error) and (self.address is not None) and (self.prefix_len is not None))
[ "def", "__bool__", "(", "self", ")", "->", "bool", ":", "return", "(", "(", "not", "self", ".", "error", ")", "and", "(", "self", ".", "address", "is", "not", "None", ")", "and", "(", "self", ".", "prefix_len", "is", "not", "None", ")", ")" ]
[ 67, 4 ]
[ 75, 46 ]
python
en
['en', 'error', 'th']
False
CIDRRange.as_dict
(self)
Return a dictionary version of a CIDRRange, suitable for use in an Envoy config as an envoy.api.v2.core.CidrRange.
Return a dictionary version of a CIDRRange, suitable for use in an Envoy config as an envoy.api.v2.core.CidrRange.
def as_dict(self) -> dict: """ Return a dictionary version of a CIDRRange, suitable for use in an Envoy config as an envoy.api.v2.core.CidrRange. """ return { "address_prefix": self.address, "prefix_len": self.prefix_len }
[ "def", "as_dict", "(", "self", ")", "->", "dict", ":", "return", "{", "\"address_prefix\"", ":", "self", ".", "address", ",", "\"prefix_len\"", ":", "self", ".", "prefix_len", "}" ]
[ 83, 4 ]
[ 92, 9 ]
python
en
['en', 'error', 'th']
False
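Taken together with __init__ and __bool__ above, the intended use is guard-then-serialize. A hedged usage sketch, assuming CIDRRange is importable from the module these records come from:

r = CIDRRange("10.0.0.0/8")
if r:  # __bool__: no error, address and prefix_len both set
    envoy_range = r.as_dict()
    assert envoy_range == {"address_prefix": "10.0.0.0", "prefix_len": 8}
else:
    print(r.error)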
test_parser_timing
()
We currently reuse the parser, clearing the stack between calls, which is about 10 times faster than not doing so. But these operations are really quick, so this may not be necessary.
We currently reuse the parser, clearing the stack between calls, which is about 10 times faster than not doing so. But these operations are really quick, so this may not be necessary.
def test_parser_timing(): """We currently reuse the parser, clearing the stack between calls, which is about 10 times faster than not doing so. But these operations are really quick, so this may not be necessary.""" assert ( timeit( "parse_evaluation_parameter('x', {'x': 1})", setup="from great_expectations.core.evaluation_parameters import parse_evaluation_parameter", number=100, ) < 1 )
[ "def", "test_parser_timing", "(", ")", ":", "assert", "(", "timeit", "(", "\"parse_evaluation_parameter('x', {'x': 1})\"", ",", "setup", "=", "\"from great_expectations.core.evaluation_parameters import parse_evaluation_parameter\"", ",", "number", "=", "100", ",", ")", "<", "1", ")" ]
[ 114, 0 ]
[ 124, 5 ]
python
en
['en', 'en', 'en']
True
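For reference, the timeit pattern the test relies on, shown on a trivial expression (the statement and setup strings here are placeholders):

from timeit import timeit

# Total wall time for 100 evaluations of the statement; the test above
# asserts the equivalent measurement stays under one second.
elapsed = timeit("x + 1", setup="x = 1", number=100)
print(f"{elapsed:.6f} seconds for 100 runs")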
cli
(ctx, v2_api, verbose, config_file_location, assume_yes)
Welcome to the great_expectations CLI! Most commands follow this format: great_expectations <NOUN> <VERB> The nouns are: checkpoint, datasource, docs, init, project, store, suite, validation-operator. Most nouns accept the following verbs: new, list, edit
Welcome to the great_expectations CLI!
def cli(ctx, v2_api, verbose, config_file_location, assume_yes): """ Welcome to the great_expectations CLI! Most commands follow this format: great_expectations <NOUN> <VERB> The nouns are: checkpoint, datasource, docs, init, project, store, suite, validation-operator. Most nouns accept the following verbs: new, list, edit """ logger = _set_up_logger() if verbose: # Note we are explicitly not using a logger in all CLI output to have # more control over console UI. logger.setLevel(logging.DEBUG) ctx.obj = CLIState( v2_api=v2_api, config_file_location=config_file_location, assume_yes=assume_yes ) if v2_api: cli_message("Using v2 (Batch Kwargs) API") else: cli_message("Using v3 (Batch Request) API")
[ "def", "cli", "(", "ctx", ",", "v2_api", ",", "verbose", ",", "config_file_location", ",", "assume_yes", ")", ":", "logger", "=", "_set_up_logger", "(", ")", "if", "verbose", ":", "# Note we are explicitly not using a logger in all CLI output to have", "# more control over console UI.", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "ctx", ".", "obj", "=", "CLIState", "(", "v2_api", "=", "v2_api", ",", "config_file_location", "=", "config_file_location", ",", "assume_yes", "=", "assume_yes", ")", "if", "v2_api", ":", "cli_message", "(", "\"Using v2 (Batch Kwargs) API\"", ")", "else", ":", "cli_message", "(", "\"Using v3 (Batch Request) API\"", ")" ]
[ 135, 0 ]
[ 156, 51 ]
python
en
['en', 'error', 'th']
False
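A hedged sketch of driving this entry point the way the tests above do, via click's CliRunner; it assumes great_expectations is installed and that cli is importable from great_expectations.cli, as in the tests:

from click.testing import CliRunner
from great_expectations.cli import cli  # import path assumed from the tests

runner = CliRunner(mix_stderr=False)
# <NOUN> <VERB> form from the docstring above, e.g. "suite list".
result = runner.invoke(cli, ["--v3-api", "suite", "list"], catch_exceptions=False)
print(result.exit_code, result.stdout)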
CLI.is_v2_api
(ctx)
Determine if v2 api is requested by searching context params.
Determine if v2 api is requested by searching context params.
def is_v2_api(ctx): """Determine if v2 api is requested by searching context params.""" if ctx.params: return ctx.params and "v2_api" in ctx.params.keys() and ctx.params["v2_api"] root_ctx_params = ctx.find_root().params return ( root_ctx_params and "v2_api" in root_ctx_params.keys() and root_ctx_params["v2_api"] )
[ "def", "is_v2_api", "(", "ctx", ")", ":", "if", "ctx", ".", "params", ":", "return", "ctx", ".", "params", "and", "\"v2_api\"", "in", "ctx", ".", "params", ".", "keys", "(", ")", "and", "ctx", ".", "params", "[", "\"v2_api\"", "]", "root_ctx_params", "=", "ctx", ".", "find_root", "(", ")", ".", "params", "return", "(", "root_ctx_params", "and", "\"v2_api\"", "in", "root_ctx_params", ".", "keys", "(", ")", "and", "root_ctx_params", "[", "\"v2_api\"", "]", ")" ]
[ 90, 4 ]
[ 100, 9 ]
python
en
['en', 'en', 'en']
True
FontManager.get_char_size
(self)
Get the character size.
Get the character size.
def get_char_size(self): """ Get the character size. """ return self.fonts['NORMAL'].getsize('M')
[ "def", "get_char_size", "(", "self", ")", ":", "return", "self", ".", "fonts", "[", "'NORMAL'", "]", ".", "getsize", "(", "'M'", ")" ]
[ 155, 4 ]
[ 159, 48 ]
python
en
['en', 'error', 'th']
False
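Note that ImageFont.getsize() was deprecated in Pillow 9.2 and removed in Pillow 10. A sketch of the equivalent 'M' cell measurement on modern Pillow (assumes Pillow is installed):

from PIL import ImageFont

font = ImageFont.load_default()
# getbbox returns (left, top, right, bottom) for the rendered text.
left, top, right, bottom = font.getbbox('M')
char_width, char_height = right - left, bottom - top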
FontManager.get_font
(self, bold, oblique)
Get the font based on bold and italic flags.
Get the font based on bold and italic flags.
def get_font(self, bold, oblique): """ Get the font based on bold and italic flags. """ if bold and oblique: return self.fonts['BOLDITALIC'] elif bold: return self.fonts['BOLD'] elif oblique: return self.fonts['ITALIC'] else: return self.fonts['NORMAL']
[ "def", "get_font", "(", "self", ",", "bold", ",", "oblique", ")", ":", "if", "bold", "and", "oblique", ":", "return", "self", ".", "fonts", "[", "'BOLDITALIC'", "]", "elif", "bold", ":", "return", "self", ".", "fonts", "[", "'BOLD'", "]", "elif", "oblique", ":", "return", "self", ".", "fonts", "[", "'ITALIC'", "]", "else", ":", "return", "self", ".", "fonts", "[", "'NORMAL'", "]" ]
[ 161, 4 ]
[ 172, 39 ]
python
en
['en', 'error', 'th']
False
ImageFormatter.__init__
(self, **options)
See the class docstring for explanation of options.
See the class docstring for explanation of options.
def __init__(self, **options): """ See the class docstring for explanation of options. """ if not pil_available: raise PilNotAvailable( 'Python Imaging Library is required for this formatter') Formatter.__init__(self, **options) self.encoding = 'latin1' # let pygments.format() do the right thing # Read the style self.styles = dict(self.style) if self.style.background_color is None: self.background_color = '#fff' else: self.background_color = self.style.background_color # Image options self.image_format = get_choice_opt( options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'], self.default_image_format, normcase=True) self.image_pad = get_int_opt(options, 'image_pad', 10) self.line_pad = get_int_opt(options, 'line_pad', 2) # The fonts fontsize = get_int_opt(options, 'font_size', 14) self.fonts = FontManager(options.get('font_name', ''), fontsize) self.fontw, self.fonth = self.fonts.get_char_size() # Line number options self.line_number_fg = options.get('line_number_fg', '#886') self.line_number_bg = options.get('line_number_bg', '#eed') self.line_number_chars = get_int_opt(options, 'line_number_chars', 2) self.line_number_bold = get_bool_opt(options, 'line_number_bold', False) self.line_number_italic = get_bool_opt(options, 'line_number_italic', False) self.line_number_pad = get_int_opt(options, 'line_number_pad', 6) self.line_numbers = get_bool_opt(options, 'line_numbers', True) self.line_number_separator = get_bool_opt(options, 'line_number_separator', True) self.line_number_step = get_int_opt(options, 'line_number_step', 1) self.line_number_start = get_int_opt(options, 'line_number_start', 1) if self.line_numbers: self.line_number_width = (self.fontw * self.line_number_chars + self.line_number_pad * 2) else: self.line_number_width = 0 self.hl_lines = [] hl_lines_str = get_list_opt(options, 'hl_lines', []) for line in hl_lines_str: try: self.hl_lines.append(int(line)) except ValueError: pass self.hl_color = options.get('hl_color', self.style.highlight_color) or '#f90' self.drawables = []
[ "def", "__init__", "(", "self", ",", "*", "*", "options", ")", ":", "if", "not", "pil_available", ":", "raise", "PilNotAvailable", "(", "'Python Imaging Library is required for this formatter'", ")", "Formatter", ".", "__init__", "(", "self", ",", "*", "*", "options", ")", "self", ".", "encoding", "=", "'latin1'", "# let pygments.format() do the right thing", "# Read the style", "self", ".", "styles", "=", "dict", "(", "self", ".", "style", ")", "if", "self", ".", "style", ".", "background_color", "is", "None", ":", "self", ".", "background_color", "=", "'#fff'", "else", ":", "self", ".", "background_color", "=", "self", ".", "style", ".", "background_color", "# Image options", "self", ".", "image_format", "=", "get_choice_opt", "(", "options", ",", "'image_format'", ",", "[", "'png'", ",", "'jpeg'", ",", "'gif'", ",", "'bmp'", "]", ",", "self", ".", "default_image_format", ",", "normcase", "=", "True", ")", "self", ".", "image_pad", "=", "get_int_opt", "(", "options", ",", "'image_pad'", ",", "10", ")", "self", ".", "line_pad", "=", "get_int_opt", "(", "options", ",", "'line_pad'", ",", "2", ")", "# The fonts", "fontsize", "=", "get_int_opt", "(", "options", ",", "'font_size'", ",", "14", ")", "self", ".", "fonts", "=", "FontManager", "(", "options", ".", "get", "(", "'font_name'", ",", "''", ")", ",", "fontsize", ")", "self", ".", "fontw", ",", "self", ".", "fonth", "=", "self", ".", "fonts", ".", "get_char_size", "(", ")", "# Line number options", "self", ".", "line_number_fg", "=", "options", ".", "get", "(", "'line_number_fg'", ",", "'#886'", ")", "self", ".", "line_number_bg", "=", "options", ".", "get", "(", "'line_number_bg'", ",", "'#eed'", ")", "self", ".", "line_number_chars", "=", "get_int_opt", "(", "options", ",", "'line_number_chars'", ",", "2", ")", "self", ".", "line_number_bold", "=", "get_bool_opt", "(", "options", ",", "'line_number_bold'", ",", "False", ")", "self", ".", "line_number_italic", "=", "get_bool_opt", "(", "options", ",", "'line_number_italic'", ",", "False", ")", "self", ".", "line_number_pad", "=", "get_int_opt", "(", "options", ",", "'line_number_pad'", ",", "6", ")", "self", ".", "line_numbers", "=", "get_bool_opt", "(", "options", ",", "'line_numbers'", ",", "True", ")", "self", ".", "line_number_separator", "=", "get_bool_opt", "(", "options", ",", "'line_number_separator'", ",", "True", ")", "self", ".", "line_number_step", "=", "get_int_opt", "(", "options", ",", "'line_number_step'", ",", "1", ")", "self", ".", "line_number_start", "=", "get_int_opt", "(", "options", ",", "'line_number_start'", ",", "1", ")", "if", "self", ".", "line_numbers", ":", "self", ".", "line_number_width", "=", "(", "self", ".", "fontw", "*", "self", ".", "line_number_chars", "+", "self", ".", "line_number_pad", "*", "2", ")", "else", ":", "self", ".", "line_number_width", "=", "0", "self", ".", "hl_lines", "=", "[", "]", "hl_lines_str", "=", "get_list_opt", "(", "options", ",", "'hl_lines'", ",", "[", "]", ")", "for", "line", "in", "hl_lines_str", ":", "try", ":", "self", ".", "hl_lines", ".", "append", "(", "int", "(", "line", ")", ")", "except", "ValueError", ":", "pass", "self", ".", "hl_color", "=", "options", ".", "get", "(", "'hl_color'", ",", "self", ".", "style", ".", "highlight_color", ")", "or", "'#f90'", "self", ".", "drawables", "=", "[", "]" ]
[ 292, 4 ]
[ 346, 27 ]
python
en
['en', 'error', 'th']
False
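A usage sketch tying these options together: pygments.highlight with an ImageFormatter returns the encoded image as bytes (Pillow must be installed; the option values here are arbitrary):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import ImageFormatter

# Render a snippet to PNG bytes with a line-number gutter and line 1
# highlighted; the options map onto the __init__ above.
png_bytes = highlight(
    'print("hello")\n',
    PythonLexer(),
    ImageFormatter(font_size=14, line_numbers=True, hl_lines=[1]),
)
with open("snippet.png", "wb") as f:
    f.write(png_bytes)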
ImageFormatter._get_line_height
(self)
Get the height of a line.
Get the height of a line.
def _get_line_height(self): """ Get the height of a line. """ return self.fonth + self.line_pad
[ "def", "_get_line_height", "(", "self", ")", ":", "return", "self", ".", "fonth", "+", "self", ".", "line_pad" ]
[ 352, 4 ]
[ 356, 41 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_line_y
(self, lineno)
Get the Y coordinate of a line number.
Get the Y coordinate of a line number.
def _get_line_y(self, lineno): """ Get the Y coordinate of a line number. """ return lineno * self._get_line_height() + self.image_pad
[ "def", "_get_line_y", "(", "self", ",", "lineno", ")", ":", "return", "lineno", "*", "self", ".", "_get_line_height", "(", ")", "+", "self", ".", "image_pad" ]
[ 358, 4 ]
[ 362, 64 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_char_width
(self)
Get the width of a character.
Get the width of a character.
def _get_char_width(self): """ Get the width of a character. """ return self.fontw
[ "def", "_get_char_width", "(", "self", ")", ":", "return", "self", ".", "fontw" ]
[ 364, 4 ]
[ 368, 25 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_char_x
(self, charno)
Get the X coordinate of a character position.
Get the X coordinate of a character position.
def _get_char_x(self, charno): """ Get the X coordinate of a character position. """ return charno * self.fontw + self.image_pad + self.line_number_width
[ "def", "_get_char_x", "(", "self", ",", "charno", ")", ":", "return", "charno", "*", "self", ".", "fontw", "+", "self", ".", "image_pad", "+", "self", ".", "line_number_width" ]
[ 370, 4 ]
[ 374, 76 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_text_pos
(self, charno, lineno)
Get the actual position for a character and line position.
Get the actual position for a character and line position.
def _get_text_pos(self, charno, lineno): """ Get the actual position for a character and line position. """ return self._get_char_x(charno), self._get_line_y(lineno)
[ "def", "_get_text_pos", "(", "self", ",", "charno", ",", "lineno", ")", ":", "return", "self", ".", "_get_char_x", "(", "charno", ")", ",", "self", ".", "_get_line_y", "(", "lineno", ")" ]
[ 376, 4 ]
[ 380, 65 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_linenumber_pos
(self, lineno)
Get the actual position for the start of a line number.
Get the actual position for the start of a line number.
def _get_linenumber_pos(self, lineno): """ Get the actual position for the start of a line number. """ return (self.image_pad, self._get_line_y(lineno))
[ "def", "_get_linenumber_pos", "(", "self", ",", "lineno", ")", ":", "return", "(", "self", ".", "image_pad", ",", "self", ".", "_get_line_y", "(", "lineno", ")", ")" ]
[ 382, 4 ]
[ 386, 57 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_text_color
(self, style)
Get the correct color for the token from the style.
Get the correct color for the token from the style.
def _get_text_color(self, style): """ Get the correct color for the token from the style. """ if style['color'] is not None: fill = '#' + style['color'] else: fill = '#000' return fill
[ "def", "_get_text_color", "(", "self", ",", "style", ")", ":", "if", "style", "[", "'color'", "]", "is", "not", "None", ":", "fill", "=", "'#'", "+", "style", "[", "'color'", "]", "else", ":", "fill", "=", "'#000'", "return", "fill" ]
[ 388, 4 ]
[ 396, 19 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_style_font
(self, style)
Get the correct font for the style.
Get the correct font for the style.
def _get_style_font(self, style): """ Get the correct font for the style. """ return self.fonts.get_font(style['bold'], style['italic'])
[ "def", "_get_style_font", "(", "self", ",", "style", ")", ":", "return", "self", ".", "fonts", ".", "get_font", "(", "style", "[", "'bold'", "]", ",", "style", "[", "'italic'", "]", ")" ]
[ 398, 4 ]
[ 402, 66 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._get_image_size
(self, maxcharno, maxlineno)
Get the required image size.
Get the required image size.
def _get_image_size(self, maxcharno, maxlineno): """ Get the required image size. """ return (self._get_char_x(maxcharno) + self.image_pad, self._get_line_y(maxlineno + 0) + self.image_pad)
[ "def", "_get_image_size", "(", "self", ",", "maxcharno", ",", "maxlineno", ")", ":", "return", "(", "self", ".", "_get_char_x", "(", "maxcharno", ")", "+", "self", ".", "image_pad", ",", "self", ".", "_get_line_y", "(", "maxlineno", "+", "0", ")", "+", "self", ".", "image_pad", ")" ]
[ 404, 4 ]
[ 409, 65 ]
python
en
['en', 'error', 'th']
False
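The pixel geometry in the helpers above composes simply; a sketch with assumed metrics (an 8x14 px character cell, image_pad=10, line_pad=2, and a 30 px line-number gutter -- all illustrative values, not the formatter's defaults):

# Illustrative metrics; the real values come from the font and options.
fontw, fonth = 8, 14
image_pad, line_pad, line_number_width = 10, 2, 30

line_height = fonth + line_pad  # _get_line_height

def char_x(charno):  # _get_char_x: column -> pixel x, after pad + gutter
    return charno * fontw + image_pad + line_number_width

def line_y(lineno):  # _get_line_y: line -> pixel y
    return lineno * line_height + image_pad

assert char_x(0) == 40           # first column
assert line_y(3) == 58           # 3 * 16 + 10
assert (char_x(80) + image_pad,  # _get_image_size for 80 cols, 25 lines
        line_y(25) + image_pad) == (690, 420)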
ImageFormatter._draw_linenumber
(self, posno, lineno)
Remember a line number drawable to paint later.
Remember a line number drawable to paint later.
def _draw_linenumber(self, posno, lineno): """ Remember a line number drawable to paint later. """ self._draw_text( self._get_linenumber_pos(posno), str(lineno).rjust(self.line_number_chars), font=self.fonts.get_font(self.line_number_bold, self.line_number_italic), fill=self.line_number_fg, )
[ "def", "_draw_linenumber", "(", "self", ",", "posno", ",", "lineno", ")", ":", "self", ".", "_draw_text", "(", "self", ".", "_get_linenumber_pos", "(", "posno", ")", ",", "str", "(", "lineno", ")", ".", "rjust", "(", "self", ".", "line_number_chars", ")", ",", "font", "=", "self", ".", "fonts", ".", "get_font", "(", "self", ".", "line_number_bold", ",", "self", ".", "line_number_italic", ")", ",", "fill", "=", "self", ".", "line_number_fg", ",", ")" ]
[ 411, 4 ]
[ 421, 9 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._draw_text
(self, pos, text, font, **kw)
Remember a single drawable tuple to paint later.
Remember a single drawable tuple to paint later.
def _draw_text(self, pos, text, font, **kw): """ Remember a single drawable tuple to paint later. """ self.drawables.append((pos, text, font, kw))
[ "def", "_draw_text", "(", "self", ",", "pos", ",", "text", ",", "font", ",", "*", "*", "kw", ")", ":", "self", ".", "drawables", ".", "append", "(", "(", "pos", ",", "text", ",", "font", ",", "kw", ")", ")" ]
[ 423, 4 ]
[ 427, 52 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._create_drawables
(self, tokensource)
Create drawables for the token content.
Create drawables for the token content.
def _create_drawables(self, tokensource): """ Create drawables for the token content. """ lineno = charno = maxcharno = 0 for ttype, value in tokensource: while ttype not in self.styles: ttype = ttype.parent style = self.styles[ttype] # TODO: make sure tab expansion happens earlier in the chain. It # really ought to be done on the input, as to do it right here is # quite complex. value = value.expandtabs(4) lines = value.splitlines(True) # print lines for i, line in enumerate(lines): temp = line.rstrip('\n') if temp: self._draw_text( self._get_text_pos(charno, lineno), temp, font = self._get_style_font(style), fill = self._get_text_color(style) ) charno += len(temp) maxcharno = max(maxcharno, charno) if line.endswith('\n'): # add a line for each extra line in the value charno = 0 lineno += 1 self.maxcharno = maxcharno self.maxlineno = lineno
[ "def", "_create_drawables", "(", "self", ",", "tokensource", ")", ":", "lineno", "=", "charno", "=", "maxcharno", "=", "0", "for", "ttype", ",", "value", "in", "tokensource", ":", "while", "ttype", "not", "in", "self", ".", "styles", ":", "ttype", "=", "ttype", ".", "parent", "style", "=", "self", ".", "styles", "[", "ttype", "]", "# TODO: make sure tab expansion happens earlier in the chain. It", "# really ought to be done on the input, as to do it right here is", "# quite complex.", "value", "=", "value", ".", "expandtabs", "(", "4", ")", "lines", "=", "value", ".", "splitlines", "(", "True", ")", "# print lines", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "temp", "=", "line", ".", "rstrip", "(", "'\\n'", ")", "if", "temp", ":", "self", ".", "_draw_text", "(", "self", ".", "_get_text_pos", "(", "charno", ",", "lineno", ")", ",", "temp", ",", "font", "=", "self", ".", "_get_style_font", "(", "style", ")", ",", "fill", "=", "self", ".", "_get_text_color", "(", "style", ")", ")", "charno", "+=", "len", "(", "temp", ")", "maxcharno", "=", "max", "(", "maxcharno", ",", "charno", ")", "if", "line", ".", "endswith", "(", "'\\n'", ")", ":", "# add a line for each extra line in the value", "charno", "=", "0", "lineno", "+=", "1", "self", ".", "maxcharno", "=", "maxcharno", "self", ".", "maxlineno", "=", "lineno" ]
[ 429, 4 ]
[ 460, 31 ]
python
en
['en', 'error', 'th']
False
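The charno/lineno bookkeeping above can be seen in isolation; a sketch stepping one multi-line token value the same way the loop does:

# How a single token value advances (charno, lineno) in the loop above.
value = "def f():\n    pass\n".expandtabs(4)
lineno = charno = maxcharno = 0
for line in value.splitlines(True):  # True keeps the trailing newlines
    text = line.rstrip('\n')
    if text:
        charno += len(text)
        maxcharno = max(maxcharno, charno)
    if line.endswith('\n'):
        charno = 0   # newline: back to column 0 ...
        lineno += 1  # ... on the next line
assert (lineno, maxcharno) == (2, 8)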
ImageFormatter._draw_line_numbers
(self)
Create drawables for the line numbers.
Create drawables for the line numbers.
def _draw_line_numbers(self): """ Create drawables for the line numbers. """ if not self.line_numbers: return for p in xrange(self.maxlineno): n = p + self.line_number_start if (n % self.line_number_step) == 0: self._draw_linenumber(p, n)
[ "def", "_draw_line_numbers", "(", "self", ")", ":", "if", "not", "self", ".", "line_numbers", ":", "return", "for", "p", "in", "xrange", "(", "self", ".", "maxlineno", ")", ":", "n", "=", "p", "+", "self", ".", "line_number_start", "if", "(", "n", "%", "self", ".", "line_number_step", ")", "==", "0", ":", "self", ".", "_draw_linenumber", "(", "p", ",", "n", ")" ]
[ 462, 4 ]
[ 471, 43 ]
python
en
['en', 'error', 'th']
False
ImageFormatter._paint_line_number_bg
(self, im)
Paint the line number background on the image.
Paint the line number background on the image.
def _paint_line_number_bg(self, im): """ Paint the line number background on the image. """ if not self.line_numbers: return if self.line_number_fg is None: return draw = ImageDraw.Draw(im) recth = im.size[-1] rectw = self.image_pad + self.line_number_width - self.line_number_pad draw.rectangle([(0, 0), (rectw, recth)], fill=self.line_number_bg) draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) del draw
[ "def", "_paint_line_number_bg", "(", "self", ",", "im", ")", ":", "if", "not", "self", ".", "line_numbers", ":", "return", "if", "self", ".", "line_number_fg", "is", "None", ":", "return", "draw", "=", "ImageDraw", ".", "Draw", "(", "im", ")", "recth", "=", "im", ".", "size", "[", "-", "1", "]", "rectw", "=", "self", ".", "image_pad", "+", "self", ".", "line_number_width", "-", "self", ".", "line_number_pad", "draw", ".", "rectangle", "(", "[", "(", "0", ",", "0", ")", ",", "(", "rectw", ",", "recth", ")", "]", ",", "fill", "=", "self", ".", "line_number_bg", ")", "draw", ".", "line", "(", "[", "(", "rectw", ",", "0", ")", ",", "(", "rectw", ",", "recth", ")", "]", ",", "fill", "=", "self", ".", "line_number_fg", ")", "del", "draw" ]
[ 473, 4 ]
[ 487, 16 ]
python
en
['en', 'error', 'th']
False
ImageFormatter.format
(self, tokensource, outfile)
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items.
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``.
def format(self, tokensource, outfile): """ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` tuples and write it into ``outfile``. This implementation calculates where it should draw each token on the pixmap, then calculates the required pixmap size and draws the items. """ self._create_drawables(tokensource) self._draw_line_numbers() im = Image.new( 'RGB', self._get_image_size(self.maxcharno, self.maxlineno), self.background_color ) self._paint_line_number_bg(im) draw = ImageDraw.Draw(im) # Highlight if self.hl_lines: x = self.image_pad + self.line_number_width - self.line_number_pad + 1 recth = self._get_line_height() rectw = im.size[0] - x for linenumber in self.hl_lines: y = self._get_line_y(linenumber - 1) draw.rectangle([(x, y), (x + rectw, y + recth)], fill=self.hl_color) for pos, value, font, kw in self.drawables: draw.text(pos, value, font=font, **kw) im.save(outfile, self.image_format.upper())
[ "def", "format", "(", "self", ",", "tokensource", ",", "outfile", ")", ":", "self", ".", "_create_drawables", "(", "tokensource", ")", "self", ".", "_draw_line_numbers", "(", ")", "im", "=", "Image", ".", "new", "(", "'RGB'", ",", "self", ".", "_get_image_size", "(", "self", ".", "maxcharno", ",", "self", ".", "maxlineno", ")", ",", "self", ".", "background_color", ")", "self", ".", "_paint_line_number_bg", "(", "im", ")", "draw", "=", "ImageDraw", ".", "Draw", "(", "im", ")", "# Highlight", "if", "self", ".", "hl_lines", ":", "x", "=", "self", ".", "image_pad", "+", "self", ".", "line_number_width", "-", "self", ".", "line_number_pad", "+", "1", "recth", "=", "self", ".", "_get_line_height", "(", ")", "rectw", "=", "im", ".", "size", "[", "0", "]", "-", "x", "for", "linenumber", "in", "self", ".", "hl_lines", ":", "y", "=", "self", ".", "_get_line_y", "(", "linenumber", "-", "1", ")", "draw", ".", "rectangle", "(", "[", "(", "x", ",", "y", ")", ",", "(", "x", "+", "rectw", ",", "y", "+", "recth", ")", "]", ",", "fill", "=", "self", ".", "hl_color", ")", "for", "pos", ",", "value", ",", "font", ",", "kw", "in", "self", ".", "drawables", ":", "draw", ".", "text", "(", "pos", ",", "value", ",", "font", "=", "font", ",", "*", "*", "kw", ")", "im", ".", "save", "(", "outfile", ",", "self", ".", "image_format", ".", "upper", "(", ")", ")" ]
[ 489, 4 ]
[ 517, 51 ]
python
en
['en', 'error', 'th']
False
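Since format() saves binary image data, outfile must be a binary stream. A sketch calling the formatter directly with BytesIO instead of going through highlight():

import io
from pygments.lexers import PythonLexer
from pygments.formatters import ImageFormatter

buf = io.BytesIO()
tokens = PythonLexer().get_tokens('x = 1\n')  # (tokentype, value) pairs
ImageFormatter().format(tokens, buf)
png_bytes = buf.getvalue()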
file_signature
(filename)
Return a signature for a file.
Return a signature for a file.
def file_signature(filename): """ Return a signature for a file. """ if not os.path.isfile(filename): return None if not os.path.exists(filename): return None # Duplicate auto-generated files can be recognized with the sha1 hash. sig = hashlib.sha1() with open(filename, "rb") as f: buf = f.read() sig.update(buf) return sig.hexdigest()
[ "def", "file_signature", "(", "filename", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "None", "# Duplicate auto-generated files can be recognized with the sha1 hash.", "sig", "=", "hashlib", ".", "sha1", "(", ")", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "f", ":", "buf", "=", "f", ".", "read", "(", ")", "sig", ".", "update", "(", "buf", ")", "return", "sig", ".", "hexdigest", "(", ")" ]
[ 16, 0 ]
[ 33, 26 ]
python
en
['en', 'error', 'th']
False
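The helper above reads the whole file at once; an equivalent chunked variant (our naming) keeps memory flat on large files and yields the same digest:

import hashlib
import os

def file_signature_chunked(filename, chunk_size=1 << 16):
    # Same sha1 hexdigest as file_signature above, computed in 64 KiB chunks.
    if not os.path.isfile(filename):
        return None
    sig = hashlib.sha1()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            sig.update(chunk)
    return sig.hexdigest()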
configuration_signature
(config)
Return a signature for a configuration (xml_generator_configuration_t) object. This can then be used as a key in the cache. This method must take into account anything about a configuration that could cause the declarations generated to be different between runs.
Return a signature for a configuration (xml_generator_configuration_t) object.
def configuration_signature(config): """ Return a signature for a configuration (xml_generator_configuration_t) object. This can then be used as a key in the cache. This method must take into account anything about a configuration that could cause the declarations generated to be different between runs. """ sig = hashlib.sha1() if isinstance(config, cxx_parsers_cfg.xml_generator_configuration_t): sig.update(str(config.xml_generator_path).encode()) sig.update(str(config.working_directory).encode('utf-8')) if isinstance(config, cxx_parsers_cfg.xml_generator_configuration_t): sig.update(str(config.cflags).encode('utf-8')) for p in config.include_paths: sig.update(str(p).encode('utf-8')) for s in config.define_symbols: sig.update(str(s).encode('utf-8')) for u in config.undefine_symbols: sig.update(str(u).encode('utf-8')) return sig.hexdigest()
[ "def", "configuration_signature", "(", "config", ")", ":", "sig", "=", "hashlib", ".", "sha1", "(", ")", "if", "isinstance", "(", "config", ",", "cxx_parsers_cfg", ".", "xml_generator_configuration_t", ")", ":", "sig", ".", "update", "(", "str", "(", "config", ".", "xml_generator_path", ")", ".", "encode", "(", ")", ")", "sig", ".", "update", "(", "str", "(", "config", ".", "working_directory", ")", ".", "encode", "(", "'utf-8'", ")", ")", "if", "isinstance", "(", "config", ",", "cxx_parsers_cfg", ".", "xml_generator_configuration_t", ")", ":", "sig", ".", "update", "(", "str", "(", "config", ".", "cflags", ")", ".", "encode", "(", "'utf-8'", ")", ")", "for", "p", "in", "config", ".", "include_paths", ":", "sig", ".", "update", "(", "str", "(", "p", ")", ".", "encode", "(", "'utf-8'", ")", ")", "for", "s", "in", "config", ".", "define_symbols", ":", "sig", ".", "update", "(", "str", "(", "s", ")", ".", "encode", "(", "'utf-8'", ")", ")", "for", "u", "in", "config", ".", "undefine_symbols", ":", "sig", ".", "update", "(", "str", "(", "u", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "sig", ".", "hexdigest", "(", ")" ]
[ 36, 0 ]
[ 60, 26 ]
python
en
['en', 'error', 'th']
False
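The essential property is that every parse-relevant field is hashed in a fixed order, so equal configurations map to the same cache key. A minimal sketch with a plain dict standing in for the configuration object (field names illustrative):

import hashlib

def dict_signature(cfg: dict) -> str:
    # Hash the fields that affect parsing, in a fixed order.
    sig = hashlib.sha1()
    sig.update(str(cfg.get("cflags", "")).encode("utf-8"))
    for path in cfg.get("include_paths", ()):
        sig.update(str(path).encode("utf-8"))
    for symbol in cfg.get("define_symbols", ()):
        sig.update(str(symbol).encode("utf-8"))
    return sig.hexdigest()

assert dict_signature({"cflags": "-O2"}) == dict_signature({"cflags": "-O2"})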
cache_base_t.flush
(self)
Flush (write out) the cache to disk if needed.
Flush (write out) the cache to disk if needed.
def flush(self): """ Flush (write out) the cache to disk if needed. """ raise NotImplementedError()
[ "def", "flush", "(", "self", ")", ":", "raise", "NotImplementedError", "(", ")" ]
[ 69, 4 ]
[ 72, 35 ]
python
en
['en', 'en', 'en']
True
cache_base_t.update
(self, source_file, configuration, declarations, included_files)
Update the cache entry. :param source_file: path to the C++ source file being parsed :param configuration: configuration used in parsing :class:`xml_generator_configuration_t` :param declarations: declaration tree found when parsing :param included_files: files included by parsing.
Update the cache entry.
def update(self, source_file, configuration, declarations, included_files): """ Update the cache entry. :param source_file: path to the C++ source file being parsed :param configuration: configuration used in parsing :class:`xml_generator_configuration_t` :param declarations: declaration tree found when parsing :param included_files: files included by parsing. """ raise NotImplementedError()
[ "def", "update", "(", "self", ",", "source_file", ",", "configuration", ",", "declarations", ",", "included_files", ")", ":", "raise", "NotImplementedError", "(", ")" ]
[ 74, 4 ]
[ 85, 35 ]
python
en
['en', 'error', 'th']
False
cache_base_t.cached_value
(self, source_file, configuration)
Return the declarations we have cached for the source_file and the given configuration. :param source_file: path to the C++ source file being parsed. :param configuration: configuration that was used for parsing.
Return the declarations we have cached for the source_file and the given configuration.
def cached_value(self, source_file, configuration): """ Return the declarations we have cached for the source_file and the given configuration. :param source_file: path to the C++ source file being parsed. :param configuration: configuration that was used for parsing. """ raise NotImplementedError()
[ "def", "cached_value", "(", "self", ",", "source_file", ",", "configuration", ")", ":", "raise", "NotImplementedError", "(", ")" ]
[ 87, 4 ]
[ 97, 35 ]
python
en
['en', 'error', 'th']
False
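A hedged sketch of the smallest possible implementation of this interface: an in-memory dict with no persistence (dummy_cache_t is our name; it reuses cache_base_t and configuration_signature from the records above):

class dummy_cache_t(cache_base_t):
    # In-memory cache: flush is a no-op because nothing is persisted.
    def __init__(self):
        cache_base_t.__init__(self)
        self._store = {}

    def flush(self):
        pass

    def update(self, source_file, configuration, declarations, included_files):
        key = (source_file, configuration_signature(configuration))
        self._store[key] = declarations

    def cached_value(self, source_file, configuration):
        key = (source_file, configuration_signature(configuration))
        return self._store.get(key)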
file_cache_t.__init__
(self, name)
:param name: name of the cache file.
:param name: name of the cache file.
def __init__(self, name): """ :param name: name of the cache file. """ cache_base_t.__init__(self) self.__name = name # Name of cache file # Map record_key to record_t self.__cache = self.__load(self.__name) self.__needs_flushed = not bool( self.__cache) # If empty then we need to flush for entry in self.__cache.values(): # Clear hit flags entry.was_hit = False
[ "def", "__init__", "(", "self", ",", "name", ")", ":", "cache_base_t", ".", "__init__", "(", "self", ")", "self", ".", "__name", "=", "name", "# Name of cache file", "# Map record_key to record_t", "self", ".", "__cache", "=", "self", ".", "__load", "(", "self", ".", "__name", ")", "self", ".", "__needs_flushed", "=", "not", "bool", "(", "self", ".", "__cache", ")", "# If empty then we need to flush", "for", "entry", "in", "self", ".", "__cache", ".", "values", "(", ")", ":", "# Clear hit flags", "entry", ".", "was_hit", "=", "False" ]
[ 163, 4 ]
[ 175, 33 ]
python
en
['en', 'error', 'th']
False
file_cache_t.__load
(file_name)
Load pickled cache from file and return the object.
Load pickled cache from file and return the object.
def __load(file_name): """ Load pickled cache from file and return the object. """ if os.path.exists(file_name) and not os.path.isfile(file_name): raise RuntimeError( 'Cache should be initialized with valid full file name') if not os.path.exists(file_name): open(file_name, 'w+b').close() return {} cache_file_obj = open(file_name, 'rb') try: file_cache_t.logger.info('Loading cache file "%s".', file_name) start_time = time.clock() cache = pickle.load(cache_file_obj) file_cache_t.logger.debug( "Cache file has been loaded in %.1f secs", (time.clock() - start_time)) file_cache_t.logger.debug( "Found cache in file: [%s] entries: %s", file_name, len(list(cache.keys()))) except (pickle.UnpicklingError, AttributeError, EOFError, ImportError, IndexError) as error: file_cache_t.logger.exception( "Error occurred while reading cache file: %s", error) cache_file_obj.close() file_cache_t.logger.info( "Invalid cache file: [%s] Regenerating.", file_name) open(file_name, 'w+b').close() # Create empty file cache = {} # Empty cache finally: cache_file_obj.close() return cache
[ "def", "__load", "(", "file_name", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "file_name", ")", "and", "not", "os", ".", "path", ".", "isfile", "(", "file_name", ")", ":", "raise", "RuntimeError", "(", "'Cache should be initialized with valid full file name'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "file_name", ")", ":", "open", "(", "file_name", ",", "'w+b'", ")", ".", "close", "(", ")", "return", "{", "}", "cache_file_obj", "=", "open", "(", "file_name", ",", "'rb'", ")", "try", ":", "file_cache_t", ".", "logger", ".", "info", "(", "'Loading cache file \"%s\".'", ",", "file_name", ")", "start_time", "=", "time", ".", "clock", "(", ")", "cache", "=", "pickle", ".", "load", "(", "cache_file_obj", ")", "file_cache_t", ".", "logger", ".", "debug", "(", "\"Cache file has been loaded in %.1f secs\"", ",", "(", "time", ".", "clock", "(", ")", "-", "start_time", ")", ")", "file_cache_t", ".", "logger", ".", "debug", "(", "\"Found cache in file: [%s] entries: %s\"", ",", "file_name", ",", "len", "(", "list", "(", "cache", ".", "keys", "(", ")", ")", ")", ")", "except", "(", "pickle", ".", "UnpicklingError", ",", "AttributeError", ",", "EOFError", ",", "ImportError", ",", "IndexError", ")", "as", "error", ":", "file_cache_t", ".", "logger", ".", "exception", "(", "\"Error occurred while reading cache file: %s\"", ",", "error", ")", "cache_file_obj", ".", "close", "(", ")", "file_cache_t", ".", "logger", ".", "info", "(", "\"Invalid cache file: [%s] Regenerating.\"", ",", "file_name", ")", "open", "(", "file_name", ",", "'w+b'", ")", ".", "close", "(", ")", "# Create empty file", "cache", "=", "{", "}", "# Empty cache", "finally", ":", "cache_file_obj", ".", "close", "(", ")", "return", "cache" ]
[ 178, 4 ]
[ 211, 20 ]
python
en
['en', 'en', 'en']
True
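One detail worth flagging for anyone reusing this pattern: `time.clock()` was removed in Python 3.8, so a modern port of `__load` needs `time.perf_counter()` instead. Below is a hedged sketch of the same defensive load-or-regenerate logic on current Python; it is simplified and drops the logging setup and the full-path validation.
```python
# Load a pickled cache, regenerating an empty one if the file is missing
# or corrupt. time.perf_counter() replaces the removed time.clock().
import os
import pickle
import time

def load_cache(file_name):
    if not os.path.exists(file_name):
        open(file_name, "w+b").close()   # create an empty cache file
        return {}
    start = time.perf_counter()
    try:
        with open(file_name, "rb") as f:
            cache = pickle.load(f)
    except (pickle.UnpicklingError, AttributeError, EOFError,
            ImportError, IndexError):
        open(file_name, "w+b").close()   # truncate the corrupt file
        return {}
    print("cache loaded in %.1f secs" % (time.perf_counter() - start))
    return cache
```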
file_cache_t.update
(self, source_file, configuration, declarations, included_files)
Update a cached record with the current key and value contents.
Update a cached record with the current key and value contents.
def update(self, source_file, configuration, declarations, included_files): """ Update a cached record with the current key and value contents. """ record = record_t( source_signature=file_signature(source_file), config_signature=configuration_signature(configuration), included_files=included_files, included_files_signature=list( map( file_signature, included_files)), declarations=declarations) # Switched over to holding full record in cache so we don't have # to keep creating records in the next method. self.__cache[record.key()] = record self.__cache[record.key()].was_hit = True self.__needs_flushed = True
[ "def", "update", "(", "self", ",", "source_file", ",", "configuration", ",", "declarations", ",", "included_files", ")", ":", "record", "=", "record_t", "(", "source_signature", "=", "file_signature", "(", "source_file", ")", ",", "config_signature", "=", "configuration_signature", "(", "configuration", ")", ",", "included_files", "=", "included_files", ",", "included_files_signature", "=", "list", "(", "map", "(", "file_signature", ",", "included_files", ")", ")", ",", "declarations", "=", "declarations", ")", "# Switched over to holding full record in cache so we don't have", "# to keep creating records in the next method.", "self", ".", "__cache", "[", "record", ".", "key", "(", ")", "]", "=", "record", "self", ".", "__cache", "[", "record", ".", "key", "(", ")", "]", ".", "was_hit", "=", "True", "self", ".", "__needs_flushed", "=", "True" ]
[ 233, 4 ]
[ 249, 35 ]
python
en
['en', 'en', 'en']
True
file_cache_t.cached_value
(self, source_file, configuration)
Attempt to look up the cached declarations for the given file and configuration. Returns None if the declarations are not found or the signature check fails.
Attempt to look up the cached declarations for the given file and configuration.
def cached_value(self, source_file, configuration): """ Attempt to look up the cached declarations for the given file and configuration. Returns None if the declarations are not found or the signature check fails. """ key = record_t.create_key(source_file, configuration) if key not in self.__cache: return None record = self.__cache[key] if self.__is_valid_signature(record): record.was_hit = True # Record cache hit return record.declarations else: # some file has been changed del self.__cache[key] return None
[ "def", "cached_value", "(", "self", ",", "source_file", ",", "configuration", ")", ":", "key", "=", "record_t", ".", "create_key", "(", "source_file", ",", "configuration", ")", "if", "key", "not", "in", "self", ".", "__cache", ":", "return", "None", "record", "=", "self", ".", "__cache", "[", "key", "]", "if", "self", ".", "__is_valid_signature", "(", "record", ")", ":", "record", ".", "was_hit", "=", "True", "# Record cache hit", "return", "record", ".", "declarations", "else", ":", "# some file has been changed", "del", "self", ".", "__cache", "[", "key", "]", "return", "None" ]
[ 251, 4 ]
[ 269, 23 ]
python
en
['en', 'error', 'th']
False
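Together, `cached_value`, `update`, and `flush` support a straightforward lookup-or-parse loop. The sketch below shows that flow; `parse` is a hypothetical callable standing in for the real declaration parser.
```python
# Lookup-or-parse flow built on the cache interface above. `cache` is any
# object implementing cached_value/update/flush; `parse` is hypothetical.
def parse_with_cache(cache, source_file, configuration, parse):
    decls = cache.cached_value(source_file, configuration)
    if decls is None:                      # miss, or a stale signature
        decls, included = parse(source_file, configuration)
        cache.update(source_file, configuration, decls, included)
        cache.flush()                      # persist the new record
    return decls
```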
eval_doc_qa_feedback
(filters: FilterRequest = None)
Return basic accuracy metrics based on the user feedback. What fraction of answers was correct? What fraction of documents was correct? You can supply filters in the request to use only a certain subset of labels. **Example:** ``` | curl --location --request POST 'http://127.0.0.1:8000/eval-doc-qa-feedback' \ | --header 'Content-Type: application/json' \ | --data-raw '{ "filters": {"document_id": ["XRR3xnEBCYVTkbTystOB"]} }'
Return basic accuracy metrics based on the user feedback. What fraction of answers was correct? What fraction of documents was correct? You can supply filters in the request to use only a certain subset of labels.
def eval_doc_qa_feedback(filters: FilterRequest = None): """ Return basic accuracy metrics based on the user feedback. What fraction of answers was correct? What fraction of documents was correct? You can supply filters in the request to use only a certain subset of labels. **Example:** ``` | curl --location --request POST 'http://127.0.0.1:8000/eval-doc-qa-feedback' \ | --header 'Content-Type: application/json' \ | --data-raw '{ "filters": {"document_id": ["XRR3xnEBCYVTkbTystOB"]} }' """ if filters: filters = filters.filters filters["origin"] = ["user-feedback"] else: filters = {"origin": ["user-feedback"]} labels = document_store.get_all_labels( index=DB_INDEX_FEEDBACK, filters=filters ) if len(labels) > 0: answer_feedback = [1 if l.is_correct_answer else 0 for l in labels] doc_feedback = [1 if l.is_correct_document else 0 for l in labels] answer_accuracy = sum(answer_feedback)/len(answer_feedback) doc_accuracy = sum(doc_feedback)/len(doc_feedback) res = {"answer_accuracy": answer_accuracy, "document_accuracy": doc_accuracy, "n_feedback": len(labels)} else: res = {"answer_accuracy": None, "document_accuracy": None, "n_feedback": 0} return res
[ "def", "eval_doc_qa_feedback", "(", "filters", ":", "FilterRequest", "=", "None", ")", ":", "if", "filters", ":", "filters", "=", "filters", ".", "filters", "filters", "[", "\"origin\"", "]", "=", "[", "\"user-feedback\"", "]", "else", ":", "filters", "=", "{", "\"origin\"", ":", "[", "\"user-feedback\"", "]", "}", "labels", "=", "document_store", ".", "get_all_labels", "(", "index", "=", "DB_INDEX_FEEDBACK", ",", "filters", "=", "filters", ")", "if", "len", "(", "labels", ")", ">", "0", ":", "answer_feedback", "=", "[", "1", "if", "l", ".", "is_correct_answer", "else", "0", "for", "l", "in", "labels", "]", "doc_feedback", "=", "[", "1", "if", "l", ".", "is_correct_document", "else", "0", "for", "l", "in", "labels", "]", "answer_accuracy", "=", "sum", "(", "answer_feedback", ")", "/", "len", "(", "answer_feedback", ")", "doc_accuracy", "=", "sum", "(", "doc_feedback", ")", "/", "len", "(", "doc_feedback", ")", "res", "=", "{", "\"answer_accuracy\"", ":", "answer_accuracy", ",", "\"document_accuracy\"", ":", "doc_accuracy", ",", "\"n_feedback\"", ":", "len", "(", "labels", ")", "}", "else", ":", "res", "=", "{", "\"answer_accuracy\"", ":", "None", ",", "\"document_accuracy\"", ":", "None", ",", "\"n_feedback\"", ":", "0", "}", "return", "res" ]
[ 84, 0 ]
[ 123, 14 ]
python
en
['en', 'error', 'th']
False
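The endpoint's accuracy arithmetic is simple enough to check in isolation: each metric is the share of labels flagged correct. A self-contained sketch with plain objects instead of document-store labels:
```python
# The accuracy computation from the endpoint above, with plain objects.
from dataclasses import dataclass
from typing import List

@dataclass
class Label:
    is_correct_answer: bool
    is_correct_document: bool

def feedback_metrics(labels: List[Label]) -> dict:
    if not labels:
        return {"answer_accuracy": None, "document_accuracy": None, "n_feedback": 0}
    answer_accuracy = sum(l.is_correct_answer for l in labels) / len(labels)
    doc_accuracy = sum(l.is_correct_document for l in labels) / len(labels)
    return {"answer_accuracy": answer_accuracy,
            "document_accuracy": doc_accuracy,
            "n_feedback": len(labels)}

print(feedback_metrics([Label(True, True), Label(False, True)]))
# {'answer_accuracy': 0.5, 'document_accuracy': 1.0, 'n_feedback': 2}
```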
export_doc_qa_feedback
(context_size: int = 2_000)
SQuAD format JSON export for question/answer pairs that were marked as "relevant". The context_size param can be used to limit response size for large documents.
SQuAD format JSON export for question/answer pairs that were marked as "relevant".
def export_doc_qa_feedback(context_size: int = 2_000): """ SQuAD format JSON export for question/answer pairs that were marked as "relevant". The context_size param can be used to limit response size for large documents. """ labels = document_store.get_all_labels( index=DB_INDEX_FEEDBACK, filters={"is_correct_answer": [True], "origin": ["user-feedback"]} ) export_data = [] for label in labels: document = document_store.get_document_by_id(label.document_id) text = document.text # the final length of context(including the answer string) is 'context_size'. # we try to add equal characters for context before and after the answer string. # if either beginning or end of text is reached, we correspondingly # append more context characters at the other end of answer string. context_to_add = int((context_size - len(label.answer)) / 2) start_pos = max(label.offset_start_in_doc - context_to_add, 0) additional_context_at_end = max(context_to_add - label.offset_start_in_doc, 0) end_pos = min(label.offset_start_in_doc + len(label.answer) + context_to_add, len(text) - 1) additional_context_at_start = max(label.offset_start_in_doc + len(label.answer) + context_to_add - len(text), 0) start_pos = max(0, start_pos - additional_context_at_start) end_pos = min(len(text) - 1, end_pos + additional_context_at_end) context_to_export = text[start_pos:end_pos] export_data.append({"paragraphs": [{"qas": label, "context": context_to_export}]}) export = {"data": export_data} return export
[ "def", "export_doc_qa_feedback", "(", "context_size", ":", "int", "=", "2_000", ")", ":", "labels", "=", "document_store", ".", "get_all_labels", "(", "index", "=", "DB_INDEX_FEEDBACK", ",", "filters", "=", "{", "\"is_correct_answer\"", ":", "[", "True", "]", ",", "\"origin\"", ":", "[", "\"user-feedback\"", "]", "}", ")", "export_data", "=", "[", "]", "for", "label", "in", "labels", ":", "document", "=", "document_store", ".", "get_document_by_id", "(", "label", ".", "document_id", ")", "text", "=", "document", ".", "text", "# the final length of context(including the answer string) is 'context_size'.", "# we try to add equal characters for context before and after the answer string.", "# if either beginning or end of text is reached, we correspondingly", "# append more context characters at the other end of answer string.", "context_to_add", "=", "int", "(", "(", "context_size", "-", "len", "(", "label", ".", "answer", ")", ")", "/", "2", ")", "start_pos", "=", "max", "(", "label", ".", "offset_start_in_doc", "-", "context_to_add", ",", "0", ")", "additional_context_at_end", "=", "max", "(", "context_to_add", "-", "label", ".", "offset_start_in_doc", ",", "0", ")", "end_pos", "=", "min", "(", "label", ".", "offset_start_in_doc", "+", "len", "(", "label", ".", "answer", ")", "+", "context_to_add", ",", "len", "(", "text", ")", "-", "1", ")", "additional_context_at_start", "=", "max", "(", "label", ".", "offset_start_in_doc", "+", "len", "(", "label", ".", "answer", ")", "+", "context_to_add", "-", "len", "(", "text", ")", ",", "0", ")", "start_pos", "=", "max", "(", "0", ",", "start_pos", "-", "additional_context_at_start", ")", "end_pos", "=", "min", "(", "len", "(", "text", ")", "-", "1", ",", "end_pos", "+", "additional_context_at_end", ")", "context_to_export", "=", "text", "[", "start_pos", ":", "end_pos", "]", "export_data", ".", "append", "(", "{", "\"paragraphs\"", ":", "[", "{", "\"qas\"", ":", "label", ",", "\"context\"", ":", "context_to_export", "}", "]", "}", ")", "export", "=", "{", "\"data\"", ":", "export_data", "}", "return", "export" ]
[ 126, 0 ]
[ 162, 17 ]
python
en
['en', 'error', 'th']
False
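The trickiest part of this export is the context window: it tries to put `(context_size - len(answer)) / 2` characters on each side of the answer, then shifts any unused budget to the other side when it hits an end of the text. The same arithmetic, isolated into a small function (illustrative, not part of the API):
```python
# Centre a window of roughly `context_size` characters on the answer span,
# moving leftover budget to the other side near the ends of the text.
def context_window(text_len, answer_start, answer_len, context_size):
    context_to_add = (context_size - answer_len) // 2
    start = max(answer_start - context_to_add, 0)
    extra_at_end = max(context_to_add - answer_start, 0)
    end = min(answer_start + answer_len + context_to_add, text_len - 1)
    extra_at_start = max(answer_start + answer_len + context_to_add - text_len, 0)
    start = max(0, start - extra_at_start)
    end = min(text_len - 1, end + extra_at_end)
    return start, end

# Answer near the start of a 100-char text: the window extends rightwards.
print(context_window(text_len=100, answer_start=5, answer_len=10, context_size=40))
```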
export_faq_feedback
()
Export feedback for faq-qa in JSON format.
Export feedback for faq-qa in JSON format.
def export_faq_feedback(): """ Export feedback for faq-qa in JSON format. """ labels = document_store.get_all_labels(index=DB_INDEX_FEEDBACK, filters={"origin": ["user-feedback-faq"]}) export_data = [] for label in labels: document = document_store.get_document_by_id(label.document_id) feedback = { "question": document.question, "query": label.question, "is_correct_answer": label.is_correct_answer, "is_correct_document": label.is_correct_answer, } export_data.append(feedback) export = {"data": export_data} return export
[ "def", "export_faq_feedback", "(", ")", ":", "labels", "=", "document_store", ".", "get_all_labels", "(", "index", "=", "DB_INDEX_FEEDBACK", ",", "filters", "=", "{", "\"origin\"", ":", "[", "\"user-feedback-faq\"", "]", "}", ")", "export_data", "=", "[", "]", "for", "label", "in", "labels", ":", "document", "=", "document_store", ".", "get_document_by_id", "(", "label", ".", "document_id", ")", "feedback", "=", "{", "\"question\"", ":", "document", ".", "question", ",", "\"query\"", ":", "label", ".", "question", ",", "\"is_correct_answer\"", ":", "label", ".", "is_correct_answer", ",", "\"is_correct_document\"", ":", "label", ".", "is_correct_answer", ",", "}", "export_data", ".", "append", "(", "feedback", ")", "export", "=", "{", "\"data\"", ":", "export_data", "}", "return", "export" ]
[ 166, 0 ]
[ 186, 17 ]
python
en
['en', 'error', 'th']
False
initialized_sqlite_project
( mock_webbrowser, caplog, monkeypatch, tmp_path_factory, titanic_sqlite_db_file, sa )
This is a project initialized through the CLI.
This is a project initialized through the CLI.
def initialized_sqlite_project( mock_webbrowser, caplog, monkeypatch, tmp_path_factory, titanic_sqlite_db_file, sa ): """This is a project initialized through the CLI.""" project_dir = str(tmp_path_factory.mktemp("my_rad_project")) engine = sa.create_engine( "sqlite:///{}".format(titanic_sqlite_db_file), pool_recycle=3600 ) inspector = sa.inspect(engine) # get the default schema and table for testing schemas = inspector.get_schema_names() default_schema = schemas[0] tables = [ table_name for table_name in inspector.get_table_names(schema=default_schema) ] default_table = tables[0] runner = CliRunner(mix_stderr=False) monkeypatch.chdir(project_dir) result = runner.invoke( cli, ["--v3-api", "init"], input=f"\n\n2\n6\ntitanic\n{engine.url}\n\n\n1\n{default_schema}\n{default_table}\nwarning\n\n\n\n", catch_exceptions=False, ) assert result.exit_code == 0 assert mock_webbrowser.call_count == 1 assert ( f"{project_dir}/great_expectations/uncommitted/data_docs/local_site/validations/warning/" in mock_webbrowser.call_args[0][0] ) assert_no_logging_messages_or_tracebacks(caplog, result) context = DataContext(os.path.join(project_dir, DataContext.GE_DIR)) assert isinstance(context, DataContext) assert len(context.list_datasources()) == 1 assert context.list_datasources() == [ { "class_name": "SqlAlchemyDatasource", "name": "titanic", "module_name": "great_expectations.datasource", "credentials": {"url": str(engine.url)}, "data_asset_type": { "class_name": "SqlAlchemyDataset", "module_name": "great_expectations.dataset", }, } ] return project_dir
[ "def", "initialized_sqlite_project", "(", "mock_webbrowser", ",", "caplog", ",", "monkeypatch", ",", "tmp_path_factory", ",", "titanic_sqlite_db_file", ",", "sa", ")", ":", "project_dir", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"my_rad_project\"", ")", ")", "engine", "=", "sa", ".", "create_engine", "(", "\"sqlite:///{}\"", ".", "format", "(", "titanic_sqlite_db_file", ")", ",", "pool_recycle", "=", "3600", ")", "inspector", "=", "sa", ".", "inspect", "(", "engine", ")", "# get the default schema and table for testing", "schemas", "=", "inspector", ".", "get_schema_names", "(", ")", "default_schema", "=", "schemas", "[", "0", "]", "tables", "=", "[", "table_name", "for", "table_name", "in", "inspector", ".", "get_table_names", "(", "schema", "=", "default_schema", ")", "]", "default_table", "=", "tables", "[", "0", "]", "runner", "=", "CliRunner", "(", "mix_stderr", "=", "False", ")", "monkeypatch", ".", "chdir", "(", "project_dir", ")", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"--v3-api\"", ",", "\"init\"", "]", ",", "input", "=", "f\"\\n\\n2\\n6\\ntitanic\\n{engine.url}\\n\\n\\n1\\n{default_schema}\\n{default_table}\\nwarning\\n\\n\\n\\n\"", ",", "catch_exceptions", "=", "False", ",", ")", "assert", "result", ".", "exit_code", "==", "0", "assert", "mock_webbrowser", ".", "call_count", "==", "1", "assert", "(", "f\"{project_dir}/great_expectations/uncommitted/data_docs/local_site/validations/warning/\"", "in", "mock_webbrowser", ".", "call_args", "[", "0", "]", "[", "0", "]", ")", "assert_no_logging_messages_or_tracebacks", "(", "caplog", ",", "result", ")", "context", "=", "DataContext", "(", "os", ".", "path", ".", "join", "(", "project_dir", ",", "DataContext", ".", "GE_DIR", ")", ")", "assert", "isinstance", "(", "context", ",", "DataContext", ")", "assert", "len", "(", "context", ".", "list_datasources", "(", ")", ")", "==", "1", "assert", "context", ".", "list_datasources", "(", ")", "==", "[", "{", "\"class_name\"", ":", "\"SqlAlchemyDatasource\"", ",", "\"name\"", ":", "\"titanic\"", ",", "\"module_name\"", ":", "\"great_expectations.datasource\"", ",", "\"credentials\"", ":", "{", "\"url\"", ":", "str", "(", "engine", ".", "url", ")", "}", ",", "\"data_asset_type\"", ":", "{", "\"class_name\"", ":", "\"SqlAlchemyDataset\"", ",", "\"module_name\"", ":", "\"great_expectations.dataset\"", ",", "}", ",", "}", "]", "return", "project_dir" ]
[ 424, 0 ]
[ 477, 22 ]
python
en
['en', 'en', 'en']
True
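The fixture's core move is Click's `CliRunner`: invoke the CLI in-process with scripted stdin and assert on the exit code and output. A minimal sketch of that pattern with a toy command (`greet` is hypothetical, not part of the project under test):
```python
# Driving a Click command in a test with scripted stdin.
import click
from click.testing import CliRunner

@click.command()
@click.option("--name", prompt="Your name")  # prompts when --name is omitted
def greet(name):
    click.echo(f"hello {name}")

runner = CliRunner()
# The input string plays the role of the long "\n\n2\n6\n..." answer script
# used by the fixture above.
result = runner.invoke(greet, [], input="ada\n", catch_exceptions=False)
assert result.exit_code == 0
assert "hello ada" in result.output
```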
SmokeTestCase.test_xxx
(self)
XXX identity
XXX identity
def test_xxx(self): """XXX identity""" pass
[ "def", "test_xxx", "(", "self", ")", ":", "pass" ]
[ 3, 4 ]
[ 5, 12 ]
python
en
['en', 'pl', 'en']
False
bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context
( tmp_path_factory, monkeypatch, )
This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows.
This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows.
def bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context( tmp_path_factory, monkeypatch, ) -> DataContext: """ This fixture generates three years' worth (36 months; i.e., 36 batches) of taxi trip data with the number of rows of a batch sampled from a normal distribution with the mean of 5,000 rows and the standard deviation of 1,000 rows. """ # Re-enable GE_USAGE_STATS monkeypatch.delenv("GE_USAGE_STATS") project_path: str = str(tmp_path_factory.mktemp("taxi_data_context")) context_path: str = os.path.join(project_path, "great_expectations") os.makedirs(os.path.join(context_path, "expectations"), exist_ok=True) data_path: str = os.path.join(context_path, "..", "data") os.makedirs(os.path.join(data_path), exist_ok=True) shutil.copy( file_relative_path( __file__, os.path.join( "..", "integration", "fixtures", "yellow_trip_data_pandas_fixture", "great_expectations", "great_expectations.yml", ), ), str(os.path.join(context_path, "great_expectations.yml")), ) base_directory: str = file_relative_path( __file__, os.path.join( "..", "test_sets", "taxi_yellow_trip_data_samples", ), ) file_name_list: List[str] = get_filesystem_one_level_directory_glob_path_list( base_directory_path=base_directory, glob_directive="*.csv" ) file_name_list = sorted(file_name_list) num_files: int = len(file_name_list) rnd_num_sample: np.float64 output_file_lenths: List[int] = [ round(rnd_num_sample) for rnd_num_sample in np.random.normal(loc=5.0e3, scale=1.0e3, size=num_files) ] idx: int file_name: str output_file_name_length_map: Dict[str, int] = { file_name_list[idx]: output_file_lenths[idx] for idx, file_name in enumerate(file_name_list) } csv_source_path: str df: pd.DataFrame for file_name in file_name_list: csv_source_path = os.path.join(base_directory, file_name) df = pd.read_csv(filepath_or_buffer=csv_source_path) df = df.sample( n=output_file_name_length_map[file_name], replace=False, random_state=1 ) df.to_csv( path_or_buf=os.path.join(context_path, "..", "data", file_name), index=False ) context: DataContext = DataContext(context_root_dir=context_path) assert context.root_directory == context_path return context
[ "def", "bobster_columnar_table_multi_batch_normal_mean_5000_stdev_1000_data_context", "(", "tmp_path_factory", ",", "monkeypatch", ",", ")", "->", "DataContext", ":", "# Re-enable GE_USAGE_STATS", "monkeypatch", ".", "delenv", "(", "\"GE_USAGE_STATS\"", ")", "project_path", ":", "str", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"taxi_data_context\"", ")", ")", "context_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "project_path", ",", "\"great_expectations\"", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "context_path", ",", "\"expectations\"", ")", ",", "exist_ok", "=", "True", ")", "data_path", ":", "str", "=", "os", ".", "path", ".", "join", "(", "context_path", ",", "\"..\"", ",", "\"data\"", ")", "os", ".", "makedirs", "(", "os", ".", "path", ".", "join", "(", "data_path", ")", ",", "exist_ok", "=", "True", ")", "shutil", ".", "copy", "(", "file_relative_path", "(", "__file__", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"integration\"", ",", "\"fixtures\"", ",", "\"yellow_trip_data_pandas_fixture\"", ",", "\"great_expectations\"", ",", "\"great_expectations.yml\"", ",", ")", ",", ")", ",", "str", "(", "os", ".", "path", ".", "join", "(", "context_path", ",", "\"great_expectations.yml\"", ")", ")", ",", ")", "base_directory", ":", "str", "=", "file_relative_path", "(", "__file__", ",", "os", ".", "path", ".", "join", "(", "\"..\"", ",", "\"test_sets\"", ",", "\"taxi_yellow_trip_data_samples\"", ",", ")", ",", ")", "file_name_list", ":", "List", "[", "str", "]", "=", "get_filesystem_one_level_directory_glob_path_list", "(", "base_directory_path", "=", "base_directory", ",", "glob_directive", "=", "\"*.csv\"", ")", "file_name_list", "=", "sorted", "(", "file_name_list", ")", "num_files", ":", "int", "=", "len", "(", "file_name_list", ")", "rnd_num_sample", ":", "np", ".", "float64", "output_file_lenths", ":", "List", "[", "int", "]", "=", "[", "round", "(", "rnd_num_sample", ")", "for", "rnd_num_sample", "in", "np", ".", "random", ".", "normal", "(", "loc", "=", "5.0e3", ",", "scale", "=", "1.0e3", ",", "size", "=", "num_files", ")", "]", "idx", ":", "int", "file_name", ":", "str", "output_file_name_length_map", ":", "Dict", "[", "str", ",", "int", "]", "=", "{", "file_name_list", "[", "idx", "]", ":", "output_file_lenths", "[", "idx", "]", "for", "idx", ",", "file_name", "in", "enumerate", "(", "file_name_list", ")", "}", "csv_source_path", ":", "str", "df", ":", "pd", ".", "DataFrame", "for", "file_name", "in", "file_name_list", ":", "csv_source_path", "=", "os", ".", "path", ".", "join", "(", "base_directory", ",", "file_name", ")", "df", "=", "pd", ".", "read_csv", "(", "filepath_or_buffer", "=", "csv_source_path", ")", "df", "=", "df", ".", "sample", "(", "n", "=", "output_file_name_length_map", "[", "file_name", "]", ",", "replace", "=", "False", ",", "random_state", "=", "1", ")", "df", ".", "to_csv", "(", "path_or_buf", "=", "os", ".", "path", ".", "join", "(", "context_path", ",", "\"..\"", ",", "\"data\"", ",", "file_name", ")", ",", "index", "=", "False", ")", "context", ":", "DataContext", "=", "DataContext", "(", "context_root_dir", "=", "context_path", ")", "assert", "context", ".", "root_directory", "==", "context_path", "return", "context" ]
[ 217, 0 ]
[ 290, 18 ]
python
en
['en', 'error', 'th']
False
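The fixture's batch-size trick is the sampling step: draw one target row count per file from a normal distribution with mean 5,000 and standard deviation 1,000, then downsample each CSV to that length. The draw itself, in isolation (the seed and file names below are made up for the example):
```python
# One target length per file, sampled from N(5000, 1000) and rounded.
import numpy as np

file_names = ["2019-01.csv", "2019-02.csv", "2019-03.csv"]  # hypothetical
rng = np.random.default_rng(seed=1)  # seeded here only for reproducibility
lengths = [round(x) for x in rng.normal(loc=5.0e3, scale=1.0e3, size=len(file_names))]
target_lengths = dict(zip(file_names, lengths))
print(target_lengths)  # e.g. {'2019-01.csv': 5345, ...}
```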
multi_part_name_parameter_container
()
$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format $parameter.date_strings.yyyy_mm_dd_date_format $parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format $parameter.date_strings.mm_yyyy_dd_date_format $parameter.date_strings.tolerances.max_abs_error_time_milliseconds $parameter.date_strings.tolerances.max_num_conversion_attempts $parameter.tolerances.mostly $mean
$parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format $parameter.date_strings.yyyy_mm_dd_date_format $parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format $parameter.date_strings.mm_yyyy_dd_date_format $parameter.date_strings.tolerances.max_abs_error_time_milliseconds $parameter.date_strings.tolerances.max_num_conversion_attempts $parameter.tolerances.mostly $mean
def multi_part_name_parameter_container(): """ $parameter.date_strings.yyyy_mm_dd_hh_mm_ss_tz_date_format $parameter.date_strings.yyyy_mm_dd_date_format $parameter.date_strings.mm_yyyy_dd_hh_mm_ss_tz_date_format $parameter.date_strings.mm_yyyy_dd_date_format $parameter.date_strings.tolerances.max_abs_error_time_milliseconds $parameter.date_strings.tolerances.max_num_conversion_attempts $parameter.tolerances.mostly $mean """ root_mean_node: ParameterNode = ParameterNode( { "mean": 6.5e-1, } ) financial_tolerances_parameter_node: ParameterNode = ParameterNode( { "usd": 1.0, } ) tolerances_parameter_node: ParameterNode = ParameterNode( { "mostly": 9.1e-1, "financial": financial_tolerances_parameter_node, } ) date_strings_tolerances_parameter_node: ParameterNode = ParameterNode( { "max_abs_error_time_milliseconds": 100, "max_num_conversion_attempts": 5, } ) date_strings_parameter_node: ParameterNode = ParameterNode( { "yyyy_mm_dd_hh_mm_ss_tz_date_format": ParameterNode( { "value": "%Y-%m-%d %H:%M:%S %Z", "details": ParameterNode( { "confidence": 7.8e-1, }, ), } ), "yyyy_mm_dd_date_format": ParameterNode( { "value": "%Y-%m-%d", "details": ParameterNode( { "confidence": 7.8e-1, }, ), } ), "mm_yyyy_dd_hh_mm_ss_tz_date_format": ParameterNode( { "value": "%m-%Y-%d %H:%M:%S %Z", "details": ParameterNode( { "confidence": 7.8e-1, }, ), } ), "mm_yyyy_dd_date_format": ParameterNode( { "value": "%m-%Y-%d", "details": ParameterNode( { "confidence": 7.8e-1, }, ), } ), "tolerances": date_strings_tolerances_parameter_node, } ) parameter_multi_part_name_parameter_node: ParameterNode = ParameterNode( { "date_strings": date_strings_parameter_node, "tolerances": tolerances_parameter_node, } ) root_parameter_node: ParameterNode = ParameterNode( { "parameter": parameter_multi_part_name_parameter_node, } ) return ParameterContainer( parameter_nodes={ "parameter": root_parameter_node, "mean": root_mean_node, } )
[ "def", "multi_part_name_parameter_container", "(", ")", ":", "root_mean_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"mean\"", ":", "6.5e-1", ",", "}", ")", "financial_tolerances_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"usd\"", ":", "1.0", ",", "}", ")", "tolerances_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"mostly\"", ":", "9.1e-1", ",", "\"financial\"", ":", "financial_tolerances_parameter_node", ",", "}", ")", "date_strings_tolerances_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"max_abs_error_time_milliseconds\"", ":", "100", ",", "\"max_num_conversion_attempts\"", ":", "5", ",", "}", ")", "date_strings_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"yyyy_mm_dd_hh_mm_ss_tz_date_format\"", ":", "ParameterNode", "(", "{", "\"value\"", ":", "\"%Y-%m-%d %H:%M:%S %Z\"", ",", "\"details\"", ":", "ParameterNode", "(", "{", "\"confidence\"", ":", "7.8e-1", ",", "}", ",", ")", ",", "}", ")", ",", "\"yyyy_mm_dd_date_format\"", ":", "ParameterNode", "(", "{", "\"value\"", ":", "\"%Y-%m-%d\"", ",", "\"details\"", ":", "ParameterNode", "(", "{", "\"confidence\"", ":", "7.8e-1", ",", "}", ",", ")", ",", "}", ")", ",", "\"mm_yyyy_dd_hh_mm_ss_tz_date_format\"", ":", "ParameterNode", "(", "{", "\"value\"", ":", "\"%m-%Y-%d %H:%M:%S %Z\"", ",", "\"details\"", ":", "ParameterNode", "(", "{", "\"confidence\"", ":", "7.8e-1", ",", "}", ",", ")", ",", "}", ")", ",", "\"mm_yyyy_dd_date_format\"", ":", "ParameterNode", "(", "{", "\"value\"", ":", "\"%m-%Y-%d\"", ",", "\"details\"", ":", "ParameterNode", "(", "{", "\"confidence\"", ":", "7.8e-1", ",", "}", ",", ")", ",", "}", ")", ",", "\"tolerances\"", ":", "date_strings_tolerances_parameter_node", ",", "}", ")", "parameter_multi_part_name_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"date_strings\"", ":", "date_strings_parameter_node", ",", "\"tolerances\"", ":", "tolerances_parameter_node", ",", "}", ")", "root_parameter_node", ":", "ParameterNode", "=", "ParameterNode", "(", "{", "\"parameter\"", ":", "parameter_multi_part_name_parameter_node", ",", "}", ")", "return", "ParameterContainer", "(", "parameter_nodes", "=", "{", "\"parameter\"", ":", "root_parameter_node", ",", "\"mean\"", ":", "root_mean_node", ",", "}", ")" ]
[ 396, 0 ]
[ 490, 5 ]
python
en
['en', 'error', 'th']
False
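The fixture builds a tree of `ParameterNode` objects so that the fully qualified names listed in the docstring ("$parameter.date_strings...") resolve by walking the tree one dotted segment at a time. A plain-dict sketch of that resolution (illustrative; not the library's own lookup code):
```python
# Resolve "$a.b.c"-style names against nested dict-like nodes.
def resolve(container, dotted_name):
    node = container
    for part in dotted_name.lstrip("$").split("."):
        node = node[part]
    return node

container = {
    "parameter": {
        "date_strings": {
            "tolerances": {"max_num_conversion_attempts": 5},
        },
        "tolerances": {"mostly": 9.1e-1},
    },
    "mean": 6.5e-1,
}
print(resolve(container, "$parameter.date_strings.tolerances.max_num_conversion_attempts"))  # 5
print(resolve(container, "$mean"))  # 0.65
```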
DebuggingRegexLexer.get_tokens_unprocessed
(self, text, stack=('root',))
Split ``text`` into (tokentype, text) pairs. ``stack`` is the initial stack (default: ``['root']``)
Split ``text`` into (tokentype, text) pairs.
def get_tokens_unprocessed(self, text, stack=('root',)): """ Split ``text`` into (tokentype, text) pairs. ``stack`` is the initial stack (default: ``['root']``) """ tokendefs = self._tokens self.ctx = ctx = LexerContext(text, 0) ctx.stack = list(stack) statetokens = tokendefs[ctx.stack[-1]] while 1: for rexmatch, action, new_state in statetokens: self.m = m = rexmatch(text, ctx.pos, ctx.end) if m: if action is not None: if type(action) is _TokenType: yield ctx.pos, action, m.group() ctx.pos = m.end() else: if not isinstance(self, ExtendedRegexLexer): for item in action(self, m): yield item ctx.pos = m.end() else: for item in action(self, m, ctx): yield item if not new_state: # altered the state stack? statetokens = tokendefs[ctx.stack[-1]] if new_state is not None: # state transition if isinstance(new_state, tuple): for state in new_state: if state == '#pop': ctx.stack.pop() elif state == '#push': ctx.stack.append(ctx.stack[-1]) else: ctx.stack.append(state) elif isinstance(new_state, int): # pop del ctx.stack[new_state:] elif new_state == '#push': ctx.stack.append(ctx.stack[-1]) else: assert False, 'wrong state def: %r' % new_state statetokens = tokendefs[ctx.stack[-1]] break else: try: if ctx.pos >= ctx.end: break if text[ctx.pos] == '\n': # at EOL, reset state to 'root' ctx.stack = ['root'] statetokens = tokendefs['root'] yield ctx.pos, Text, u'\n' ctx.pos += 1 continue yield ctx.pos, Error, text[ctx.pos] ctx.pos += 1 except IndexError: break
[ "def", "get_tokens_unprocessed", "(", "self", ",", "text", ",", "stack", "=", "(", "'root'", ",", ")", ")", ":", "tokendefs", "=", "self", ".", "_tokens", "self", ".", "ctx", "=", "ctx", "=", "LexerContext", "(", "text", ",", "0", ")", "ctx", ".", "stack", "=", "list", "(", "stack", ")", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "while", "1", ":", "for", "rexmatch", ",", "action", ",", "new_state", "in", "statetokens", ":", "self", ".", "m", "=", "m", "=", "rexmatch", "(", "text", ",", "ctx", ".", "pos", ",", "ctx", ".", "end", ")", "if", "m", ":", "if", "action", "is", "not", "None", ":", "if", "type", "(", "action", ")", "is", "_TokenType", ":", "yield", "ctx", ".", "pos", ",", "action", ",", "m", ".", "group", "(", ")", "ctx", ".", "pos", "=", "m", ".", "end", "(", ")", "else", ":", "if", "not", "isinstance", "(", "self", ",", "ExtendedRegexLexer", ")", ":", "for", "item", "in", "action", "(", "self", ",", "m", ")", ":", "yield", "item", "ctx", ".", "pos", "=", "m", ".", "end", "(", ")", "else", ":", "for", "item", "in", "action", "(", "self", ",", "m", ",", "ctx", ")", ":", "yield", "item", "if", "not", "new_state", ":", "# altered the state stack?", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "if", "new_state", "is", "not", "None", ":", "# state transition", "if", "isinstance", "(", "new_state", ",", "tuple", ")", ":", "for", "state", "in", "new_state", ":", "if", "state", "==", "'#pop'", ":", "ctx", ".", "stack", ".", "pop", "(", ")", "elif", "state", "==", "'#push'", ":", "ctx", ".", "stack", ".", "append", "(", "ctx", ".", "stack", "[", "-", "1", "]", ")", "else", ":", "ctx", ".", "stack", ".", "append", "(", "state", ")", "elif", "isinstance", "(", "new_state", ",", "int", ")", ":", "# pop", "del", "ctx", ".", "stack", "[", "new_state", ":", "]", "elif", "new_state", "==", "'#push'", ":", "ctx", ".", "stack", ".", "append", "(", "ctx", ".", "stack", "[", "-", "1", "]", ")", "else", ":", "assert", "False", ",", "'wrong state def: %r'", "%", "new_state", "statetokens", "=", "tokendefs", "[", "ctx", ".", "stack", "[", "-", "1", "]", "]", "break", "else", ":", "try", ":", "if", "ctx", ".", "pos", ">=", "ctx", ".", "end", ":", "break", "if", "text", "[", "ctx", ".", "pos", "]", "==", "'\\n'", ":", "# at EOL, reset state to 'root'", "ctx", ".", "stack", "=", "[", "'root'", "]", "statetokens", "=", "tokendefs", "[", "'root'", "]", "yield", "ctx", ".", "pos", ",", "Text", ",", "u'\\n'", "ctx", ".", "pos", "+=", "1", "continue", "yield", "ctx", ".", "pos", ",", "Error", ",", "text", "[", "ctx", ".", "pos", "]", "ctx", ".", "pos", "+=", "1", "except", "IndexError", ":", "break" ]
[ 36, 4 ]
[ 98, 25 ]
python
en
['en', 'error', 'th']
False
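Stripped of the Pygments-style machinery, `get_tokens_unprocessed` is a state-stack scanner: try each rule of the current state at the current position, emit a token on a match, and push or pop states as the rule dictates. A minimal self-contained sketch of that loop:
```python
# A tiny state-stack tokenizer in the spirit of the method above.
import re

RULES = {
    "root": [
        (re.compile(r'"'), "Punctuation", "string"),   # push "string"
        (re.compile(r"\s+"), "Whitespace", None),
        (re.compile(r"\w+"), "Name", None),
    ],
    "string": [
        (re.compile(r'"'), "Punctuation", "#pop"),
        (re.compile(r'[^"]+'), "String", None),
    ],
}

def tokenize(text):
    stack, pos = ["root"], 0
    while pos < len(text):
        for rex, tokentype, new_state in RULES[stack[-1]]:
            m = rex.match(text, pos)
            if m:
                yield pos, tokentype, m.group()
                pos = m.end()
                if new_state == "#pop":
                    stack.pop()
                elif new_state is not None:
                    stack.append(new_state)
                break
        else:
            yield pos, "Error", text[pos]  # no rule matched: emit one char
            pos += 1

print(list(tokenize('say "hi" now')))
```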
open_signal_receiver
(*signals)
A context manager for catching signals. Entering this context manager starts listening for the given signals and returns an async iterator; exiting the context manager stops listening. The async iterator blocks until a signal arrives, and then yields it. Note that if you leave the ``with`` block while the iterator has unextracted signals still pending inside it, then they will be re-delivered using Python's regular signal handling logic. This avoids a race condition when a signal arrives just before we exit the ``with`` block. Args: signals: the signals to listen for. Raises: TypeError: if no signals were provided. RuntimeError: if you try to use this anywhere except Python's main thread. (This is a Python limitation.) Example: A common convention for Unix daemons is that they should reload their configuration when they receive a ``SIGHUP``. Here's a sketch of what that might look like using :func:`open_signal_receiver`:: with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter: async for signum in signal_aiter: assert signum == signal.SIGHUP reload_configuration()
A context manager for catching signals.
def open_signal_receiver(*signals): """A context manager for catching signals. Entering this context manager starts listening for the given signals and returns an async iterator; exiting the context manager stops listening. The async iterator blocks until a signal arrives, and then yields it. Note that if you leave the ``with`` block while the iterator has unextracted signals still pending inside it, then they will be re-delivered using Python's regular signal handling logic. This avoids a race condition when a signal arrives just before we exit the ``with`` block. Args: signals: the signals to listen for. Raises: TypeError: if no signals were provided. RuntimeError: if you try to use this anywhere except Python's main thread. (This is a Python limitation.) Example: A common convention for Unix daemons is that they should reload their configuration when they receive a ``SIGHUP``. Here's a sketch of what that might look like using :func:`open_signal_receiver`:: with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter: async for signum in signal_aiter: assert signum == signal.SIGHUP reload_configuration() """ if not signals: raise TypeError("No signals were provided") if not is_main_thread(): raise RuntimeError( "Sorry, open_signal_receiver is only possible when running in " "Python interpreter's main thread" ) token = trio.lowlevel.current_trio_token() queue = SignalReceiver() def handler(signum, _): token.run_sync_soon(queue._add, signum, idempotent=True) try: with _signal_handler(signals, handler): yield queue finally: queue._redeliver_remaining()
[ "def", "open_signal_receiver", "(", "*", "signals", ")", ":", "if", "not", "signals", ":", "raise", "TypeError", "(", "\"No signals were provided\"", ")", "if", "not", "is_main_thread", "(", ")", ":", "raise", "RuntimeError", "(", "\"Sorry, open_signal_receiver is only possible when running in \"", "\"Python interpreter's main thread\"", ")", "token", "=", "trio", ".", "lowlevel", ".", "current_trio_token", "(", ")", "queue", "=", "SignalReceiver", "(", ")", "def", "handler", "(", "signum", ",", "_", ")", ":", "token", ".", "run_sync_soon", "(", "queue", ".", "_add", ",", "signum", ",", "idempotent", "=", "True", ")", "try", ":", "with", "_signal_handler", "(", "signals", ",", "handler", ")", ":", "yield", "queue", "finally", ":", "queue", ".", "_redeliver_remaining", "(", ")" ]
[ 113, 0 ]
[ 166, 36 ]
python
en
['en', 'en', 'en']
True
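A complete, runnable version of the docstring's Unix-only example; the reload function is a placeholder, and SIGINT is added so the loop can be stopped from the keyboard.
```python
# Reload configuration on SIGHUP; exit cleanly on SIGINT (Unix only).
import signal

import trio

def reload_configuration():
    print("reloading configuration")  # placeholder for real work

async def main():
    with trio.open_signal_receiver(signal.SIGHUP, signal.SIGINT) as signal_aiter:
        async for signum in signal_aiter:
            if signum == signal.SIGHUP:
                reload_configuration()
            else:  # SIGINT: leave the with block and exit
                break

trio.run(main)
```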
test_BasicDatasetProfiler_null_column
()
The profiler should determine that null columns are of null cardinality and of null type, and it should not generate expectations specific to types and cardinality categories. We verify this by running the basic profiler on a Pandas dataset with an empty column and asserting the number of successful results for the empty columns.
The profiler should determine that null columns are of null cardinality and of null type, and it should not generate expectations specific to types and cardinality categories.
def test_BasicDatasetProfiler_null_column(): """ The profiler should determine that null columns are of null cardinality and of null type, and it should not generate expectations specific to types and cardinality categories. We verify this by running the basic profiler on a Pandas dataset with an empty column and asserting the number of successful results for the empty columns. """ toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]}) assert ( len(toy_dataset.get_expectation_suite(suppress_warnings=True).expectations) == 0 ) expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset) # TODO: assert set - specific expectations assert ( len( [ result for result in evr_config["results"] if result.expectation_config["kwargs"].get("column") == "y" and result.success ] ) == 4 ) assert len( [ result for result in evr_config["results"] if result.expectation_config["kwargs"].get("column") == "y" and result.success ] ) < len( [ result for result in evr_config["results"] if result.expectation_config["kwargs"].get("column") == "x" and result.success ] )
[ "def", "test_BasicDatasetProfiler_null_column", "(", ")", ":", "toy_dataset", "=", "PandasDataset", "(", "{", "\"x\"", ":", "[", "1", ",", "2", ",", "3", "]", ",", "\"y\"", ":", "[", "None", ",", "None", ",", "None", "]", "}", ")", "assert", "(", "len", "(", "toy_dataset", ".", "get_expectation_suite", "(", "suppress_warnings", "=", "True", ")", ".", "expectations", ")", "==", "0", ")", "expectations_config", ",", "evr_config", "=", "BasicDatasetProfiler", ".", "profile", "(", "toy_dataset", ")", "# TODO: assert set - specific expectations", "assert", "(", "len", "(", "[", "result", "for", "result", "in", "evr_config", "[", "\"results\"", "]", "if", "result", ".", "expectation_config", "[", "\"kwargs\"", "]", ".", "get", "(", "\"column\"", ")", "==", "\"y\"", "and", "result", ".", "success", "]", ")", "==", "4", ")", "assert", "len", "(", "[", "result", "for", "result", "in", "evr_config", "[", "\"results\"", "]", "if", "result", ".", "expectation_config", "[", "\"kwargs\"", "]", ".", "get", "(", "\"column\"", ")", "==", "\"y\"", "and", "result", ".", "success", "]", ")", "<", "len", "(", "[", "result", "for", "result", "in", "evr_config", "[", "\"results\"", "]", "if", "result", ".", "expectation_config", "[", "\"kwargs\"", "]", ".", "get", "(", "\"column\"", ")", "==", "\"x\"", "and", "result", ".", "success", "]", ")" ]
[ 85, 0 ]
[ 127, 5 ]
python
en
['en', 'error', 'th']
False
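The call pattern these profiler tests exercise can also be run standalone: `BasicDatasetProfiler.profile` returns an expectation suite plus a validation result, and the suite can be filtered by column. A short sketch; the import paths assume the classic Great Expectations layout used by these tests.
```python
# Profile a small in-memory dataset and list the expectations generated
# for the all-null column.
from great_expectations.dataset import PandasDataset
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler

toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]})
expectation_suite, validation_result = BasicDatasetProfiler.profile(toy_dataset)

for expectation in expectation_suite.expectations:
    if expectation.kwargs.get("column") == "y":
        print(expectation.expectation_type)
```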
test_BasicDatasetProfiler_partially_null_column
(dataset)
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture. "nulls" is the partially null column in the fixture dataset
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
def test_BasicDatasetProfiler_partially_null_column(dataset): """ Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture. "nulls" is the partially null column in the fixture dataset """ expectations_config, evr_config = BasicDatasetProfiler.profile(dataset) assert { "expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique", } == { expectation.expectation_type for expectation in expectations_config.expectations if expectation.kwargs.get("column") == "nulls" }
[ "def", "test_BasicDatasetProfiler_partially_null_column", "(", "dataset", ")", ":", "expectations_config", ",", "evr_config", "=", "BasicDatasetProfiler", ".", "profile", "(", "dataset", ")", "assert", "{", "\"expect_column_to_exist\"", ",", "\"expect_column_values_to_be_in_type_list\"", ",", "\"expect_column_unique_value_count_to_be_between\"", ",", "\"expect_column_proportion_of_unique_values_to_be_between\"", ",", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_in_set\"", ",", "\"expect_column_values_to_be_unique\"", ",", "}", "==", "{", "expectation", ".", "expectation_type", "for", "expectation", "in", "expectations_config", ".", "expectations", "if", "expectation", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "==", "\"nulls\"", "}" ]
[ 130, 0 ]
[ 152, 5 ]
python
en
['en', 'error', 'th']
False
test_BasicDatasetProfiler_non_numeric_low_cardinality
(non_numeric_low_card_dataset)
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset): """ Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture. """ expectations_config, evr_config = BasicDatasetProfiler.profile( non_numeric_low_card_dataset ) assert { "expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_distinct_values_to_be_in_set", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex", } == { expectation.expectation_type for expectation in expectations_config.expectations if expectation.kwargs.get("column") == "lowcardnonnum" }
[ "def", "test_BasicDatasetProfiler_non_numeric_low_cardinality", "(", "non_numeric_low_card_dataset", ")", ":", "expectations_config", ",", "evr_config", "=", "BasicDatasetProfiler", ".", "profile", "(", "non_numeric_low_card_dataset", ")", "assert", "{", "\"expect_column_to_exist\"", ",", "\"expect_column_values_to_be_in_type_list\"", ",", "\"expect_column_unique_value_count_to_be_between\"", ",", "\"expect_column_distinct_values_to_be_in_set\"", ",", "\"expect_column_proportion_of_unique_values_to_be_between\"", ",", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_in_set\"", ",", "\"expect_column_values_to_not_match_regex\"", ",", "}", "==", "{", "expectation", ".", "expectation_type", "for", "expectation", "in", "expectations_config", ".", "expectations", "if", "expectation", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "==", "\"lowcardnonnum\"", "}" ]
[ 155, 0 ]
[ 179, 5 ]
python
en
['en', 'error', 'th']
False
test_BasicDatasetProfiler_non_numeric_high_cardinality
( non_numeric_high_card_dataset, )
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
def test_BasicDatasetProfiler_non_numeric_high_cardinality( non_numeric_high_card_dataset, ): """ Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality non numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture. """ expectations_config, evr_config = BasicDatasetProfiler.profile( non_numeric_high_card_dataset ) assert { "expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex", } == { expectation.expectation_type for expectation in expectations_config.expectations if expectation.kwargs.get("column") == "highcardnonnum" }
[ "def", "test_BasicDatasetProfiler_non_numeric_high_cardinality", "(", "non_numeric_high_card_dataset", ",", ")", ":", "expectations_config", ",", "evr_config", "=", "BasicDatasetProfiler", ".", "profile", "(", "non_numeric_high_card_dataset", ")", "assert", "{", "\"expect_column_to_exist\"", ",", "\"expect_column_values_to_be_in_type_list\"", ",", "\"expect_column_unique_value_count_to_be_between\"", ",", "\"expect_column_proportion_of_unique_values_to_be_between\"", ",", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_in_set\"", ",", "\"expect_column_values_to_not_match_regex\"", ",", "}", "==", "{", "expectation", ".", "expectation_type", "for", "expectation", "in", "expectations_config", ".", "expectations", "if", "expectation", ".", "kwargs", ".", "get", "(", "\"column\"", ")", "==", "\"highcardnonnum\"", "}" ]
[ 182, 0 ]
[ 207, 5 ]
python
en
['en', 'error', 'th']
False
test_BasicDatasetProfiler_numeric_high_cardinality
(numeric_high_card_dataset)
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture.
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset): """ Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality numeric column. The test is executed against all the backends (Pandas, Spark, etc.), because it uses the fixture. """ expectations_config, evr_config = BasicDatasetProfiler.profile( numeric_high_card_dataset ) assert { "expect_column_to_exist", "expect_table_row_count_to_be_between", "expect_table_columns_to_match_ordered_list", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique", } == { expectation.expectation_type for expectation in expectations_config.expectations }
[ "def", "test_BasicDatasetProfiler_numeric_high_cardinality", "(", "numeric_high_card_dataset", ")", ":", "expectations_config", ",", "evr_config", "=", "BasicDatasetProfiler", ".", "profile", "(", "numeric_high_card_dataset", ")", "assert", "{", "\"expect_column_to_exist\"", ",", "\"expect_table_row_count_to_be_between\"", ",", "\"expect_table_columns_to_match_ordered_list\"", ",", "\"expect_column_values_to_be_in_type_list\"", ",", "\"expect_column_unique_value_count_to_be_between\"", ",", "\"expect_column_proportion_of_unique_values_to_be_between\"", ",", "\"expect_column_values_to_not_be_null\"", ",", "\"expect_column_values_to_be_in_set\"", ",", "\"expect_column_values_to_be_unique\"", ",", "}", "==", "{", "expectation", ".", "expectation_type", "for", "expectation", "in", "expectations_config", ".", "expectations", "}" ]
[ 210, 0 ]
[ 233, 5 ]
python
en
['en', 'error', 'th']
False
test_context_profiler
(filesystem_csv_data_context)
This just validates that it's possible to profile using the datasource hook, and have validation results available in the DataContext
This just validates that it's possible to profile using the datasource hook, and have validation results available in the DataContext
def test_context_profiler(filesystem_csv_data_context): """ This just validates that it's possible to profile using the datasource hook, and have validation results available in the DataContext """ context = filesystem_csv_data_context assert isinstance(context.datasources["rad_datasource"], PandasDatasource) assert context.list_expectation_suites() == [] context.profile_datasource("rad_datasource", profiler=BasicDatasetProfiler) assert len(context.list_expectation_suites()) == 1 expected_suite_name = "rad_datasource.subdir_reader.f1.BasicDatasetProfiler" profiled_expectations = context.get_expectation_suite(expected_suite_name) for exp in profiled_expectations.expectations: assert "BasicDatasetProfiler" in exp.meta assert "confidence" in exp.meta["BasicDatasetProfiler"] assert profiled_expectations.expectation_suite_name == expected_suite_name assert "batch_kwargs" in profiled_expectations.meta["BasicDatasetProfiler"] assert len(profiled_expectations.expectations) == 8
[ "def", "test_context_profiler", "(", "filesystem_csv_data_context", ")", ":", "context", "=", "filesystem_csv_data_context", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"rad_datasource\"", "]", ",", "PandasDatasource", ")", "assert", "context", ".", "list_expectation_suites", "(", ")", "==", "[", "]", "context", ".", "profile_datasource", "(", "\"rad_datasource\"", ",", "profiler", "=", "BasicDatasetProfiler", ")", "assert", "len", "(", "context", ".", "list_expectation_suites", "(", ")", ")", "==", "1", "expected_suite_name", "=", "\"rad_datasource.subdir_reader.f1.BasicDatasetProfiler\"", "profiled_expectations", "=", "context", ".", "get_expectation_suite", "(", "expected_suite_name", ")", "for", "exp", "in", "profiled_expectations", ".", "expectations", ":", "assert", "\"BasicDatasetProfiler\"", "in", "exp", ".", "meta", "assert", "\"confidence\"", "in", "exp", ".", "meta", "[", "\"BasicDatasetProfiler\"", "]", "assert", "profiled_expectations", ".", "expectation_suite_name", "==", "expected_suite_name", "assert", "\"batch_kwargs\"", "in", "profiled_expectations", ".", "meta", "[", "\"BasicDatasetProfiler\"", "]", "assert", "len", "(", "profiled_expectations", ".", "expectations", ")", "==", "8" ]
[ 276, 0 ]
[ 298, 55 ]
python
en
['en', 'error', 'th']
False
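The context-level entry point used from here on is `profile_datasource`. A sketch of calling it directly, assuming an already-initialized Great Expectations project; the path and datasource name below are placeholders taken from these tests.
```python
# Profile one named asset of a configured datasource through the context.
from great_expectations.data_context import DataContext
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler

context = DataContext("/path/to/great_expectations")  # hypothetical project dir
result = context.profile_datasource(
    "rad_datasource",                # datasource name from the tests above
    data_assets=["f1"],              # restrict profiling to one asset
    profiler=BasicDatasetProfiler,
)
print(result["success"], len(result.get("results", [])))
```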
test_context_profiler_with_data_asset_name
(filesystem_csv_data_context)
If a valid data asset name is passed to the profiling method in the data_assets argument, the profiling method profiles only this data asset
If a valid data asset name is passed to the profiling method in the data_assets argument, the profiling method profiles only this data asset
def test_context_profiler_with_data_asset_name(filesystem_csv_data_context): """ If a valid data asset name is passed to the profiling method in the data_assets argument, the profiling method profiles only this data asset """ context = filesystem_csv_data_context assert isinstance(context.datasources["rad_datasource"], PandasDatasource) assert context.list_expectation_suites() == [] profiling_result = context.profile_datasource( "rad_datasource", data_assets=["f1"], profiler=BasicDatasetProfiler ) assert profiling_result["success"] == True assert len(profiling_result["results"]) == 1 assert ( profiling_result["results"][0][0].expectation_suite_name == "rad_datasource.subdir_reader.f1.BasicDatasetProfiler" )
[ "def", "test_context_profiler_with_data_asset_name", "(", "filesystem_csv_data_context", ")", ":", "context", "=", "filesystem_csv_data_context", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"rad_datasource\"", "]", ",", "PandasDatasource", ")", "assert", "context", ".", "list_expectation_suites", "(", ")", "==", "[", "]", "profiling_result", "=", "context", ".", "profile_datasource", "(", "\"rad_datasource\"", ",", "data_assets", "=", "[", "\"f1\"", "]", ",", "profiler", "=", "BasicDatasetProfiler", ")", "assert", "profiling_result", "[", "\"success\"", "]", "==", "True", "assert", "len", "(", "profiling_result", "[", "\"results\"", "]", ")", "==", "1", "assert", "(", "profiling_result", "[", "\"results\"", "]", "[", "0", "]", "[", "0", "]", ".", "expectation_suite_name", "==", "\"rad_datasource.subdir_reader.f1.BasicDatasetProfiler\"", ")" ]
[ 301, 0 ]
[ 319, 5 ]
python
en
['en', 'error', 'th']
False
test_context_profiler_with_nonexisting_data_asset_name
(filesystem_csv_data_context)
If a non-existing data asset name is passed to the profiling method in the data_assets argument, the profiling method must return an error code in the result and the names of the unrecognized assets
If a non-existing data asset name is passed to the profiling method in the data_assets argument, the profiling method must return an error code in the result and the names of the unrecognized assets
def test_context_profiler_with_nonexisting_data_asset_name(filesystem_csv_data_context): """ If a non-existing data asset name is passed to the profiling method in the data_assets argument, the profiling method must return an error code in the result and the names of the unrecognized assets """ context = filesystem_csv_data_context assert isinstance(context.datasources["rad_datasource"], PandasDatasource) assert context.list_expectation_suites() == [] profiling_result = context.profile_datasource( "rad_datasource", data_assets=["this_asset_doesnot_exist"], profiler=BasicDatasetProfiler, ) assert profiling_result == { "success": False, "error": { "code": 3, "not_found_data_assets": ["this_asset_doesnot_exist"], "data_assets": [("f1", "file")], }, }
[ "def", "test_context_profiler_with_nonexisting_data_asset_name", "(", "filesystem_csv_data_context", ")", ":", "context", "=", "filesystem_csv_data_context", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"rad_datasource\"", "]", ",", "PandasDatasource", ")", "assert", "context", ".", "list_expectation_suites", "(", ")", "==", "[", "]", "profiling_result", "=", "context", ".", "profile_datasource", "(", "\"rad_datasource\"", ",", "data_assets", "=", "[", "\"this_asset_doesnot_exist\"", "]", ",", "profiler", "=", "BasicDatasetProfiler", ",", ")", "assert", "profiling_result", "==", "{", "\"success\"", ":", "False", ",", "\"error\"", ":", "{", "\"code\"", ":", "3", ",", "\"not_found_data_assets\"", ":", "[", "\"this_asset_doesnot_exist\"", "]", ",", "\"data_assets\"", ":", "[", "(", "\"f1\"", ",", "\"file\"", ")", "]", ",", "}", ",", "}" ]
[ 322, 0 ]
[ 345, 5 ]
python
en
['en', 'error', 'th']
False
test_context_profiler_with_non_existing_generator
(filesystem_csv_data_context)
If a non-existing generator name is passed to the profiling method in the generator_name argument, the profiling method must raise an exception.
If a non-existing generator name is passed to the profiling method in the generator_name argument, the profiling method must raise an exception.
def test_context_profiler_with_non_existing_generator(filesystem_csv_data_context):
    """
    If a non-existing generator name is passed to the profiling method
    in the generator_name argument, the profiling method must raise an exception.
    """
    context = filesystem_csv_data_context

    assert isinstance(context.datasources["rad_datasource"], PandasDatasource)
    assert context.list_expectation_suites() == []
    with pytest.raises(ge_exceptions.ProfilerError):
        profiling_result = context.profile_datasource(
            "rad_datasource",
            data_assets=["this_asset_doesnot_exist"],
            profiler=BasicDatasetProfiler,
            batch_kwargs_generator_name="this_gen_does_not_exist",
        )
[ "def", "test_context_profiler_with_non_existing_generator", "(", "filesystem_csv_data_context", ")", ":", "context", "=", "filesystem_csv_data_context", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"rad_datasource\"", "]", ",", "PandasDatasource", ")", "assert", "context", ".", "list_expectation_suites", "(", ")", "==", "[", "]", "with", "pytest", ".", "raises", "(", "ge_exceptions", ".", "ProfilerError", ")", ":", "profiling_result", "=", "context", ".", "profile_datasource", "(", "\"rad_datasource\"", ",", "data_assets", "=", "[", "\"this_asset_doesnot_exist\"", "]", ",", "profiler", "=", "BasicDatasetProfiler", ",", "batch_kwargs_generator_name", "=", "\"this_gen_does_not_exist\"", ",", ")" ]
[ 348, 0 ]
[ 363, 9 ]
python
en
['en', 'error', 'th']
False
test_context_profiler_without_generator_name_arg_on_datasource_with_multiple_generators
( filesystem_csv_data_context, filesystem_csv_2 )
If no generator_name is passed to the profiling method and the datasource has more than one generator configured, the profiling method must return an error code in the result
If no generator_name is passed to the profiling method and the datasource has more than one generator configured, the profiling method must return an error code in the result
def test_context_profiler_without_generator_name_arg_on_datasource_with_multiple_generators(
    filesystem_csv_data_context, filesystem_csv_2
):
    """
    If no generator_name is passed to the profiling method and the datasource
    has more than one generator configured, the profiling method must return
    an error code in the result
    """
    context = filesystem_csv_data_context
    context.add_batch_kwargs_generator(
        "rad_datasource",
        "second_generator",
        "SubdirReaderBatchKwargsGenerator",
        **{
            "base_directory": str(filesystem_csv_2),
        }
    )
    assert isinstance(context.datasources["rad_datasource"], PandasDatasource)

    profiling_result = context.profile_datasource(
        "rad_datasource",
        data_assets=["this_asset_doesnot_exist"],
        profiler=BasicDatasetProfiler,
    )

    assert profiling_result == {"success": False, "error": {"code": 5}}
[ "def", "test_context_profiler_without_generator_name_arg_on_datasource_with_multiple_generators", "(", "filesystem_csv_data_context", ",", "filesystem_csv_2", ")", ":", "context", "=", "filesystem_csv_data_context", "context", ".", "add_batch_kwargs_generator", "(", "\"rad_datasource\"", ",", "\"second_generator\"", ",", "\"SubdirReaderBatchKwargsGenerator\"", ",", "*", "*", "{", "\"base_directory\"", ":", "str", "(", "filesystem_csv_2", ")", ",", "}", ")", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"rad_datasource\"", "]", ",", "PandasDatasource", ")", "profiling_result", "=", "context", ".", "profile_datasource", "(", "\"rad_datasource\"", ",", "data_assets", "=", "[", "\"this_asset_doesnot_exist\"", "]", ",", "profiler", "=", "BasicDatasetProfiler", ",", ")", "assert", "profiling_result", "==", "{", "\"success\"", ":", "False", ",", "\"error\"", ":", "{", "\"code\"", ":", "5", "}", "}" ]
[ 366, 0 ]
[ 390, 71 ]
python
en
['en', 'error', 'th']
False
test_context_profiler_without_generator_name_arg_on_datasource_with_no_generators
( filesystem_csv_data_context, )
If no generator_name is passed to the profiling method and the datasource has no generators configured, the profiling method must return an error code in the result
If no generator_name is passed to the profiling method and the datasource has no generators configured, the profiling method must return an error code in the result
def test_context_profiler_without_generator_name_arg_on_datasource_with_no_generators(
    filesystem_csv_data_context,
):
    """
    If no generator_name is passed to the profiling method and the datasource
    has no generators configured, the profiling method must return an error
    code in the result
    """
    context = filesystem_csv_data_context
    context.add_datasource(
        "datasource_without_generators",
        module_name="great_expectations.datasource",
        class_name="PandasDatasource",
    )
    assert isinstance(
        context.datasources["datasource_without_generators"], PandasDatasource
    )
    profiling_result = context.profile_datasource(
        "datasource_without_generators", profiler=BasicDatasetProfiler
    )

    assert profiling_result == {"success": False, "error": {"code": 4}}
[ "def", "test_context_profiler_without_generator_name_arg_on_datasource_with_no_generators", "(", "filesystem_csv_data_context", ",", ")", ":", "context", "=", "filesystem_csv_data_context", "context", ".", "add_datasource", "(", "\"datasource_without_generators\"", ",", "module_name", "=", "\"great_expectations.datasource\"", ",", "class_name", "=", "\"PandasDatasource\"", ",", ")", "assert", "isinstance", "(", "context", ".", "datasources", "[", "\"datasource_without_generators\"", "]", ",", "PandasDatasource", ")", "profiling_result", "=", "context", ".", "profile_datasource", "(", "\"datasource_without_generators\"", ",", "profiler", "=", "BasicDatasetProfiler", ")", "assert", "profiling_result", "==", "{", "\"success\"", ":", "False", ",", "\"error\"", ":", "{", "\"code\"", ":", "4", "}", "}" ]
[ 393, 0 ]
[ 413, 71 ]
python
en
['en', 'error', 'th']
False
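Taken together, the three profiling tests above pin down three distinct error codes. The mapping below is inferred solely from the assertions in those tests, not from any library documentation, so treat the descriptions as a reading aid rather than an authoritative reference:

# Error codes asserted by the profiling tests above (meanings inferred
# from the test scenarios, not taken from library documentation):
PROFILER_ERROR_CODES = {
    3: "a requested data asset name was not found in the datasource",
    4: "the datasource has no batch kwargs generators configured",
    5: "multiple generators are configured but no generator_name was given",
}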
one_sided
(alpha, p, treatment)
One sided confounding function. Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf Args: alpha (np.array): a confounding values vector p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0)
One sided confounding function. Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf
def one_sided(alpha, p, treatment):
    """One sided confounding function.

    Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
    for causal effects." Political Analysis 22.2 (2014): 169-182.
    https://www.mattblackwell.org/files/papers/causalsens.pdf

    Args:
        alpha (np.array): a confounding values vector
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
    """
    assert p.shape[0] == treatment.shape[0]
    adj = alpha * (1 - p) * treatment - alpha * p * (1 - treatment)
    return adj
[ "def", "one_sided", "(", "alpha", ",", "p", ",", "treatment", ")", ":", "assert", "p", ".", "shape", "[", "0", "]", "==", "treatment", ".", "shape", "[", "0", "]", "adj", "=", "alpha", "*", "(", "1", "-", "p", ")", "*", "treatment", "-", "alpha", "*", "p", "*", "(", "1", "-", "treatment", ")", "return", "adj" ]
[ 10, 0 ]
[ 23, 14 ]
python
en
['en', 'en', 'en']
True
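As a sanity check on the formula above, here is a minimal sketch with invented inputs (the arrays are illustrative only, not part of the dataset record):

import numpy as np

alpha = np.full(4, 0.5)               # assumed confounding values
p = np.array([0.8, 0.3, 0.6, 0.2])    # assumed propensity scores
treatment = np.array([1, 1, 0, 0])

# one_sided shifts treated units up by alpha*(1-p) and control units
# down by alpha*p, mirroring the function body above.
adj = alpha * (1 - p) * treatment - alpha * p * (1 - treatment)
print(adj)  # approximately [ 0.1   0.35 -0.3  -0.1 ]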
alignment
(alpha, p, treatment)
Alignment confounding function. Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf Args: alpha (np.array): a confounding values vector p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0)
Alignment confounding function. Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf
def alignment(alpha, p, treatment):
    """Alignment confounding function.

    Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
    for causal effects." Political Analysis 22.2 (2014): 169-182.
    https://www.mattblackwell.org/files/papers/causalsens.pdf

    Args:
        alpha (np.array): a confounding values vector
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
    """
    assert p.shape[0] == treatment.shape[0]
    adj = alpha * (1 - p) * treatment + alpha * p * (1 - treatment)
    return adj
[ "def", "alignment", "(", "alpha", ",", "p", ",", "treatment", ")", ":", "assert", "p", ".", "shape", "[", "0", "]", "==", "treatment", ".", "shape", "[", "0", "]", "adj", "=", "alpha", "*", "(", "1", "-", "p", ")", "*", "treatment", "+", "alpha", "*", "p", "*", "(", "1", "-", "treatment", ")", "return", "adj" ]
[ 26, 0 ]
[ 40, 14 ]
python
en
['en', 'en', 'en']
True
one_sided_att
(alpha, p, treatment)
One sided confounding function for the average effect of the treatment among the treated units (ATT) Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf Args: alpha (np.array): a confounding values vector p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0)
One sided confounding function for the average effect of the treatment among the treated units (ATT)
def one_sided_att(alpha, p, treatment):
    """One sided confounding function for the average effect of the
    treatment among the treated units (ATT)

    Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
    for causal effects." Political Analysis 22.2 (2014): 169-182.
    https://www.mattblackwell.org/files/papers/causalsens.pdf

    Args:
        alpha (np.array): a confounding values vector
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
    """
    assert p.shape[0] == treatment.shape[0]
    adj = alpha * (1 - treatment)
    return adj
[ "def", "one_sided_att", "(", "alpha", ",", "p", ",", "treatment", ")", ":", "assert", "p", ".", "shape", "[", "0", "]", "==", "treatment", ".", "shape", "[", "0", "]", "adj", "=", "alpha", "*", "(", "1", "-", "treatment", ")", "return", "adj" ]
[ 43, 0 ]
[ 57, 14 ]
python
en
['en', 'en', 'en']
True
alignment_att
(alpha, p, treatment)
Alignment confounding function for the average effect of the treatment among the treated units (ATT) Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis for causal effects." Political Analysis 22.2 (2014): 169-182. https://www.mattblackwell.org/files/papers/causalsens.pdf Args: alpha (np.array): a confounding values vector p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0)
Alignment confounding function for the average effect of the treatment among the treated units (ATT)
def alignment_att(alpha, p, treatment):
    """Alignment confounding function for the average effect of the
    treatment among the treated units (ATT)

    Reference: Blackwell, Matthew. "A selection bias approach to sensitivity analysis
    for causal effects." Political Analysis 22.2 (2014): 169-182.
    https://www.mattblackwell.org/files/papers/causalsens.pdf

    Args:
        alpha (np.array): a confounding values vector
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
    """
    assert p.shape[0] == treatment.shape[0]
    adj = alpha * (1 - treatment)
    return adj
[ "def", "alignment_att", "(", "alpha", ",", "p", ",", "treatment", ")", ":", "assert", "p", ".", "shape", "[", "0", "]", "==", "treatment", ".", "shape", "[", "0", "]", "adj", "=", "alpha", "*", "(", "1", "-", "treatment", ")", "return", "adj" ]
[ 60, 0 ]
[ 74, 14 ]
python
en
['en', 'en', 'en']
True
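Note that, as recorded above, one_sided_att and alignment_att share an identical body: the propensity score p is validated but otherwise unused, and only control units receive the confounding shift. A toy sketch (inputs invented) makes this concrete:

import numpy as np

alpha = np.full(4, 0.5)           # assumed confounding values
treatment = np.array([1, 1, 0, 0])

# Both ATT variants above reduce to alpha * (1 - treatment):
# treated units get no adjustment, control units get the full shift.
adj = alpha * (1 - treatment)
print(adj)  # [0.  0.  0.5 0.5]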
Sensitivity.__init__
(self, df, inference_features, p_col, treatment_col, outcome_col, learner, *args, **kwargs)
Initialize. Args: df (pd.DataFrame): input data frame inference_features (list of str): a list of columns used by the learner for inference p_col (str): column name of the propensity score treatment_col (str): column name indicating treatment or control outcome_col (str): column name of the outcome learner (model): a model to estimate outcomes and treatment effects
Initialize.
def __init__(self, df, inference_features, p_col, treatment_col, outcome_col,
             learner, *args, **kwargs):
    """Initialize.

    Args:
        df (pd.DataFrame): input data frame
        inference_features (list of str): a list of columns used by the learner for inference
        p_col (str): column name of the propensity score
        treatment_col (str): column name indicating treatment or control
        outcome_col (str): column name of the outcome
        learner (model): a model to estimate outcomes and treatment effects
    """
    self.df = df
    self.inference_features = inference_features
    self.p_col = p_col
    self.treatment_col = treatment_col
    self.outcome_col = outcome_col
    self.learner = learner
[ "def", "__init__", "(", "self", ",", "df", ",", "inference_features", ",", "p_col", ",", "treatment_col", ",", "outcome_col", ",", "learner", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "df", "=", "df", "self", ".", "inference_features", "=", "inference_features", "self", ".", "p_col", "=", "p_col", "self", ".", "treatment_col", "=", "treatment_col", "self", ".", "outcome_col", "=", "outcome_col", "self", ".", "learner", "=", "learner" ]
[ 84, 4 ]
[ 102, 30 ]
python
en
['en', 'en', 'it']
False
Sensitivity.get_prediction
(self, X, p, treatment, y)
Return the treatment effects prediction. Args: X (np.matrix): a feature matrix p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0) y (np.array): an outcome vector Returns: (numpy.ndarray): Predictions of treatment effects
Return the treatment effects prediction.
def get_prediction(self, X, p, treatment, y):
    """Return the treatment effects prediction.

    Args:
        X (np.matrix): a feature matrix
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
        y (np.array): an outcome vector
    Returns:
        (numpy.ndarray): Predictions of treatment effects
    """
    learner = self.learner
    try:
        preds = learner.fit_predict(X=X, p=p, treatment=treatment, y=y).flatten()
    except TypeError:
        preds = learner.fit_predict(X=X, treatment=treatment, y=y).flatten()
    return preds
[ "def", "get_prediction", "(", "self", ",", "X", ",", "p", ",", "treatment", ",", "y", ")", ":", "learner", "=", "self", ".", "learner", "try", ":", "preds", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "p", "=", "p", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", ".", "flatten", "(", ")", "except", "TypeError", ":", "preds", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", ".", "flatten", "(", ")", "return", "preds" ]
[ 104, 4 ]
[ 121, 20 ]
python
en
['en', 'en', 'en']
True
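get_prediction probes the learner's fit_predict signature at runtime: it first passes the propensity score and falls back when the learner rejects the keyword. A self-contained sketch of the same pattern with stand-in learners (both classes below are hypothetical, not part of the library):

import numpy as np

class NeedsPropensity:
    """Stand-in for a learner whose fit_predict requires p (hypothetical)."""
    def fit_predict(self, X, p, treatment, y):
        return np.zeros((len(y), 1))

class NoPropensity:
    """Stand-in for a learner whose fit_predict takes no p (hypothetical)."""
    def fit_predict(self, X, treatment, y):
        return np.ones((len(y), 1))

def get_prediction(learner, X, p, treatment, y):
    # Try the propensity-aware signature first; fall back on TypeError.
    try:
        return learner.fit_predict(X=X, p=p, treatment=treatment, y=y).flatten()
    except TypeError:
        return learner.fit_predict(X=X, treatment=treatment, y=y).flatten()

X = np.zeros((3, 2)); p = np.full(3, 0.5)
t = np.array([1, 0, 1]); y = np.array([1.0, 2.0, 3.0])
print(get_prediction(NeedsPropensity(), X, p, t, y))  # [0. 0. 0.]
print(get_prediction(NoPropensity(), X, p, t, y))     # [1. 1. 1.]

One caveat of this idiom: a TypeError raised inside a propensity-aware learner for an unrelated reason would also trigger the fallback.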
Sensitivity.get_ate_ci
(self, X, p, treatment, y)
Return the confidence intervals for treatment effects prediction. Args: X (np.matrix): a feature matrix p (np.array): a propensity score vector between 0 and 1 treatment (np.array): a treatment vector (1 if treated, otherwise 0) y (np.array): an outcome vector Returns: (numpy.ndarray): Mean and confidence interval (LB, UB) of the ATE estimate.
Return the confidence intervals for treatment effects prediction.
def get_ate_ci(self, X, p, treatment, y):
    """Return the confidence intervals for treatment effects prediction.

    Args:
        X (np.matrix): a feature matrix
        p (np.array): a propensity score vector between 0 and 1
        treatment (np.array): a treatment vector (1 if treated, otherwise 0)
        y (np.array): an outcome vector
    Returns:
        (numpy.ndarray): Mean and confidence interval (LB, UB) of the ATE estimate.
    """
    learner = self.learner
    from ..inference.meta.tlearner import BaseTLearner

    if isinstance(learner, BaseTLearner):
        ate, ate_lower, ate_upper = learner.estimate_ate(X=X, treatment=treatment, y=y)
    else:
        try:
            ate, ate_lower, ate_upper = learner.estimate_ate(X=X, p=p, treatment=treatment, y=y)
        except TypeError:
            ate, ate_lower, ate_upper = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True)
    return ate[0], ate_lower[0], ate_upper[0]
[ "def", "get_ate_ci", "(", "self", ",", "X", ",", "p", ",", "treatment", ",", "y", ")", ":", "learner", "=", "self", ".", "learner", "from", ".", ".", "inference", ".", "meta", ".", "tlearner", "import", "BaseTLearner", "if", "isinstance", "(", "learner", ",", "BaseTLearner", ")", ":", "ate", ",", "ate_lower", ",", "ate_upper", "=", "learner", ".", "estimate_ate", "(", "X", "=", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", "else", ":", "try", ":", "ate", ",", "ate_lower", ",", "ate_upper", "=", "learner", ".", "estimate_ate", "(", "X", "=", "X", ",", "p", "=", "p", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", "except", "TypeError", ":", "ate", ",", "ate_lower", ",", "ate_upper", "=", "learner", ".", "estimate_ate", "(", "X", "=", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ",", "return_ci", "=", "True", ")", "return", "ate", "[", "0", "]", ",", "ate_lower", "[", "0", "]", ",", "ate_upper", "[", "0", "]" ]
[ 123, 4 ]
[ 144, 49 ]
python
en
['en', 'en', 'en']
True
Sensitivity.get_class_object
(method_name, *args, **kwargs)
Return class object based on input method Args: method_name (str): a sensitivity analysis method name Returns: (class): Sensitivity Class
Return class object based on input method Args: method_name (str): a sensitivity analysis method name Returns: (class): Sensitivity Class
def get_class_object(method_name, *args, **kwargs):
    """Return class object based on input method

    Args:
        method_name (str): a sensitivity analysis method name
    Returns:
        (class): Sensitivity Class
    """

    method_list = ['Placebo Treatment', 'Random Cause', 'Subset Data', 'Random Replace', 'Selection Bias']
    class_name = 'Sensitivity' + method_name.replace(' ', '')

    try:
        return getattr(import_module('causalml.metrics.sensitivity'), class_name)
    except AttributeError:
        raise AttributeError('{} is not an existing method for sensitivity analysis.'.format(method_name) +
                             ' Select one of {}'.format(method_list))
[ "def", "get_class_object", "(", "method_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "method_list", "=", "[", "'Placebo Treatment'", ",", "'Random Cause'", ",", "'Subset Data'", ",", "'Random Replace'", ",", "'Selection Bias'", "]", "class_name", "=", "'Sensitivity'", "+", "method_name", ".", "replace", "(", "' '", ",", "''", ")", "try", ":", "getattr", "(", "import_module", "(", "'causalml.metrics.sensitivity'", ")", ",", "class_name", ")", "return", "getattr", "(", "import_module", "(", "'causalml.metrics.sensitivity'", ")", ",", "class_name", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "'{} is not an existing method for sensitiviy analysis.'", ".", "format", "(", "method_name", ")", "+", "' Select one of {}'", ".", "format", "(", "method_list", ")", ")" ]
[ 147, 4 ]
[ 163, 70 ]
python
en
['en', 'en', 'en']
True
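get_class_object resolves a human-readable name like 'Placebo Treatment' to the class SensitivityPlaceboTreatment via string manipulation plus a dynamic import. A minimal, runnable sketch of the same resolution idiom, using a stdlib module so nothing from causalml is required:

from importlib import import_module

def resolve_class(module_path, class_name):
    # Mirrors the getattr/import_module pattern in get_class_object above.
    try:
        return getattr(import_module(module_path), class_name)
    except AttributeError:
        raise AttributeError('{} not found in {}'.format(class_name, module_path))

# stdlib example so the sketch runs anywhere:
cls = resolve_class('collections', 'OrderedDict')
print(cls())  # OrderedDict()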
Sensitivity.sensitivity_analysis
(self, methods, sample_size=None, confound='one_sided', alpha_range=None)
Return the sensitivity data by different methods Args: methods (list of str): a list of sensitivity analysis methods sample_size (float, optional): ratio for subsetting the original data confound (string, optional): the name of the confounding function alpha_range (np.array, optional): a parameter to pass to the confounding function Returns: (pd.DataFrame): a summary dataframe of the sensitivity analysis results
Return the sensitivity data by different methods
def sensitivity_analysis(self, methods, sample_size=None,
                         confound='one_sided', alpha_range=None):
    """Return the sensitivity data by different methods

    Args:
        methods (list of str): a list of sensitivity analysis methods
        sample_size (float, optional): ratio for subsetting the original data
        confound (string, optional): the name of the confounding function
        alpha_range (np.array, optional): a parameter to pass to the confounding function

    Returns:
        (pd.DataFrame): a summary dataframe of the sensitivity analysis results
    """
    if alpha_range is None:
        y = self.df[self.outcome_col]
        iqr = y.quantile(.75) - y.quantile(.25)
        alpha_range = np.linspace(-iqr/2, iqr/2, 11)
        if 0 not in alpha_range:
            alpha_range = np.append(alpha_range, 0)
    else:
        alpha_range = alpha_range

    alpha_range.sort()

    summary_df = pd.DataFrame(columns=['Method', 'ATE', 'New ATE', 'New ATE LB', 'New ATE UB'])
    for method in methods:
        sens = self.get_class_object(method)
        sens = sens(self.df, self.inference_features, self.p_col, self.treatment_col,
                    self.outcome_col, self.learner, sample_size=sample_size,
                    confound=confound, alpha_range=alpha_range)

        if method == 'Subset Data':
            method = method + '(sample size @{})'.format(sample_size)
        sens_df = sens.summary(method=method)
        summary_df = summary_df.append(sens_df)

    return summary_df
[ "def", "sensitivity_analysis", "(", "self", ",", "methods", ",", "sample_size", "=", "None", ",", "confound", "=", "'one_sided'", ",", "alpha_range", "=", "None", ")", ":", "if", "alpha_range", "is", "None", ":", "y", "=", "self", ".", "df", "[", "self", ".", "outcome_col", "]", "iqr", "=", "y", ".", "quantile", "(", ".75", ")", "-", "y", ".", "quantile", "(", ".25", ")", "alpha_range", "=", "np", ".", "linspace", "(", "-", "iqr", "/", "2", ",", "iqr", "/", "2", ",", "11", ")", "if", "0", "not", "in", "alpha_range", ":", "alpha_range", "=", "np", ".", "append", "(", "alpha_range", ",", "0", ")", "else", ":", "alpha_range", "=", "alpha_range", "alpha_range", ".", "sort", "(", ")", "summary_df", "=", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'Method'", ",", "'ATE'", ",", "'New ATE'", ",", "'New ATE LB'", ",", "'New ATE UB'", "]", ")", "for", "method", "in", "methods", ":", "sens", "=", "self", ".", "get_class_object", "(", "method", ")", "sens", "=", "sens", "(", "self", ".", "df", ",", "self", ".", "inference_features", ",", "self", ".", "p_col", ",", "self", ".", "treatment_col", ",", "self", ".", "outcome_col", ",", "self", ".", "learner", ",", "sample_size", "=", "sample_size", ",", "confound", "=", "confound", ",", "alpha_range", "=", "alpha_range", ")", "if", "method", "==", "'Subset Data'", ":", "method", "=", "method", "+", "'(sample size @{})'", ".", "format", "(", "sample_size", ")", "sens_df", "=", "sens", ".", "summary", "(", "method", "=", "method", ")", "summary_df", "=", "summary_df", ".", "append", "(", "sens_df", ")", "return", "summary_df" ]
[ 165, 4 ]
[ 204, 25 ]
python
en
['en', 'en', 'en']
True
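When alpha_range is omitted, the default grid spans roughly plus or minus IQR/2 of the outcome in 11 steps, with 0 appended if linspace missed it. A sketch with an invented outcome vector; note also that DataFrame.append, used in the accumulation loop above, was removed in pandas 2.0, so a modern port would collect frames and concatenate once:

import numpy as np
import pandas as pd

y = pd.Series(np.random.RandomState(0).normal(size=1000))  # assumed outcome
iqr = y.quantile(.75) - y.quantile(.25)
alpha_range = np.linspace(-iqr / 2, iqr / 2, 11)
if 0 not in alpha_range:
    alpha_range = np.append(alpha_range, 0)
alpha_range.sort()
print(alpha_range)  # symmetric grid around 0; 0 appended if linspace missed it

# DataFrame.append was removed in pandas 2.0; the modern equivalent of the
# accumulation loop is to collect per-method frames and concatenate once:
frames = [pd.DataFrame({'Method': ['m1']}), pd.DataFrame({'Method': ['m2']})]
summary_df = pd.concat(frames, ignore_index=True)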
Sensitivity.summary
(self, method)
Summary report Args: method (str): sensitivity analysis method Returns: (pd.DataFrame): a summary dataframe
Summary report Args: method (str): sensitivity analysis method
def summary(self, method):
    """Summary report

    Args:
        method (str): sensitivity analysis method

    Returns:
        (pd.DataFrame): a summary dataframe
    """
    method_name = method

    X = self.df[self.inference_features].values
    p = self.df[self.p_col].values
    treatment = self.df[self.treatment_col].values
    y = self.df[self.outcome_col].values

    preds = self.get_prediction(X, p, treatment, y)
    ate = preds.mean()
    ate_new, ate_new_lower, ate_new_upper = self.sensitivity_estimate()

    sensitivity_summary = pd.DataFrame([method_name, ate, ate_new, ate_new_lower, ate_new_upper]).T
    sensitivity_summary.columns = ['Method', 'ATE', 'New ATE', 'New ATE LB', 'New ATE UB']
    return sensitivity_summary
[ "def", "summary", "(", "self", ",", "method", ")", ":", "method_name", "=", "method", "X", "=", "self", ".", "df", "[", "self", ".", "inference_features", "]", ".", "values", "p", "=", "self", ".", "df", "[", "self", ".", "p_col", "]", ".", "values", "treatment", "=", "self", ".", "df", "[", "self", ".", "treatment_col", "]", ".", "values", "y", "=", "self", ".", "df", "[", "self", ".", "outcome_col", "]", ".", "values", "preds", "=", "self", ".", "get_prediction", "(", "X", ",", "p", ",", "treatment", ",", "y", ")", "ate", "=", "preds", ".", "mean", "(", ")", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper", "=", "self", ".", "sensitivity_estimate", "(", ")", "sensitivity_summary", "=", "pd", ".", "DataFrame", "(", "[", "method_name", ",", "ate", ",", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper", "]", ")", ".", "T", "sensitivity_summary", ".", "columns", "=", "[", "'Method'", ",", "'ATE'", ",", "'New ATE'", ",", "'New ATE LB'", ",", "'New ATE UB'", "]", "return", "sensitivity_summary" ]
[ 206, 4 ]
[ 228, 34 ]
python
en
['en', 'ky', 'en']
False
SensitivityPlaceboTreatment.sensitivity_estimate
(self)
Estimate the ATE and its confidence interval after assigning a random placebo treatment. Returns: (tuple): the new ATE and its lower and upper bounds
Estimate the ATE and its confidence interval after assigning a random placebo treatment.
def sensitivity_estimate(self):
    """Estimate the ATE and its confidence interval after assigning
    a random placebo treatment.

    Returns:
        (tuple): the new ATE and its lower and upper bounds
    """
    num_rows = self.df.shape[0]
    X = self.df[self.inference_features].values
    p = self.df[self.p_col].values
    treatment_new = np.random.randint(2, size=num_rows)
    y = self.df[self.outcome_col].values

    ate_new, ate_new_lower, ate_new_upper = self.get_ate_ci(X, p, treatment_new, y)
    return ate_new, ate_new_lower, ate_new_upper
[ "def", "sensitivity_estimate", "(", "self", ")", ":", "num_rows", "=", "self", ".", "df", ".", "shape", "[", "0", "]", "X", "=", "self", ".", "df", "[", "self", ".", "inference_features", "]", ".", "values", "p", "=", "self", ".", "df", "[", "self", ".", "p_col", "]", ".", "values", "treatment_new", "=", "np", ".", "random", ".", "randint", "(", "2", ",", "size", "=", "num_rows", ")", "y", "=", "self", ".", "df", "[", "self", ".", "outcome_col", "]", ".", "values", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper", "=", "self", ".", "get_ate_ci", "(", "X", ",", "p", ",", "treatment_new", ",", "y", ")", "return", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper" ]
[ 241, 4 ]
[ 257, 52 ]
python
en
['en', 'ky', 'en']
False
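The placebo check replaces the real assignment with np.random.randint(2, size=num_rows); under a true placebo, the estimated effect should collapse toward zero. A toy illustration with a simulated outcome (all values invented, naive difference-in-means used in place of a fitted learner):

import numpy as np

rng = np.random.RandomState(42)
n = 10000
treatment = rng.binomial(1, 0.5, n)
y = 1.0 * treatment + rng.normal(size=n)     # simulated true effect = 1
treatment_placebo = rng.randint(2, size=n)   # same idea as treatment_new above

naive_ate = lambda t: y[t == 1].mean() - y[t == 0].mean()
print(round(naive_ate(treatment), 2))          # close to 1.0
print(round(naive_ate(treatment_placebo), 2))  # close to 0.0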
SensitivityRandomReplace.sensitivity_estimate
(self)
Replaces a random covariate with an irrelevant variable.
Replaces a random covariate with an irrelevant variable.
def sensitivity_estimate(self):
    """Replaces a random covariate with an irrelevant variable.
    """

    logger.info('Replace feature {} with a random irrelevant variable'.format(self.replaced_feature))

    df_new = self.df.copy()
    num_rows = self.df.shape[0]
    df_new[self.replaced_feature] = np.random.randn(num_rows)
    X_new = df_new[self.inference_features].values
    p_new = df_new[self.p_col].values
    treatment_new = df_new[self.treatment_col].values
    y_new = df_new[self.outcome_col].values
    ate_new, ate_new_lower, ate_new_upper = self.get_ate_ci(X_new, p_new, treatment_new, y_new)
    return ate_new, ate_new_lower, ate_new_upper
[ "def", "sensitivity_estimate", "(", "self", ")", ":", "logger", ".", "info", "(", "'Replace feature {} with an random irrelevant variable'", ".", "format", "(", "self", ".", "replaced_feature", ")", ")", "df_new", "=", "self", ".", "df", ".", "copy", "(", ")", "num_rows", "=", "self", ".", "df", ".", "shape", "[", "0", "]", "df_new", "[", "self", ".", "replaced_feature", "]", "=", "np", ".", "random", ".", "randn", "(", "num_rows", ")", "X_new", "=", "df_new", "[", "self", ".", "inference_features", "]", ".", "values", "p_new", "=", "df_new", "[", "self", ".", "p_col", "]", ".", "values", "treatment_new", "=", "df_new", "[", "self", ".", "treatment_col", "]", ".", "values", "y_new", "=", "df_new", "[", "self", ".", "outcome_col", "]", ".", "values", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper", "=", "self", ".", "get_ate_ci", "(", "X_new", ",", "p_new", ",", "treatment_new", ",", "y_new", ")", "return", "ate_new", ",", "ate_new_lower", ",", "ate_new_upper" ]
[ 293, 4 ]
[ 308, 52 ]
python
en
['en', 'en', 'en']
True
SensitivitySelectionBias.__init__
(self, *args, confound='one_sided', alpha_range=None, sensitivity_features=None, **kwargs)
Initialize. Args: confound (string): the name of the confounding function alpha_range (np.array): a parameter to pass to the confounding function sensitivity_features (list of str): a list of columns for which to check each individual partial R-square
Initialize.
def __init__(self, *args, confound='one_sided', alpha_range=None,
             sensitivity_features=None, **kwargs):
    super().__init__(*args, **kwargs)
    """Initialize.

    Args:
        confound (string): the name of the confounding function
        alpha_range (np.array): a parameter to pass to the confounding function
        sensitivity_features (list of str): a list of columns for which to check
            each individual partial R-square
    """

    logger.info('Only works for linear outcome models right now. Check back soon.')

    confounding_functions = {'one_sided': one_sided,
                             'alignment': alignment,
                             'one_sided_att': one_sided_att,
                             'alignment_att': alignment_att}

    try:
        confound_func = confounding_functions[confound]
    except KeyError:
        raise NotImplementedError(f'Confounding function, {confound} is not implemented. \
                                    Use one of {confounding_functions.keys()}')

    self.confound = confound_func

    if sensitivity_features is None:
        self.sensitivity_features = self.inference_features
    else:
        self.sensitivity_features = sensitivity_features

    if alpha_range is None:
        y = self.df[self.outcome_col]
        iqr = y.quantile(.75) - y.quantile(.25)
        self.alpha_range = np.linspace(-iqr/2, iqr/2, 11)
        if 0 not in self.alpha_range:
            self.alpha_range = np.append(self.alpha_range, 0)
    else:
        self.alpha_range = alpha_range

    self.alpha_range.sort()
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "confound", "=", "'one_sided'", ",", "alpha_range", "=", "None", ",", "sensitivity_features", "=", "None", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "logger", ".", "info", "(", "'Only works for linear outcome models right now. Check back soon.'", ")", "confounding_functions", "=", "{", "'one_sided'", ":", "one_sided", ",", "'alignment'", ":", "alignment", ",", "'one_sided_att'", ":", "one_sided_att", ",", "'alignment_att'", ":", "alignment_att", "}", "try", ":", "confound_func", "=", "confounding_functions", "[", "confound", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", "f'Confounding function, {confound} is not implemented. \\\n Use one of {confounding_functions.keys()}'", ")", "self", ".", "confound", "=", "confound_func", "if", "sensitivity_features", "is", "None", ":", "self", ".", "sensitivity_features", "=", "self", ".", "inference_features", "else", ":", "self", ".", "sensitivity_features", "=", "sensitivity_features", "if", "alpha_range", "is", "None", ":", "y", "=", "self", ".", "df", "[", "self", ".", "outcome_col", "]", "iqr", "=", "y", ".", "quantile", "(", ".75", ")", "-", "y", ".", "quantile", "(", ".25", ")", "self", ".", "alpha_range", "=", "np", ".", "linspace", "(", "-", "iqr", "/", "2", ",", "iqr", "/", "2", ",", "11", ")", "if", "0", "not", "in", "self", ".", "alpha_range", ":", "self", ".", "alpha_range", "=", "np", ".", "append", "(", "self", ".", "alpha_range", ",", "0", ")", "else", ":", "self", ".", "alpha_range", "=", "alpha_range", "self", ".", "alpha_range", ".", "sort", "(", ")" ]
[ 344, 4 ]
[ 383, 31 ]
python
en
['en', 'en', 'it']
False
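The constructor dispatches the confound string through a plain dict and converts a KeyError into NotImplementedError. The same idiom in isolation; the lambda bodies here simply echo the confounding formulas recorded earlier, as a sketch rather than the library's own code:

def resolve_confound(confound):
    # Dict-based dispatch, mirroring the constructor above.
    confounding_functions = {
        'one_sided': lambda a, p, t: a * (1 - p) * t - a * p * (1 - t),
        'alignment': lambda a, p, t: a * (1 - p) * t + a * p * (1 - t),
    }
    try:
        return confounding_functions[confound]
    except KeyError:
        raise NotImplementedError(
            f'Confounding function, {confound} is not implemented. '
            f'Use one of {list(confounding_functions.keys())}')

f = resolve_confound('one_sided')   # returns the function object
# resolve_confound('bogus')         # would raise NotImplementedError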
SensitivitySelectionBias.summary
(self, method='Selection Bias')
Summary report for the Selection Bias Method Args: method (str): sensitivity analysis method Returns: (pd.DataFrame): a summary dataframe
Summary report for the Selection Bias Method Args: method (str): sensitivity analysis method Returns: (pd.DataFrame): a summary dataframe
def summary(self, method='Selection Bias'):
    """Summary report for the Selection Bias Method

    Args:
        method (str): sensitivity analysis method

    Returns:
        (pd.DataFrame): a summary dataframe
    """
    method_name = method
    sensitivity_summary = self.causalsens()[0]
    sensitivity_summary['Method'] = [method_name + ' (alpha@' + str(round(i, 5)) + ', with r-square:'
                                     for i in sensitivity_summary.alpha]
    sensitivity_summary['Method'] = sensitivity_summary['Method'] + sensitivity_summary['rsqs'].round(5).astype(str)
    sensitivity_summary['ATE'] = sensitivity_summary[sensitivity_summary.alpha == 0]['New ATE']
    return sensitivity_summary[['Method', 'ATE', 'New ATE', 'New ATE LB', 'New ATE UB']]
[ "def", "summary", "(", "self", ",", "method", "=", "'Selection Bias'", ")", ":", "method_name", "=", "method", "sensitivity_summary", "=", "self", ".", "causalsens", "(", ")", "[", "0", "]", "sensitivity_summary", "[", "'Method'", "]", "=", "[", "method_name", "+", "' (alpha@'", "+", "str", "(", "round", "(", "i", ",", "5", ")", ")", "+", "', with r-sqaure:'", "for", "i", "in", "sensitivity_summary", ".", "alpha", "]", "sensitivity_summary", "[", "'Method'", "]", "=", "sensitivity_summary", "[", "'Method'", "]", "+", "sensitivity_summary", "[", "'rsqs'", "]", ".", "round", "(", "5", ")", ".", "astype", "(", "str", ")", "sensitivity_summary", "[", "'ATE'", "]", "=", "sensitivity_summary", "[", "sensitivity_summary", ".", "alpha", "==", "0", "]", "[", "'New ATE'", "]", "return", "sensitivity_summary", "[", "[", "'Method'", ",", "'ATE'", ",", "'New ATE'", ",", "'New ATE LB'", ",", "'New ATE UB'", "]", "]" ]
[ 426, 4 ]
[ 440, 92 ]
python
en
['en', 'en', 'en']
True
SensitivitySelectionBias.plot
(sens_df, partial_rsqs_df=None, type='raw', ci=False, partial_rsqs=False)
Plot the results of a sensitivity analysis against unmeasured confounders Args: sens_df (pandas.DataFrame): a data frame output from causalsens partial_rsqs_df (pandas.DataFrame): a data frame output from causalsens including partial R-square type (str, optional): the type of plot to draw, 'raw' or 'r.squared' are supported ci (bool, optional): whether to plot confidence intervals partial_rsqs (bool, optional): whether to plot partial R-square results
Plot the results of a sensitivity analysis against unmeasured confounders Args: sens_df (pandas.DataFrame): a data frame output from causalsens partial_rsqs_df (pandas.DataFrame): a data frame output from causalsens including partial R-square type (str, optional): the type of plot to draw, 'raw' or 'r.squared' are supported ci (bool, optional): whether to plot confidence intervals partial_rsqs (bool, optional): whether to plot partial R-square results
def plot(sens_df, partial_rsqs_df=None, type='raw', ci=False, partial_rsqs=False):
    """Plot the results of a sensitivity analysis against unmeasured confounders

    Args:
        sens_df (pandas.DataFrame): a data frame output from causalsens
        partial_rsqs_df (pandas.DataFrame): a data frame output from causalsens including partial R-square
        type (str, optional): the type of plot to draw, 'raw' or 'r.squared' are supported
        ci (bool, optional): whether to plot confidence intervals
        partial_rsqs (bool, optional): whether to plot partial R-square results
    """

    if type == 'raw' and not ci:
        fig, ax = plt.subplots()
        y_max = round(sens_df['New ATE UB'].max()*1.1, 4)
        y_min = round(sens_df['New ATE LB'].min()*0.9, 4)
        x_max = round(sens_df.alpha.max()*1.1, 4)
        x_min = round(sens_df.alpha.min()*0.9, 4)
        plt.ylim(y_min, y_max)
        plt.xlim(x_min, x_max)
        ax.plot(sens_df.alpha, sens_df['New ATE'])
    elif type == 'raw' and ci:
        fig, ax = plt.subplots()
        y_max = round(sens_df['New ATE UB'].max()*1.1, 4)
        y_min = round(sens_df['New ATE LB'].min()*0.9, 4)
        x_max = round(sens_df.alpha.max()*1.1, 4)
        x_min = round(sens_df.alpha.min()*0.9, 4)
        plt.ylim(y_min, y_max)
        plt.xlim(x_min, x_max)
        ax.fill_between(sens_df.alpha, sens_df['New ATE LB'], sens_df['New ATE UB'], color='gray', alpha=0.5)
        ax.plot(sens_df.alpha, sens_df['New ATE'])
    elif type == 'r.squared' and ci:
        fig, ax = plt.subplots()
        y_max = round(sens_df['New ATE UB'].max()*1.1, 4)
        y_min = round(sens_df['New ATE LB'].min()*0.9, 4)
        plt.ylim(y_min, y_max)
        ax.fill_between(sens_df.rsqs, sens_df['New ATE LB'], sens_df['New ATE UB'], color='gray', alpha=0.5)
        ax.plot(sens_df.rsqs, sens_df['New ATE'])
        if partial_rsqs:
            plt.scatter(partial_rsqs_df.partial_rsqs,
                        list(sens_df[sens_df.alpha == 0]['New ATE']) * partial_rsqs_df.shape[0],
                        marker='x', color="red", linewidth=10)
    elif type == 'r.squared' and not ci:
        fig, ax = plt.subplots()
        y_max = round(sens_df['New ATE UB'].max()*1.1, 4)
        y_min = round(sens_df['New ATE LB'].min()*0.9, 4)
        plt.ylim(y_min, y_max)
        plt.plot(sens_df.rsqs, sens_df['New ATE'])
        if partial_rsqs:
            plt.scatter(partial_rsqs_df.partial_rsqs,
                        list(sens_df[sens_df.alpha == 0]['New ATE']) * partial_rsqs_df.shape[0],
                        marker='x', color="red", linewidth=10)
[ "def", "plot", "(", "sens_df", ",", "partial_rsqs_df", "=", "None", ",", "type", "=", "'raw'", ",", "ci", "=", "False", ",", "partial_rsqs", "=", "False", ")", ":", "if", "type", "==", "'raw'", "and", "not", "ci", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "y_max", "=", "round", "(", "sens_df", "[", "'New ATE UB'", "]", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "y_min", "=", "round", "(", "sens_df", "[", "'New ATE LB'", "]", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "x_max", "=", "round", "(", "sens_df", ".", "alpha", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "x_min", "=", "round", "(", "sens_df", ".", "alpha", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "plt", ".", "ylim", "(", "y_min", ",", "y_max", ")", "plt", ".", "xlim", "(", "x_min", ",", "x_max", ")", "ax", ".", "plot", "(", "sens_df", ".", "alpha", ",", "sens_df", "[", "'New ATE'", "]", ")", "elif", "type", "==", "'raw'", "and", "ci", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "y_max", "=", "round", "(", "sens_df", "[", "'New ATE UB'", "]", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "y_min", "=", "round", "(", "sens_df", "[", "'New ATE LB'", "]", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "x_max", "=", "round", "(", "sens_df", ".", "alpha", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "x_min", "=", "round", "(", "sens_df", ".", "alpha", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "plt", ".", "ylim", "(", "y_min", ",", "y_max", ")", "plt", ".", "xlim", "(", "x_min", ",", "x_max", ")", "ax", ".", "fill_between", "(", "sens_df", ".", "alpha", ",", "sens_df", "[", "'New ATE LB'", "]", ",", "sens_df", "[", "'New ATE UB'", "]", ",", "color", "=", "'gray'", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "sens_df", ".", "alpha", ",", "sens_df", "[", "'New ATE'", "]", ")", "elif", "type", "==", "'r.squared'", "and", "ci", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "y_max", "=", "round", "(", "sens_df", "[", "'New ATE UB'", "]", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "y_min", "=", "round", "(", "sens_df", "[", "'New ATE LB'", "]", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "plt", ".", "ylim", "(", "y_min", ",", "y_max", ")", "ax", ".", "fill_between", "(", "sens_df", ".", "rsqs", ",", "sens_df", "[", "'New ATE LB'", "]", ",", "sens_df", "[", "'New ATE UB'", "]", ",", "color", "=", "'gray'", ",", "alpha", "=", "0.5", ")", "ax", ".", "plot", "(", "sens_df", ".", "rsqs", ",", "sens_df", "[", "'New ATE'", "]", ")", "if", "partial_rsqs", ":", "plt", ".", "scatter", "(", "partial_rsqs_df", ".", "partial_rsqs", ",", "list", "(", "sens_df", "[", "sens_df", ".", "alpha", "==", "0", "]", "[", "'New ATE'", "]", ")", "*", "partial_rsqs_df", ".", "shape", "[", "0", "]", ",", "marker", "=", "'x'", ",", "color", "=", "\"red\"", ",", "linewidth", "=", "10", ")", "elif", "type", "==", "'r.squared'", "and", "not", "ci", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "y_max", "=", "round", "(", "sens_df", "[", "'New ATE UB'", "]", ".", "max", "(", ")", "*", "1.1", ",", "4", ")", "y_min", "=", "round", "(", "sens_df", "[", "'New ATE LB'", "]", ".", "min", "(", ")", "*", "0.9", ",", "4", ")", "plt", ".", "ylim", "(", "y_min", ",", "y_max", ")", "plt", ".", "plot", "(", "sens_df", ".", "rsqs", ",", "sens_df", "[", "'New ATE'", "]", ")", "if", "partial_rsqs", ":", "plt", ".", "scatter", "(", "partial_rsqs_df", ".", "partial_rsqs", ",", "list", "(", "sens_df", "[", "sens_df", ".", "alpha", "==", "0", "]", "[", "'New ATE'", 
"]", ")", "*", "partial_rsqs_df", ".", "shape", "[", "0", "]", ",", "marker", "=", "'x'", ",", "color", "=", "\"red\"", ",", "linewidth", "=", "10", ")" ]
[ 443, 4 ]
[ 492, 62 ]
python
en
['en', 'en', 'en']
True
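The 'raw' plus ci branch is essentially fill_between for the confidence band plus a line for the point estimate. A minimal standalone sketch with fabricated sensitivity output shaped like the columns the method expects:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical sensitivity output with the expected column names.
sens_df = pd.DataFrame({
    'alpha': np.linspace(-1, 1, 11),
    'New ATE': np.linspace(0.8, 1.2, 11),
})
sens_df['New ATE LB'] = sens_df['New ATE'] - 0.1
sens_df['New ATE UB'] = sens_df['New ATE'] + 0.1

fig, ax = plt.subplots()
ax.fill_between(sens_df.alpha, sens_df['New ATE LB'], sens_df['New ATE UB'],
                color='gray', alpha=0.5)
ax.plot(sens_df.alpha, sens_df['New ATE'])
ax.set_xlabel('alpha')
ax.set_ylabel('New ATE')
plt.show()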
SensitivitySelectionBias.partial_rsqs_confounding
(sens_df, feature_name, partial_rsqs_value, range=0.01)
Check partial R-square values of a feature against the corresponding confounding amount of the ATE Args: sens_df (pandas.DataFrame): a data frame output from causalsens feature_name (str): feature name to check partial_rsqs_value (float): partial R-square value of the feature range (float): range to search from sens_df Return: min and max value of confounding amount
Check partial R-square values of a feature against the corresponding confounding amount of the ATE Args: sens_df (pandas.DataFrame): a data frame output from causalsens feature_name (str): feature name to check partial_rsqs_value (float): partial R-square value of the feature range (float): range to search from sens_df
def partial_rsqs_confounding(sens_df, feature_name, partial_rsqs_value, range=0.01):
    """Check partial R-square values of a feature against the corresponding
    confounding amount of the ATE

    Args:
        sens_df (pandas.DataFrame): a data frame output from causalsens
        feature_name (str): feature name to check
        partial_rsqs_value (float): partial R-square value of the feature
        range (float): range to search from sens_df

    Return: min and max value of confounding amount
    """

    rsqs_dict = []
    for i in sens_df.rsqs:
        if partial_rsqs_value - partial_rsqs_value*range < i < partial_rsqs_value + partial_rsqs_value*range:
            rsqs_dict.append(i)

    if rsqs_dict:
        confounding_min = sens_df[sens_df.rsqs.isin(rsqs_dict)].alpha.min()
        confounding_max = sens_df[sens_df.rsqs.isin(rsqs_dict)].alpha.max()
        logger.info('Only works for linear outcome models right now. Check back soon.')
        logger.info('For feature {} with partial rsquare {} confounding amount with possible values: {}, {}'.format(
            feature_name, partial_rsqs_value, confounding_min, confounding_max))
        return [confounding_min, confounding_max]
    else:
        logger.info('Cannot find corresponding rsquare value within the range for input, please edit the confounding '
                    'values vector or use a larger range and try again')
[ "def", "partial_rsqs_confounding", "(", "sens_df", ",", "feature_name", ",", "partial_rsqs_value", ",", "range", "=", "0.01", ")", ":", "rsqs_dict", "=", "[", "]", "for", "i", "in", "sens_df", ".", "rsqs", ":", "if", "partial_rsqs_value", "-", "partial_rsqs_value", "*", "range", "<", "i", "<", "partial_rsqs_value", "+", "partial_rsqs_value", "*", "range", ":", "rsqs_dict", ".", "append", "(", "i", ")", "if", "rsqs_dict", ":", "confounding_min", "=", "sens_df", "[", "sens_df", ".", "rsqs", ".", "isin", "(", "rsqs_dict", ")", "]", ".", "alpha", ".", "min", "(", ")", "confounding_max", "=", "sens_df", "[", "sens_df", ".", "rsqs", ".", "isin", "(", "rsqs_dict", ")", "]", ".", "alpha", ".", "max", "(", ")", "logger", ".", "info", "(", "'Only works for linear outcome models right now. Check back soon.'", ")", "logger", ".", "info", "(", "'For feature {} with partial rsquare {} confounding amount with possible values: {}, {}'", ".", "format", "(", "feature_name", ",", "partial_rsqs_value", ",", "confounding_min", ",", "confounding_max", ")", ")", "return", "[", "confounding_min", ",", "confounding_max", "]", "else", ":", "logger", ".", "info", "(", "'Cannot find correponding rsquare value within the range for input, please edit confounding'", ",", "'values vector or use a larger range and try again'", ")" ]
[ 495, 4 ]
[ 519, 170 ]
python
en
['en', 'en', 'en']
True
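The search keeps every rsqs value inside a relative window of plus or minus range around the target and reports the min and max alpha among the matches. The same filter expressed directly on a toy frame (all data invented):

import numpy as np
import pandas as pd

sens_df = pd.DataFrame({'rsqs': np.linspace(0, 0.2, 21),
                        'alpha': np.linspace(-1, 1, 21)})  # hypothetical output

target, tol = 0.1, 0.05  # keep rsqs within +/-5% of 0.1
hits = sens_df[(sens_df.rsqs > target - target * tol) &
               (sens_df.rsqs < target + target * tol)]
print(hits.alpha.min(), hits.alpha.max())  # confounding interval for the match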
SourceTreeAndPathFromPath
(input_path)
Given input_path, returns a tuple with sourceTree and path values. Examples: input_path (source_tree, output_path) '$(VAR)/path' ('VAR', 'path') '$(VAR)' ('VAR', None) 'path' (None, 'path')
Given input_path, returns a tuple with sourceTree and path values.
def SourceTreeAndPathFromPath(input_path):
  """Given input_path, returns a tuple with sourceTree and path values.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """

  source_group_match = _path_leading_variable.match(input_path)
  if source_group_match:
    source_tree = source_group_match.group(1)
    output_path = source_group_match.group(3)  # This may be None.
  else:
    source_tree = None
    output_path = input_path

  return (source_tree, output_path)
[ "def", "SourceTreeAndPathFromPath", "(", "input_path", ")", ":", "source_group_match", "=", "_path_leading_variable", ".", "match", "(", "input_path", ")", "if", "source_group_match", ":", "source_tree", "=", "source_group_match", ".", "group", "(", "1", ")", "output_path", "=", "source_group_match", ".", "group", "(", "3", ")", "# This may be None.", "else", ":", "source_tree", "=", "None", "output_path", "=", "input_path", "return", "(", "source_tree", ",", "output_path", ")" ]
[ 177, 0 ]
[ 195, 35 ]
python
en
['en', 'en', 'en']
True
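The _path_leading_variable regex is defined elsewhere in the module, so the pattern below is an assumed reconstruction that reproduces the documented behavior (group 1 is the variable name, group 3 the optional trailing path), not the library's verbatim definition:

import re

# Assumed reconstruction of _path_leading_variable; the real pattern lives
# elsewhere in gyp's xcodeproj_file.py. group(1) is the variable name,
# group(3) the optional trailing path.
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')

for input_path in ['$(VAR)/path', '$(VAR)', 'path']:
    m = _path_leading_variable.match(input_path)
    if m:
        print((m.group(1), m.group(3)))
    else:
        print((None, input_path))
# Prints ('VAR', 'path'), ('VAR', None), (None, 'path'),
# matching the table in the docstring above.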