| identifier | parameters | docstring | docstring_summary | function | function_tokens | start_point | end_point | language | docstring_language | docstring_language_predictions | is_langid_reliable |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 1-155 | stringlengths 2-6.09k | stringlengths 11-63.4k | stringlengths 0-63.4k | stringlengths 29-99.8k | sequence | sequence | sequence | stringclasses 1 value | stringlengths 2-7 | stringlengths 18-23 | stringclasses 2 values |
_TargetFromSpec | (old_spec, params) | Create fake target for xcode-ninja wrapper. | Create fake target for xcode-ninja wrapper. | def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target | [
"def",
"_TargetFromSpec",
"(",
"old_spec",
",",
"params",
")",
":",
"# Determine ninja top level build dir (e.g. /path/to/out).",
"ninja_toplevel",
"=",
"None",
"jobs",
"=",
"0",
"if",
"params",
":",
"options",
"=",
"params",
"[",
"'options'",
"]",
"ninja_toplevel",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"toplevel_dir",
",",
"gyp",
".",
"generator",
".",
"ninja",
".",
"ComputeOutputDir",
"(",
"params",
")",
")",
"jobs",
"=",
"params",
".",
"get",
"(",
"'generator_flags'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'xcode_ninja_jobs'",
",",
"0",
")",
"target_name",
"=",
"old_spec",
".",
"get",
"(",
"'target_name'",
")",
"product_name",
"=",
"old_spec",
".",
"get",
"(",
"'product_name'",
",",
"target_name",
")",
"product_extension",
"=",
"old_spec",
".",
"get",
"(",
"'product_extension'",
")",
"ninja_target",
"=",
"{",
"}",
"ninja_target",
"[",
"'target_name'",
"]",
"=",
"target_name",
"ninja_target",
"[",
"'product_name'",
"]",
"=",
"product_name",
"if",
"product_extension",
":",
"ninja_target",
"[",
"'product_extension'",
"]",
"=",
"product_extension",
"ninja_target",
"[",
"'toolset'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'toolset'",
")",
"ninja_target",
"[",
"'default_configuration'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'default_configuration'",
")",
"ninja_target",
"[",
"'configurations'",
"]",
"=",
"{",
"}",
"# Tell Xcode to look in |ninja_toplevel| for build products.",
"new_xcode_settings",
"=",
"{",
"}",
"if",
"ninja_toplevel",
":",
"new_xcode_settings",
"[",
"'CONFIGURATION_BUILD_DIR'",
"]",
"=",
"\"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)\"",
"%",
"ninja_toplevel",
"if",
"'configurations'",
"in",
"old_spec",
":",
"for",
"config",
"in",
"old_spec",
"[",
"'configurations'",
"]",
".",
"iterkeys",
"(",
")",
":",
"old_xcode_settings",
"=",
"old_spec",
"[",
"'configurations'",
"]",
"[",
"config",
"]",
".",
"get",
"(",
"'xcode_settings'",
",",
"{",
"}",
")",
"if",
"'IPHONEOS_DEPLOYMENT_TARGET'",
"in",
"old_xcode_settings",
":",
"new_xcode_settings",
"[",
"'CODE_SIGNING_REQUIRED'",
"]",
"=",
"\"NO\"",
"new_xcode_settings",
"[",
"'IPHONEOS_DEPLOYMENT_TARGET'",
"]",
"=",
"old_xcode_settings",
"[",
"'IPHONEOS_DEPLOYMENT_TARGET'",
"]",
"ninja_target",
"[",
"'configurations'",
"]",
"[",
"config",
"]",
"=",
"{",
"}",
"ninja_target",
"[",
"'configurations'",
"]",
"[",
"config",
"]",
"[",
"'xcode_settings'",
"]",
"=",
"new_xcode_settings",
"ninja_target",
"[",
"'mac_bundle'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'mac_bundle'",
",",
"0",
")",
"ninja_target",
"[",
"'ios_app_extension'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'ios_app_extension'",
",",
"0",
")",
"ninja_target",
"[",
"'ios_watchkit_extension'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'ios_watchkit_extension'",
",",
"0",
")",
"ninja_target",
"[",
"'ios_watchkit_app'",
"]",
"=",
"old_spec",
".",
"get",
"(",
"'ios_watchkit_app'",
",",
"0",
")",
"ninja_target",
"[",
"'type'",
"]",
"=",
"old_spec",
"[",
"'type'",
"]",
"if",
"ninja_toplevel",
":",
"ninja_target",
"[",
"'actions'",
"]",
"=",
"[",
"{",
"'action_name'",
":",
"'Compile and copy %s via ninja'",
"%",
"target_name",
",",
"'inputs'",
":",
"[",
"]",
",",
"'outputs'",
":",
"[",
"]",
",",
"'action'",
":",
"[",
"'env'",
",",
"'PATH=%s'",
"%",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
",",
"'ninja'",
",",
"'-C'",
",",
"new_xcode_settings",
"[",
"'CONFIGURATION_BUILD_DIR'",
"]",
",",
"target_name",
",",
"]",
",",
"'message'",
":",
"'Compile and copy %s via ninja'",
"%",
"target_name",
",",
"}",
",",
"]",
"if",
"jobs",
">",
"0",
":",
"ninja_target",
"[",
"'actions'",
"]",
"[",
"0",
"]",
"[",
"'action'",
"]",
".",
"extend",
"(",
"(",
"'-j'",
",",
"jobs",
")",
")",
"return",
"ninja_target"
] | [
55,
0
] | [
123,
21
] | python | en | ['en', 'en', 'en'] | True |
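A minimal sketch of how the wrapper target above behaves for a trivial spec. The import path and target name are my assumptions, not part of this row; with `params=None` no ninja top-level directory is known, so the function only copies the pass-through fields and attaches no ninja action.

```python
# Illustrative only: assumes the GYP checkout is on sys.path so the
# xcode_ninja module shown above imports as gyp.xcode_ninja.
from gyp import xcode_ninja

old_spec = {
    'target_name': 'base_unittests',   # hypothetical target
    'toolset': 'target',
    'default_configuration': 'Default',
    'type': 'executable',
}

wrapper = xcode_ninja._TargetFromSpec(old_spec, None)
assert wrapper['product_name'] == 'base_unittests'  # defaults to target_name
assert wrapper['configurations'] == {}
assert 'actions' not in wrapper  # no ninja_toplevel, so no ninja action added
```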
IsValidTargetForWrapper | (target_extras, executable_target_pattern, spec) | Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
| Limit targets for Xcode wrapper. | def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False | [
"def",
"IsValidTargetForWrapper",
"(",
"target_extras",
",",
"executable_target_pattern",
",",
"spec",
")",
":",
"target_name",
"=",
"spec",
".",
"get",
"(",
"'target_name'",
")",
"# Always include targets matching target_extras.",
"if",
"target_extras",
"is",
"not",
"None",
"and",
"re",
".",
"search",
"(",
"target_extras",
",",
"target_name",
")",
":",
"return",
"True",
"# Otherwise just show executable targets.",
"if",
"spec",
".",
"get",
"(",
"'type'",
",",
"''",
")",
"==",
"'executable'",
"and",
"spec",
".",
"get",
"(",
"'product_extension'",
",",
"''",
")",
"!=",
"'bundle'",
":",
"# If there is a filter and the target does not match, exclude the target.",
"if",
"executable_target_pattern",
"is",
"not",
"None",
":",
"if",
"not",
"re",
".",
"search",
"(",
"executable_target_pattern",
",",
"target_name",
")",
":",
"return",
"False",
"return",
"True",
"return",
"False"
] | [
125,
0
] | [
149,
14
] | python | en | ['en', 'en', 'en'] | True |
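A quick sanity check of the filtering rules above; the import path and the target names are made up for illustration.

```python
from gyp import xcode_ninja  # assumed import path

# Executable targets pass unless an executable-only pattern excludes them.
exe = {'target_name': 'base_unittests', 'type': 'executable'}
assert xcode_ninja.IsValidTargetForWrapper(None, None, exe)
assert not xcode_ninja.IsValidTargetForWrapper(None, r'^browser_', exe)

# Non-executables are skipped unless target_extras explicitly matches them.
lib = {'target_name': 'base', 'type': 'static_library'}
assert not xcode_ninja.IsValidTargetForWrapper(None, None, lib)
assert xcode_ninja.IsValidTargetForWrapper(r'^base$', None, lib)
```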
CreateWrapper | (target_list, target_dicts, data, params) | Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
| Initialize targets for the ninja wrapper. | def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
# Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data) | [
"def",
"CreateWrapper",
"(",
"target_list",
",",
"target_dicts",
",",
"data",
",",
"params",
")",
":",
"orig_gyp",
"=",
"params",
"[",
"'build_files'",
"]",
"[",
"0",
"]",
"for",
"gyp_name",
",",
"gyp_dict",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"if",
"gyp_name",
"==",
"orig_gyp",
":",
"depth",
"=",
"gyp_dict",
"[",
"'_DEPTH'",
"]",
"# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE",
"# and prepend .ninja before the .gyp extension.",
"generator_flags",
"=",
"params",
".",
"get",
"(",
"'generator_flags'",
",",
"{",
"}",
")",
"main_gyp",
"=",
"generator_flags",
".",
"get",
"(",
"'xcode_ninja_main_gyp'",
",",
"None",
")",
"if",
"main_gyp",
"is",
"None",
":",
"(",
"build_file_root",
",",
"build_file_ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"orig_gyp",
")",
"main_gyp",
"=",
"build_file_root",
"+",
"\".ninja\"",
"+",
"build_file_ext",
"# Create new |target_list|, |target_dicts| and |data| data structures.",
"new_target_list",
"=",
"[",
"]",
"new_target_dicts",
"=",
"{",
"}",
"new_data",
"=",
"{",
"}",
"# Set base keys needed for |data|.",
"new_data",
"[",
"main_gyp",
"]",
"=",
"{",
"}",
"new_data",
"[",
"main_gyp",
"]",
"[",
"'included_files'",
"]",
"=",
"[",
"]",
"new_data",
"[",
"main_gyp",
"]",
"[",
"'targets'",
"]",
"=",
"[",
"]",
"new_data",
"[",
"main_gyp",
"]",
"[",
"'xcode_settings'",
"]",
"=",
"data",
"[",
"orig_gyp",
"]",
".",
"get",
"(",
"'xcode_settings'",
",",
"{",
"}",
")",
"# Normally the xcode-ninja generator includes only valid executable targets.",
"# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to",
"# executable targets that match the pattern. (Default all)",
"executable_target_pattern",
"=",
"generator_flags",
".",
"get",
"(",
"'xcode_ninja_executable_target_pattern'",
",",
"None",
")",
"# For including other non-executable targets, add the matching target name",
"# to the |xcode_ninja_target_pattern| regular expression. (Default none)",
"target_extras",
"=",
"generator_flags",
".",
"get",
"(",
"'xcode_ninja_target_pattern'",
",",
"None",
")",
"for",
"old_qualified_target",
"in",
"target_list",
":",
"spec",
"=",
"target_dicts",
"[",
"old_qualified_target",
"]",
"if",
"IsValidTargetForWrapper",
"(",
"target_extras",
",",
"executable_target_pattern",
",",
"spec",
")",
":",
"# Add to new_target_list.",
"target_name",
"=",
"spec",
".",
"get",
"(",
"'target_name'",
")",
"new_target_name",
"=",
"'%s:%s#target'",
"%",
"(",
"main_gyp",
",",
"target_name",
")",
"new_target_list",
".",
"append",
"(",
"new_target_name",
")",
"# Add to new_target_dicts.",
"new_target_dicts",
"[",
"new_target_name",
"]",
"=",
"_TargetFromSpec",
"(",
"spec",
",",
"params",
")",
"# Add to new_data.",
"for",
"old_target",
"in",
"data",
"[",
"old_qualified_target",
".",
"split",
"(",
"':'",
")",
"[",
"0",
"]",
"]",
"[",
"'targets'",
"]",
":",
"if",
"old_target",
"[",
"'target_name'",
"]",
"==",
"target_name",
":",
"new_data_target",
"=",
"{",
"}",
"new_data_target",
"[",
"'target_name'",
"]",
"=",
"old_target",
"[",
"'target_name'",
"]",
"new_data_target",
"[",
"'toolset'",
"]",
"=",
"old_target",
"[",
"'toolset'",
"]",
"new_data",
"[",
"main_gyp",
"]",
"[",
"'targets'",
"]",
".",
"append",
"(",
"new_data_target",
")",
"# Create sources target.",
"sources_target_name",
"=",
"'sources_for_indexing'",
"sources_target",
"=",
"_TargetFromSpec",
"(",
"{",
"'target_name'",
":",
"sources_target_name",
",",
"'toolset'",
":",
"'target'",
",",
"'default_configuration'",
":",
"'Default'",
",",
"'mac_bundle'",
":",
"'0'",
",",
"'type'",
":",
"'executable'",
"}",
",",
"None",
")",
"# Tell Xcode to look everywhere for headers.",
"sources_target",
"[",
"'configurations'",
"]",
"=",
"{",
"'Default'",
":",
"{",
"'include_dirs'",
":",
"[",
"depth",
"]",
"}",
"}",
"sources",
"=",
"[",
"]",
"for",
"target",
",",
"target_dict",
"in",
"target_dicts",
".",
"iteritems",
"(",
")",
":",
"base",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"target",
")",
"files",
"=",
"target_dict",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"+",
"target_dict",
".",
"get",
"(",
"'mac_bundle_resources'",
",",
"[",
"]",
")",
"for",
"action",
"in",
"target_dict",
".",
"get",
"(",
"'actions'",
",",
"[",
"]",
")",
":",
"files",
".",
"extend",
"(",
"action",
".",
"get",
"(",
"'inputs'",
",",
"[",
"]",
")",
")",
"# Remove files starting with $. These are mostly intermediate files for the",
"# build system.",
"files",
"=",
"[",
"file",
"for",
"file",
"in",
"files",
"if",
"not",
"file",
".",
"startswith",
"(",
"'$'",
")",
"]",
"# Make sources relative to root build file.",
"relative_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"main_gyp",
")",
"sources",
"+=",
"[",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"base",
",",
"file",
")",
",",
"relative_path",
")",
"for",
"file",
"in",
"files",
"]",
"sources_target",
"[",
"'sources'",
"]",
"=",
"sorted",
"(",
"set",
"(",
"sources",
")",
")",
"# Put sources_to_index in it's own gyp.",
"sources_gyp",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"main_gyp",
")",
",",
"sources_target_name",
"+",
"\".gyp\"",
")",
"fully_qualified_target_name",
"=",
"'%s:%s#target'",
"%",
"(",
"sources_gyp",
",",
"sources_target_name",
")",
"# Add to new_target_list, new_target_dicts and new_data.",
"new_target_list",
".",
"append",
"(",
"fully_qualified_target_name",
")",
"new_target_dicts",
"[",
"fully_qualified_target_name",
"]",
"=",
"sources_target",
"new_data_target",
"=",
"{",
"}",
"new_data_target",
"[",
"'target_name'",
"]",
"=",
"sources_target",
"[",
"'target_name'",
"]",
"new_data_target",
"[",
"'_DEPTH'",
"]",
"=",
"depth",
"new_data_target",
"[",
"'toolset'",
"]",
"=",
"\"target\"",
"new_data",
"[",
"sources_gyp",
"]",
"=",
"{",
"}",
"new_data",
"[",
"sources_gyp",
"]",
"[",
"'targets'",
"]",
"=",
"[",
"]",
"new_data",
"[",
"sources_gyp",
"]",
"[",
"'included_files'",
"]",
"=",
"[",
"]",
"new_data",
"[",
"sources_gyp",
"]",
"[",
"'xcode_settings'",
"]",
"=",
"data",
"[",
"orig_gyp",
"]",
".",
"get",
"(",
"'xcode_settings'",
",",
"{",
"}",
")",
"new_data",
"[",
"sources_gyp",
"]",
"[",
"'targets'",
"]",
".",
"append",
"(",
"new_data_target",
")",
"# Write workspace to file.",
"_WriteWorkspace",
"(",
"main_gyp",
",",
"sources_gyp",
",",
"params",
")",
"return",
"(",
"new_target_list",
",",
"new_target_dicts",
",",
"new_data",
")"
] | [
151,
0
] | [
269,
54
] | python | en | ['en', 'en', 'en'] | True |
_NormalizedSource | (source) | Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalized.
Returns:
The normalized path.
| Normalize the path. | def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source | [
"def",
"_NormalizedSource",
"(",
"source",
")",
":",
"normalized",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"source",
")",
"if",
"source",
".",
"count",
"(",
"'$'",
")",
"==",
"normalized",
".",
"count",
"(",
"'$'",
")",
":",
"source",
"=",
"normalized",
"return",
"source"
] | [
138,
0
] | [
153,
15
] | python | en | ['en', 'en', 'en'] | True |
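The `$`-count guard above exists because `os.path.normpath` will happily fold a variable reference away. A small sketch (POSIX path semantics assumed):

```python
import os

src = 'a/$(SDKROOT)/../b.c'
print(os.path.normpath(src))   # 'a/b.c' -- the $(SDKROOT) reference is gone
# _NormalizedSource(src) therefore returns the original string unchanged,
# while _NormalizedSource('a/./b.c') safely becomes 'a/b.c'.
```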
_FixPath | (path) | Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
| Convert paths to a form that will make sense in a vcproj file. | def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path | [
"def",
"_FixPath",
"(",
"path",
")",
":",
"if",
"fixpath_prefix",
"and",
"path",
"and",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
"and",
"not",
"path",
"[",
"0",
"]",
"==",
"'$'",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"fixpath_prefix",
",",
"path",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'/'",
",",
"'\\\\'",
")",
"path",
"=",
"_NormalizedSource",
"(",
"path",
")",
"if",
"path",
"and",
"path",
"[",
"-",
"1",
"]",
"==",
"'\\\\'",
":",
"path",
"=",
"path",
"[",
":",
"-",
"1",
"]",
"return",
"path"
] | [
156,
0
] | [
170,
13
] | python | en | ['en', 'en', 'en'] | True |
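A couple of illustrative cases for the path fixing above. The `gyp.generator.msvs` import path and the assumption that the module-level `fixpath_prefix` is still at its default (unset) are mine, not stated in this row.

```python
from gyp.generator import msvs as msvs_gen  # assumed module path

# With fixpath_prefix unset, only separators and normalization change,
# and the trailing backslash is trimmed.
assert msvs_gen._FixPath('out/Debug/obj/') == 'out\\Debug\\obj'
# Paths starting with '$' are never prefixed either.
assert msvs_gen._FixPath('$(OutDir)/foo.obj') == '$(OutDir)\\foo.obj'
```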
_FixPaths | (paths) | Fix each of the paths of the list. | Fix each of the paths of the list. | def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths] | [
"def",
"_FixPaths",
"(",
"paths",
")",
":",
"return",
"[",
"_FixPath",
"(",
"i",
")",
"for",
"i",
"in",
"paths",
"]"
] | [
173,
0
] | [
175,
37
] | python | en | ['en', 'en', 'en'] | True |
_ConvertSourcesToFilterHierarchy | (sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None) | Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
| Converts a list split source file paths into a vcproj folder hierarchy. | def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = OrderedDict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
elif msvs_version and not msvs_version.UsesVcxproj():
# For MSVS 2008 and earlier, we need to process all files before walking
# the sub folders.
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
if msvs_version and msvs_version.UsesVcxproj():
return result
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result | [
"def",
"_ConvertSourcesToFilterHierarchy",
"(",
"sources",
",",
"prefix",
"=",
"None",
",",
"excluded",
"=",
"None",
",",
"list_excluded",
"=",
"True",
",",
"msvs_version",
"=",
"None",
")",
":",
"if",
"not",
"prefix",
":",
"prefix",
"=",
"[",
"]",
"result",
"=",
"[",
"]",
"excluded_result",
"=",
"[",
"]",
"folders",
"=",
"OrderedDict",
"(",
")",
"# Gather files into the final result, excluded, or folders.",
"for",
"s",
"in",
"sources",
":",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"filename",
"=",
"_NormalizedSource",
"(",
"'\\\\'",
".",
"join",
"(",
"prefix",
"+",
"s",
")",
")",
"if",
"filename",
"in",
"excluded",
":",
"excluded_result",
".",
"append",
"(",
"filename",
")",
"else",
":",
"result",
".",
"append",
"(",
"filename",
")",
"elif",
"msvs_version",
"and",
"not",
"msvs_version",
".",
"UsesVcxproj",
"(",
")",
":",
"# For MSVS 2008 and earlier, we need to process all files before walking",
"# the sub folders.",
"if",
"not",
"folders",
".",
"get",
"(",
"s",
"[",
"0",
"]",
")",
":",
"folders",
"[",
"s",
"[",
"0",
"]",
"]",
"=",
"[",
"]",
"folders",
"[",
"s",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"s",
"[",
"1",
":",
"]",
")",
"else",
":",
"contents",
"=",
"_ConvertSourcesToFilterHierarchy",
"(",
"[",
"s",
"[",
"1",
":",
"]",
"]",
",",
"prefix",
"+",
"[",
"s",
"[",
"0",
"]",
"]",
",",
"excluded",
"=",
"excluded",
",",
"list_excluded",
"=",
"list_excluded",
",",
"msvs_version",
"=",
"msvs_version",
")",
"contents",
"=",
"MSVSProject",
".",
"Filter",
"(",
"s",
"[",
"0",
"]",
",",
"contents",
"=",
"contents",
")",
"result",
".",
"append",
"(",
"contents",
")",
"# Add a folder for excluded files.",
"if",
"excluded_result",
"and",
"list_excluded",
":",
"excluded_folder",
"=",
"MSVSProject",
".",
"Filter",
"(",
"'_excluded_files'",
",",
"contents",
"=",
"excluded_result",
")",
"result",
".",
"append",
"(",
"excluded_folder",
")",
"if",
"msvs_version",
"and",
"msvs_version",
".",
"UsesVcxproj",
"(",
")",
":",
"return",
"result",
"# Populate all the folders.",
"for",
"f",
"in",
"folders",
":",
"contents",
"=",
"_ConvertSourcesToFilterHierarchy",
"(",
"folders",
"[",
"f",
"]",
",",
"prefix",
"=",
"prefix",
"+",
"[",
"f",
"]",
",",
"excluded",
"=",
"excluded",
",",
"list_excluded",
"=",
"list_excluded",
",",
"msvs_version",
"=",
"msvs_version",
")",
"contents",
"=",
"MSVSProject",
".",
"Filter",
"(",
"f",
",",
"contents",
"=",
"contents",
")",
"result",
".",
"append",
"(",
"contents",
")",
"return",
"result"
] | [
178,
0
] | [
240,
15
] | python | en | ['en', 'fr', 'en'] | True |
_AddActionStep | (actions_dict, inputs, outputs, description, command) | Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
| Merge action into an existing list of actions. | def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action) | [
"def",
"_AddActionStep",
"(",
"actions_dict",
",",
"inputs",
",",
"outputs",
",",
"description",
",",
"command",
")",
":",
"# Require there to be at least one input (call sites will ensure this).",
"assert",
"inputs",
"action",
"=",
"{",
"'inputs'",
":",
"inputs",
",",
"'outputs'",
":",
"outputs",
",",
"'description'",
":",
"description",
",",
"'command'",
":",
"command",
",",
"}",
"# Pick where to stick this action.",
"# While less than optimal in terms of build time, attach them to the first",
"# input for now.",
"chosen_input",
"=",
"inputs",
"[",
"0",
"]",
"# Add it there.",
"if",
"chosen_input",
"not",
"in",
"actions_dict",
":",
"actions_dict",
"[",
"chosen_input",
"]",
"=",
"[",
"]",
"actions_dict",
"[",
"chosen_input",
"]",
".",
"append",
"(",
"action",
")"
] | [
393,
0
] | [
425,
43
] | python | en | ['en', 'en', 'en'] | True |
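The merging behaviour is easiest to see with two overlapping actions. Everything below is hypothetical (the midl command, the file names, and the `gyp.generator.msvs` import path).

```python
from gyp.generator import msvs as msvs_gen  # assumed module path

actions_dict = {}
msvs_gen._AddActionStep(actions_dict,
                        inputs=['foo.idl'],
                        outputs=['foo.h'],
                        description='Running midl on foo.idl',
                        command='midl foo.idl')
msvs_gen._AddActionStep(actions_dict,
                        inputs=['foo.idl', 'common.acf'],
                        outputs=['foo_i.c'],
                        description='Second midl pass',
                        command='midl /acf common.acf foo.idl')

# Both actions chose the same first input, so they share one bucket and will
# later be collapsed into a single custom build step for foo.idl.
assert list(actions_dict) == ['foo.idl']
assert len(actions_dict['foo.idl']) == 2
```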
_AddCustomBuildToolForMSVS | (p, spec, primary_input,
inputs, outputs, description, cmd) | Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
| Add a custom build tool to execute something. | def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool]) | [
"def",
"_AddCustomBuildToolForMSVS",
"(",
"p",
",",
"spec",
",",
"primary_input",
",",
"inputs",
",",
"outputs",
",",
"description",
",",
"cmd",
")",
":",
"inputs",
"=",
"_FixPaths",
"(",
"inputs",
")",
"outputs",
"=",
"_FixPaths",
"(",
"outputs",
")",
"tool",
"=",
"MSVSProject",
".",
"Tool",
"(",
"'VCCustomBuildTool'",
",",
"{",
"'Description'",
":",
"description",
",",
"'AdditionalDependencies'",
":",
"';'",
".",
"join",
"(",
"inputs",
")",
",",
"'Outputs'",
":",
"';'",
".",
"join",
"(",
"outputs",
")",
",",
"'CommandLine'",
":",
"cmd",
",",
"}",
")",
"# Add to the properties of primary input for each config.",
"for",
"config_name",
",",
"c_data",
"in",
"spec",
"[",
"'configurations'",
"]",
".",
"iteritems",
"(",
")",
":",
"p",
".",
"AddFileConfig",
"(",
"_FixPath",
"(",
"primary_input",
")",
",",
"_ConfigFullName",
"(",
"config_name",
",",
"c_data",
")",
",",
"tools",
"=",
"[",
"tool",
"]",
")"
] | [
428,
0
] | [
453,
71
] | python | en | ['en', 'en', 'en'] | True |
_AddAccumulatedActionsToMSVS | (p, spec, actions_dict) | Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
| Add actions accumulated into an actions_dict, merging as needed. | def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command) | [
"def",
"_AddAccumulatedActionsToMSVS",
"(",
"p",
",",
"spec",
",",
"actions_dict",
")",
":",
"for",
"primary_input",
"in",
"actions_dict",
":",
"inputs",
"=",
"OrderedSet",
"(",
")",
"outputs",
"=",
"OrderedSet",
"(",
")",
"descriptions",
"=",
"[",
"]",
"commands",
"=",
"[",
"]",
"for",
"action",
"in",
"actions_dict",
"[",
"primary_input",
"]",
":",
"inputs",
".",
"update",
"(",
"OrderedSet",
"(",
"action",
"[",
"'inputs'",
"]",
")",
")",
"outputs",
".",
"update",
"(",
"OrderedSet",
"(",
"action",
"[",
"'outputs'",
"]",
")",
")",
"descriptions",
".",
"append",
"(",
"action",
"[",
"'description'",
"]",
")",
"commands",
".",
"append",
"(",
"action",
"[",
"'command'",
"]",
")",
"# Add the custom build step for one input file.",
"description",
"=",
"', and also '",
".",
"join",
"(",
"descriptions",
")",
"command",
"=",
"'\\r\\n'",
".",
"join",
"(",
"commands",
")",
"_AddCustomBuildToolForMSVS",
"(",
"p",
",",
"spec",
",",
"primary_input",
"=",
"primary_input",
",",
"inputs",
"=",
"inputs",
",",
"outputs",
"=",
"outputs",
",",
"description",
"=",
"description",
",",
"cmd",
"=",
"command",
")"
] | [
456,
0
] | [
483,
43
] | python | en | ['en', 'en', 'en'] | True |
_RuleExpandPath | (path, input_file) | Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
| Given the input file to which a rule applied, string substitute a path. | def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path | [
"def",
"_RuleExpandPath",
"(",
"path",
",",
"input_file",
")",
":",
"path",
"=",
"path",
".",
"replace",
"(",
"'$(InputName)'",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"input_file",
")",
"[",
"1",
"]",
")",
"[",
"0",
"]",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'$(InputDir)'",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"input_file",
")",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'$(InputExt)'",
",",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"input_file",
")",
"[",
"1",
"]",
")",
"[",
"1",
"]",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'$(InputFileName)'",
",",
"os",
".",
"path",
".",
"split",
"(",
"input_file",
")",
"[",
"1",
"]",
")",
"path",
"=",
"path",
".",
"replace",
"(",
"'$(InputPath)'",
",",
"input_file",
")",
"return",
"path"
] | [
486,
0
] | [
502,
13
] | python | en | ['en', 'en', 'en'] | True |
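The substitution variables mirror the MSVS custom-build macros. A short check with a made-up input file (import path assumed as before):

```python
from gyp.generator import msvs as msvs_gen  # assumed module path

f = 'some/dir/foo.idl'
assert msvs_gen._RuleExpandPath('$(InputName).h', f) == 'foo.h'
assert msvs_gen._RuleExpandPath('$(InputExt)', f) == '.idl'
assert msvs_gen._RuleExpandPath('$(InputFileName)', f) == 'foo.idl'
assert msvs_gen._RuleExpandPath('$(InputDir)/out/$(InputName).tlb', f) == \
    'some/dir/out/foo.tlb'
```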
_FindRuleTriggerFiles | (rule, sources) | Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
| Find the list of files which a particular rule applies to. | def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', []) | [
"def",
"_FindRuleTriggerFiles",
"(",
"rule",
",",
"sources",
")",
":",
"return",
"rule",
".",
"get",
"(",
"'rule_sources'",
",",
"[",
"]",
")"
] | [
505,
0
] | [
514,
37
] | python | en | ['en', 'en', 'en'] | True |
_RuleInputsAndOutputs | (rule, trigger_file) | Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
| Find the inputs and outputs generated by a rule. | def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = OrderedSet()
outputs = OrderedSet()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs) | [
"def",
"_RuleInputsAndOutputs",
"(",
"rule",
",",
"trigger_file",
")",
":",
"raw_inputs",
"=",
"_FixPaths",
"(",
"rule",
".",
"get",
"(",
"'inputs'",
",",
"[",
"]",
")",
")",
"raw_outputs",
"=",
"_FixPaths",
"(",
"rule",
".",
"get",
"(",
"'outputs'",
",",
"[",
"]",
")",
")",
"inputs",
"=",
"OrderedSet",
"(",
")",
"outputs",
"=",
"OrderedSet",
"(",
")",
"inputs",
".",
"add",
"(",
"trigger_file",
")",
"for",
"i",
"in",
"raw_inputs",
":",
"inputs",
".",
"add",
"(",
"_RuleExpandPath",
"(",
"i",
",",
"trigger_file",
")",
")",
"for",
"o",
"in",
"raw_outputs",
":",
"outputs",
".",
"add",
"(",
"_RuleExpandPath",
"(",
"o",
",",
"trigger_file",
")",
")",
"return",
"(",
"inputs",
",",
"outputs",
")"
] | [
517,
0
] | [
535,
26
] | python | en | ['en', 'en', 'en'] | True |
_GenerateNativeRulesForMSVS | (p, rules, output_dir, spec, options) | Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
| Generate a native rules file. | def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename) | [
"def",
"_GenerateNativeRulesForMSVS",
"(",
"p",
",",
"rules",
",",
"output_dir",
",",
"spec",
",",
"options",
")",
":",
"rules_filename",
"=",
"'%s%s.rules'",
"%",
"(",
"spec",
"[",
"'target_name'",
"]",
",",
"options",
".",
"suffix",
")",
"rules_file",
"=",
"MSVSToolFile",
".",
"Writer",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"rules_filename",
")",
",",
"spec",
"[",
"'target_name'",
"]",
")",
"# Add each rule.",
"for",
"r",
"in",
"rules",
":",
"rule_name",
"=",
"r",
"[",
"'rule_name'",
"]",
"rule_ext",
"=",
"r",
"[",
"'extension'",
"]",
"inputs",
"=",
"_FixPaths",
"(",
"r",
".",
"get",
"(",
"'inputs'",
",",
"[",
"]",
")",
")",
"outputs",
"=",
"_FixPaths",
"(",
"r",
".",
"get",
"(",
"'outputs'",
",",
"[",
"]",
")",
")",
"# Skip a rule with no action and no inputs.",
"if",
"'action'",
"not",
"in",
"r",
"and",
"not",
"r",
".",
"get",
"(",
"'rule_sources'",
",",
"[",
"]",
")",
":",
"continue",
"cmd",
"=",
"_BuildCommandLineForRule",
"(",
"spec",
",",
"r",
",",
"has_input_path",
"=",
"True",
",",
"do_setup_env",
"=",
"True",
")",
"rules_file",
".",
"AddCustomBuildRule",
"(",
"name",
"=",
"rule_name",
",",
"description",
"=",
"r",
".",
"get",
"(",
"'message'",
",",
"rule_name",
")",
",",
"extensions",
"=",
"[",
"rule_ext",
"]",
",",
"additional_dependencies",
"=",
"inputs",
",",
"outputs",
"=",
"outputs",
",",
"cmd",
"=",
"cmd",
")",
"# Write out rules file.",
"rules_file",
".",
"WriteIfChanged",
"(",
")",
"# Add rules file to project.",
"p",
".",
"AddToolFile",
"(",
"rules_filename",
")"
] | [
538,
0
] | [
573,
31
] | python | en | ['en', 'co', 'en'] | True |
_GenerateExternalRules | (rules, output_dir, spec,
sources, options, actions_to_add) | Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
| Generate an external makefile to do a set of rules. | def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = OrderedSet()
all_outputs = OrderedSet()
all_output_dirs = OrderedSet()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(OrderedSet(inputs))
all_outputs.update(OrderedSet(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand from in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd) | [
"def",
"_GenerateExternalRules",
"(",
"rules",
",",
"output_dir",
",",
"spec",
",",
"sources",
",",
"options",
",",
"actions_to_add",
")",
":",
"filename",
"=",
"'%s_rules%s.mk'",
"%",
"(",
"spec",
"[",
"'target_name'",
"]",
",",
"options",
".",
"suffix",
")",
"mk_file",
"=",
"gyp",
".",
"common",
".",
"WriteOnDiff",
"(",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"filename",
")",
")",
"# Find cygwin style versions of some paths.",
"mk_file",
".",
"write",
"(",
"'OutDirCygwin:=$(shell cygpath -u \"$(OutDir)\")\\n'",
")",
"mk_file",
".",
"write",
"(",
"'IntDirCygwin:=$(shell cygpath -u \"$(IntDir)\")\\n'",
")",
"# Gather stuff needed to emit all: target.",
"all_inputs",
"=",
"OrderedSet",
"(",
")",
"all_outputs",
"=",
"OrderedSet",
"(",
")",
"all_output_dirs",
"=",
"OrderedSet",
"(",
")",
"first_outputs",
"=",
"[",
"]",
"for",
"rule",
"in",
"rules",
":",
"trigger_files",
"=",
"_FindRuleTriggerFiles",
"(",
"rule",
",",
"sources",
")",
"for",
"tf",
"in",
"trigger_files",
":",
"inputs",
",",
"outputs",
"=",
"_RuleInputsAndOutputs",
"(",
"rule",
",",
"tf",
")",
"all_inputs",
".",
"update",
"(",
"OrderedSet",
"(",
"inputs",
")",
")",
"all_outputs",
".",
"update",
"(",
"OrderedSet",
"(",
"outputs",
")",
")",
"# Only use one target from each rule as the dependency for",
"# 'all' so we don't try to build each rule multiple times.",
"first_outputs",
".",
"append",
"(",
"list",
"(",
"outputs",
")",
"[",
"0",
"]",
")",
"# Get the unique output directories for this rule.",
"output_dirs",
"=",
"[",
"os",
".",
"path",
".",
"split",
"(",
"i",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"outputs",
"]",
"for",
"od",
"in",
"output_dirs",
":",
"all_output_dirs",
".",
"add",
"(",
"od",
")",
"first_outputs_cyg",
"=",
"[",
"_Cygwinify",
"(",
"i",
")",
"for",
"i",
"in",
"first_outputs",
"]",
"# Write out all: target, including mkdir for each output directory.",
"mk_file",
".",
"write",
"(",
"'all: %s\\n'",
"%",
"' '",
".",
"join",
"(",
"first_outputs_cyg",
")",
")",
"for",
"od",
"in",
"all_output_dirs",
":",
"if",
"od",
":",
"mk_file",
".",
"write",
"(",
"'\\tmkdir -p `cygpath -u \"%s\"`\\n'",
"%",
"od",
")",
"mk_file",
".",
"write",
"(",
"'\\n'",
")",
"# Define how each output is generated.",
"for",
"rule",
"in",
"rules",
":",
"trigger_files",
"=",
"_FindRuleTriggerFiles",
"(",
"rule",
",",
"sources",
")",
"for",
"tf",
"in",
"trigger_files",
":",
"# Get all the inputs and outputs for this rule for this trigger file.",
"inputs",
",",
"outputs",
"=",
"_RuleInputsAndOutputs",
"(",
"rule",
",",
"tf",
")",
"inputs",
"=",
"[",
"_Cygwinify",
"(",
"i",
")",
"for",
"i",
"in",
"inputs",
"]",
"outputs",
"=",
"[",
"_Cygwinify",
"(",
"i",
")",
"for",
"i",
"in",
"outputs",
"]",
"# Prepare the command line for this rule.",
"cmd",
"=",
"[",
"_RuleExpandPath",
"(",
"c",
",",
"tf",
")",
"for",
"c",
"in",
"rule",
"[",
"'action'",
"]",
"]",
"cmd",
"=",
"[",
"'\"%s\"'",
"%",
"i",
"for",
"i",
"in",
"cmd",
"]",
"cmd",
"=",
"' '",
".",
"join",
"(",
"cmd",
")",
"# Add it to the makefile.",
"mk_file",
".",
"write",
"(",
"'%s: %s\\n'",
"%",
"(",
"' '",
".",
"join",
"(",
"outputs",
")",
",",
"' '",
".",
"join",
"(",
"inputs",
")",
")",
")",
"mk_file",
".",
"write",
"(",
"'\\t%s\\n\\n'",
"%",
"cmd",
")",
"# Close up the file.",
"mk_file",
".",
"close",
"(",
")",
"# Add makefile to list of sources.",
"sources",
".",
"add",
"(",
"filename",
")",
"# Add a build action to call makefile.",
"cmd",
"=",
"[",
"'make'",
",",
"'OutDir=$(OutDir)'",
",",
"'IntDir=$(IntDir)'",
",",
"'-j'",
",",
"'${NUMBER_OF_PROCESSORS_PLUS_1}'",
",",
"'-f'",
",",
"filename",
"]",
"cmd",
"=",
"_BuildCommandLineForRuleRaw",
"(",
"spec",
",",
"cmd",
",",
"True",
",",
"False",
",",
"True",
",",
"True",
")",
"# Insert makefile as 0'th input, so it gets the action attached there,",
"# as this is easier to understand from in the IDE.",
"all_inputs",
"=",
"list",
"(",
"all_inputs",
")",
"all_inputs",
".",
"insert",
"(",
"0",
",",
"filename",
")",
"_AddActionStep",
"(",
"actions_to_add",
",",
"inputs",
"=",
"_FixPaths",
"(",
"all_inputs",
")",
",",
"outputs",
"=",
"_FixPaths",
"(",
"all_outputs",
")",
",",
"description",
"=",
"'Running external rules for %s'",
"%",
"spec",
"[",
"'target_name'",
"]",
",",
"command",
"=",
"cmd",
")"
] | [
582,
0
] | [
660,
29
] | python | en | ['en', 'en', 'en'] | True |
_EscapeEnvironmentVariableExpansion | (s) | Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
| Escapes % characters. | def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s | [
"def",
"_EscapeEnvironmentVariableExpansion",
"(",
"s",
")",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'%'",
",",
"'%%'",
")",
"return",
"s"
] | [
663,
0
] | [
678,
10
] | python | en | ['es', 'en', 'en'] | True |
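For completeness, the escaping here is just a doubling of `%` so that cmd.exe-style `%VAR%` expansion leaves the value alone (import path assumed as before):

```python
from gyp.generator import msvs as msvs_gen  # assumed module path

assert msvs_gen._EscapeEnvironmentVariableExpansion('50%') == '50%%'
assert msvs_gen._EscapeEnvironmentVariableExpansion('%PATH%') == '%%PATH%%'
```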
_EscapeCommandLineArgumentForMSVS | (s) | Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
| Escapes a Windows command-line argument. | def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s | [
"def",
"_EscapeCommandLineArgumentForMSVS",
"(",
"s",
")",
":",
"def",
"_Replace",
"(",
"match",
")",
":",
"# For a literal quote, CommandLineToArgv requires an odd number of",
"# backslashes preceding it, and it produces half as many literal backslashes",
"# (rounded down). So we need to produce 2n+1 backslashes.",
"return",
"2",
"*",
"match",
".",
"group",
"(",
"1",
")",
"+",
"'\\\\\"'",
"# Escape all quotes so that they are interpreted literally.",
"s",
"=",
"quote_replacer_regex",
".",
"sub",
"(",
"_Replace",
",",
"s",
")",
"# Now add unescaped quotes so that any whitespace is interpreted literally.",
"s",
"=",
"'\"'",
"+",
"s",
"+",
"'\"'",
"return",
"s"
] | [
684,
0
] | [
709,
10
] | python | en | ['en', 'fr', 'en'] | True |
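A rough feel for the quoting rules above. The expected output is shown as a comment rather than asserted, since the exact `quote_replacer_regex` is defined elsewhere in the module and is not part of this excerpt.

```python
from gyp.generator import msvs as msvs_gen  # assumed module path

print(msvs_gen._EscapeCommandLineArgumentForMSVS('say "hello"'))
# Per the docstring: inner quotes gain a backslash and the whole argument is
# wrapped in unescaped quotes, e.g. "say \"hello\""
```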
_EscapeVCProjCommandLineArgListItem | (s) | Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
| Escapes command line arguments for MSVS. | def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
# the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s | [
"def",
"_EscapeVCProjCommandLineArgListItem",
"(",
"s",
")",
":",
"def",
"_Replace",
"(",
"match",
")",
":",
"# For a non-literal quote, CommandLineToArgv requires an even number of",
"# backslashes preceding it, and it produces half as many literal",
"# backslashes. So we need to produce 2n backslashes.",
"return",
"2",
"*",
"match",
".",
"group",
"(",
"1",
")",
"+",
"'\"'",
"+",
"match",
".",
"group",
"(",
"2",
")",
"+",
"'\"'",
"segments",
"=",
"s",
".",
"split",
"(",
"'\"'",
")",
"# The unquoted segments are at the even-numbered indices.",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"segments",
")",
",",
"2",
")",
":",
"segments",
"[",
"i",
"]",
"=",
"delimiters_replacer_regex",
".",
"sub",
"(",
"_Replace",
",",
"segments",
"[",
"i",
"]",
")",
"# Concatenate back into a single string",
"s",
"=",
"'\"'",
".",
"join",
"(",
"segments",
")",
"if",
"len",
"(",
"segments",
")",
"%",
"2",
"==",
"0",
":",
"# String ends while still quoted according to VCProj's convention. This",
"# means the delimiter and the next list item that follow this one in the",
"# .vcproj file will be misinterpreted as part of this item. There is nothing",
"# we can do about this. Adding an extra quote would correct the problem in",
"# the VCProj but cause the same problem on the final command-line. Moving",
"# the item to the end of the list does works, but that's only possible if",
"# there's only one such item. Let's just warn the user.",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"'Warning: MSVS may misinterpret the odd number of '",
"+",
"'quotes in '",
"+",
"s",
")",
"return",
"s"
] | [
715,
0
] | [
759,
10
] | python | en | ['en', 'fr', 'en'] | True |
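A small sketch of the behaviour _EscapeVCProjCommandLineArgListItem describes: after splitting on '"', only the even-numbered segments (the ones VCProj's convention treats as unquoted) have their ',' and ';' delimiters quoted. The pattern for delimiters_replacer_regex is an assumption standing in for the module-level definition.

import re

# Assumed stand-in for gyp's module-level delimiters_replacer_regex.
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')

def escape_list_item(s):
  def _replace(match):
    return 2 * match.group(1) + '"' + match.group(2) + '"'
  segments = s.split('"')
  for i in range(0, len(segments), 2):  # even indices are the unquoted segments
    segments[i] = delimiters_replacer_regex.sub(_replace, segments[i])
  return '"'.join(segments)

print(escape_list_item('a,b'))    # prints: a","b   (the comma gets quoted)
print(escape_list_item('"a,b"'))  # prints: "a,b"   (already quoted, left alone)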
_EscapeCppDefineForMSVS | (s) | Escapes a CPP define so that it will reach the compiler unaltered. | Escapes a CPP define so that it will reach the compiler unaltered. | def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s | [
"def",
"_EscapeCppDefineForMSVS",
"(",
"s",
")",
":",
"s",
"=",
"_EscapeEnvironmentVariableExpansion",
"(",
"s",
")",
"s",
"=",
"_EscapeCommandLineArgumentForMSVS",
"(",
"s",
")",
"s",
"=",
"_EscapeVCProjCommandLineArgListItem",
"(",
"s",
")",
"# cl.exe replaces literal # characters with = in preprocesor definitions for",
"# some reason. Octal-encode to work around that.",
"s",
"=",
"s",
".",
"replace",
"(",
"'#'",
",",
"'\\\\%03o'",
"%",
"ord",
"(",
"'#'",
")",
")",
"return",
"s"
] | [
762,
0
] | [
770,
10
] | python | en | ['en', 'en', 'en'] | True |
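A one-line illustration of the '#'-to-octal workaround mentioned in the comment above; the define value is hypothetical.

d = 'DEPLOYMENT_PATH="/usr/share#/path"'    # hypothetical define
print(d.replace('#', '\\%03o' % ord('#')))  # prints: DEPLOYMENT_PATH="/usr/share\043/path"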
_EscapeCommandLineArgumentForMSBuild | (s) | Escapes a Windows command-line argument for use by MSBuild. | Escapes a Windows command-line argument for use by MSBuild. | def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s | [
"def",
"_EscapeCommandLineArgumentForMSBuild",
"(",
"s",
")",
":",
"def",
"_Replace",
"(",
"match",
")",
":",
"return",
"(",
"len",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"/",
"2",
"*",
"4",
")",
"*",
"'\\\\'",
"+",
"'\\\\\"'",
"# Escape all quotes so that they are interpreted literally.",
"s",
"=",
"quote_replacer_regex2",
".",
"sub",
"(",
"_Replace",
",",
"s",
")",
"return",
"s"
] | [
776,
0
] | [
784,
10
] | python | en | ['en', 'en', 'en'] | True |
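The backslash arithmetic in _Replace above, spelled out for a few run lengths; // is written here for the floor division that / performs in the original Python 2 code.

# n backslashes before a quote become n // 2 * 4 backslashes plus an escaped quote.
for n in (1, 2, 3, 4):
  escaped = (n // 2 * 4) * '\\' + '\\"'
  print('%d backslash(es) before the quote -> %d after escaping' %
        (n, escaped.count('\\')))
# counts printed: 1, 5, 5, 9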
_EscapeCppDefineForMSBuild | (s) | Escapes a CPP define so that it will reach the compiler unaltered. | Escapes a CPP define so that it will reach the compiler unaltered. | def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s | [
"def",
"_EscapeCppDefineForMSBuild",
"(",
"s",
")",
":",
"s",
"=",
"_EscapeEnvironmentVariableExpansion",
"(",
"s",
")",
"s",
"=",
"_EscapeCommandLineArgumentForMSBuild",
"(",
"s",
")",
"s",
"=",
"_EscapeMSBuildSpecialCharacters",
"(",
"s",
")",
"# cl.exe replaces literal # characters with = in preprocesor definitions for",
"# some reason. Octal-encode to work around that.",
"s",
"=",
"s",
".",
"replace",
"(",
"'#'",
",",
"'\\\\%03o'",
"%",
"ord",
"(",
"'#'",
")",
")",
"return",
"s"
] | [
801,
0
] | [
809,
10
] | python | en | ['en', 'en', 'en'] | True |
_GenerateRulesForMSVS | (p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add) | Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
| Generate all the rules for a particular project. | def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(rules, sources, excluded_sources, False) | [
"def",
"_GenerateRulesForMSVS",
"(",
"p",
",",
"output_dir",
",",
"options",
",",
"spec",
",",
"sources",
",",
"excluded_sources",
",",
"actions_to_add",
")",
":",
"rules",
"=",
"spec",
".",
"get",
"(",
"'rules'",
",",
"[",
"]",
")",
"rules_native",
"=",
"[",
"r",
"for",
"r",
"in",
"rules",
"if",
"not",
"int",
"(",
"r",
".",
"get",
"(",
"'msvs_external_rule'",
",",
"0",
")",
")",
"]",
"rules_external",
"=",
"[",
"r",
"for",
"r",
"in",
"rules",
"if",
"int",
"(",
"r",
".",
"get",
"(",
"'msvs_external_rule'",
",",
"0",
")",
")",
"]",
"# Handle rules that use a native rules file.",
"if",
"rules_native",
":",
"_GenerateNativeRulesForMSVS",
"(",
"p",
",",
"rules_native",
",",
"output_dir",
",",
"spec",
",",
"options",
")",
"# Handle external rules (non-native rules).",
"if",
"rules_external",
":",
"_GenerateExternalRules",
"(",
"rules_external",
",",
"output_dir",
",",
"spec",
",",
"sources",
",",
"options",
",",
"actions_to_add",
")",
"_AdjustSourcesForRules",
"(",
"rules",
",",
"sources",
",",
"excluded_sources",
",",
"False",
")"
] | [
812,
0
] | [
838,
65
] | python | en | ['en', 'en', 'en'] | True |
_FilterActionsFromExcluded | (excluded_sources, actions_to_add) | Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
| Take inputs with actions attached out of the list of exclusions. | def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep] | [
"def",
"_FilterActionsFromExcluded",
"(",
"excluded_sources",
",",
"actions_to_add",
")",
":",
"must_keep",
"=",
"OrderedSet",
"(",
"_FixPaths",
"(",
"actions_to_add",
".",
"keys",
"(",
")",
")",
")",
"return",
"[",
"s",
"for",
"s",
"in",
"excluded_sources",
"if",
"s",
"not",
"in",
"must_keep",
"]"
] | [
863,
0
] | [
873,
60
] | python | en | ['en', 'en', 'en'] | True |
_GetGuidOfProject | (proj_path, spec) | Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
| Get the guid for the project. | def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid | [
"def",
"_GetGuidOfProject",
"(",
"proj_path",
",",
"spec",
")",
":",
"# Pluck out the default configuration.",
"default_config",
"=",
"_GetDefaultConfiguration",
"(",
"spec",
")",
"# Decide the guid of the project.",
"guid",
"=",
"default_config",
".",
"get",
"(",
"'msvs_guid'",
")",
"if",
"guid",
":",
"if",
"VALID_MSVS_GUID_CHARS",
".",
"match",
"(",
"guid",
")",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'Invalid MSVS guid: \"%s\". Must match regex: \"%s\".'",
"%",
"(",
"guid",
",",
"VALID_MSVS_GUID_CHARS",
".",
"pattern",
")",
")",
"guid",
"=",
"'{%s}'",
"%",
"guid",
"guid",
"=",
"guid",
"or",
"MSVSNew",
".",
"MakeGuid",
"(",
"proj_path",
")",
"return",
"guid"
] | [
880,
0
] | [
901,
13
] | python | en | ['en', 'en', 'en'] | True |
_GetMsbuildToolsetOfProject | (proj_path, spec, version) | Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
| Get the platform toolset for the project. | def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset | [
"def",
"_GetMsbuildToolsetOfProject",
"(",
"proj_path",
",",
"spec",
",",
"version",
")",
":",
"# Pluck out the default configuration.",
"default_config",
"=",
"_GetDefaultConfiguration",
"(",
"spec",
")",
"toolset",
"=",
"default_config",
".",
"get",
"(",
"'msbuild_toolset'",
")",
"if",
"not",
"toolset",
"and",
"version",
".",
"DefaultToolset",
"(",
")",
":",
"toolset",
"=",
"version",
".",
"DefaultToolset",
"(",
")",
"return",
"toolset"
] | [
904,
0
] | [
919,
16
] | python | en | ['en', 'en', 'en'] | True |
_GenerateProject | (project, options, version, generator_flags) | Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
| Generates a vcproj file. | def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags) | [
"def",
"_GenerateProject",
"(",
"project",
",",
"options",
",",
"version",
",",
"generator_flags",
")",
":",
"default_config",
"=",
"_GetDefaultConfiguration",
"(",
"project",
".",
"spec",
")",
"# Skip emitting anything if told to with msvs_existing_vcproj option.",
"if",
"default_config",
".",
"get",
"(",
"'msvs_existing_vcproj'",
")",
":",
"return",
"[",
"]",
"if",
"version",
".",
"UsesVcxproj",
"(",
")",
":",
"return",
"_GenerateMSBuildProject",
"(",
"project",
",",
"options",
",",
"version",
",",
"generator_flags",
")",
"else",
":",
"return",
"_GenerateMSVSProject",
"(",
"project",
",",
"options",
",",
"version",
",",
"generator_flags",
")"
] | [
922,
0
] | [
942,
75
] | python | en | ['en', 'it', 'pt'] | False |
_ValidateSourcesForMSVSProject | (spec, version) | Makes sure that duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
| Makes sure that duplicate basenames are not specified in the source list. | def _ValidateSourcesForMSVSProject(spec, version):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
version: The VisualStudioVersion object.
"""
# This validation should not be applied to MSVC2010 and later.
assert not version.UsesVcxproj()
# TODO: Check if MSVC allows this for loadable_module targets.
if spec.get('type', None) not in ('static_library', 'shared_library'):
return
sources = spec.get('sources', [])
basenames = {}
for source in sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'MSVC08 cannot handle that.')
raise GypError('Duplicate basenames in sources section, see list above') | [
"def",
"_ValidateSourcesForMSVSProject",
"(",
"spec",
",",
"version",
")",
":",
"# This validation should not be applied to MSVC2010 and later.",
"assert",
"not",
"version",
".",
"UsesVcxproj",
"(",
")",
"# TODO: Check if MSVC allows this for loadable_module targets.",
"if",
"spec",
".",
"get",
"(",
"'type'",
",",
"None",
")",
"not",
"in",
"(",
"'static_library'",
",",
"'shared_library'",
")",
":",
"return",
"sources",
"=",
"spec",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
"basenames",
"=",
"{",
"}",
"for",
"source",
"in",
"sources",
":",
"name",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"is_compiled_file",
"=",
"ext",
"in",
"[",
"'.c'",
",",
"'.cc'",
",",
"'.cpp'",
",",
"'.cxx'",
",",
"'.m'",
",",
"'.mm'",
",",
"'.s'",
",",
"'.S'",
"]",
"if",
"not",
"is_compiled_file",
":",
"continue",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"# Don't include extension.",
"basenames",
".",
"setdefault",
"(",
"basename",
",",
"[",
"]",
")",
".",
"append",
"(",
"source",
")",
"error",
"=",
"''",
"for",
"basename",
",",
"files",
"in",
"basenames",
".",
"iteritems",
"(",
")",
":",
"if",
"len",
"(",
"files",
")",
">",
"1",
":",
"error",
"+=",
"' %s: %s\\n'",
"%",
"(",
"basename",
",",
"' '",
".",
"join",
"(",
"files",
")",
")",
"if",
"error",
":",
"print",
"(",
"'static library %s has several files with the same basename:\\n'",
"%",
"spec",
"[",
"'target_name'",
"]",
"+",
"error",
"+",
"'MSVC08 cannot handle that.'",
")",
"raise",
"GypError",
"(",
"'Duplicate basenames in sources section, see list above'",
")"
] | [
946,
0
] | [
978,
76
] | python | en | ['en', 'en', 'en'] | True |
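A runnable sketch of the duplicate-basename check performed above, with a made-up source list.

import os

sources = ['a/util.cc', 'b/util.cc', 'main.cc']  # hypothetical inputs
basenames = {}
for source in sources:
  name, ext = os.path.splitext(source)
  if ext in ('.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S'):
    basenames.setdefault(os.path.basename(name), []).append(source)
for basename, files in sorted(basenames.items()):
  if len(files) > 1:
    print('%s: %s' % (basename, ' '.join(files)))  # prints: util: a/util.cc b/util.cc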
_GenerateMSVSProject | (project, options, version, generator_flags) | Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
| Generates a .vcproj file. It may create .rules and .user files too. | def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
gyp.common.EnsureDirExists(project.path)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# MSVC08 and prior versions cannot handle duplicate basenames in the same
# target.
# TODO: Take excluded sources into consideration if possible.
_ValidateSourcesForMSVSProject(spec, version)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
sources, excluded_sources,
list_excluded, version))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources | [
"def",
"_GenerateMSVSProject",
"(",
"project",
",",
"options",
",",
"version",
",",
"generator_flags",
")",
":",
"spec",
"=",
"project",
".",
"spec",
"gyp",
".",
"common",
".",
"EnsureDirExists",
"(",
"project",
".",
"path",
")",
"platforms",
"=",
"_GetUniquePlatforms",
"(",
"spec",
")",
"p",
"=",
"MSVSProject",
".",
"Writer",
"(",
"project",
".",
"path",
",",
"version",
",",
"spec",
"[",
"'target_name'",
"]",
",",
"project",
".",
"guid",
",",
"platforms",
")",
"# Get directory project file is in.",
"project_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"project",
".",
"path",
")",
"[",
"0",
"]",
"gyp_path",
"=",
"_NormalizedSource",
"(",
"project",
".",
"build_file",
")",
"relative_path_of_gyp_file",
"=",
"gyp",
".",
"common",
".",
"RelativePath",
"(",
"gyp_path",
",",
"project_dir",
")",
"config_type",
"=",
"_GetMSVSConfigurationType",
"(",
"spec",
",",
"project",
".",
"build_file",
")",
"for",
"config_name",
",",
"config",
"in",
"spec",
"[",
"'configurations'",
"]",
".",
"iteritems",
"(",
")",
":",
"_AddConfigurationToMSVSProject",
"(",
"p",
",",
"spec",
",",
"config_type",
",",
"config_name",
",",
"config",
")",
"# MSVC08 and prior version cannot handle duplicate basenames in the same",
"# target.",
"# TODO: Take excluded sources into consideration if possible.",
"_ValidateSourcesForMSVSProject",
"(",
"spec",
",",
"version",
")",
"# Prepare list of sources and excluded sources.",
"gyp_file",
"=",
"os",
".",
"path",
".",
"split",
"(",
"project",
".",
"build_file",
")",
"[",
"1",
"]",
"sources",
",",
"excluded_sources",
"=",
"_PrepareListOfSources",
"(",
"spec",
",",
"generator_flags",
",",
"gyp_file",
")",
"# Add rules.",
"actions_to_add",
"=",
"{",
"}",
"_GenerateRulesForMSVS",
"(",
"p",
",",
"project_dir",
",",
"options",
",",
"spec",
",",
"sources",
",",
"excluded_sources",
",",
"actions_to_add",
")",
"list_excluded",
"=",
"generator_flags",
".",
"get",
"(",
"'msvs_list_excluded_files'",
",",
"True",
")",
"sources",
",",
"excluded_sources",
",",
"excluded_idl",
"=",
"(",
"_AdjustSourcesAndConvertToFilterHierarchy",
"(",
"spec",
",",
"options",
",",
"project_dir",
",",
"sources",
",",
"excluded_sources",
",",
"list_excluded",
",",
"version",
")",
")",
"# Add in files.",
"missing_sources",
"=",
"_VerifySourcesExist",
"(",
"sources",
",",
"project_dir",
")",
"p",
".",
"AddFiles",
"(",
"sources",
")",
"_AddToolFilesToMSVS",
"(",
"p",
",",
"spec",
")",
"_HandlePreCompiledHeaders",
"(",
"p",
",",
"sources",
",",
"spec",
")",
"_AddActions",
"(",
"actions_to_add",
",",
"spec",
",",
"relative_path_of_gyp_file",
")",
"_AddCopies",
"(",
"actions_to_add",
",",
"spec",
")",
"_WriteMSVSUserFile",
"(",
"project",
".",
"path",
",",
"version",
",",
"spec",
")",
"# NOTE: this stanza must appear after all actions have been decided.",
"# Don't excluded sources with actions attached, or they won't run.",
"excluded_sources",
"=",
"_FilterActionsFromExcluded",
"(",
"excluded_sources",
",",
"actions_to_add",
")",
"_ExcludeFilesFromBeingBuilt",
"(",
"p",
",",
"spec",
",",
"excluded_sources",
",",
"excluded_idl",
",",
"list_excluded",
")",
"_AddAccumulatedActionsToMSVS",
"(",
"p",
",",
"spec",
",",
"actions_to_add",
")",
"# Write it out.",
"p",
".",
"WriteIfChanged",
"(",
")",
"return",
"missing_sources"
] | [
981,
0
] | [
1048,
24
] | python | en | ['en', 'en', 'en'] | True |
_GetUniquePlatforms | (spec) | Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platforms.
| Returns the list of unique platforms for this spec, e.g ['win32', ...]. | def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platforms.
"""
# Gather list of unique platforms.
platforms = OrderedSet()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms | [
"def",
"_GetUniquePlatforms",
"(",
"spec",
")",
":",
"# Gather list of unique platforms.",
"platforms",
"=",
"OrderedSet",
"(",
")",
"for",
"configuration",
"in",
"spec",
"[",
"'configurations'",
"]",
":",
"platforms",
".",
"add",
"(",
"_ConfigPlatform",
"(",
"spec",
"[",
"'configurations'",
"]",
"[",
"configuration",
"]",
")",
")",
"platforms",
"=",
"list",
"(",
"platforms",
")",
"return",
"platforms"
] | [
1051,
0
] | [
1064,
18
] | python | en | ['en', 'en', 'en'] | True |
_CreateMSVSUserFile | (proj_path, version, spec) | Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
| Generates a .user file for the user running this Gyp program. | def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file | [
"def",
"_CreateMSVSUserFile",
"(",
"proj_path",
",",
"version",
",",
"spec",
")",
":",
"(",
"domain",
",",
"username",
")",
"=",
"_GetDomainAndUserName",
"(",
")",
"vcuser_filename",
"=",
"'.'",
".",
"join",
"(",
"[",
"proj_path",
",",
"domain",
",",
"username",
",",
"'user'",
"]",
")",
"user_file",
"=",
"MSVSUserFile",
".",
"Writer",
"(",
"vcuser_filename",
",",
"version",
",",
"spec",
"[",
"'target_name'",
"]",
")",
"return",
"user_file"
] | [
1067,
0
] | [
1082,
18
] | python | en | ['en', 'en', 'en'] | True |
_GetMSVSConfigurationType | (spec, build_file) | Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
| Returns the configuration type for this project. | def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type | [
"def",
"_GetMSVSConfigurationType",
"(",
"spec",
",",
"build_file",
")",
":",
"try",
":",
"config_type",
"=",
"{",
"'executable'",
":",
"'1'",
",",
"# .exe",
"'shared_library'",
":",
"'2'",
",",
"# .dll",
"'loadable_module'",
":",
"'2'",
",",
"# .dll",
"'static_library'",
":",
"'4'",
",",
"# .lib",
"'none'",
":",
"'10'",
",",
"# Utility type",
"}",
"[",
"spec",
"[",
"'type'",
"]",
"]",
"except",
"KeyError",
":",
"if",
"spec",
".",
"get",
"(",
"'type'",
")",
":",
"raise",
"GypError",
"(",
"'Target type %s is not a valid target type for '",
"'target %s in %s.'",
"%",
"(",
"spec",
"[",
"'type'",
"]",
",",
"spec",
"[",
"'target_name'",
"]",
",",
"build_file",
")",
")",
"else",
":",
"raise",
"GypError",
"(",
"'Missing type field for target %s in %s.'",
"%",
"(",
"spec",
"[",
"'target_name'",
"]",
",",
"build_file",
")",
")",
"return",
"config_type"
] | [
1085,
0
] | [
1112,
20
] | python | en | ['en', 'en', 'en'] | True |
_AddConfigurationToMSVSProject | (p, spec, config_type, config_name, config) | Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function generates the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
| Adds a configuration to the MSVS project. | def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function generates the main part of the vcproj file that's configuration specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, midl_include_dirs, resource_include_dirs = \
_GetIncludeDirs(config)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(config)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
# Prevent default library inheritance from the environment.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCMIDLTool',
'AdditionalIncludeDirectories', midl_include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name) | [
"def",
"_AddConfigurationToMSVSProject",
"(",
"p",
",",
"spec",
",",
"config_type",
",",
"config_name",
",",
"config",
")",
":",
"# Get the information for this configuration",
"include_dirs",
",",
"midl_include_dirs",
",",
"resource_include_dirs",
"=",
"_GetIncludeDirs",
"(",
"config",
")",
"libraries",
"=",
"_GetLibraries",
"(",
"spec",
")",
"library_dirs",
"=",
"_GetLibraryDirs",
"(",
"config",
")",
"out_file",
",",
"vc_tool",
",",
"_",
"=",
"_GetOutputFilePathAndTool",
"(",
"spec",
",",
"msbuild",
"=",
"False",
")",
"defines",
"=",
"_GetDefines",
"(",
"config",
")",
"defines",
"=",
"[",
"_EscapeCppDefineForMSVS",
"(",
"d",
")",
"for",
"d",
"in",
"defines",
"]",
"disabled_warnings",
"=",
"_GetDisabledWarnings",
"(",
"config",
")",
"prebuild",
"=",
"config",
".",
"get",
"(",
"'msvs_prebuild'",
")",
"postbuild",
"=",
"config",
".",
"get",
"(",
"'msvs_postbuild'",
")",
"def_file",
"=",
"_GetModuleDefinition",
"(",
"spec",
")",
"precompiled_header",
"=",
"config",
".",
"get",
"(",
"'msvs_precompiled_header'",
")",
"# Prepare the list of tools as a dictionary.",
"tools",
"=",
"dict",
"(",
")",
"# Add in user specified msvs_settings.",
"msvs_settings",
"=",
"config",
".",
"get",
"(",
"'msvs_settings'",
",",
"{",
"}",
")",
"MSVSSettings",
".",
"ValidateMSVSSettings",
"(",
"msvs_settings",
")",
"# Prevent default library inheritance from the environment.",
"_ToolAppend",
"(",
"tools",
",",
"'VCLinkerTool'",
",",
"'AdditionalDependencies'",
",",
"[",
"'$(NOINHERIT)'",
"]",
")",
"for",
"tool",
"in",
"msvs_settings",
":",
"settings",
"=",
"config",
"[",
"'msvs_settings'",
"]",
"[",
"tool",
"]",
"for",
"setting",
"in",
"settings",
":",
"_ToolAppend",
"(",
"tools",
",",
"tool",
",",
"setting",
",",
"settings",
"[",
"setting",
"]",
")",
"# Add the information to the appropriate tool",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'AdditionalIncludeDirectories'",
",",
"include_dirs",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCMIDLTool'",
",",
"'AdditionalIncludeDirectories'",
",",
"midl_include_dirs",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCResourceCompilerTool'",
",",
"'AdditionalIncludeDirectories'",
",",
"resource_include_dirs",
")",
"# Add in libraries.",
"_ToolAppend",
"(",
"tools",
",",
"'VCLinkerTool'",
",",
"'AdditionalDependencies'",
",",
"libraries",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCLinkerTool'",
",",
"'AdditionalLibraryDirectories'",
",",
"library_dirs",
")",
"if",
"out_file",
":",
"_ToolAppend",
"(",
"tools",
",",
"vc_tool",
",",
"'OutputFile'",
",",
"out_file",
",",
"only_if_unset",
"=",
"True",
")",
"# Add defines.",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'PreprocessorDefinitions'",
",",
"defines",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCResourceCompilerTool'",
",",
"'PreprocessorDefinitions'",
",",
"defines",
")",
"# Change program database directory to prevent collisions.",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'ProgramDataBaseFileName'",
",",
"'$(IntDir)$(ProjectName)\\\\vc80.pdb'",
",",
"only_if_unset",
"=",
"True",
")",
"# Add disabled warnings.",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'DisableSpecificWarnings'",
",",
"disabled_warnings",
")",
"# Add Pre-build.",
"_ToolAppend",
"(",
"tools",
",",
"'VCPreBuildEventTool'",
",",
"'CommandLine'",
",",
"prebuild",
")",
"# Add Post-build.",
"_ToolAppend",
"(",
"tools",
",",
"'VCPostBuildEventTool'",
",",
"'CommandLine'",
",",
"postbuild",
")",
"# Turn on precompiled headers if appropriate.",
"if",
"precompiled_header",
":",
"precompiled_header",
"=",
"os",
".",
"path",
".",
"split",
"(",
"precompiled_header",
")",
"[",
"1",
"]",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'UsePrecompiledHeader'",
",",
"'2'",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'PrecompiledHeaderThrough'",
",",
"precompiled_header",
")",
"_ToolAppend",
"(",
"tools",
",",
"'VCCLCompilerTool'",
",",
"'ForcedIncludeFiles'",
",",
"precompiled_header",
")",
"# Loadable modules don't generate import libraries;",
"# tell dependent projects to not expect one.",
"if",
"spec",
"[",
"'type'",
"]",
"==",
"'loadable_module'",
":",
"_ToolAppend",
"(",
"tools",
",",
"'VCLinkerTool'",
",",
"'IgnoreImportLibrary'",
",",
"'true'",
")",
"# Set the module definition file if any.",
"if",
"def_file",
":",
"_ToolAppend",
"(",
"tools",
",",
"'VCLinkerTool'",
",",
"'ModuleDefinitionFile'",
",",
"def_file",
")",
"_AddConfigurationToMSVS",
"(",
"p",
",",
"spec",
",",
"tools",
",",
"config",
",",
"config_type",
",",
"config_name",
")"
] | [
1115,
0
] | [
1199,
75
] | python | en | ['en', 'en', 'en'] | True |
_GetIncludeDirs | (config) | Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
| Returns the list of directories to be used for #include directives. | def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
midl_include_dirs = (
config.get('midl_include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
midl_include_dirs = _FixPaths(midl_include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, midl_include_dirs, resource_include_dirs | [
"def",
"_GetIncludeDirs",
"(",
"config",
")",
":",
"# TODO(bradnelson): include_dirs should really be flexible enough not to",
"# require this sort of thing.",
"include_dirs",
"=",
"(",
"config",
".",
"get",
"(",
"'include_dirs'",
",",
"[",
"]",
")",
"+",
"config",
".",
"get",
"(",
"'msvs_system_include_dirs'",
",",
"[",
"]",
")",
")",
"midl_include_dirs",
"=",
"(",
"config",
".",
"get",
"(",
"'midl_include_dirs'",
",",
"[",
"]",
")",
"+",
"config",
".",
"get",
"(",
"'msvs_system_include_dirs'",
",",
"[",
"]",
")",
")",
"resource_include_dirs",
"=",
"config",
".",
"get",
"(",
"'resource_include_dirs'",
",",
"include_dirs",
")",
"include_dirs",
"=",
"_FixPaths",
"(",
"include_dirs",
")",
"midl_include_dirs",
"=",
"_FixPaths",
"(",
"midl_include_dirs",
")",
"resource_include_dirs",
"=",
"_FixPaths",
"(",
"resource_include_dirs",
")",
"return",
"include_dirs",
",",
"midl_include_dirs",
",",
"resource_include_dirs"
] | [
1202,
0
] | [
1223,
63
] | python | en | ['en', 'en', 'en'] | True |
_GetLibraryDirs | (config) | Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
| Returns the list of directories to be used for library search paths. | def _GetLibraryDirs(config):
"""Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
library_dirs = config.get('library_dirs', [])
library_dirs = _FixPaths(library_dirs)
return library_dirs | [
"def",
"_GetLibraryDirs",
"(",
"config",
")",
":",
"library_dirs",
"=",
"config",
".",
"get",
"(",
"'library_dirs'",
",",
"[",
"]",
")",
"library_dirs",
"=",
"_FixPaths",
"(",
"library_dirs",
")",
"return",
"library_dirs"
] | [
1226,
0
] | [
1238,
21
] | python | en | ['en', 'en', 'en'] | True |
_GetLibraries | (spec) | Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of libraries.
| Returns the list of libraries for this configuration. | def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of libraries.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, leaving only the last duplicate, while
# preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^\-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list | [
"def",
"_GetLibraries",
"(",
"spec",
")",
":",
"libraries",
"=",
"spec",
".",
"get",
"(",
"'libraries'",
",",
"[",
"]",
")",
"# Strip out -l, as it is not used on windows (but is needed so we can pass",
"# in libraries that are assumed to be in the default library path).",
"# Also remove duplicate entries, leaving only the last duplicate, while",
"# preserving order.",
"found",
"=",
"OrderedSet",
"(",
")",
"unique_libraries_list",
"=",
"[",
"]",
"for",
"entry",
"in",
"reversed",
"(",
"libraries",
")",
":",
"library",
"=",
"re",
".",
"sub",
"(",
"r'^\\-l'",
",",
"''",
",",
"entry",
")",
"if",
"not",
"os",
".",
"path",
".",
"splitext",
"(",
"library",
")",
"[",
"1",
"]",
":",
"library",
"+=",
"'.lib'",
"if",
"library",
"not",
"in",
"found",
":",
"found",
".",
"add",
"(",
"library",
")",
"unique_libraries_list",
".",
"append",
"(",
"library",
")",
"unique_libraries_list",
".",
"reverse",
"(",
")",
"return",
"unique_libraries_list"
] | [
1241,
0
] | [
1264,
30
] | python | en | ['en', 'en', 'en'] | True |
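A worked example of the normalisation _GetLibraries performs, on a hypothetical libraries list; a plain set stands in for OrderedSet because only membership is tested during the reversed scan.

import os
import re

libraries = ['-lws2_32', 'foo.lib', 'ws2_32.lib']  # hypothetical input
found = set()
unique_libraries_list = []
for entry in reversed(libraries):
  library = re.sub(r'^\-l', '', entry)  # drop the -l prefix
  if not os.path.splitext(library)[1]:
    library += '.lib'                   # add the default extension
  if library not in found:
    found.add(library)
    unique_libraries_list.append(library)
unique_libraries_list.reverse()
print(unique_libraries_list)  # prints: ['foo.lib', 'ws2_32.lib'] - the later duplicate wins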
_GetOutputFilePathAndTool | (spec, msbuild) | Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
| Returns the path and tool to use for this target. | def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool | [
"def",
"_GetOutputFilePathAndTool",
"(",
"spec",
",",
"msbuild",
")",
":",
"# Select a name for the output file.",
"out_file",
"=",
"''",
"vc_tool",
"=",
"''",
"msbuild_tool",
"=",
"''",
"output_file_map",
"=",
"{",
"'executable'",
":",
"(",
"'VCLinkerTool'",
",",
"'Link'",
",",
"'$(OutDir)'",
",",
"'.exe'",
")",
",",
"'shared_library'",
":",
"(",
"'VCLinkerTool'",
",",
"'Link'",
",",
"'$(OutDir)'",
",",
"'.dll'",
")",
",",
"'loadable_module'",
":",
"(",
"'VCLinkerTool'",
",",
"'Link'",
",",
"'$(OutDir)'",
",",
"'.dll'",
")",
",",
"'static_library'",
":",
"(",
"'VCLibrarianTool'",
",",
"'Lib'",
",",
"'$(OutDir)lib\\\\'",
",",
"'.lib'",
")",
",",
"}",
"output_file_props",
"=",
"output_file_map",
".",
"get",
"(",
"spec",
"[",
"'type'",
"]",
")",
"if",
"output_file_props",
"and",
"int",
"(",
"spec",
".",
"get",
"(",
"'msvs_auto_output_file'",
",",
"1",
")",
")",
":",
"vc_tool",
",",
"msbuild_tool",
",",
"out_dir",
",",
"suffix",
"=",
"output_file_props",
"if",
"spec",
".",
"get",
"(",
"'standalone_static_library'",
",",
"0",
")",
":",
"out_dir",
"=",
"'$(OutDir)'",
"out_dir",
"=",
"spec",
".",
"get",
"(",
"'product_dir'",
",",
"out_dir",
")",
"product_extension",
"=",
"spec",
".",
"get",
"(",
"'product_extension'",
")",
"if",
"product_extension",
":",
"suffix",
"=",
"'.'",
"+",
"product_extension",
"elif",
"msbuild",
":",
"suffix",
"=",
"'$(TargetExt)'",
"prefix",
"=",
"spec",
".",
"get",
"(",
"'product_prefix'",
",",
"''",
")",
"product_name",
"=",
"spec",
".",
"get",
"(",
"'product_name'",
",",
"'$(ProjectName)'",
")",
"out_file",
"=",
"ntpath",
".",
"join",
"(",
"out_dir",
",",
"prefix",
"+",
"product_name",
"+",
"suffix",
")",
"return",
"out_file",
",",
"vc_tool",
",",
"msbuild_tool"
] | [
1267,
0
] | [
1302,
40
] | python | en | ['en', 'en', 'en'] | True |
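How the lookup above combines into an output path for a hypothetical shared_library target named 'foo' with no product_prefix or product_extension overrides.

import ntpath

output_file_map = {
    'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
    'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
}
vc_tool, msbuild_tool, out_dir, suffix = output_file_map['shared_library']
out_file = ntpath.join(out_dir, '' + 'foo' + suffix)  # prefix defaults to ''
print('%s builds %s' % (vc_tool, out_file))  # prints: VCLinkerTool builds $(OutDir)\foo.dll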
_GetOutputTargetExt | (spec) | Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
| Returns the extension for this target, including the dot | def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None | [
"def",
"_GetOutputTargetExt",
"(",
"spec",
")",
":",
"target_extension",
"=",
"spec",
".",
"get",
"(",
"'product_extension'",
")",
"if",
"target_extension",
":",
"return",
"'.'",
"+",
"target_extension",
"return",
"None"
] | [
1305,
0
] | [
1320,
13
] | python | en | ['en', 'en', 'en'] | True |
_GetDefines | (config) | Returns the list of preprocessor definitions for this configuration.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
| Returns the list of preprocessor definitions for this configuration. | def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines | [
"def",
"_GetDefines",
"(",
"config",
")",
":",
"defines",
"=",
"[",
"]",
"for",
"d",
"in",
"config",
".",
"get",
"(",
"'defines'",
",",
"[",
"]",
")",
":",
"if",
"type",
"(",
"d",
")",
"==",
"list",
":",
"fd",
"=",
"'='",
".",
"join",
"(",
"[",
"str",
"(",
"dpart",
")",
"for",
"dpart",
"in",
"d",
"]",
")",
"else",
":",
"fd",
"=",
"str",
"(",
"d",
")",
"defines",
".",
"append",
"(",
"fd",
")",
"return",
"defines"
] | [
1323,
0
] | [
1339,
16
] | python | en | ['en', 'en', 'en'] | True |
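A tiny illustration of the list form handled above: a define given as a (name, value) list collapses to 'name=value'.

d = ['FOO_VERSION', 42]  # hypothetical list-valued define
print('='.join([str(dpart) for dpart in d]))  # prints: FOO_VERSION=42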
_ConvertToolsToExpectedForm | (tools) | Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
| Convert tools to a form expected by Visual Studio. | def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list | [
"def",
"_ConvertToolsToExpectedForm",
"(",
"tools",
")",
":",
"tool_list",
"=",
"[",
"]",
"for",
"tool",
",",
"settings",
"in",
"tools",
".",
"iteritems",
"(",
")",
":",
"# Collapse settings with lists.",
"settings_fixed",
"=",
"{",
"}",
"for",
"setting",
",",
"value",
"in",
"settings",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"list",
":",
"if",
"(",
"(",
"tool",
"==",
"'VCLinkerTool'",
"and",
"setting",
"==",
"'AdditionalDependencies'",
")",
"or",
"setting",
"==",
"'AdditionalOptions'",
")",
":",
"settings_fixed",
"[",
"setting",
"]",
"=",
"' '",
".",
"join",
"(",
"value",
")",
"else",
":",
"settings_fixed",
"[",
"setting",
"]",
"=",
"';'",
".",
"join",
"(",
"value",
")",
"else",
":",
"settings_fixed",
"[",
"setting",
"]",
"=",
"value",
"# Add in this tool.",
"tool_list",
".",
"append",
"(",
"MSVSProject",
".",
"Tool",
"(",
"tool",
",",
"settings_fixed",
")",
")",
"return",
"tool_list"
] | [
1360,
0
] | [
1384,
18
] | python | en | ['en', 'en', 'en'] | True |
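A simplified sketch of the list-collapsing rule above, with hypothetical settings; the real code additionally requires the VCLinkerTool tool name before space-joining AdditionalDependencies.

settings = {'AdditionalOptions': ['/W4', '/WX'],
            'PreprocessorDefinitions': ['FOO', 'BAR=1']}
settings_fixed = {}
for setting, value in settings.items():
  if setting in ('AdditionalDependencies', 'AdditionalOptions'):
    settings_fixed[setting] = ' '.join(value)  # command-line style
  else:
    settings_fixed[setting] = ';'.join(value)  # VCProj list style
for setting in sorted(settings_fixed):
  print('%s = %s' % (setting, settings_fixed[setting]))
# prints: AdditionalOptions = /W4 /WX
#         PreprocessorDefinitions = FOO;BAR=1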
_AddConfigurationToMSVS | (p, spec, tools, config, config_type, config_name) | Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
| Add to the project file the configuration specified by config. | def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list) | [
"def",
"_AddConfigurationToMSVS",
"(",
"p",
",",
"spec",
",",
"tools",
",",
"config",
",",
"config_type",
",",
"config_name",
")",
":",
"attributes",
"=",
"_GetMSVSAttributes",
"(",
"spec",
",",
"config",
",",
"config_type",
")",
"# Add in this configuration.",
"tool_list",
"=",
"_ConvertToolsToExpectedForm",
"(",
"tools",
")",
"p",
".",
"AddConfig",
"(",
"_ConfigFullName",
"(",
"config_name",
",",
"config",
")",
",",
"attrs",
"=",
"attributes",
",",
"tools",
"=",
"tool_list",
")"
] | [
1387,
0
] | [
1403,
48
] | python | en | ['en', 'en', 'en'] | True |
_PrepareListOfSources | (spec, generator_flags, gyp_file) | Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
| Prepare list of sources and excluded sources. | def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources) | [
"def",
"_PrepareListOfSources",
"(",
"spec",
",",
"generator_flags",
",",
"gyp_file",
")",
":",
"sources",
"=",
"OrderedSet",
"(",
")",
"_AddNormalizedSources",
"(",
"sources",
",",
"spec",
".",
"get",
"(",
"'sources'",
",",
"[",
"]",
")",
")",
"excluded_sources",
"=",
"OrderedSet",
"(",
")",
"# Add in the gyp file.",
"if",
"not",
"generator_flags",
".",
"get",
"(",
"'standalone'",
")",
":",
"sources",
".",
"add",
"(",
"gyp_file",
")",
"# Add in 'action' inputs and outputs.",
"for",
"a",
"in",
"spec",
".",
"get",
"(",
"'actions'",
",",
"[",
"]",
")",
":",
"inputs",
"=",
"a",
"[",
"'inputs'",
"]",
"inputs",
"=",
"[",
"_NormalizedSource",
"(",
"i",
")",
"for",
"i",
"in",
"inputs",
"]",
"# Add all inputs to sources and excluded sources.",
"inputs",
"=",
"OrderedSet",
"(",
"inputs",
")",
"sources",
".",
"update",
"(",
"inputs",
")",
"if",
"not",
"spec",
".",
"get",
"(",
"'msvs_external_builder'",
")",
":",
"excluded_sources",
".",
"update",
"(",
"inputs",
")",
"if",
"int",
"(",
"a",
".",
"get",
"(",
"'process_outputs_as_sources'",
",",
"False",
")",
")",
":",
"_AddNormalizedSources",
"(",
"sources",
",",
"a",
".",
"get",
"(",
"'outputs'",
",",
"[",
"]",
")",
")",
"# Add in 'copies' inputs and outputs.",
"for",
"cpy",
"in",
"spec",
".",
"get",
"(",
"'copies'",
",",
"[",
"]",
")",
":",
"_AddNormalizedSources",
"(",
"sources",
",",
"cpy",
".",
"get",
"(",
"'files'",
",",
"[",
"]",
")",
")",
"return",
"(",
"sources",
",",
"excluded_sources",
")"
] | [
1436,
0
] | [
1472,
36
] | python | en | ['en', 'en', 'en'] | True |
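A trimmed-down sketch of the bookkeeping described above, on a hypothetical spec: action inputs land in both sources and excluded_sources (so a later stage can re-attach them as custom build steps), and action outputs join sources when process_outputs_as_sources is set.

spec = {'sources': ['main.cc'],  # hypothetical target
        'actions': [{'inputs': ['gen.py'], 'outputs': ['gen.cc'],
                     'process_outputs_as_sources': 1}]}
sources, excluded_sources = set(spec['sources']), set()
for a in spec['actions']:
  sources.update(a['inputs'])
  excluded_sources.update(a['inputs'])
  if int(a.get('process_outputs_as_sources', False)):
    sources.update(a['outputs'])
print(sorted(sources))           # prints: ['gen.cc', 'gen.py', 'main.cc']
print(sorted(excluded_sources))  # prints: ['gen.py']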
_AdjustSourcesAndConvertToFilterHierarchy | (
spec, options, gyp_dir, sources, excluded_sources, list_excluded, version) | Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
version: A MSVSVersion object.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
| Adjusts the list of sources and excluded sources. | def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The path to the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
version: A MSVSVersion object.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper windows form.
# NOTE: sources goes from being a set to a list here.
# NOTE: excluded_sources goes from being a set to a list here.
sources = _FixPaths(sources)
# Convert to proper windows form.
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded,
msvs_version=version)
# Prune filters with a single child to flatten ugly directory structures
# such as ../../src/modules/module1 etc.
if version.UsesVcxproj():
while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
and len(set([s.name for s in sources])) == 1:
assert all([len(s.contents) == 1 for s in sources])
sources = [s.contents[0] for s in sources]
else:
while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
sources = sources[0].contents
return sources, excluded_sources, excluded_idl | [
"def",
"_AdjustSourcesAndConvertToFilterHierarchy",
"(",
"spec",
",",
"options",
",",
"gyp_dir",
",",
"sources",
",",
"excluded_sources",
",",
"list_excluded",
",",
"version",
")",
":",
"# Exclude excluded sources coming into the generator.",
"excluded_sources",
".",
"update",
"(",
"OrderedSet",
"(",
"spec",
".",
"get",
"(",
"'sources_excluded'",
",",
"[",
"]",
")",
")",
")",
"# Add excluded sources into sources for good measure.",
"sources",
".",
"update",
"(",
"excluded_sources",
")",
"# Convert to proper windows form.",
"# NOTE: sources goes from being a set to a list here.",
"# NOTE: excluded_sources goes from being a set to a list here.",
"sources",
"=",
"_FixPaths",
"(",
"sources",
")",
"# Convert to proper windows form.",
"excluded_sources",
"=",
"_FixPaths",
"(",
"excluded_sources",
")",
"excluded_idl",
"=",
"_IdlFilesHandledNonNatively",
"(",
"spec",
",",
"sources",
")",
"precompiled_related",
"=",
"_GetPrecompileRelatedFiles",
"(",
"spec",
")",
"# Find the excluded ones, minus the precompiled header related ones.",
"fully_excluded",
"=",
"[",
"i",
"for",
"i",
"in",
"excluded_sources",
"if",
"i",
"not",
"in",
"precompiled_related",
"]",
"# Convert to folders and the right slashes.",
"sources",
"=",
"[",
"i",
".",
"split",
"(",
"'\\\\'",
")",
"for",
"i",
"in",
"sources",
"]",
"sources",
"=",
"_ConvertSourcesToFilterHierarchy",
"(",
"sources",
",",
"excluded",
"=",
"fully_excluded",
",",
"list_excluded",
"=",
"list_excluded",
",",
"msvs_version",
"=",
"version",
")",
"# Prune filters with a single child to flatten ugly directory structures",
"# such as ../../src/modules/module1 etc.",
"if",
"version",
".",
"UsesVcxproj",
"(",
")",
":",
"while",
"all",
"(",
"[",
"isinstance",
"(",
"s",
",",
"MSVSProject",
".",
"Filter",
")",
"for",
"s",
"in",
"sources",
"]",
")",
"and",
"len",
"(",
"set",
"(",
"[",
"s",
".",
"name",
"for",
"s",
"in",
"sources",
"]",
")",
")",
"==",
"1",
":",
"assert",
"all",
"(",
"[",
"len",
"(",
"s",
".",
"contents",
")",
"==",
"1",
"for",
"s",
"in",
"sources",
"]",
")",
"sources",
"=",
"[",
"s",
".",
"contents",
"[",
"0",
"]",
"for",
"s",
"in",
"sources",
"]",
"else",
":",
"while",
"len",
"(",
"sources",
")",
"==",
"1",
"and",
"isinstance",
"(",
"sources",
"[",
"0",
"]",
",",
"MSVSProject",
".",
"Filter",
")",
":",
"sources",
"=",
"sources",
"[",
"0",
"]",
".",
"contents",
"return",
"sources",
",",
"excluded_sources",
",",
"excluded_idl"
] | [
1475,
0
] | [
1526,
48
] | python | en | ['en', 'en', 'en'] | True |
_CreateProjectObjects | (target_list, target_dicts, options, msvs_version) | Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
| Create a MSVSProject object for the targets found in target list. | def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise GypError(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
# Set project toolset if any (MS build only)
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies, but not if we are using an external builder like
# ninja
for project in projects.values():
if not project.spec.get('msvs_external_builder'):
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects | [
"def",
"_CreateProjectObjects",
"(",
"target_list",
",",
"target_dicts",
",",
"options",
",",
"msvs_version",
")",
":",
"global",
"fixpath_prefix",
"# Generate each project.",
"projects",
"=",
"{",
"}",
"for",
"qualified_target",
"in",
"target_list",
":",
"spec",
"=",
"target_dicts",
"[",
"qualified_target",
"]",
"if",
"spec",
"[",
"'toolset'",
"]",
"!=",
"'target'",
":",
"raise",
"GypError",
"(",
"'Multiple toolsets not supported in msvs build (target %s)'",
"%",
"qualified_target",
")",
"proj_path",
",",
"fixpath_prefix",
"=",
"_GetPathOfProject",
"(",
"qualified_target",
",",
"spec",
",",
"options",
",",
"msvs_version",
")",
"guid",
"=",
"_GetGuidOfProject",
"(",
"proj_path",
",",
"spec",
")",
"overrides",
"=",
"_GetPlatformOverridesOfProject",
"(",
"spec",
")",
"build_file",
"=",
"gyp",
".",
"common",
".",
"BuildFile",
"(",
"qualified_target",
")",
"# Create object for this project.",
"obj",
"=",
"MSVSNew",
".",
"MSVSProject",
"(",
"proj_path",
",",
"name",
"=",
"spec",
"[",
"'target_name'",
"]",
",",
"guid",
"=",
"guid",
",",
"spec",
"=",
"spec",
",",
"build_file",
"=",
"build_file",
",",
"config_platform_overrides",
"=",
"overrides",
",",
"fixpath_prefix",
"=",
"fixpath_prefix",
")",
"# Set project toolset if any (MS build only)",
"if",
"msvs_version",
".",
"UsesVcxproj",
"(",
")",
":",
"obj",
".",
"set_msbuild_toolset",
"(",
"_GetMsbuildToolsetOfProject",
"(",
"proj_path",
",",
"spec",
",",
"msvs_version",
")",
")",
"projects",
"[",
"qualified_target",
"]",
"=",
"obj",
"# Set all the dependencies, but not if we are using an external builder like",
"# ninja",
"for",
"project",
"in",
"projects",
".",
"values",
"(",
")",
":",
"if",
"not",
"project",
".",
"spec",
".",
"get",
"(",
"'msvs_external_builder'",
")",
":",
"deps",
"=",
"project",
".",
"spec",
".",
"get",
"(",
"'dependencies'",
",",
"[",
"]",
")",
"deps",
"=",
"[",
"projects",
"[",
"d",
"]",
"for",
"d",
"in",
"deps",
"]",
"project",
".",
"set_dependencies",
"(",
"deps",
")",
"return",
"projects"
] | [
1811,
0
] | [
1857,
17
] | python | en | ['en', 'en', 'en'] | True |
_InitNinjaFlavor | (params, target_list, target_dicts) | Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
params: Params provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
| Initialize targets for the ninja flavor. | def _InitNinjaFlavor(params, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
params: Params provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
gyp_dir = os.path.dirname(gyp_file)
configuration = '$(Configuration)'
if params.get('target_arch') == 'x64':
configuration += '_x64'
spec['msvs_external_builder_out_dir'] = os.path.join(
gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
ninja_generator.ComputeOutputDir(params),
configuration)
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-tclean',
'$(ProjectName)',
] | [
"def",
"_InitNinjaFlavor",
"(",
"params",
",",
"target_list",
",",
"target_dicts",
")",
":",
"for",
"qualified_target",
"in",
"target_list",
":",
"spec",
"=",
"target_dicts",
"[",
"qualified_target",
"]",
"if",
"spec",
".",
"get",
"(",
"'msvs_external_builder'",
")",
":",
"# The spec explicitly defined an external builder, so don't change it.",
"continue",
"path_to_ninja",
"=",
"spec",
".",
"get",
"(",
"'msvs_path_to_ninja'",
",",
"'ninja.exe'",
")",
"spec",
"[",
"'msvs_external_builder'",
"]",
"=",
"'ninja'",
"if",
"not",
"spec",
".",
"get",
"(",
"'msvs_external_builder_out_dir'",
")",
":",
"gyp_file",
",",
"_",
",",
"_",
"=",
"gyp",
".",
"common",
".",
"ParseQualifiedTarget",
"(",
"qualified_target",
")",
"gyp_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"gyp_file",
")",
"configuration",
"=",
"'$(Configuration)'",
"if",
"params",
".",
"get",
"(",
"'target_arch'",
")",
"==",
"'x64'",
":",
"configuration",
"+=",
"'_x64'",
"spec",
"[",
"'msvs_external_builder_out_dir'",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"gyp",
".",
"common",
".",
"RelativePath",
"(",
"params",
"[",
"'options'",
"]",
".",
"toplevel_dir",
",",
"gyp_dir",
")",
",",
"ninja_generator",
".",
"ComputeOutputDir",
"(",
"params",
")",
",",
"configuration",
")",
"if",
"not",
"spec",
".",
"get",
"(",
"'msvs_external_builder_build_cmd'",
")",
":",
"spec",
"[",
"'msvs_external_builder_build_cmd'",
"]",
"=",
"[",
"path_to_ninja",
",",
"'-C'",
",",
"'$(OutDir)'",
",",
"'$(ProjectName)'",
",",
"]",
"if",
"not",
"spec",
".",
"get",
"(",
"'msvs_external_builder_clean_cmd'",
")",
":",
"spec",
"[",
"'msvs_external_builder_clean_cmd'",
"]",
"=",
"[",
"path_to_ninja",
",",
"'-C'",
",",
"'$(OutDir)'",
",",
"'-tclean'",
",",
"'$(ProjectName)'",
",",
"]"
] | [
1860,
0
] | [
1905,
7
] | python | en | ['en', 'en', 'en'] | True |
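The _InitNinjaFlavor row above fills in default external-builder settings on each target spec. A compressed, illustrative sketch of that defaulting logic is shown below; it is not part of the dataset, the empty spec dict is hypothetical, setdefault stands in for the explicit if-not-set checks of the original, and the out_dir default is omitted.

# Illustrative sketch: mirror the defaults _InitNinjaFlavor applies to a target
# spec when ninja is used as the external builder.
spec = {}  # hypothetical; normally one entry of target_dicts
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
spec.setdefault('msvs_external_builder_build_cmd',
                [path_to_ninja, '-C', '$(OutDir)', '$(ProjectName)'])
spec.setdefault('msvs_external_builder_clean_cmd',
                [path_to_ninja, '-C', '$(OutDir)', '-tclean', '$(ProjectName)'])
print(spec['msvs_external_builder_build_cmd'])
# ['ninja.exe', '-C', '$(OutDir)', '$(ProjectName)']
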
CalculateVariables | (default_variables, params) | Generated variables that require params to be known. | Generated variables that require params to be known. | def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
if gyp.common.GetFlavor(params) == 'ninja':
default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen' | [
"def",
"CalculateVariables",
"(",
"default_variables",
",",
"params",
")",
":",
"generator_flags",
"=",
"params",
".",
"get",
"(",
"'generator_flags'",
",",
"{",
"}",
")",
"# Select project file format version (if unset, default to auto detecting).",
"msvs_version",
"=",
"MSVSVersion",
".",
"SelectVisualStudioVersion",
"(",
"generator_flags",
".",
"get",
"(",
"'msvs_version'",
",",
"'auto'",
")",
")",
"# Stash msvs_version for later (so we don't have to probe the system twice).",
"params",
"[",
"'msvs_version'",
"]",
"=",
"msvs_version",
"# Set a variable so conditions can be based on msvs_version.",
"default_variables",
"[",
"'MSVS_VERSION'",
"]",
"=",
"msvs_version",
".",
"ShortName",
"(",
")",
"# To determine processor word size on Windows, in addition to checking",
"# PROCESSOR_ARCHITECTURE (which reflects the word size of the current",
"# process), it is also necessary to check PROCESSOR_ARCITEW6432 (which",
"# contains the actual word size of the system when running thru WOW64).",
"if",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'PROCESSOR_ARCHITECTURE'",
",",
"''",
")",
".",
"find",
"(",
"'64'",
")",
">=",
"0",
"or",
"os",
".",
"environ",
".",
"get",
"(",
"'PROCESSOR_ARCHITEW6432'",
",",
"''",
")",
".",
"find",
"(",
"'64'",
")",
">=",
"0",
")",
":",
"default_variables",
"[",
"'MSVS_OS_BITS'",
"]",
"=",
"64",
"else",
":",
"default_variables",
"[",
"'MSVS_OS_BITS'",
"]",
"=",
"32",
"if",
"gyp",
".",
"common",
".",
"GetFlavor",
"(",
"params",
")",
"==",
"'ninja'",
":",
"default_variables",
"[",
"'SHARED_INTERMEDIATE_DIR'",
"]",
"=",
"'$(OutDir)gen'"
] | [
1908,
0
] | [
1933,
65
] | python | en | ['en', 'en', 'en'] | True |
GenerateOutput | (target_list, target_dicts, data, params) | Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
| Generate .sln and .vcproj files. | def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
# Optionally use the large PDB workaround for targets marked with
# 'msvs_large_pdb': 1.
(target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
# Optionally configure each spec to use ninja as the external builder.
if params.get('flavor') == 'ninja':
_InitNinjaFlavor(params, target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise GypError(error_message)
else:
print >> sys.stdout, "Warning: " + error_message | [
"def",
"GenerateOutput",
"(",
"target_list",
",",
"target_dicts",
",",
"data",
",",
"params",
")",
":",
"global",
"fixpath_prefix",
"options",
"=",
"params",
"[",
"'options'",
"]",
"# Get the project file format version back out of where we stashed it in",
"# GeneratorCalculatedVariables.",
"msvs_version",
"=",
"params",
"[",
"'msvs_version'",
"]",
"generator_flags",
"=",
"params",
".",
"get",
"(",
"'generator_flags'",
",",
"{",
"}",
")",
"# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.",
"(",
"target_list",
",",
"target_dicts",
")",
"=",
"MSVSUtil",
".",
"ShardTargets",
"(",
"target_list",
",",
"target_dicts",
")",
"# Optionally use the large PDB workaround for targets marked with",
"# 'msvs_large_pdb': 1.",
"(",
"target_list",
",",
"target_dicts",
")",
"=",
"MSVSUtil",
".",
"InsertLargePdbShims",
"(",
"target_list",
",",
"target_dicts",
",",
"generator_default_variables",
")",
"# Optionally configure each spec to use ninja as the external builder.",
"if",
"params",
".",
"get",
"(",
"'flavor'",
")",
"==",
"'ninja'",
":",
"_InitNinjaFlavor",
"(",
"params",
",",
"target_list",
",",
"target_dicts",
")",
"# Prepare the set of configurations.",
"configs",
"=",
"set",
"(",
")",
"for",
"qualified_target",
"in",
"target_list",
":",
"spec",
"=",
"target_dicts",
"[",
"qualified_target",
"]",
"for",
"config_name",
",",
"config",
"in",
"spec",
"[",
"'configurations'",
"]",
".",
"iteritems",
"(",
")",
":",
"configs",
".",
"add",
"(",
"_ConfigFullName",
"(",
"config_name",
",",
"config",
")",
")",
"configs",
"=",
"list",
"(",
"configs",
")",
"# Figure out all the projects that will be generated and their guids",
"project_objects",
"=",
"_CreateProjectObjects",
"(",
"target_list",
",",
"target_dicts",
",",
"options",
",",
"msvs_version",
")",
"# Generate each project.",
"missing_sources",
"=",
"[",
"]",
"for",
"project",
"in",
"project_objects",
".",
"values",
"(",
")",
":",
"fixpath_prefix",
"=",
"project",
".",
"fixpath_prefix",
"missing_sources",
".",
"extend",
"(",
"_GenerateProject",
"(",
"project",
",",
"options",
",",
"msvs_version",
",",
"generator_flags",
")",
")",
"fixpath_prefix",
"=",
"None",
"for",
"build_file",
"in",
"data",
":",
"# Validate build_file extension",
"if",
"not",
"build_file",
".",
"endswith",
"(",
"'.gyp'",
")",
":",
"continue",
"sln_path",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"build_file",
")",
"[",
"0",
"]",
"+",
"options",
".",
"suffix",
"+",
"'.sln'",
"if",
"options",
".",
"generator_output",
":",
"sln_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"options",
".",
"generator_output",
",",
"sln_path",
")",
"# Get projects in the solution, and their dependents.",
"sln_projects",
"=",
"gyp",
".",
"common",
".",
"BuildFileTargets",
"(",
"target_list",
",",
"build_file",
")",
"sln_projects",
"+=",
"gyp",
".",
"common",
".",
"DeepDependencyTargets",
"(",
"target_dicts",
",",
"sln_projects",
")",
"# Create folder hierarchy.",
"root_entries",
"=",
"_GatherSolutionFolders",
"(",
"sln_projects",
",",
"project_objects",
",",
"flat",
"=",
"msvs_version",
".",
"FlatSolution",
"(",
")",
")",
"# Create solution.",
"sln",
"=",
"MSVSNew",
".",
"MSVSSolution",
"(",
"sln_path",
",",
"entries",
"=",
"root_entries",
",",
"variants",
"=",
"configs",
",",
"websiteProperties",
"=",
"False",
",",
"version",
"=",
"msvs_version",
")",
"sln",
".",
"Write",
"(",
")",
"if",
"missing_sources",
":",
"error_message",
"=",
"\"Missing input files:\\n\"",
"+",
"'\\n'",
".",
"join",
"(",
"set",
"(",
"missing_sources",
")",
")",
"if",
"generator_flags",
".",
"get",
"(",
"'msvs_error_on_missing_sources'",
",",
"False",
")",
":",
"raise",
"GypError",
"(",
"error_message",
")",
"else",
":",
"print",
">>",
"sys",
".",
"stdout",
",",
"\"Warning: \"",
"+",
"error_message"
] | [
1955,
0
] | [
2033,
54
] | python | it | ['en', 'it', 'it'] | True |
_GenerateMSBuildFiltersFile | (filters_path, source_files,
rule_dependencies, extension_to_rule_name) | Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
| Generate the filters file. | def _GenerateMSBuildFiltersFile(filters_path, source_files,
rule_dependencies, extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, rule_dependencies,
extension_to_rule_name, filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path) | [
"def",
"_GenerateMSBuildFiltersFile",
"(",
"filters_path",
",",
"source_files",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
")",
":",
"filter_group",
"=",
"[",
"]",
"source_group",
"=",
"[",
"]",
"_AppendFiltersForMSBuild",
"(",
"''",
",",
"source_files",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
",",
"filter_group",
",",
"source_group",
")",
"if",
"filter_group",
":",
"content",
"=",
"[",
"'Project'",
",",
"{",
"'ToolsVersion'",
":",
"'4.0'",
",",
"'xmlns'",
":",
"'http://schemas.microsoft.com/developer/msbuild/2003'",
"}",
",",
"[",
"'ItemGroup'",
"]",
"+",
"filter_group",
",",
"[",
"'ItemGroup'",
"]",
"+",
"source_group",
"]",
"easy_xml",
".",
"WriteXmlIfChanged",
"(",
"content",
",",
"filters_path",
",",
"pretty",
"=",
"True",
",",
"win32",
"=",
"True",
")",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"filters_path",
")",
":",
"# We don't need this filter anymore. Delete the old filter file.",
"os",
".",
"unlink",
"(",
"filters_path",
")"
] | [
2036,
0
] | [
2063,
27
] | python | en | ['en', 'en', 'en'] | True |
_AppendFiltersForMSBuild | (parent_filter_name, sources, rule_dependencies,
extension_to_rule_name,
filter_group, source_group) | Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
| Creates the list of filters and sources to be added in the filter file. | def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
rule_dependencies, extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry) | [
"def",
"_AppendFiltersForMSBuild",
"(",
"parent_filter_name",
",",
"sources",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
",",
"filter_group",
",",
"source_group",
")",
":",
"for",
"source",
"in",
"sources",
":",
"if",
"isinstance",
"(",
"source",
",",
"MSVSProject",
".",
"Filter",
")",
":",
"# We have a sub-filter. Create the name of that sub-filter.",
"if",
"not",
"parent_filter_name",
":",
"filter_name",
"=",
"source",
".",
"name",
"else",
":",
"filter_name",
"=",
"'%s\\\\%s'",
"%",
"(",
"parent_filter_name",
",",
"source",
".",
"name",
")",
"# Add the filter to the group.",
"filter_group",
".",
"append",
"(",
"[",
"'Filter'",
",",
"{",
"'Include'",
":",
"filter_name",
"}",
",",
"[",
"'UniqueIdentifier'",
",",
"MSVSNew",
".",
"MakeGuid",
"(",
"source",
".",
"name",
")",
"]",
"]",
")",
"# Recurse and add its dependents.",
"_AppendFiltersForMSBuild",
"(",
"filter_name",
",",
"source",
".",
"contents",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
",",
"filter_group",
",",
"source_group",
")",
"else",
":",
"# It's a source. Create a source entry.",
"_",
",",
"element",
"=",
"_MapFileToMsBuildSourceType",
"(",
"source",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
")",
"source_entry",
"=",
"[",
"element",
",",
"{",
"'Include'",
":",
"source",
"}",
"]",
"# Specify the filter it is part of, if any.",
"if",
"parent_filter_name",
":",
"source_entry",
".",
"append",
"(",
"[",
"'Filter'",
",",
"parent_filter_name",
"]",
")",
"source_group",
".",
"append",
"(",
"source_entry",
")"
] | [
2066,
0
] | [
2102,
39
] | python | en | ['en', 'en', 'en'] | True |
_MapFileToMsBuildSourceType | (source, rule_dependencies,
extension_to_rule_name) | Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
| Returns the group and element type of the source file. | def _MapFileToMsBuildSourceType(source, rule_dependencies,
extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx', '.mm']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.asm':
group = 'masm'
element = 'MASM'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
elif source in rule_dependencies:
group = 'rule_dependency'
element = 'CustomBuild'
else:
group = 'none'
element = 'None'
return (group, element) | [
"def",
"_MapFileToMsBuildSourceType",
"(",
"source",
",",
"rule_dependencies",
",",
"extension_to_rule_name",
")",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"source",
")",
"if",
"ext",
"in",
"extension_to_rule_name",
":",
"group",
"=",
"'rule'",
"element",
"=",
"extension_to_rule_name",
"[",
"ext",
"]",
"elif",
"ext",
"in",
"[",
"'.cc'",
",",
"'.cpp'",
",",
"'.c'",
",",
"'.cxx'",
",",
"'.mm'",
"]",
":",
"group",
"=",
"'compile'",
"element",
"=",
"'ClCompile'",
"elif",
"ext",
"in",
"[",
"'.h'",
",",
"'.hxx'",
"]",
":",
"group",
"=",
"'include'",
"element",
"=",
"'ClInclude'",
"elif",
"ext",
"==",
"'.rc'",
":",
"group",
"=",
"'resource'",
"element",
"=",
"'ResourceCompile'",
"elif",
"ext",
"==",
"'.asm'",
":",
"group",
"=",
"'masm'",
"element",
"=",
"'MASM'",
"elif",
"ext",
"==",
"'.idl'",
":",
"group",
"=",
"'midl'",
"element",
"=",
"'Midl'",
"elif",
"source",
"in",
"rule_dependencies",
":",
"group",
"=",
"'rule_dependency'",
"element",
"=",
"'CustomBuild'",
"else",
":",
"group",
"=",
"'none'",
"element",
"=",
"'None'",
"return",
"(",
"group",
",",
"element",
")"
] | [
2105,
0
] | [
2141,
25
] | python | en | ['en', 'en', 'en'] | True |
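The _MapFileToMsBuildSourceType row above dispatches each source file to an MSBuild item group by extension, with fallbacks for custom rules and rule dependencies. A minimal, self-contained sketch of the same dispatch follows; the sample file names and the .idl rule mapping are invented for illustration and only a few of the extension cases are reproduced.

import os

def classify_source(source, rule_dependencies, extension_to_rule_name):
    # Simplified version of the extension dispatch in the function above.
    _, ext = os.path.splitext(source)
    if ext in extension_to_rule_name:
        return ('rule', extension_to_rule_name[ext])
    if ext in ('.cc', '.cpp', '.c', '.cxx', '.mm'):
        return ('compile', 'ClCompile')
    if ext in ('.h', '.hxx'):
        return ('include', 'ClInclude')
    if source in rule_dependencies:
        return ('rule_dependency', 'CustomBuild')
    return ('none', 'None')

print(classify_source('foo.cc', [], {'.idl': 'Midl'}))   # ('compile', 'ClCompile')
print(classify_source('bar.idl', [], {'.idl': 'Midl'}))  # ('rule', 'Midl')
print(classify_source('LICENSE', ['LICENSE'], {}))       # ('rule_dependency', 'CustomBuild')
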
_GenerateMSBuildRulePropsFile | (props_path, msbuild_rules) | Generate the .props file. | Generate the .props file. | def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True) | [
"def",
"_GenerateMSBuildRulePropsFile",
"(",
"props_path",
",",
"msbuild_rules",
")",
":",
"content",
"=",
"[",
"'Project'",
",",
"{",
"'xmlns'",
":",
"'http://schemas.microsoft.com/developer/msbuild/2003'",
"}",
"]",
"for",
"rule",
"in",
"msbuild_rules",
":",
"content",
".",
"extend",
"(",
"[",
"[",
"'PropertyGroup'",
",",
"{",
"'Condition'",
":",
"\"'$(%s)' == '' and '$(%s)' == '' and \"",
"\"'$(ConfigurationType)' != 'Makefile'\"",
"%",
"(",
"rule",
".",
"before_targets",
",",
"rule",
".",
"after_targets",
")",
"}",
",",
"[",
"rule",
".",
"before_targets",
",",
"'Midl'",
"]",
",",
"[",
"rule",
".",
"after_targets",
",",
"'CustomBuild'",
"]",
",",
"]",
",",
"[",
"'PropertyGroup'",
",",
"[",
"rule",
".",
"depends_on",
",",
"{",
"'Condition'",
":",
"\"'$(ConfigurationType)' != 'Makefile'\"",
"}",
",",
"'_SelectedFiles;$(%s)'",
"%",
"rule",
".",
"depends_on",
"]",
",",
"]",
",",
"[",
"'ItemDefinitionGroup'",
",",
"[",
"rule",
".",
"rule_name",
",",
"[",
"'CommandLineTemplate'",
",",
"rule",
".",
"command",
"]",
",",
"[",
"'Outputs'",
",",
"rule",
".",
"outputs",
"]",
",",
"[",
"'ExecutionDescription'",
",",
"rule",
".",
"description",
"]",
",",
"[",
"'AdditionalDependencies'",
",",
"rule",
".",
"additional_dependencies",
"]",
",",
"]",
",",
"]",
"]",
")",
"easy_xml",
".",
"WriteXmlIfChanged",
"(",
"content",
",",
"props_path",
",",
"pretty",
"=",
"True",
",",
"win32",
"=",
"True",
")"
] | [
2242,
0
] | [
2271,
74
] | python | en | ['en', 'en', 'en'] | True |
_GenerateMSBuildRuleTargetsFile | (targets_path, msbuild_rules) | Generate the .targets file. | Generate the .targets file. | def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'EchoOff': 'true',
'StandardOutputImportance': 'High',
'StandardErrorImportance': 'High',
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True) | [
"def",
"_GenerateMSBuildRuleTargetsFile",
"(",
"targets_path",
",",
"msbuild_rules",
")",
":",
"content",
"=",
"[",
"'Project'",
",",
"{",
"'xmlns'",
":",
"'http://schemas.microsoft.com/developer/msbuild/2003'",
"}",
"]",
"item_group",
"=",
"[",
"'ItemGroup'",
",",
"[",
"'PropertyPageSchema'",
",",
"{",
"'Include'",
":",
"'$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'",
"}",
"]",
"]",
"for",
"rule",
"in",
"msbuild_rules",
":",
"item_group",
".",
"append",
"(",
"[",
"'AvailableItemName'",
",",
"{",
"'Include'",
":",
"rule",
".",
"rule_name",
"}",
",",
"[",
"'Targets'",
",",
"rule",
".",
"target_name",
"]",
",",
"]",
")",
"content",
".",
"append",
"(",
"item_group",
")",
"for",
"rule",
"in",
"msbuild_rules",
":",
"content",
".",
"append",
"(",
"[",
"'UsingTask'",
",",
"{",
"'TaskName'",
":",
"rule",
".",
"rule_name",
",",
"'TaskFactory'",
":",
"'XamlTaskFactory'",
",",
"'AssemblyName'",
":",
"'Microsoft.Build.Tasks.v4.0'",
"}",
",",
"[",
"'Task'",
",",
"'$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'",
"]",
",",
"]",
")",
"for",
"rule",
"in",
"msbuild_rules",
":",
"rule_name",
"=",
"rule",
".",
"rule_name",
"target_outputs",
"=",
"'%%(%s.Outputs)'",
"%",
"rule_name",
"target_inputs",
"=",
"(",
"'%%(%s.Identity);%%(%s.AdditionalDependencies);'",
"'$(MSBuildProjectFile)'",
")",
"%",
"(",
"rule_name",
",",
"rule_name",
")",
"rule_inputs",
"=",
"'%%(%s.Identity)'",
"%",
"rule_name",
"extension_condition",
"=",
"(",
"\"'%(Extension)'=='.obj' or \"",
"\"'%(Extension)'=='.res' or \"",
"\"'%(Extension)'=='.rsc' or \"",
"\"'%(Extension)'=='.lib'\"",
")",
"remove_section",
"=",
"[",
"'ItemGroup'",
",",
"{",
"'Condition'",
":",
"\"'@(SelectedFiles)' != ''\"",
"}",
",",
"[",
"rule_name",
",",
"{",
"'Remove'",
":",
"'@(%s)'",
"%",
"rule_name",
",",
"'Condition'",
":",
"\"'%(Identity)' != '@(SelectedFiles)'\"",
"}",
"]",
"]",
"inputs_section",
"=",
"[",
"'ItemGroup'",
",",
"[",
"rule",
".",
"inputs",
",",
"{",
"'Include'",
":",
"'%%(%s.AdditionalDependencies)'",
"%",
"rule_name",
"}",
"]",
"]",
"logging_section",
"=",
"[",
"'ItemGroup'",
",",
"[",
"rule",
".",
"tlog",
",",
"{",
"'Include'",
":",
"'%%(%s.Outputs)'",
"%",
"rule_name",
",",
"'Condition'",
":",
"(",
"\"'%%(%s.Outputs)' != '' and \"",
"\"'%%(%s.ExcludedFromBuild)' != 'true'\"",
"%",
"(",
"rule_name",
",",
"rule_name",
")",
")",
"}",
",",
"[",
"'Source'",
",",
"\"@(%s, '|')\"",
"%",
"rule_name",
"]",
",",
"[",
"'Inputs'",
",",
"\"@(%s -> '%%(Fullpath)', ';')\"",
"%",
"rule",
".",
"inputs",
"]",
",",
"]",
",",
"]",
"message_section",
"=",
"[",
"'Message'",
",",
"{",
"'Importance'",
":",
"'High'",
",",
"'Text'",
":",
"'%%(%s.ExecutionDescription)'",
"%",
"rule_name",
"}",
"]",
"write_tlog_section",
"=",
"[",
"'WriteLinesToFile'",
",",
"{",
"'Condition'",
":",
"\"'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != \"",
"\"'true'\"",
"%",
"(",
"rule",
".",
"tlog",
",",
"rule",
".",
"tlog",
")",
",",
"'File'",
":",
"'$(IntDir)$(ProjectName).write.1.tlog'",
",",
"'Lines'",
":",
"\"^%%(%s.Source);@(%s->'%%(Fullpath)')\"",
"%",
"(",
"rule",
".",
"tlog",
",",
"rule",
".",
"tlog",
")",
"}",
"]",
"read_tlog_section",
"=",
"[",
"'WriteLinesToFile'",
",",
"{",
"'Condition'",
":",
"\"'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != \"",
"\"'true'\"",
"%",
"(",
"rule",
".",
"tlog",
",",
"rule",
".",
"tlog",
")",
",",
"'File'",
":",
"'$(IntDir)$(ProjectName).read.1.tlog'",
",",
"'Lines'",
":",
"\"^%%(%s.Source);%%(%s.Inputs)\"",
"%",
"(",
"rule",
".",
"tlog",
",",
"rule",
".",
"tlog",
")",
"}",
"]",
"command_and_input_section",
"=",
"[",
"rule_name",
",",
"{",
"'Condition'",
":",
"\"'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != \"",
"\"'true'\"",
"%",
"(",
"rule_name",
",",
"rule_name",
")",
",",
"'EchoOff'",
":",
"'true'",
",",
"'StandardOutputImportance'",
":",
"'High'",
",",
"'StandardErrorImportance'",
":",
"'High'",
",",
"'CommandLineTemplate'",
":",
"'%%(%s.CommandLineTemplate)'",
"%",
"rule_name",
",",
"'AdditionalOptions'",
":",
"'%%(%s.AdditionalOptions)'",
"%",
"rule_name",
",",
"'Inputs'",
":",
"rule_inputs",
"}",
"]",
"content",
".",
"extend",
"(",
"[",
"[",
"'Target'",
",",
"{",
"'Name'",
":",
"rule",
".",
"target_name",
",",
"'BeforeTargets'",
":",
"'$(%s)'",
"%",
"rule",
".",
"before_targets",
",",
"'AfterTargets'",
":",
"'$(%s)'",
"%",
"rule",
".",
"after_targets",
",",
"'Condition'",
":",
"\"'@(%s)' != ''\"",
"%",
"rule_name",
",",
"'DependsOnTargets'",
":",
"'$(%s);%s'",
"%",
"(",
"rule",
".",
"depends_on",
",",
"rule",
".",
"compute_output",
")",
",",
"'Outputs'",
":",
"target_outputs",
",",
"'Inputs'",
":",
"target_inputs",
"}",
",",
"remove_section",
",",
"inputs_section",
",",
"logging_section",
",",
"message_section",
",",
"write_tlog_section",
",",
"read_tlog_section",
",",
"command_and_input_section",
",",
"]",
",",
"[",
"'PropertyGroup'",
",",
"[",
"'ComputeLinkInputsTargets'",
",",
"'$(ComputeLinkInputsTargets);'",
",",
"'%s;'",
"%",
"rule",
".",
"compute_output",
"]",
",",
"[",
"'ComputeLibInputsTargets'",
",",
"'$(ComputeLibInputsTargets);'",
",",
"'%s;'",
"%",
"rule",
".",
"compute_output",
"]",
",",
"]",
",",
"[",
"'Target'",
",",
"{",
"'Name'",
":",
"rule",
".",
"compute_output",
",",
"'Condition'",
":",
"\"'@(%s)' != ''\"",
"%",
"rule_name",
"}",
",",
"[",
"'ItemGroup'",
",",
"[",
"rule",
".",
"dirs_to_make",
",",
"{",
"'Condition'",
":",
"\"'@(%s)' != '' and \"",
"\"'%%(%s.ExcludedFromBuild)' != 'true'\"",
"%",
"(",
"rule_name",
",",
"rule_name",
")",
",",
"'Include'",
":",
"'%%(%s.Outputs)'",
"%",
"rule_name",
"}",
"]",
",",
"[",
"'Link'",
",",
"{",
"'Include'",
":",
"'%%(%s.Identity)'",
"%",
"rule",
".",
"dirs_to_make",
",",
"'Condition'",
":",
"extension_condition",
"}",
"]",
",",
"[",
"'Lib'",
",",
"{",
"'Include'",
":",
"'%%(%s.Identity)'",
"%",
"rule",
".",
"dirs_to_make",
",",
"'Condition'",
":",
"extension_condition",
"}",
"]",
",",
"[",
"'ImpLib'",
",",
"{",
"'Include'",
":",
"'%%(%s.Identity)'",
"%",
"rule",
".",
"dirs_to_make",
",",
"'Condition'",
":",
"extension_condition",
"}",
"]",
",",
"]",
",",
"[",
"'MakeDir'",
",",
"{",
"'Directories'",
":",
"(",
"\"@(%s->'%%(RootDir)%%(Directory)')\"",
"%",
"rule",
".",
"dirs_to_make",
")",
"}",
"]",
"]",
",",
"]",
")",
"easy_xml",
".",
"WriteXmlIfChanged",
"(",
"content",
",",
"targets_path",
",",
"pretty",
"=",
"True",
",",
"win32",
"=",
"True",
")"
] | [
2274,
0
] | [
2436,
76
] | python | en | ['en', 'en', 'en'] | True |
RuntimeDataConnector._get_data_reference_list | (
self, data_asset_name: Optional[str] = None
) |
List objects in the cache to create a list of data_references. If data_asset_name is passed in, method will
return all data_references for the named data_asset. If no data_asset_name is passed in, will return a list of
all data_references for all data_assets in the cache.
|
List objects in the cache to create a list of data_references. If data_asset_name is passed in, method will
return all data_references for the named data_asset. If no data_asset_name is passed in, will return a list of
all data_references for all data_assets in the cache.
| def _get_data_reference_list(
self, data_asset_name: Optional[str] = None
) -> List[str]:
"""
List objects in the cache to create a list of data_references. If data_asset_name is passed in, method will
return all data_references for the named data_asset. If no data_asset_name is passed in, will return a list of
all data_references for all data_assets in the cache.
"""
if data_asset_name:
return self._get_data_reference_list_from_cache_by_data_asset_name(
data_asset_name
)
else:
data_reference_list = [
self._get_data_reference_list_from_cache_by_data_asset_name(
data_asset_name
)
for data_asset_name in self.get_available_data_asset_names()
]
return data_reference_list | [
"def",
"_get_data_reference_list",
"(",
"self",
",",
"data_asset_name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"data_asset_name",
":",
"return",
"self",
".",
"_get_data_reference_list_from_cache_by_data_asset_name",
"(",
"data_asset_name",
")",
"else",
":",
"data_reference_list",
"=",
"[",
"self",
".",
"_get_data_reference_list_from_cache_by_data_asset_name",
"(",
"data_asset_name",
")",
"for",
"data_asset_name",
"in",
"self",
".",
"get_available_data_asset_names",
"(",
")",
"]",
"return",
"data_reference_list"
] | [
65,
4
] | [
84,
38
] | python | en | ['en', 'error', 'th'] | False |
RuntimeDataConnector._get_data_reference_list_from_cache_by_data_asset_name | (
self, data_asset_name: str
) | Fetch data_references corresponding to data_asset_name from the cache. | Fetch data_references corresponding to data_asset_name from the cache. | def _get_data_reference_list_from_cache_by_data_asset_name(
self, data_asset_name: str
) -> List[str]:
"""Fetch data_references corresponding to data_asset_name from the cache."""
data_references_for_data_asset_name = self._data_references_cache.get(
data_asset_name
)
if data_references_for_data_asset_name is not None:
return list(data_references_for_data_asset_name.keys())
else:
return [] | [
"def",
"_get_data_reference_list_from_cache_by_data_asset_name",
"(",
"self",
",",
"data_asset_name",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"data_references_for_data_asset_name",
"=",
"self",
".",
"_data_references_cache",
".",
"get",
"(",
"data_asset_name",
")",
"if",
"data_references_for_data_asset_name",
"is",
"not",
"None",
":",
"return",
"list",
"(",
"data_references_for_data_asset_name",
".",
"keys",
"(",
")",
")",
"else",
":",
"return",
"[",
"]"
] | [
86,
4
] | [
96,
21
] | python | en | ['en', 'en', 'en'] | True |
RuntimeDataConnector.get_data_reference_list_count | (self) |
Get number of data_references corresponding to all data_asset_names in cache. In cases where the
RuntimeDataConnector has been passed a BatchRequest with the same data_asset_name but different
batch_identifiers, it is possible to have more than one data_reference for a data_asset.
|
Get number of data_references corresponding to all data_asset_names in cache. In cases where the
RuntimeDataConnector has been passed a BatchRequest with the same data_asset_name but different
batch_identifiers, it is possible to have more than one data_reference for a data_asset.
| def get_data_reference_list_count(self) -> int:
"""
Get number of data_references corresponding to all data_asset_names in cache. In cases where the
RuntimeDataConnector has been passed a BatchRequest with the same data_asset_name but different
batch_identifiers, it is possible to have more than one data_reference for a data_asset.
"""
return sum(
len(data_reference_dict)
for key, data_reference_dict in self._data_references_cache.items()
) | [
"def",
"get_data_reference_list_count",
"(",
"self",
")",
"->",
"int",
":",
"return",
"sum",
"(",
"len",
"(",
"data_reference_dict",
")",
"for",
"key",
",",
"data_reference_dict",
"in",
"self",
".",
"_data_references_cache",
".",
"items",
"(",
")",
")"
] | [
98,
4
] | [
107,
9
] | python | en | ['en', 'error', 'th'] | False |
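The get_data_reference_list_count row above sums over a two-level cache keyed by data_asset_name. A tiny sketch of that counting pattern, with a made-up cache, could look like this:

# Hypothetical cache shaped like RuntimeDataConnector._data_references_cache:
# {data_asset_name: {data_reference: <batch definitions>}}
data_references_cache = {
    "asset_a": {"ref-1": object(), "ref-2": object()},
    "asset_b": {"ref-3": object()},
}
total = sum(len(refs) for refs in data_references_cache.values())
print(total)  # 3 data_references across 2 data_assets
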
RuntimeDataConnector.get_available_data_asset_names | (self) | Please see note in : _get_batch_definition_list_from_batch_request() | Please see note in : _get_batch_definition_list_from_batch_request() | def get_available_data_asset_names(self) -> List[str]:
"""Please see note in : _get_batch_definition_list_from_batch_request()"""
return list(self._data_references_cache.keys()) | [
"def",
"get_available_data_asset_names",
"(",
"self",
")",
"->",
"List",
"[",
"str",
"]",
":",
"return",
"list",
"(",
"self",
".",
"_data_references_cache",
".",
"keys",
"(",
")",
")"
] | [
112,
4
] | [
114,
55
] | python | en | ['en', 'en', 'en'] | True |
RuntimeDataConnector._get_batch_definition_list_from_batch_request | (
self,
batch_request: BatchRequest,
) |
<Will> 202103. The following behavior of the _data_references_cache follows a pattern that we are using for
other data_connectors, including variations of FilePathDataConnector. When BatchRequest contains batch_data
that is passed in as an in-memory dataframe, the cache will contain the names of all data_assets
(and data_references) that have been passed into the RuntimeDataConnector in this session, even though technically
only the most recent batch_data is available. This can be misleading. However, allowing the RuntimeDataConnector
to keep a record of all data_assets (and data_references) that have been passed in will allow for the proposed
behavior of RuntimeBatchRequest which will allow for paths and queries to be passed in as part of the BatchRequest.
Therefore this behavior will be revisited when the design of RuntimeBatchRequest and related classes are complete.
|
<Will> 202103. The following behavior of the _data_references_cache follows a pattern that we are using for
other data_connectors, including variations of FilePathDataConnector. When BatchRequest contains batch_data
that is passed in as an in-memory dataframe, the cache will contain the names of all data_assets
(and data_references) that have been passed into the RuntimeDataConnector in this session, even though technically
only the most recent batch_data is available. This can be misleading. However, allowing the RuntimeDataConnector
to keep a record of all data_assets (and data_references) that have been passed in will allow for the proposed
behavior of RuntimeBatchRequest which will allow for paths and queries to be passed in as part of the BatchRequest.
Therefore this behavior will be revisited when the design of RuntimeBatchRequest and related classes are complete.
| def _get_batch_definition_list_from_batch_request(
self,
batch_request: BatchRequest,
) -> List[BatchDefinition]:
"""
<Will> 202103. The following behavior of the _data_references_cache follows a pattern that we are using for
other data_connectors, including variations of FilePathDataConnector. When BatchRequest contains batch_data
that is passed in as an in-memory dataframe, the cache will contain the names of all data_assets
(and data_references) that have been passed into the RuntimeDataConnector in this session, even though technically
only the most recent batch_data is available. This can be misleading. However, allowing the RuntimeDataConnector
to keep a record of all data_assets (and data_references) that have been passed in will allow for the proposed
behavior of RuntimeBatchRequest which will allow for paths and queries to be passed in as part of the BatchRequest.
Therefore this behavior will be revisited when the design of RuntimeBatchRequest and related classes are complete.
"""
self._validate_batch_request(batch_request=batch_request)
batch_identifiers: Optional[dict] = None
if batch_request.batch_identifiers:
self._validate_batch_identifiers(
batch_identifiers=batch_request.batch_identifiers
)
batch_identifiers = batch_request.batch_identifiers
if not batch_identifiers:
batch_identifiers = {}
batch_definition_list: List[BatchDefinition]
batch_definition: BatchDefinition = BatchDefinition(
datasource_name=self.datasource_name,
data_connector_name=self.name,
data_asset_name=batch_request.data_asset_name,
batch_identifiers=IDDict(batch_identifiers),
batch_spec_passthrough=batch_request.batch_spec_passthrough,
)
batch_definition_list = [batch_definition]
self._update_data_references_cache(
batch_request.data_asset_name, batch_definition_list, batch_identifiers
)
return batch_definition_list | [
"def",
"_get_batch_definition_list_from_batch_request",
"(",
"self",
",",
"batch_request",
":",
"BatchRequest",
",",
")",
"->",
"List",
"[",
"BatchDefinition",
"]",
":",
"self",
".",
"_validate_batch_request",
"(",
"batch_request",
"=",
"batch_request",
")",
"batch_identifiers",
":",
"Optional",
"[",
"dict",
"]",
"=",
"None",
"if",
"batch_request",
".",
"batch_identifiers",
":",
"self",
".",
"_validate_batch_identifiers",
"(",
"batch_identifiers",
"=",
"batch_request",
".",
"batch_identifiers",
")",
"batch_identifiers",
"=",
"batch_request",
".",
"batch_identifiers",
"if",
"not",
"batch_identifiers",
":",
"batch_identifiers",
"=",
"{",
"}",
"batch_definition_list",
":",
"List",
"[",
"BatchDefinition",
"]",
"batch_definition",
":",
"BatchDefinition",
"=",
"BatchDefinition",
"(",
"datasource_name",
"=",
"self",
".",
"datasource_name",
",",
"data_connector_name",
"=",
"self",
".",
"name",
",",
"data_asset_name",
"=",
"batch_request",
".",
"data_asset_name",
",",
"batch_identifiers",
"=",
"IDDict",
"(",
"batch_identifiers",
")",
",",
"batch_spec_passthrough",
"=",
"batch_request",
".",
"batch_spec_passthrough",
",",
")",
"batch_definition_list",
"=",
"[",
"batch_definition",
"]",
"self",
".",
"_update_data_references_cache",
"(",
"batch_request",
".",
"data_asset_name",
",",
"batch_definition_list",
",",
"batch_identifiers",
")",
"return",
"batch_definition_list"
] | [
143,
4
] | [
180,
36
] | python | en | ['en', 'error', 'th'] | False |
Anonymizer._is_parent_class_recognized | (
self,
classes_to_check,
object_=None,
object_class=None,
object_config=None,
) |
Check if the parent class is a subclass of any core GE class.
This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
Returns:
The name of the parent class found, or None if no parent class was found
|
Check if the parent class is a subclass of any core GE class.
This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
Returns:
The name of the parent class found, or None if no parent class was found
| def _is_parent_class_recognized(
self,
classes_to_check,
object_=None,
object_class=None,
object_config=None,
) -> Optional[str]:
"""
Check if the parent class is a subclass of any core GE class.
This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
Returns:
The name of the parent class found, or None if no parent class was found
"""
assert (
object_ or object_class or object_config
), "Must pass either object_ or object_class or object_config."
try:
if object_class is None and object_ is not None:
object_class = object_.__class__
elif object_class is None and object_config is not None:
object_class_name = object_config.get("class_name")
object_module_name = object_config.get("module_name")
object_class = load_class(object_class_name, object_module_name)
for class_to_check in classes_to_check:
if issubclass(object_class, class_to_check):
return class_to_check.__name__
return None
except AttributeError:
return None | [
"def",
"_is_parent_class_recognized",
"(",
"self",
",",
"classes_to_check",
",",
"object_",
"=",
"None",
",",
"object_class",
"=",
"None",
",",
"object_config",
"=",
"None",
",",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"assert",
"(",
"object_",
"or",
"object_class",
"or",
"object_config",
")",
",",
"\"Must pass either object_ or object_class or object_config.\"",
"try",
":",
"if",
"object_class",
"is",
"None",
"and",
"object_",
"is",
"not",
"None",
":",
"object_class",
"=",
"object_",
".",
"__class__",
"elif",
"object_class",
"is",
"None",
"and",
"object_config",
"is",
"not",
"None",
":",
"object_class_name",
"=",
"object_config",
".",
"get",
"(",
"\"class_name\"",
")",
"object_module_name",
"=",
"object_config",
".",
"get",
"(",
"\"module_name\"",
")",
"object_class",
"=",
"load_class",
"(",
"object_class_name",
",",
"object_module_name",
")",
"for",
"class_to_check",
"in",
"classes_to_check",
":",
"if",
"issubclass",
"(",
"object_class",
",",
"class_to_check",
")",
":",
"return",
"class_to_check",
".",
"__name__",
"return",
"None",
"except",
"AttributeError",
":",
"return",
"None"
] | [
71,
4
] | [
102,
23
] | python | en | ['en', 'error', 'th'] | False |
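The recognition logic above reduces to walking a list of candidate base classes and returning the name of the first one the object's class descends from. A minimal, self-contained sketch of that pattern (the store classes here are hypothetical stand-ins, not actual Great Expectations types):

from typing import Optional, Sequence, Type


class BaseStore:                        # hypothetical stand-in for a core class
    pass


class TupleFilesystemStore(BaseStore):  # hypothetical user subclass
    pass


def recognized_parent_class(obj: object, classes_to_check: Sequence[Type]) -> Optional[str]:
    """Return the name of the first recognized base class, or None."""
    for cls in classes_to_check:
        if issubclass(obj.__class__, cls):
            return cls.__name__
    return None


print(recognized_parent_class(TupleFilesystemStore(), [BaseStore]))  # BaseStore
print(recognized_parent_class(object(), [BaseStore]))                # None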
BasePreProcessor.process | (
self,
document: dict,
clean_whitespace: Optional[bool] = True,
clean_header_footer: Optional[bool] = False,
clean_empty_lines: Optional[bool] = True,
split_by: Optional[str] = "word",
split_length: Optional[int] = 1000,
split_overlap: Optional[int] = None,
split_respect_sentence_boundary: Optional[bool] = True,
) |
Perform document cleaning and splitting. Takes a single document as input and returns a list of documents.
|
Perform document cleaning and splitting. Takes a single document as input and returns a list of documents.
| def process(
self,
document: dict,
clean_whitespace: Optional[bool] = True,
clean_header_footer: Optional[bool] = False,
clean_empty_lines: Optional[bool] = True,
split_by: Optional[str] = "word",
split_length: Optional[int] = 1000,
split_overlap: Optional[int] = None,
split_respect_sentence_boundary: Optional[bool] = True,
) -> List[dict]:
"""
Perform document cleaning and splitting. Takes a single document as input and returns a list of documents.
"""
raise NotImplementedError | [
"def",
"process",
"(",
"self",
",",
"document",
":",
"dict",
",",
"clean_whitespace",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"clean_header_footer",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"clean_empty_lines",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
"split_by",
":",
"Optional",
"[",
"str",
"]",
"=",
"\"word\"",
",",
"split_length",
":",
"Optional",
"[",
"int",
"]",
"=",
"1000",
",",
"split_overlap",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"split_respect_sentence_boundary",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
")",
"->",
"List",
"[",
"dict",
"]",
":",
"raise",
"NotImplementedError"
] | [
8,
4
] | [
22,
33
] | python | en | ['en', 'error', 'th'] | False |
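Because `process()` only raises `NotImplementedError`, concrete preprocessors are expected to override it. A hedged sketch of what a minimal subclass could look like; the whitespace cleaning and word splitting below are illustrative rather than the library's real implementation, and the base class is a local stand-in:

from typing import List, Optional


class BasePreProcessor:                      # local stand-in for the abstract base above
    def process(self, document: dict, **kwargs) -> List[dict]:
        raise NotImplementedError


class SimplePreProcessor(BasePreProcessor):  # hypothetical concrete subclass
    def process(
        self,
        document: dict,
        clean_whitespace: Optional[bool] = True,
        split_by: Optional[str] = "word",
        split_length: Optional[int] = 1000,
        **kwargs,
    ) -> List[dict]:
        text = document["text"]
        if clean_whitespace:
            text = " ".join(text.split())
        words = text.split() if split_by == "word" else [text]
        # One output document per `split_length` words.
        return [
            {**document, "text": " ".join(words[i:i + split_length])}
            for i in range(0, len(words), split_length)
        ]


docs = SimplePreProcessor().process({"text": "a  b   c d"}, split_length=2)
print([d["text"] for d in docs])             # ['a b', 'c d']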
start | (benchmark, benchmark_input_size, output, deployments, remove_containers, **kwargs) |
Start a given number of function instances and a storage instance.
|
Start a given number of function instances and a storage instance.
| def start(benchmark, benchmark_input_size, output, deployments, remove_containers, **kwargs):
"""
Start a given number of function instances and a storage instance.
"""
(
config,
output_dir,
logging_filename,
sebs_client,
deployment_client
) = parse_common_params(
ignore_cache = True,
update_code = False,
update_storage = False,
deployment = "local",
**kwargs
)
deployment_client = cast(sebs.local.Local, deployment_client)
deployment_client.remove_containers = remove_containers
result = sebs.local.Deployment()
experiment_config = sebs_client.get_experiment_config(config["experiments"])
benchmark_obj = sebs_client.get_benchmark(
benchmark,
deployment_client,
experiment_config,
logging_filename=logging_filename,
)
storage = deployment_client.get_storage(
replace_existing=experiment_config.update_storage
)
result.set_storage(storage)
input_config = benchmark_obj.prepare_input(
storage=storage, size=benchmark_input_size
)
result.add_input(input_config)
for i in range(deployments):
func = deployment_client.get_function(
benchmark_obj, deployment_client.default_function_name(benchmark_obj)
)
result.add_function(func)
    # Disable shutdown of storage only after we succeed
# Otherwise we want to clean up as much as possible
deployment_client.shutdown_storage = False
result.serialize(output)
sebs_client.logging.info(f"Save results to {os.path.abspath(output)}") | [
"def",
"start",
"(",
"benchmark",
",",
"benchmark_input_size",
",",
"output",
",",
"deployments",
",",
"remove_containers",
",",
"*",
"*",
"kwargs",
")",
":",
"(",
"config",
",",
"output_dir",
",",
"logging_filename",
",",
"sebs_client",
",",
"deployment_client",
")",
"=",
"parse_common_params",
"(",
"ignore_cache",
"=",
"True",
",",
"update_code",
"=",
"False",
",",
"update_storage",
"=",
"False",
",",
"deployment",
"=",
"\"local\"",
",",
"*",
"*",
"kwargs",
")",
"deployment_client",
"=",
"cast",
"(",
"sebs",
".",
"local",
".",
"Local",
",",
"deployment_client",
")",
"deployment_client",
".",
"remove_containers",
"=",
"remove_containers",
"result",
"=",
"sebs",
".",
"local",
".",
"Deployment",
"(",
")",
"experiment_config",
"=",
"sebs_client",
".",
"get_experiment_config",
"(",
"config",
"[",
"\"experiments\"",
"]",
")",
"benchmark_obj",
"=",
"sebs_client",
".",
"get_benchmark",
"(",
"benchmark",
",",
"deployment_client",
",",
"experiment_config",
",",
"logging_filename",
"=",
"logging_filename",
",",
")",
"storage",
"=",
"deployment_client",
".",
"get_storage",
"(",
"replace_existing",
"=",
"experiment_config",
".",
"update_storage",
")",
"result",
".",
"set_storage",
"(",
"storage",
")",
"input_config",
"=",
"benchmark_obj",
".",
"prepare_input",
"(",
"storage",
"=",
"storage",
",",
"size",
"=",
"benchmark_input_size",
")",
"result",
".",
"add_input",
"(",
"input_config",
")",
"for",
"i",
"in",
"range",
"(",
"deployments",
")",
":",
"func",
"=",
"deployment_client",
".",
"get_function",
"(",
"benchmark_obj",
",",
"deployment_client",
".",
"default_function_name",
"(",
"benchmark_obj",
")",
")",
"result",
".",
"add_function",
"(",
"func",
")",
"# Disable shutdown of storage only after we succed",
"# Otherwise we want to clean up as much as possible",
"deployment_client",
".",
"shutdown_storage",
"=",
"False",
"result",
".",
"serialize",
"(",
"output",
")",
"sebs_client",
".",
"logging",
".",
"info",
"(",
"f\"Save results to {os.path.abspath(output)}\"",
")"
] | [
298,
0
] | [
345,
74
] | python | en | ['en', 'error', 'th'] | False |
stop | (input_json, **kwargs) |
Stop function and storage containers.
|
Stop function and storage containers.
| def stop(input_json, **kwargs):
"""
Stop function and storage containers.
"""
sebs.utils.global_logging()
logging.info(f"Stopping deployment from {os.path.abspath(input_json)}")
deployment = sebs.local.Deployment.deserialize(input_json, None)
deployment.shutdown()
logging.info(f"Stopped deployment from {os.path.abspath(input_json)}") | [
"def",
"stop",
"(",
"input_json",
",",
"*",
"*",
"kwargs",
")",
":",
"sebs",
".",
"utils",
".",
"global_logging",
"(",
")",
"logging",
".",
"info",
"(",
"f\"Stopping deployment from {os.path.abspath(input_json)}\"",
")",
"deployment",
"=",
"sebs",
".",
"local",
".",
"Deployment",
".",
"deserialize",
"(",
"input_json",
",",
"None",
")",
"deployment",
".",
"shutdown",
"(",
")",
"logging",
".",
"info",
"(",
"f\"Stopped deployment from {os.path.abspath(input_json)}\"",
")"
] | [
350,
0
] | [
360,
74
] | python | en | ['en', 'error', 'th'] | False |
base_version | (release_version: str) | Given 'X.Y.Z[-rc.N]', return 'X.Y'. | Given 'X.Y.Z[-rc.N]', return 'X.Y'. | def base_version(release_version: str) -> str:
"""Given 'X.Y.Z[-rc.N]', return 'X.Y'."""
return build_version(release_version).rsplit(sep='.', maxsplit=1)[0] | [
"def",
"base_version",
"(",
"release_version",
":",
"str",
")",
"->",
"str",
":",
"return",
"build_version",
"(",
"release_version",
")",
".",
"rsplit",
"(",
"sep",
"=",
"'.'",
",",
"maxsplit",
"=",
"1",
")",
"[",
"0",
"]"
] | [
21,
0
] | [
23,
72
] | python | en | ['en', 'cy', 'en'] | True |
build_version | (release_version: str) | Given 'X.Y.Z[-rc.N]', return 'X.Y.Z'. | Given 'X.Y.Z[-rc.N]', return 'X.Y.Z'. | def build_version(release_version: str) -> str:
"""Given 'X.Y.Z[-rc.N]', return 'X.Y.Z'."""
return release_version.split('-')[0] | [
"def",
"build_version",
"(",
"release_version",
":",
"str",
")",
"->",
"str",
":",
"return",
"release_version",
".",
"split",
"(",
"'-'",
")",
"[",
"0",
"]"
] | [
26,
0
] | [
28,
40
] | python | en | ['en', 'cy', 'en'] | True |
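Both version helpers are pure string manipulation, so their behaviour can be checked in isolation. A small self-contained restatement with assertions:

def build_version(release_version: str) -> str:
    """'X.Y.Z[-rc.N]' -> 'X.Y.Z'."""
    return release_version.split('-')[0]


def base_version(release_version: str) -> str:
    """'X.Y.Z[-rc.N]' -> 'X.Y'."""
    return build_version(release_version).rsplit(sep='.', maxsplit=1)[0]


assert build_version("1.14.2-rc.3") == "1.14.2"
assert base_version("1.14.2-rc.3") == "1.14"
assert base_version("2.0.0") == "2.0"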
assert_eq | (actual: Any, expected: Any) | `assert_eq(a, b)` is like `assert a == b`, but has a useful error
message when they're not equal.
| `assert_eq(a, b)` is like `assert a == b`, but has a useful error
message when they're not equal.
| def assert_eq(actual: Any, expected: Any) -> None:
"""`assert_eq(a, b)` is like `assert a == b`, but has a useful error
message when they're not equal.
"""
if actual != expected:
raise AssertionError(f"wanted '{expected}', got '{actual}'") | [
"def",
"assert_eq",
"(",
"actual",
":",
"Any",
",",
"expected",
":",
"Any",
")",
"->",
"None",
":",
"if",
"actual",
"!=",
"expected",
":",
"raise",
"AssertionError",
"(",
"f\"wanted '{expected}', got '{actual}'\"",
")"
] | [
31,
0
] | [
36,
68
] | python | en | ['en', 'lb', 'en'] | True |
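A short usage sketch of the helper, restated locally so it runs on its own:

from typing import Any


def assert_eq(actual: Any, expected: Any) -> None:
    if actual != expected:
        raise AssertionError(f"wanted '{expected}', got '{actual}'")


assert_eq(2 + 2, 4)          # passes silently
try:
    assert_eq("1.14", "1.15")
except AssertionError as err:
    print(err)               # wanted '1.15', got '1.14'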
get_is_private | () | Return whether we're in a "private" Git checkout, for doing
embargoed work.
| Return whether we're in a "private" Git checkout, for doing
embargoed work.
| def get_is_private() -> bool:
"""Return whether we're in a "private" Git checkout, for doing
embargoed work.
"""
remote_names = run_txtcapture(['git', 'remote']).split()
remote_urls: List[str] = []
for remote_name in remote_names:
remote_urls += run_txtcapture(['git', 'remote', 'get-url', '--all', remote_name]).split()
return 'private' in "\n".join(remote_urls) | [
"def",
"get_is_private",
"(",
")",
"->",
"bool",
":",
"remote_names",
"=",
"run_txtcapture",
"(",
"[",
"'git'",
",",
"'remote'",
"]",
")",
".",
"split",
"(",
")",
"remote_urls",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"for",
"remote_name",
"in",
"remote_names",
":",
"remote_urls",
"+=",
"run_txtcapture",
"(",
"[",
"'git'",
",",
"'remote'",
",",
"'get-url'",
",",
"'--all'",
",",
"remote_name",
"]",
")",
".",
"split",
"(",
")",
"return",
"'private'",
"in",
"\"\\n\"",
".",
"join",
"(",
"remote_urls",
")"
] | [
39,
0
] | [
47,
46
] | python | en | ['en', 'en', 'en'] | True |
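`run_txtcapture` is not shown in this excerpt; presumably it runs a command and returns its stdout. A hedged stand-in using `subprocess` that makes the remote-URL check reproducible outside the release tooling (it must be run inside a Git checkout):

import subprocess
from typing import List


def run_txtcapture(cmd: List[str]) -> str:
    # Assumed behaviour: run the command and return its stdout as text.
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout


def get_is_private() -> bool:
    remote_names = run_txtcapture(['git', 'remote']).split()
    remote_urls: List[str] = []
    for remote_name in remote_names:
        remote_urls += run_txtcapture(
            ['git', 'remote', 'get-url', '--all', remote_name]
        ).split()
    return 'private' in "\n".join(remote_urls)


if __name__ == "__main__":
    print(get_is_private())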
match_prods_res | (dc, products, method='min') |
Determines a resolution that matches a set of Data Cube products -
either the minimum or maximum resolution along the x and y dimensions.
Product resolutions are derived from Data Cube metadata for those products.
Parameters
----------
dc: datacube.Datacube
A connection to the Data Cube to determine the resolution of
individual products from.
products: list of str
The names of the products to find a matching resolution for.
method: str
The method of finding a matching resolution. The options are
['min', 'max'], which separately determine the y and x resolutions
as the minimum or maximum among all selected products.
Returns
-------
res: list
A list of the y and x resolutions, in that order.
|
Determines a resolution that matches a set of Data Cube products -
either the minimum or maximum resolution along the x and y dimensions.
Product resolutions are derived from Data Cube metadata for those products. | def match_prods_res(dc, products, method='min'):
"""
Determines a resolution that matches a set of Data Cube products -
either the minimum or maximum resolution along the x and y dimensions.
Product resolutions are derived from Data Cube metadata for those products.
Parameters
----------
dc: datacube.Datacube
A connection to the Data Cube to determine the resolution of
individual products from.
products: list of str
The names of the products to find a matching resolution for.
method: str
The method of finding a matching resolution. The options are
['min', 'max'], which separately determine the y and x resolutions
as the minimum or maximum among all selected products.
Returns
-------
res: list
A list of the y and x resolutions, in that order.
"""
if method not in ['min', 'max']:
raise ValueError("The method \"{}\" is not supported. "
"Please choose one of ['min', 'max'].".format(method))
prod_info = dc.list_products()
resolutions = prod_info[prod_info['name'].isin(products)] \
['resolution'].values
# The first resolution is for y and is negative.
# The second resolution is for x and is positive.
if method == 'min':
# Determine the minimum resolution, which is actually the maximum
# value resolution, since resolution is measured in degrees per pixel.
matching_res = [0] * 2
for res in resolutions:
matching_res[0] = res[0] if res[0] < matching_res[0] else matching_res[0]
matching_res[1] = res[1] if matching_res[1] < res[1] else matching_res[1]
else:
matching_res = [-np.inf, np.inf]
for res in resolutions:
matching_res[0] = res[0] if matching_res[0] < res[0] else matching_res[0]
matching_res[1] = res[1] if res[1] < matching_res[1] else matching_res[1]
return matching_res | [
"def",
"match_prods_res",
"(",
"dc",
",",
"products",
",",
"method",
"=",
"'min'",
")",
":",
"if",
"method",
"not",
"in",
"[",
"'min'",
",",
"'max'",
"]",
":",
"raise",
"ValueError",
"(",
"\"The method \\\"{}\\\" is not supported. \"",
"\"Please choose one of ['min', 'max'].\"",
".",
"format",
"(",
"method",
")",
")",
"prod_info",
"=",
"dc",
".",
"list_products",
"(",
")",
"resolutions",
"=",
"prod_info",
"[",
"prod_info",
"[",
"'name'",
"]",
".",
"isin",
"(",
"products",
")",
"]",
"[",
"'resolution'",
"]",
".",
"values",
"# The first resolution is for y and is negative.",
"# The second resolution is for x and is positive.",
"if",
"method",
"==",
"'min'",
":",
"# Determine the minimum resolution, which is actually the maximum",
"# value resolution, since resolution is measured in degrees per pixel.",
"matching_res",
"=",
"[",
"0",
"]",
"*",
"2",
"for",
"res",
"in",
"resolutions",
":",
"matching_res",
"[",
"0",
"]",
"=",
"res",
"[",
"0",
"]",
"if",
"res",
"[",
"0",
"]",
"<",
"matching_res",
"[",
"0",
"]",
"else",
"matching_res",
"[",
"0",
"]",
"matching_res",
"[",
"1",
"]",
"=",
"res",
"[",
"1",
"]",
"if",
"matching_res",
"[",
"1",
"]",
"<",
"res",
"[",
"1",
"]",
"else",
"matching_res",
"[",
"1",
"]",
"else",
":",
"matching_res",
"=",
"[",
"-",
"np",
".",
"inf",
",",
"np",
".",
"inf",
"]",
"for",
"res",
"in",
"resolutions",
":",
"matching_res",
"[",
"0",
"]",
"=",
"res",
"[",
"0",
"]",
"if",
"matching_res",
"[",
"0",
"]",
"<",
"res",
"[",
"0",
"]",
"else",
"matching_res",
"[",
"0",
"]",
"matching_res",
"[",
"1",
"]",
"=",
"res",
"[",
"1",
"]",
"if",
"res",
"[",
"1",
"]",
"<",
"matching_res",
"[",
"1",
"]",
"else",
"matching_res",
"[",
"1",
"]",
"return",
"matching_res"
] | [
26,
0
] | [
70,
23
] | python | en | ['en', 'error', 'th'] | False |
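The selection logic is independent of the Data Cube once the per-product (y, x) resolutions are known: y resolutions are negative and x resolutions positive (degrees per pixel), so the 'min' method keeps the coarsest value along each axis and 'max' keeps the finest. A self-contained sketch with made-up resolutions:

import numpy as np


def match_res(resolutions, method='min'):
    # resolutions: iterable of (y_res, x_res) in degrees per pixel; y negative, x positive.
    if method == 'min':   # "minimum resolution" = coarsest pixels (largest magnitude)
        matching = [0.0, 0.0]
        for y_res, x_res in resolutions:
            matching[0] = min(matching[0], y_res)   # more negative = coarser
            matching[1] = max(matching[1], x_res)   # larger positive = coarser
    else:                 # 'max' = finest pixels (smallest magnitude)
        matching = [-np.inf, np.inf]
        for y_res, x_res in resolutions:
            matching[0] = max(matching[0], y_res)
            matching[1] = min(matching[1], x_res)
    return matching


# Made-up 30 m-like and 10 m-like products (degrees per pixel):
res_list = [(-0.00027, 0.00027), (-0.00009, 0.00009)]
print(match_res(res_list, 'min'))   # [-0.00027, 0.00027]
print(match_res(res_list, 'max'))   # [-9e-05, 9e-05]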
match_dim_sizes | (dc, products, x, y, x_y_coords=['longitude', 'latitude'], method='min') |
Returns the x and y dimension sizes that match some x and y extents for some products.
This is useful when determining an absolute resolution to scale products to with
`xr_scale_res()` in the `aggregate.py` utility file.
Parameters
----------
dc: datacube.Datacube
A connection to the Data Cube to determine the resolution of
individual products from.
products: list of str
The names of the products to find a matching resolution for.
x: list-like
A list-like of the minimum and maximum x-axis (e.g. longitude) extents for the products.
y: list-like
A list-like of the minimum and maximum y-axis (e.g. latitude) extents for the products.
x_y_coords: list-like or dict
Either a list-like of the x and y coordinate names or a dictionary mapping product names
to such list-likes.
method: str
The method of finding a matching resolution. The options are
['min', 'max'], which separately determine the y and x resolutions
as the minimum or maximum among all selected products.
Returns
-------
abs_res: list
A list of desired y and x dimension sizes, in that order.
same_dim_sizes: bool
Whether all of the dimension sizes were the same.
|
Returns the x and y dimension sizes that match some x and y extents for some products.
This is useful when determining an absolute resolution to scale products to with
`xr_scale_res()` in the `aggregate.py` utility file. | def match_dim_sizes(dc, products, x, y, x_y_coords=['longitude', 'latitude'], method='min'):
"""
Returns the x and y dimension sizes that match some x and y extents for some products.
This is useful when determining an absolute resolution to scale products to with
`xr_scale_res()` in the `aggregate.py` utility file.
Parameters
----------
dc: datacube.Datacube
A connection to the Data Cube to determine the resolution of
individual products from.
products: list of str
The names of the products to find a matching resolution for.
x: list-like
A list-like of the minimum and maximum x-axis (e.g. longitude) extents for the products.
y: list-like
A list-like of the minimum and maximum y-axis (e.g. latitude) extents for the products.
x_y_coords: list-like or dict
Either a list-like of the x and y coordinate names or a dictionary mapping product names
to such list-likes.
method: str
The method of finding a matching resolution. The options are
['min', 'max'], which separately determine the y and x resolutions
as the minimum or maximum among all selected products.
Returns
-------
abs_res: list
A list of desired y and x dimension sizes, in that order.
same_dim_sizes: bool
Whether all of the dimension sizes were the same.
"""
coords = []
if isinstance(x_y_coords, dict):
for product in products:
coords.append(x_y_coords[product])
else:
coords = [x_y_coords] * len(products)
datasets_empty = [dc.load(product=product, lon=x, lat=y, measurements=[]) for product in products]
# First check if all datasets will load with the same x and y dimension sizes.
same_dim_sizes = True
first_dataset_dim_size = [datasets_empty[0][coords[0][0]].size, datasets_empty[0][coords[0][1]].size]
for i in range(1, len(datasets_empty)):
if first_dataset_dim_size != [datasets_empty[i][coords[i][0]].size, datasets_empty[i][coords[i][1]].size]:
same_dim_sizes = False
break
if method == 'min':
abs_res = [np.inf, np.inf]
for i in range(len(datasets_empty)):
res = [datasets_empty[i][coords[i][0]].size, datasets_empty[i][coords[i][1]].size]
abs_res[0] = res[0] if res[0] < abs_res[0] else abs_res[0]
abs_res[1] = res[1] if res[1] < abs_res[1] else abs_res[1]
else:
abs_res = [0] * 2
for i in range(len(datasets_empty)):
res = [datasets_empty[i][coords[i][0]].size, datasets_empty[i][coords[i][1]].size]
abs_res[0] = res[0] if abs_res[0] < res[0] else abs_res[0]
abs_res[1] = res[1] if abs_res[1] < res[1] else abs_res[1]
return abs_res, same_dim_sizes | [
"def",
"match_dim_sizes",
"(",
"dc",
",",
"products",
",",
"x",
",",
"y",
",",
"x_y_coords",
"=",
"[",
"'longitude'",
",",
"'latitude'",
"]",
",",
"method",
"=",
"'min'",
")",
":",
"coords",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"x_y_coords",
",",
"dict",
")",
":",
"for",
"product",
"in",
"products",
":",
"coords",
".",
"append",
"(",
"x_y_coords",
"[",
"product",
"]",
")",
"else",
":",
"coords",
"=",
"[",
"x_y_coords",
"]",
"*",
"len",
"(",
"products",
")",
"datasets_empty",
"=",
"[",
"dc",
".",
"load",
"(",
"product",
"=",
"product",
",",
"lon",
"=",
"x",
",",
"lat",
"=",
"y",
",",
"measurements",
"=",
"[",
"]",
")",
"for",
"product",
"in",
"products",
"]",
"# First check if all datasets will load with the same x and y dimension sizes.",
"same_dim_sizes",
"=",
"True",
"first_dataset_dim_size",
"=",
"[",
"datasets_empty",
"[",
"0",
"]",
"[",
"coords",
"[",
"0",
"]",
"[",
"0",
"]",
"]",
".",
"size",
",",
"datasets_empty",
"[",
"0",
"]",
"[",
"coords",
"[",
"0",
"]",
"[",
"1",
"]",
"]",
".",
"size",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"datasets_empty",
")",
")",
":",
"if",
"first_dataset_dim_size",
"!=",
"[",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
".",
"size",
",",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"1",
"]",
"]",
".",
"size",
"]",
":",
"same_dim_sizes",
"=",
"False",
"break",
"if",
"method",
"==",
"'min'",
":",
"abs_res",
"=",
"[",
"np",
".",
"inf",
",",
"np",
".",
"inf",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"datasets_empty",
")",
")",
":",
"res",
"=",
"[",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
".",
"size",
",",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"1",
"]",
"]",
".",
"size",
"]",
"abs_res",
"[",
"0",
"]",
"=",
"res",
"[",
"0",
"]",
"if",
"res",
"[",
"0",
"]",
"<",
"abs_res",
"[",
"0",
"]",
"else",
"abs_res",
"[",
"0",
"]",
"abs_res",
"[",
"1",
"]",
"=",
"res",
"[",
"1",
"]",
"if",
"res",
"[",
"1",
"]",
"<",
"abs_res",
"[",
"1",
"]",
"else",
"abs_res",
"[",
"1",
"]",
"else",
":",
"abs_res",
"=",
"[",
"0",
"]",
"*",
"2",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"datasets_empty",
")",
")",
":",
"res",
"=",
"[",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"0",
"]",
"]",
".",
"size",
",",
"datasets_empty",
"[",
"i",
"]",
"[",
"coords",
"[",
"i",
"]",
"[",
"1",
"]",
"]",
".",
"size",
"]",
"abs_res",
"[",
"0",
"]",
"=",
"res",
"[",
"0",
"]",
"if",
"abs_res",
"[",
"0",
"]",
"<",
"res",
"[",
"0",
"]",
"else",
"abs_res",
"[",
"0",
"]",
"abs_res",
"[",
"1",
"]",
"=",
"res",
"[",
"1",
"]",
"if",
"abs_res",
"[",
"1",
"]",
"<",
"res",
"[",
"1",
"]",
"else",
"abs_res",
"[",
"1",
"]",
"return",
"abs_res",
",",
"same_dim_sizes"
] | [
73,
0
] | [
135,
34
] | python | en | ['en', 'error', 'th'] | False |
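Stripped of the empty `dc.load()` probes, the function reduces to comparing per-product (y, x) pixel counts and taking the per-axis minimum or maximum. A sketch on plain tuples (the sizes are made up):

def matching_dim_sizes(sizes, method='min'):
    # sizes: list of (y_size, x_size) pixel counts, one per product.
    same_dim_sizes = all(s == sizes[0] for s in sizes)
    pick = min if method == 'min' else max
    abs_res = [pick(s[0] for s in sizes), pick(s[1] for s in sizes)]
    return abs_res, same_dim_sizes


print(matching_dim_sizes([(100, 200), (50, 100)], 'min'))    # ([50, 100], False)
print(matching_dim_sizes([(100, 200), (100, 200)], 'max'))   # ([100, 200], True)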
xarray_concat_and_merge | (*args, concat_dim='time', sort_dim='time') |
Given parameters that are each a list of `xarray.Dataset` objects, merge each list
into an `xarray.Dataset` object and return all such objects in the same order.
Parameters
----------
*args: list of lists of `xarray.Dataset`.
A list of lists of `xarray.Dataset` objects to merge.
concat_dim, sort_dim: str
        The names of the dimensions to concatenate along and to sort by, respectively.
Returns
-------
merged: list of `xarray.Dataset`
        A list of the same length as `*args`, containing the merged data.
|
Given parameters that are each a list of `xarray.Dataset` objects, merge each list
into an `xarray.Dataset` object and return all such objects in the same order. | def xarray_concat_and_merge(*args, concat_dim='time', sort_dim='time'):
"""
Given parameters that are each a list of `xarray.Dataset` objects, merge each list
into an `xarray.Dataset` object and return all such objects in the same order.
Parameters
----------
*args: list of lists of `xarray.Dataset`.
A list of lists of `xarray.Dataset` objects to merge.
concat_dim, sort_dim: str
        The names of the dimensions to concatenate along and to sort by, respectively.
Returns
-------
merged: list of `xarray.Dataset`
        A list of the same length as `*args`, containing the merged data.
"""
merged = []
for i, arg in enumerate(args):
dataset_temp = xr.concat(arg, dim=concat_dim)
merged.append(xarray_sortby_coord(dataset_temp, coord=sort_dim))
return merged | [
"def",
"xarray_concat_and_merge",
"(",
"*",
"args",
",",
"concat_dim",
"=",
"'time'",
",",
"sort_dim",
"=",
"'time'",
")",
":",
"merged",
"=",
"[",
"]",
"for",
"i",
",",
"arg",
"in",
"enumerate",
"(",
"args",
")",
":",
"dataset_temp",
"=",
"xr",
".",
"concat",
"(",
"arg",
",",
"dim",
"=",
"concat_dim",
")",
"merged",
".",
"append",
"(",
"xarray_sortby_coord",
"(",
"dataset_temp",
",",
"coord",
"=",
"sort_dim",
")",
")",
"return",
"merged"
] | [
137,
0
] | [
158,
17
] | python | en | ['en', 'error', 'th'] | False |
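A self-contained illustration of the concat-then-sort pattern on tiny synthetic datasets, assuming `xarray_sortby_coord` behaves like `Dataset.sortby`:

import numpy as np
import pandas as pd
import xarray as xr

ds_a = xr.Dataset({"ndvi": ("time", np.array([0.3, 0.1]))},
                  coords={"time": pd.to_datetime(["2020-01-03", "2020-01-01"])})
ds_b = xr.Dataset({"ndvi": ("time", np.array([0.2]))},
                  coords={"time": pd.to_datetime(["2020-01-02"])})

merged = xr.concat([ds_a, ds_b], dim="time").sortby("time")
print(merged.ndvi.values)           # [0.1 0.2 0.3]
print(merged.time.dt.day.values)    # [1 2 3]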
merge_datasets | (datasets_temp, clean_masks_temp, masks_per_platform=None,
x_coord='longitude', y_coord='latitude') |
Merges dictionaries of platform names mapping to datasets, dataset clean masks,
and lists of other masks into one dataset, one dataset clean mask, and one
of each type of other mask, ordering all by time.
Parameters
----------
datasets_temp: dict
Dictionary that maps platforms to `xarray.Dataset` or `xarray.DataArray`
objects to merge to make the output `dataset`.
Must have a 'time' dimension.
clean_masks_temp: dict
Dictionary that maps platforms to `xarray.DataArray` masks to merge to make the output `clean_mask`.
Must have a 'time' dimension.
masks_per_platform: dict
Dictionary that maps platforms to `xarray.DataArray` masks to merge to make the output `masks`.
Must have a 'time' dimension.
x_coord, y_coord: str
Names of the x and y coordinates in the datasets in `datasets_temp`.
Returns
-------
dataset: xarray.Dataset or xarray.DataArray
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask.
masks: list of xarray.DataArray
A list of individual masks.
Raises
------
AssertionError: If no data was retrieved for any query
(i.e. `len(datasets_temp) == 0`).
|
Merges dictionaries of platform names mapping to datasets, dataset clean masks,
and lists of other masks into one dataset, one dataset clean mask, and one
of each type of other mask, ordering all by time. | def merge_datasets(datasets_temp, clean_masks_temp, masks_per_platform=None,
x_coord='longitude', y_coord='latitude'):
"""
Merges dictionaries of platform names mapping to datasets, dataset clean masks,
and lists of other masks into one dataset, one dataset clean mask, and one
of each type of other mask, ordering all by time.
Parameters
----------
datasets_temp: dict
Dictionary that maps platforms to `xarray.Dataset` or `xarray.DataArray`
objects to merge to make the output `dataset`.
Must have a 'time' dimension.
clean_masks_temp: dict
Dictionary that maps platforms to `xarray.DataArray` masks to merge to make the output `clean_mask`.
Must have a 'time' dimension.
masks_per_platform: dict
Dictionary that maps platforms to `xarray.DataArray` masks to merge to make the output `masks`.
Must have a 'time' dimension.
x_coord, y_coord: str
Names of the x and y coordinates in the datasets in `datasets_temp`.
Returns
-------
dataset: xarray.Dataset or xarray.DataArray
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask.
masks: list of xarray.DataArray
A list of individual masks.
Raises
------
AssertionError: If no data was retrieved for any query
(i.e. `len(datasets_temp) == 0`).
"""
def xr_set_same_coords(datasets):
first_ds = datasets[0]
for i, ds in enumerate(datasets):
datasets[i] = \
ds.assign_coords(**{x_coord: first_ds[x_coord],
y_coord: first_ds[y_coord]})
masks = None
if len(datasets_temp) == 0: # No data was retrieved.
return xr.Dataset(), xr.DataArray(np.array(None)), np.array(None) if masks_per_platform is not None else None
elif len(datasets_temp) == 1: # Select the only dataset.
dataset = datasets_temp[list(datasets_temp.keys())[0]]
clean_mask = clean_masks_temp[list(clean_masks_temp.keys())[0]]
if masks_per_platform is not None:
masks = masks_per_platform[list(masks_per_platform.keys())[0]]
else: # Merge datasets.
# Make sure all datasets have the same sizes in the x and y dimensions.
datasets_temp_list = list(datasets_temp.values())
max_num_x = max([len(dataset[x_coord]) for dataset in datasets_temp_list])
max_num_y = max([len(dataset[y_coord]) for dataset in datasets_temp_list])
datasets_temp_list = [xr_scale_res(dataset, x_coord=x_coord, y_coord=y_coord,
abs_res=(max_num_x, max_num_y))
for dataset in datasets_temp_list]
# Set same x and y coords so `xr.concat()` concatenates as intended.
xr_set_same_coords(datasets_temp_list)
dataset = xr.concat(datasets_temp_list, dim='time')
dataset = xarray_sortby_coord(dataset, 'time')
# Merge clean masks.
# Make sure all clean masks have the same sizes in the x and y dimensions.
clean_masks_temp_list = list(clean_masks_temp.values())
clean_masks_temp_list = [xr_scale_res(clean_mask.astype(np.int8), x_coord=x_coord, y_coord=y_coord,
abs_res=(max_num_x, max_num_y))
for clean_mask in clean_masks_temp_list]
# Set same x and y coords so `xr.concat()` concatenates as intended.
xr_set_same_coords(clean_masks_temp_list)
clean_mask = xr.concat(clean_masks_temp_list, dim='time')
clean_mask = xarray_sortby_coord(clean_mask, 'time').astype(np.bool)
# Merge masks.
if masks_per_platform is not None:
num_platforms = len(masks_per_platform.keys())
num_masks = len(list(masks_per_platform.values())[0])
np_platform_masks = np.empty((num_platforms, num_masks), dtype=object)
for i, mask_list in enumerate(masks_per_platform.values()):
np_platform_masks[i] = mask_list
masks = []
for j in range(num_masks):
masks.append(xr.concat(list(np_platform_masks[:, j]), dim='time'))
return dataset, clean_mask, masks | [
"def",
"merge_datasets",
"(",
"datasets_temp",
",",
"clean_masks_temp",
",",
"masks_per_platform",
"=",
"None",
",",
"x_coord",
"=",
"'longitude'",
",",
"y_coord",
"=",
"'latitude'",
")",
":",
"def",
"xr_set_same_coords",
"(",
"datasets",
")",
":",
"first_ds",
"=",
"datasets",
"[",
"0",
"]",
"for",
"i",
",",
"ds",
"in",
"enumerate",
"(",
"datasets",
")",
":",
"datasets",
"[",
"i",
"]",
"=",
"ds",
".",
"assign_coords",
"(",
"*",
"*",
"{",
"x_coord",
":",
"first_ds",
"[",
"x_coord",
"]",
",",
"y_coord",
":",
"first_ds",
"[",
"y_coord",
"]",
"}",
")",
"masks",
"=",
"None",
"if",
"len",
"(",
"datasets_temp",
")",
"==",
"0",
":",
"# No data was retrieved.",
"return",
"xr",
".",
"Dataset",
"(",
")",
",",
"xr",
".",
"DataArray",
"(",
"np",
".",
"array",
"(",
"None",
")",
")",
",",
"np",
".",
"array",
"(",
"None",
")",
"if",
"masks_per_platform",
"is",
"not",
"None",
"else",
"None",
"elif",
"len",
"(",
"datasets_temp",
")",
"==",
"1",
":",
"# Select the only dataset.",
"dataset",
"=",
"datasets_temp",
"[",
"list",
"(",
"datasets_temp",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
"clean_mask",
"=",
"clean_masks_temp",
"[",
"list",
"(",
"clean_masks_temp",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
"if",
"masks_per_platform",
"is",
"not",
"None",
":",
"masks",
"=",
"masks_per_platform",
"[",
"list",
"(",
"masks_per_platform",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
"else",
":",
"# Merge datasets.",
"# Make sure all datasets have the same sizes in the x and y dimensions.",
"datasets_temp_list",
"=",
"list",
"(",
"datasets_temp",
".",
"values",
"(",
")",
")",
"max_num_x",
"=",
"max",
"(",
"[",
"len",
"(",
"dataset",
"[",
"x_coord",
"]",
")",
"for",
"dataset",
"in",
"datasets_temp_list",
"]",
")",
"max_num_y",
"=",
"max",
"(",
"[",
"len",
"(",
"dataset",
"[",
"y_coord",
"]",
")",
"for",
"dataset",
"in",
"datasets_temp_list",
"]",
")",
"datasets_temp_list",
"=",
"[",
"xr_scale_res",
"(",
"dataset",
",",
"x_coord",
"=",
"x_coord",
",",
"y_coord",
"=",
"y_coord",
",",
"abs_res",
"=",
"(",
"max_num_x",
",",
"max_num_y",
")",
")",
"for",
"dataset",
"in",
"datasets_temp_list",
"]",
"# Set same x and y coords so `xr.concat()` concatenates as intended.",
"xr_set_same_coords",
"(",
"datasets_temp_list",
")",
"dataset",
"=",
"xr",
".",
"concat",
"(",
"datasets_temp_list",
",",
"dim",
"=",
"'time'",
")",
"dataset",
"=",
"xarray_sortby_coord",
"(",
"dataset",
",",
"'time'",
")",
"# Merge clean masks.",
"# Make sure all clean masks have the same sizes in the x and y dimensions.",
"clean_masks_temp_list",
"=",
"list",
"(",
"clean_masks_temp",
".",
"values",
"(",
")",
")",
"clean_masks_temp_list",
"=",
"[",
"xr_scale_res",
"(",
"clean_mask",
".",
"astype",
"(",
"np",
".",
"int8",
")",
",",
"x_coord",
"=",
"x_coord",
",",
"y_coord",
"=",
"y_coord",
",",
"abs_res",
"=",
"(",
"max_num_x",
",",
"max_num_y",
")",
")",
"for",
"clean_mask",
"in",
"clean_masks_temp_list",
"]",
"# Set same x and y coords so `xr.concat()` concatenates as intended.",
"xr_set_same_coords",
"(",
"clean_masks_temp_list",
")",
"clean_mask",
"=",
"xr",
".",
"concat",
"(",
"clean_masks_temp_list",
",",
"dim",
"=",
"'time'",
")",
"clean_mask",
"=",
"xarray_sortby_coord",
"(",
"clean_mask",
",",
"'time'",
")",
".",
"astype",
"(",
"np",
".",
"bool",
")",
"# Merge masks.",
"if",
"masks_per_platform",
"is",
"not",
"None",
":",
"num_platforms",
"=",
"len",
"(",
"masks_per_platform",
".",
"keys",
"(",
")",
")",
"num_masks",
"=",
"len",
"(",
"list",
"(",
"masks_per_platform",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"np_platform_masks",
"=",
"np",
".",
"empty",
"(",
"(",
"num_platforms",
",",
"num_masks",
")",
",",
"dtype",
"=",
"object",
")",
"for",
"i",
",",
"mask_list",
"in",
"enumerate",
"(",
"masks_per_platform",
".",
"values",
"(",
")",
")",
":",
"np_platform_masks",
"[",
"i",
"]",
"=",
"mask_list",
"masks",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"num_masks",
")",
":",
"masks",
".",
"append",
"(",
"xr",
".",
"concat",
"(",
"list",
"(",
"np_platform_masks",
"[",
":",
",",
"j",
"]",
")",
",",
"dim",
"=",
"'time'",
")",
")",
"return",
"dataset",
",",
"clean_mask",
",",
"masks"
] | [
160,
0
] | [
245,
37
] | python | en | ['en', 'error', 'th'] | False |
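The multi-platform branch hinges on forcing identical x/y coordinates before `xr.concat()` so that acquisitions from different platforms stack along time. A hedged sketch of that step on synthetic per-platform datasets; it omits the resolution rescaling done by `xr_scale_res`:

import numpy as np
import pandas as pd
import xarray as xr

lat = np.linspace(0.0, 0.1, 5)
lon = np.linspace(10.0, 10.1, 5)


def make_ds(day, fill):
    # One single-date acquisition over a 5x5 grid.
    return xr.Dataset(
        {"red": (("time", "latitude", "longitude"), np.full((1, 5, 5), fill))},
        coords={"time": pd.to_datetime([day]), "latitude": lat, "longitude": lon},
    )


datasets_temp = {"LANDSAT_7": make_ds("2020-01-02", 0.2),
                 "LANDSAT_8": make_ds("2020-01-01", 0.1)}

ds_list = list(datasets_temp.values())
first = ds_list[0]
# Force identical x/y coordinate values so xr.concat stacks along time only.
ds_list = [ds.assign_coords(latitude=first.latitude, longitude=first.longitude)
           for ds in ds_list]
merged = xr.concat(ds_list, dim="time").sortby("time")
print(merged.time.dt.day.values)    # [1 2]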
load_simple | (dc, platform, product, frac_res=None, abs_res=None,
load_params={}, masking_params={}, indiv_masks=None) |
**This function is DEPRECATED.**
Simplifies loading from the Data Cube by retrieving a dataset along
with its mask. Currently only tested on Landsat data.
Parameters
----------
dc: datacube.api.core.Datacube
The Datacube instance to load data with.
platform, product: str
Strings denoting the platform and product to retrieve data for.
frac_res: float
        The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
load_params: dict, optional
A dictionary of parameters for `dc.load()`.
Here are some common load parameters:
*lat, lon: list-like 2-tuples of minimum and maximum values for
latitude and longitude, respectively.*
*time: list-like A 2-tuple of the minimum and maximum times
for acquisitions.*
*measurements: list-like The list of measurements to retrieve
from the Datacube.*
masking_params: dict, optional
A dictionary of keyword arguments for corresponding masking functions.
For example: {'cover_types':['cloud']} would retain only clouds for Landsat products,
because `landsat_qa_clean_mask()` is used for the Landsat family of platforms.
indiv_masks: list
A list of masks to return (e.g. ['water']).
These do not have to be the same used to create `clean_mask`.
Returns
-------
dataset: xarray.Dataset
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask, formed as a logical AND of all masks used.
masks: list of xarray.DataArray
A list of the masks requested by `indiv_masks`,
or `None` if `indiv_masks` is not specified.
Raises
------
AssertionError: If no data is retrieved for any platform query.
|
**This function is DEPRECATED.**
Simplifies loading from the Data Cube by retrieving a dataset along
with its mask. Currently only tested on Landsat data. | def load_simple(dc, platform, product, frac_res=None, abs_res=None,
load_params={}, masking_params={}, indiv_masks=None):
"""
**This function is DEPRECATED.**
Simplifies loading from the Data Cube by retrieving a dataset along
with its mask. Currently only tested on Landsat data.
Parameters
----------
dc: datacube.api.core.Datacube
The Datacube instance to load data with.
platform, product: str
Strings denoting the platform and product to retrieve data for.
frac_res: float
        The fraction of the original resolution to scale to. Must be positive.
Note that this can be greater than 1.0, in which case the resolution
is upsampled.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
Overrides `frac_res` if specified.
load_params: dict, optional
A dictionary of parameters for `dc.load()`.
Here are some common load parameters:
*lat, lon: list-like 2-tuples of minimum and maximum values for
latitude and longitude, respectively.*
*time: list-like A 2-tuple of the minimum and maximum times
for acquisitions.*
*measurements: list-like The list of measurements to retrieve
from the Datacube.*
masking_params: dict, optional
A dictionary of keyword arguments for corresponding masking functions.
For example: {'cover_types':['cloud']} would retain only clouds for Landsat products,
because `landsat_qa_clean_mask()` is used for the Landsat family of platforms.
indiv_masks: list
A list of masks to return (e.g. ['water']).
These do not have to be the same used to create `clean_mask`.
Returns
-------
dataset: xarray.Dataset
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask, formed as a logical AND of all masks used.
masks: list of xarray.DataArray
A list of the masks requested by `indiv_masks`,
or `None` if `indiv_masks` is not specified.
Raises
------
AssertionError: If no data is retrieved for any platform query.
"""
current_load_params = dict(platform=platform, product=product)
current_load_params.update(load_params)
dataset = dc.load(**current_load_params)
assert len(dataset.dims) > 0, "No data was retrieved."
# Scale resolution if specified.
if frac_res is not None or abs_res is not None:
dataset = xr_scale_res(dataset, frac_res=frac_res, abs_res=abs_res)
# Get the clean mask for the appropriate LANDSAT satellite platform.
clean_mask = landsat_qa_clean_mask(dataset, platform, **masking_params)
    # Get the mask for removing data outside the accepted range of LANDSAT 7 and 8.
clean_mask = xr_and(clean_mask, landsat_clean_mask_invalid(dataset))
# Retrieve individual masks.
if indiv_masks is None:
masks = None
else:
masks = []
for mask in indiv_masks:
masks.append(landsat_qa_clean_mask(dataset, platform, cover_types=[mask]))
return dataset, clean_mask, masks | [
"def",
"load_simple",
"(",
"dc",
",",
"platform",
",",
"product",
",",
"frac_res",
"=",
"None",
",",
"abs_res",
"=",
"None",
",",
"load_params",
"=",
"{",
"}",
",",
"masking_params",
"=",
"{",
"}",
",",
"indiv_masks",
"=",
"None",
")",
":",
"current_load_params",
"=",
"dict",
"(",
"platform",
"=",
"platform",
",",
"product",
"=",
"product",
")",
"current_load_params",
".",
"update",
"(",
"load_params",
")",
"dataset",
"=",
"dc",
".",
"load",
"(",
"*",
"*",
"current_load_params",
")",
"assert",
"len",
"(",
"dataset",
".",
"dims",
")",
">",
"0",
",",
"\"No data was retrieved.\"",
"# Scale resolution if specified.",
"if",
"frac_res",
"is",
"not",
"None",
"or",
"abs_res",
"is",
"not",
"None",
":",
"dataset",
"=",
"xr_scale_res",
"(",
"dataset",
",",
"frac_res",
"=",
"frac_res",
",",
"abs_res",
"=",
"abs_res",
")",
"# Get the clean mask for the appropriate LANDSAT satellite platform.",
"clean_mask",
"=",
"landsat_qa_clean_mask",
"(",
"dataset",
",",
"platform",
",",
"*",
"*",
"masking_params",
")",
"# Get the mask for removing data ouside the accepted range of LANDSAT 7 and 8.",
"clean_mask",
"=",
"xr_and",
"(",
"clean_mask",
",",
"landsat_clean_mask_invalid",
"(",
"dataset",
")",
")",
"# Retrieve individual masks.",
"if",
"indiv_masks",
"is",
"None",
":",
"masks",
"=",
"None",
"else",
":",
"masks",
"=",
"[",
"]",
"for",
"mask",
"in",
"indiv_masks",
":",
"masks",
".",
"append",
"(",
"landsat_qa_clean_mask",
"(",
"dataset",
",",
"platform",
",",
"cover_types",
"=",
"[",
"mask",
"]",
")",
")",
"return",
"dataset",
",",
"clean_mask",
",",
"masks"
] | [
251,
0
] | [
320,
37
] | python | en | ['en', 'error', 'th'] | False |
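The docstring's note that the result "can be cleaned with `dataset.where(clean_mask)`" is plain xarray behaviour; a tiny stand-alone demonstration with a synthetic dataset and mask:

import numpy as np
import xarray as xr

# Tiny stand-ins for the (dataset, clean_mask) pair returned above.
dataset = xr.Dataset({"red": (("y", "x"), np.array([[0.1, 0.5], [0.9, 0.3]]))})
clean_mask = xr.DataArray(np.array([[True, False], [True, True]]), dims=("y", "x"))

cleaned = dataset.where(clean_mask)   # masked pixels become NaN
print(cleaned.red.values)
# [[0.1 nan]
#  [0.9 0.3]]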
load_multiplatform | (dc, platforms, products, frac_res=None, abs_res=None,
load_params={}, masking_params={}, indiv_masks=None) |
**This function is DEPRECATED.**
Load and merge data as well as clean masks given a list of platforms
and products. Currently only tested on Landsat data.
Parameters
----------
dc: datacube.api.core.Datacube
The Datacube instance to load data with.
platforms, products: list-like
A list-like of platforms and products. Both must have the same length.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
The x and y dimensions are scaled by the square root of this factor.
Note that this can be greater than 1.0, in which case the resolution
is upsampled. The base resolution used for all products will be the
minimum resolution for latitude and longitude (considered separately -
i.e. one resolution for each dimension) among all of them.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
That is, it is a list-like of 2 numbers. Overrides `frac_res` if specified.
load_params: dict, optional
A dictionary of parameters for `dc.load()` or a dictionary of
dictionaries of such parameters, mapping platform names to parameter
dictionaries (primarily useful for selecting different time ranges).
Here are some common load parameters:
*lat, lon: list-like 2-tuples of minimum and maximum values for
latitude and longitude, respectively.*
*time: list-like A pair of the minimum and maximum times
for acquisitions or a list of such pairs.*
*measurements: list-like The list of measurements to retrieve from
the Datacube.*
For example, to load data with different time ranges for different
platforms, we could pass the following:
`{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range),
'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`,
where `common_load_params` is a dictionary of load parameters common
to both - most notably 'lat', 'lon', and 'measurements' - and the
'date_range' variables are list-likes of start and end dates.
masking_params: dict, optional
A dictionary mapping platform names to a dictionary of keyword
arguments for corresponding masking functions.
For example: {'LANDSAT_7': {'cover_types':['cloud']},
'LANDSAT_8': {'cover_types': ['cloud']}}
would retain only clouds, because `landsat_qa_clean_mask()` is used
to create clean masks for the Landsat family of platforms.
indiv_masks: list
A list of masks to return (e.g. ['water']). These do not have to be
the same used to create the returned clean mask.
Returns
-------
dataset: xarray.Dataset
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask, formed as a logical AND of all masks used.
masks: list of xarray.DataArray
A list of the masks requested by `indiv_masks`,
or `None` if `indiv_masks` is not specified.
Raises
------
AssertionError: If no data is retrieved from any product.
|
**This function is DEPRECATED.**
Load and merge data as well as clean masks given a list of platforms
and products. Currently only tested on Landsat data.
Parameters
----------
dc: datacube.api.core.Datacube
The Datacube instance to load data with.
platforms, products: list-like
A list-like of platforms and products. Both must have the same length.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
The x and y dimensions are scaled by the square root of this factor.
Note that this can be greater than 1.0, in which case the resolution
is upsampled. The base resolution used for all products will be the
minimum resolution for latitude and longitude (considered separately -
i.e. one resolution for each dimension) among all of them.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
That is, it is a list-like of 2 numbers. Overrides `frac_res` if specified.
load_params: dict, optional
A dictionary of parameters for `dc.load()` or a dictionary of
dictionaries of such parameters, mapping platform names to parameter
dictionaries (primarily useful for selecting different time ranges).
Here are some common load parameters:
*lat, lon: list-like 2-tuples of minimum and maximum values for
latitude and longitude, respectively.*
*time: list-like A pair of the minimum and maximum times
for acquisitions or a list of such pairs.*
*measurements: list-like The list of measurements to retrieve from
the Datacube.*
For example, to load data with different time ranges for different
platforms, we could pass the following:
`{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range),
'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`,
where `common_load_params` is a dictionary of load parameters common
to both - most notably 'lat', 'lon', and 'measurements' - and the
'date_range' variables are list-likes of start and end dates.
masking_params: dict, optional
A dictionary mapping platform names to a dictionary of keyword
arguments for corresponding masking functions.
For example: {'LANDSAT_7': {'cover_types':['cloud']},
'LANDSAT_8': {'cover_types': ['cloud']}}
would retain only clouds, because `landsat_qa_clean_mask()` is used
to create clean masks for the Landsat family of platforms.
indiv_masks: list
A list of masks to return (e.g. ['water']). These do not have to be
the same used to create the returned clean mask.
Returns
-------
dataset: xarray.Dataset
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask, formed as a logical AND of all masks used.
masks: list of xarray.DataArray
A list of the masks requested by `indiv_masks`,
or `None` if `indiv_masks` is not specified. | def load_multiplatform(dc, platforms, products, frac_res=None, abs_res=None,
load_params={}, masking_params={}, indiv_masks=None):
"""
**This function is DEPRECATED.**
Load and merge data as well as clean masks given a list of platforms
and products. Currently only tested on Landsat data.
Parameters
----------
dc: datacube.api.core.Datacube
The Datacube instance to load data with.
platforms, products: list-like
A list-like of platforms and products. Both must have the same length.
frac_res: float
The fraction of the original resolution to scale to. Must be positive.
The x and y dimensions are scaled by the square root of this factor.
Note that this can be greater than 1.0, in which case the resolution
is upsampled. The base resolution used for all products will be the
minimum resolution for latitude and longitude (considered separately -
i.e. one resolution for each dimension) among all of them.
abs_res: list-like
A list-like of the number of pixels for the x and y axes, respectively.
That is, it is a list-like of 2 numbers. Overrides `frac_res` if specified.
load_params: dict, optional
A dictionary of parameters for `dc.load()` or a dictionary of
dictionaries of such parameters, mapping platform names to parameter
dictionaries (primarily useful for selecting different time ranges).
Here are some common load parameters:
*lat, lon: list-like 2-tuples of minimum and maximum values for
latitude and longitude, respectively.*
*time: list-like A pair of the minimum and maximum times
for acquisitions or a list of such pairs.*
*measurements: list-like The list of measurements to retrieve from
the Datacube.*
For example, to load data with different time ranges for different
platforms, we could pass the following:
`{'LANDSAT_7': dict(**common_load_params, time=ls7_date_range),
'LANDSAT_8': dict(**common_load_params, time=ls8_date_range)}`,
where `common_load_params` is a dictionary of load parameters common
to both - most notably 'lat', 'lon', and 'measurements' - and the
'date_range' variables are list-likes of start and end dates.
masking_params: dict, optional
A dictionary mapping platform names to a dictionary of keyword
arguments for corresponding masking functions.
For example: {'LANDSAT_7': {'cover_types':['cloud']},
'LANDSAT_8': {'cover_types': ['cloud']}}
would retain only clouds, because `landsat_qa_clean_mask()` is used
to create clean masks for the Landsat family of platforms.
indiv_masks: list
A list of masks to return (e.g. ['water']). These do not have to be
the same used to create the returned clean mask.
Returns
-------
dataset: xarray.Dataset
The raw data requested. Can be cleaned with `dataset.where(clean_mask)`.
clean_mask: xarray.DataArray
The clean mask, formed as a logical AND of all masks used.
masks: list of xarray.DataArray
A list of the masks requested by `indiv_masks`,
or `None` if `indiv_masks` is not specified.
Raises
------
AssertionError: If no data is retrieved from any product.
"""
# Determine what resolution the data will be scaled to.
if frac_res is not None and abs_res is None:
prod_info = dc.list_products()
resolutions = prod_info[prod_info['name'].isin(products)]\
['resolution'].values
# Determine the minimum resolution, which is actually the maximum
# value resolution, since resolution is measured in degrees per pixel.
# The first resolution is for latitude (y) and is negative.
# The second resolution is for longitude (x) and is positive.
min_res = [0]*2
for res in resolutions:
min_res[0] = res[0] if res[0] < min_res[0] else min_res[0]
min_res[1] = res[1] if min_res[1] < res[1] else min_res[1]
# Take reciprocal to convert degrees per pixel to pixels per degree.
# Reverse to be in order (x, y).
min_res = [abs(frac_res*(1/res)) for res in min_res][::-1]
# Calculate the absolute resolution.
x, y = load_params.get('lon', None), load_params.get('lat', None)
x, y = load_params.get('longitude', x), load_params.get('latitude', y)
x_y_rng = abs(x[1] - x[0]), abs(y[1] - y[0])
abs_res = [round(res*rng) for res, rng in zip(min_res, x_y_rng)]
datasets_temp = {} # Maps platforms to datasets to merge.
clean_masks_temp = {} # Maps platforms to clean masks to merge.
masks_per_platform = {} if indiv_masks is not None else None # Maps platforms to lists of masks.
for product,platform in zip(products, platforms):
current_load_params = dict(platform=platform, product=product)
current_masking_params = masking_params.get(platform, masking_params)
# Handle `load_params` as a dict of dicts of platforms mapping to load params.
if isinstance(list(load_params.values())[0], dict):
current_load_params.update(load_params.get(platform, {}))
else: # Handle `load_params` as a dict of load params.
current_load_params.update(load_params)
# Load each time range of data.
time = current_load_params.get('time')
if isinstance(time[0], tuple) or \
isinstance(time[0], list): # Handle `time` as a list of time ranges.
datasets_time_parts = []
clean_masks_time_parts = []
masks_time_parts = np.empty((len(time), len(indiv_masks)), dtype=object)\
if indiv_masks is not None else None
for i, time_range in enumerate(time):
time_range_load_params = current_load_params
time_range_load_params['time'] = time_range
try:
dataset_time_part, clean_mask_time_part, masks_time_part = \
load_simple(dc, platform, product, abs_res=abs_res,
load_params=time_range_load_params,
masking_params=masking_params,
indiv_masks=indiv_masks)
datasets_time_parts.append(dataset_time_part)
clean_masks_time_parts.append(clean_mask_time_part)
if indiv_masks is not None:
masks_time_parts[i] = masks_time_part
except (AssertionError):
continue
datasets_temp[platform], clean_masks_temp[platform] = \
xarray_concat_and_merge(datasets_time_parts, clean_masks_time_parts)
if indiv_masks is not None:
masks_per_platform[platform] = xarray_concat_and_merge(*masks_time_parts.T)
else: # Handle `time` as a single time range.
try:
datasets_temp[platform], clean_masks_temp[platform], masks = \
load_simple(dc, platform, product, abs_res=abs_res,
load_params=current_load_params,
masking_params=masking_params,
indiv_masks=indiv_masks)
if indiv_masks is not None:
masks_per_platform[platform] = masks
except (AssertionError):
continue
return merge_datasets(datasets_temp, clean_masks_temp, masks_per_platform) | [
"def",
"load_multiplatform",
"(",
"dc",
",",
"platforms",
",",
"products",
",",
"frac_res",
"=",
"None",
",",
"abs_res",
"=",
"None",
",",
"load_params",
"=",
"{",
"}",
",",
"masking_params",
"=",
"{",
"}",
",",
"indiv_masks",
"=",
"None",
")",
":",
"# Determine what resolution the data will be scaled to.",
"if",
"frac_res",
"is",
"not",
"None",
"and",
"abs_res",
"is",
"None",
":",
"prod_info",
"=",
"dc",
".",
"list_products",
"(",
")",
"resolutions",
"=",
"prod_info",
"[",
"prod_info",
"[",
"'name'",
"]",
".",
"isin",
"(",
"products",
")",
"]",
"[",
"'resolution'",
"]",
".",
"values",
"# Determine the minimum resolution, which is actually the maximum",
"# value resolution, since resolution is measured in degrees per pixel.",
"# The first resolution is for latitude (y) and is negative.",
"# The second resolution is for longitude (x) and is positive.",
"min_res",
"=",
"[",
"0",
"]",
"*",
"2",
"for",
"res",
"in",
"resolutions",
":",
"min_res",
"[",
"0",
"]",
"=",
"res",
"[",
"0",
"]",
"if",
"res",
"[",
"0",
"]",
"<",
"min_res",
"[",
"0",
"]",
"else",
"min_res",
"[",
"0",
"]",
"min_res",
"[",
"1",
"]",
"=",
"res",
"[",
"1",
"]",
"if",
"min_res",
"[",
"1",
"]",
"<",
"res",
"[",
"1",
"]",
"else",
"min_res",
"[",
"1",
"]",
"# Take reciprocal to convert degrees per pixel to pixels per degree.",
"# Reverse to be in order (x, y).",
"min_res",
"=",
"[",
"abs",
"(",
"frac_res",
"*",
"(",
"1",
"/",
"res",
")",
")",
"for",
"res",
"in",
"min_res",
"]",
"[",
":",
":",
"-",
"1",
"]",
"# Calculate the absolute resolution.",
"x",
",",
"y",
"=",
"load_params",
".",
"get",
"(",
"'lon'",
",",
"None",
")",
",",
"load_params",
".",
"get",
"(",
"'lat'",
",",
"None",
")",
"x",
",",
"y",
"=",
"load_params",
".",
"get",
"(",
"'longitude'",
",",
"x",
")",
",",
"load_params",
".",
"get",
"(",
"'latitude'",
",",
"y",
")",
"x_y_rng",
"=",
"abs",
"(",
"x",
"[",
"1",
"]",
"-",
"x",
"[",
"0",
"]",
")",
",",
"abs",
"(",
"y",
"[",
"1",
"]",
"-",
"y",
"[",
"0",
"]",
")",
"abs_res",
"=",
"[",
"round",
"(",
"res",
"*",
"rng",
")",
"for",
"res",
",",
"rng",
"in",
"zip",
"(",
"min_res",
",",
"x_y_rng",
")",
"]",
"datasets_temp",
"=",
"{",
"}",
"# Maps platforms to datasets to merge.",
"clean_masks_temp",
"=",
"{",
"}",
"# Maps platforms to clean masks to merge.",
"masks_per_platform",
"=",
"{",
"}",
"if",
"indiv_masks",
"is",
"not",
"None",
"else",
"None",
"# Maps platforms to lists of masks.",
"for",
"product",
",",
"platform",
"in",
"zip",
"(",
"products",
",",
"platforms",
")",
":",
"current_load_params",
"=",
"dict",
"(",
"platform",
"=",
"platform",
",",
"product",
"=",
"product",
")",
"current_masking_params",
"=",
"masking_params",
".",
"get",
"(",
"platform",
",",
"masking_params",
")",
"# Handle `load_params` as a dict of dicts of platforms mapping to load params.",
"if",
"isinstance",
"(",
"list",
"(",
"load_params",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"dict",
")",
":",
"current_load_params",
".",
"update",
"(",
"load_params",
".",
"get",
"(",
"platform",
",",
"{",
"}",
")",
")",
"else",
":",
"# Handle `load_params` as a dict of load params.",
"current_load_params",
".",
"update",
"(",
"load_params",
")",
"# Load each time range of data.",
"time",
"=",
"current_load_params",
".",
"get",
"(",
"'time'",
")",
"if",
"isinstance",
"(",
"time",
"[",
"0",
"]",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"time",
"[",
"0",
"]",
",",
"list",
")",
":",
"# Handle `time` as a list of time ranges.",
"datasets_time_parts",
"=",
"[",
"]",
"clean_masks_time_parts",
"=",
"[",
"]",
"masks_time_parts",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"time",
")",
",",
"len",
"(",
"indiv_masks",
")",
")",
",",
"dtype",
"=",
"object",
")",
"if",
"indiv_masks",
"is",
"not",
"None",
"else",
"None",
"for",
"i",
",",
"time_range",
"in",
"enumerate",
"(",
"time",
")",
":",
"time_range_load_params",
"=",
"current_load_params",
"time_range_load_params",
"[",
"'time'",
"]",
"=",
"time_range",
"try",
":",
"dataset_time_part",
",",
"clean_mask_time_part",
",",
"masks_time_part",
"=",
"load_simple",
"(",
"dc",
",",
"platform",
",",
"product",
",",
"abs_res",
"=",
"abs_res",
",",
"load_params",
"=",
"time_range_load_params",
",",
"masking_params",
"=",
"masking_params",
",",
"indiv_masks",
"=",
"indiv_masks",
")",
"datasets_time_parts",
".",
"append",
"(",
"dataset_time_part",
")",
"clean_masks_time_parts",
".",
"append",
"(",
"clean_mask_time_part",
")",
"if",
"indiv_masks",
"is",
"not",
"None",
":",
"masks_time_parts",
"[",
"i",
"]",
"=",
"masks_time_part",
"except",
"(",
"AssertionError",
")",
":",
"continue",
"datasets_temp",
"[",
"platform",
"]",
",",
"clean_masks_temp",
"[",
"platform",
"]",
"=",
"xarray_concat_and_merge",
"(",
"datasets_time_parts",
",",
"clean_masks_time_parts",
")",
"if",
"indiv_masks",
"is",
"not",
"None",
":",
"masks_per_platform",
"[",
"platform",
"]",
"=",
"xarray_concat_and_merge",
"(",
"*",
"masks_time_parts",
".",
"T",
")",
"else",
":",
"# Handle `time` as a single time range.",
"try",
":",
"datasets_temp",
"[",
"platform",
"]",
",",
"clean_masks_temp",
"[",
"platform",
"]",
",",
"masks",
"=",
"load_simple",
"(",
"dc",
",",
"platform",
",",
"product",
",",
"abs_res",
"=",
"abs_res",
",",
"load_params",
"=",
"current_load_params",
",",
"masking_params",
"=",
"masking_params",
",",
"indiv_masks",
"=",
"indiv_masks",
")",
"if",
"indiv_masks",
"is",
"not",
"None",
":",
"masks_per_platform",
"[",
"platform",
"]",
"=",
"masks",
"except",
"(",
"AssertionError",
")",
":",
"continue",
"return",
"merge_datasets",
"(",
"datasets_temp",
",",
"clean_masks_temp",
",",
"masks_per_platform",
")"
] | [
322,
0
] | [
461,
78
] | python | en | ['en', 'error', 'th'] | False |
get_product_extents | (api, platform, product, **kwargs) |
Returns the minimum and maximum latitude, longitude, and date range of a product.
Parameters
----------
api: DataAccessApi
An instance of `DataAccessApi` to get query metadata from.
platform, product: str
Names of the platform and product to query extent information for.
**kwargs: dict
Keyword arguments for `api.get_query_metadata()`.
Returns
-------
full_lat, full_lon: tuple
Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
min_max_dates: tuple of datetime.datetime
A 2-tuple of the minimum and maximum time available.
|
Returns the minimum and maximum latitude, longitude, and date range of a product. | def get_product_extents(api, platform, product, **kwargs):
"""
Returns the minimum and maximum latitude, longitude, and date range of a product.
Parameters
----------
api: DataAccessApi
An instance of `DataAccessApi` to get query metadata from.
platform, product: str
Names of the platform and product to query extent information for.
**kwargs: dict
Keyword arguments for `api.get_query_metadata()`.
Returns
-------
full_lat, full_lon: tuple
Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
min_max_dates: tuple of datetime.datetime
A 2-tuple of the minimum and maximum time available.
"""
# Get the extents of the cube
descriptor = api.get_query_metadata(platform=platform, product=product, **kwargs)
min_max_lat = descriptor['lat_extents']
min_max_lon = descriptor['lon_extents']
min_max_dates = descriptor['time_extents']
return min_max_lat, min_max_lon, min_max_dates | [
"def",
"get_product_extents",
"(",
"api",
",",
"platform",
",",
"product",
",",
"*",
"*",
"kwargs",
")",
":",
"# Get the extents of the cube",
"descriptor",
"=",
"api",
".",
"get_query_metadata",
"(",
"platform",
"=",
"platform",
",",
"product",
"=",
"product",
",",
"*",
"*",
"kwargs",
")",
"min_max_lat",
"=",
"descriptor",
"[",
"'lat_extents'",
"]",
"min_max_lon",
"=",
"descriptor",
"[",
"'lon_extents'",
"]",
"min_max_dates",
"=",
"descriptor",
"[",
"'time_extents'",
"]",
"return",
"min_max_lat",
",",
"min_max_lon",
",",
"min_max_dates"
] | [
467,
0
] | [
492,
50
] | python | en | ['en', 'error', 'th'] | False |
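A brief usage sketch for get_product_extents(). The DataAccessApi import path and the platform/product names below are assumptions for illustration only; substitute values that match your own Data Cube installation.

# The import path and the platform/product names are placeholders and are not
# guaranteed to match any particular Data Cube deployment.
from utils.data_cube_utilities.data_access_api import DataAccessApi

api = DataAccessApi()
full_lat, full_lon, min_max_dates = get_product_extents(
    api, platform='LANDSAT_8', product='ls8_usgs_sr_scene')
print(full_lat, full_lon, min_max_dates)   # (min_lat, max_lat), (min_lon, max_lon), (start, end)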
get_overlapping_area | (api, platforms, products, **product_kwargs) |
Returns the minimum and maximum latitude, longitude, and date range of the overlapping
area for a set of products.
Parameters
----------
api: DataAccessApi
An instance of `DataAccessApi` to get query metadata from.
platforms, products: list-like of str
A list-like of names of platforms and products to query extent information for.
These lists must have the same length.
**product_kwargs: dict
A dictionary mapping product names to keyword arguments for
`get_product_extents()`
Returns
-------
full_lat, full_lon: tuple
Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
min_max_dates: numpy.ndarray of datetime.datetime
A 2D NumPy array with shape (len(products), 2), in which rows contain the minimum
and maximum time available for corresponding products.
|
Returns the minimum and maximum latitude, longitude, and date range of the overlapping
area for a set of products.
Parameters
----------
api: DataAccessApi
An instance of `DataAccessApi` to get query metadata from.
platforms, products: list-like of str
A list-like of names of platforms and products to query extent information for.
These lists must have the same length.
**product_kwargs: dict
A dictionary mapping product names to keyword arguments for
`get_product_extents()`
Returns
-------
full_lat, full_lon: tuple
Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
min_max_dates: numpy.ndarray of datetime.datetime
A 2D NumPy array with shape (len(products), 2), in which rows contain the minimum
and maximum time available for corresponding products.
| def get_overlapping_area(api, platforms, products, **product_kwargs):
"""
Returns the minimum and maximum latitude, longitude, and date range of the overlapping
area for a set of products.
Parameters
----------
api: DataAccessApi
An instance of `DataAccessApi` to get query metadata from.
platforms, products: list-like of str
A list-like of names of platforms and products to query extent information for.
These lists must have the same length.
**product_kwargs: dict
A dictionary mapping product names to keyword arguments for
`get_product_extents()`
Returns
-------
full_lat, full_lon: tuple
Two 2-tuples of the minimum and maximum latitude and longitude, respectively.
min_max_dates: numpy.ndarray of datetime.datetime
A 2D NumPy array with shape (len(products), 2), in which rows contain the minimum
and maximum time available for corresponding products.
"""
min_max_dates = np.empty((len(platforms), 2), dtype=object)
min_max_lat = np.empty((len(platforms), 2))
min_max_lon = np.empty((len(platforms), 2))
for i, (platform, product) in enumerate(zip(platforms, products)):
min_max_lat[i], min_max_lon[i], min_max_dates[i] = \
get_product_extents(api, platform, product,
**product_kwargs.get(product, dict()))
# Determine minimum and maximum lat and lon extents that bound a common area among the
# products, which are the greatest minimums and smallest maximums.
min_lon, max_lon = np.max(min_max_lon[:,0]), np.min(min_max_lon[:,1])
min_lat, max_lat = np.max(min_max_lat[:,0]), np.min(min_max_lat[:,1])
full_lon = (min_lon, max_lon)
full_lat = (min_lat, max_lat)
return full_lat, full_lon, min_max_dates | [
"def",
"get_overlapping_area",
"(",
"api",
",",
"platforms",
",",
"products",
",",
"*",
"*",
"product_kwargs",
")",
":",
"min_max_dates",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"platforms",
")",
",",
"2",
")",
",",
"dtype",
"=",
"object",
")",
"min_max_lat",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"platforms",
")",
",",
"2",
")",
")",
"min_max_lon",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"platforms",
")",
",",
"2",
")",
")",
"for",
"i",
",",
"(",
"platform",
",",
"product",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"platforms",
",",
"products",
")",
")",
":",
"min_max_lat",
"[",
"i",
"]",
",",
"min_max_lon",
"[",
"i",
"]",
",",
"min_max_dates",
"[",
"i",
"]",
"=",
"get_product_extents",
"(",
"api",
",",
"platform",
",",
"product",
",",
"*",
"*",
"product_kwargs",
".",
"get",
"(",
"product",
",",
"dict",
"(",
")",
")",
")",
"# Determine minimum and maximum lat and lon extents that bound a common area among the",
"# products, which are the greatest minimums and smallest maximums.",
"min_lon",
",",
"max_lon",
"=",
"np",
".",
"max",
"(",
"min_max_lon",
"[",
":",
",",
"0",
"]",
")",
",",
"np",
".",
"min",
"(",
"min_max_lon",
"[",
":",
",",
"1",
"]",
")",
"min_lat",
",",
"max_lat",
"=",
"np",
".",
"max",
"(",
"min_max_lat",
"[",
":",
",",
"0",
"]",
")",
",",
"np",
".",
"min",
"(",
"min_max_lat",
"[",
":",
",",
"1",
"]",
")",
"full_lon",
"=",
"(",
"min_lon",
",",
"max_lon",
")",
"full_lat",
"=",
"(",
"min_lat",
",",
"max_lat",
")",
"return",
"full_lat",
",",
"full_lon",
",",
"min_max_dates"
] | [
494,
0
] | [
531,
44
] | python | en | ['en', 'error', 'th'] | False |
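The intersection step at the end of get_overlapping_area() can be exercised on its own with plain NumPy: for each axis the common range is the largest of the per-product minimums and the smallest of the per-product maximums. A minimal sketch with made-up extents:

import numpy as np

# Rows are products; columns are (min, max) longitude extents.
min_max_lon = np.array([[34.0, 36.5],
                        [34.8, 37.2]])
min_lon, max_lon = np.max(min_max_lon[:, 0]), np.min(min_max_lon[:, 1])
print((min_lon, max_lon))   # (34.8, 36.5), the longitude range shared by both products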
find_desired_acq_inds | (dataset=None, clean_mask=None, time_dim='time', pct_clean=None, not_empty=False) |
Returns indices of acquisitions that meet a specified set of criteria in
an `xarray.Dataset` or `xarray.DataArray`.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The `xarray` object to remove undesired acquisitions from.
clean_mask: xarray.DataArray
A boolean `xarray.DataArray` denoting the "clean" values in `dataset`.
More generally, in this mask, `True` values are considered desirable.
time_dim: str
The string name of the time dimension.
pct_clean: float
The minimum percent of "clean" (or "desired") pixels required to keep an acquisition.
Requires `clean_mask` to be supplied.
not_empty: bool
Whether to remove empty acquisitions or not.
Here, an empty acquisition is one that contains all NaN values.
Requires `dataset` to be supplied.
Returns
-------
acq_inds_to_keep: list of int
A list of indices of acquisitions that meet the specified criteria.
|
Returns indices of acquisitions that meet a specified set of criteria in
an `xarray.Dataset` or `xarray.DataArray`. | def find_desired_acq_inds(dataset=None, clean_mask=None, time_dim='time', pct_clean=None, not_empty=False):
"""
Returns indices of acquisitions that meet a specified set of criteria in
an `xarray.Dataset` or `xarray.DataArray`.
Parameters
----------
dataset: xarray.Dataset or xarray.DataArray
The `xarray` object to remove undesired acquisitions from.
clean_mask: xarray.DataArray
A boolean `xarray.DataArray` denoting the "clean" values in `dataset`.
More generally, in this mask, `True` values are considered desirable.
time_dim: str
The string name of the time dimension.
pct_clean: float
The minimum percent of "clean" (or "desired") pixels required to keep an acquisition.
Requires `clean_mask` to be supplied.
not_empty: bool
Whether to remove empty acquisitions or not.
Here, an empty acquisition is one that contains all NaN values.
Requires `dataset` to be supplied.
Returns
-------
acq_inds_to_keep: list of int
A list of indices of acquisitions that meet the specified criteria.
"""
if pct_clean is not None:
assert clean_mask is not None, "If `pct_clean` is supplied, then `clean_mask` must also be supplied."
if not_empty:
assert dataset is not None, "If `not_empty==True`, then `dataset` must be supplied."
acq_inds_to_keep = []
for time_ind in range(len(dataset[time_dim])):
remove_acq = False
if pct_clean is not None:
acq_pct_clean = clean_mask.isel(time=time_ind).mean()
remove_acq = acq_pct_clean < pct_clean
if not_empty:
remove_acq = is_dataset_empty(dataset.isel(time=time_ind))
if not remove_acq:
acq_inds_to_keep.append(time_ind)
return acq_inds_to_keep | [
"def",
"find_desired_acq_inds",
"(",
"dataset",
"=",
"None",
",",
"clean_mask",
"=",
"None",
",",
"time_dim",
"=",
"'time'",
",",
"pct_clean",
"=",
"None",
",",
"not_empty",
"=",
"False",
")",
":",
"if",
"pct_clean",
"is",
"not",
"None",
":",
"assert",
"clean_mask",
"is",
"not",
"None",
",",
"\"If `pct_clean` is supplied, then `clean_mask` must also be supplied.\"",
"if",
"not_empty",
":",
"assert",
"dataset",
"is",
"not",
"None",
",",
"\"If `not_empty==True`, then `dataset` must be supplied.\"",
"acq_inds_to_keep",
"=",
"[",
"]",
"for",
"time_ind",
"in",
"range",
"(",
"len",
"(",
"dataset",
"[",
"time_dim",
"]",
")",
")",
":",
"remove_acq",
"=",
"False",
"if",
"pct_clean",
"is",
"not",
"None",
":",
"acq_pct_clean",
"=",
"clean_mask",
".",
"isel",
"(",
"time",
"=",
"time_ind",
")",
".",
"mean",
"(",
")",
"remove_acq",
"=",
"acq_pct_clean",
"<",
"pct_clean",
"if",
"not_empty",
":",
"remove_acq",
"=",
"is_dataset_empty",
"(",
"dataset",
".",
"isel",
"(",
"time",
"=",
"time_ind",
")",
")",
"if",
"not",
"remove_acq",
":",
"acq_inds_to_keep",
".",
"append",
"(",
"time_ind",
")",
"return",
"acq_inds_to_keep"
] | [
537,
0
] | [
578,
27
] | python | en | ['en', 'error', 'th'] | False |
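A self-contained sketch of find_desired_acq_inds() on synthetic data, assuming the function above is importable from its module; it keeps only acquisitions whose clean-pixel fraction meets the threshold.

import numpy as np
import xarray as xr

times = np.array(['2020-01-01', '2020-01-02', '2020-01-03'], dtype='datetime64[ns]')
data = xr.DataArray(np.random.rand(3, 2, 2),
                    dims=['time', 'y', 'x'], coords={'time': times})
# Acquisition 0 is fully clean, 1 is fully cloudy, 2 is 75% clean.
clean = xr.DataArray(np.array([[[True, True], [True, True]],
                               [[False, False], [False, False]],
                               [[True, False], [True, True]]]),
                     dims=['time', 'y', 'x'], coords={'time': times})

keep = find_desired_acq_inds(dataset=data, clean_mask=clean, pct_clean=0.5)
print(keep)                    # [0, 2]
subset = data.isel(time=keep)  # drops the fully cloudy acquisition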
group_dates_by_day | (dates) |
Given a list of dates, return the list of lists of dates grouped by day.
Parameters
----------
dates: List[np.datetime64]
Returns
-------
grouped_dates: List[List[np.datetime64]]
|
Given a list of dates, return the list of lists of dates grouped by day. | def group_dates_by_day(dates):
"""
Given a list of dates, return the list of lists of dates grouped by day.
Parameters
----------
dates: List[np.datetime64]
Returns
-------
grouped_dates: List[List[np.datetime64]]
"""
generate_key = lambda b: ((b - np.datetime64('1970-01-01T00:00:00Z')) / (np.timedelta64(1, 'h') * 24)).astype(int)
return [list(group) for key, group in itertools.groupby(dates, key=generate_key)] | [
"def",
"group_dates_by_day",
"(",
"dates",
")",
":",
"generate_key",
"=",
"lambda",
"b",
":",
"(",
"(",
"b",
"-",
"np",
".",
"datetime64",
"(",
"'1970-01-01T00:00:00Z'",
")",
")",
"/",
"(",
"np",
".",
"timedelta64",
"(",
"1",
",",
"'h'",
")",
"*",
"24",
")",
")",
".",
"astype",
"(",
"int",
")",
"return",
"[",
"list",
"(",
"group",
")",
"for",
"key",
",",
"group",
"in",
"itertools",
".",
"groupby",
"(",
"dates",
",",
"key",
"=",
"generate_key",
")",
"]"
] | [
581,
0
] | [
594,
85
] | python | en | ['en', 'error', 'th'] | False |
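A quick check of group_dates_by_day(), assuming the function above is in scope. Because itertools.groupby only merges consecutive equal keys, the input is expected to be in chronological order (as xarray time coordinates normally are).

import numpy as np

dates = np.array(['2020-01-01T10:00', '2020-01-01T22:30', '2020-01-02T09:15'],
                 dtype='datetime64[ns]')
groups = group_dates_by_day(dates)
# groups[0] holds the two 2020-01-01 timestamps, groups[1] the single 2020-01-02 timestamp.
print([len(g) for g in groups])   # [2, 1]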
reduce_on_day | (ds, reduction_func=np.nanmean) |
Combine data in an `xarray.Dataset` for dates with the same day
Parameters
----------
ds: xr.Dataset
reduction_func: np.ufunc
Returns
-------
reduced_ds: xr.Dataset
|
Combine data in an `xarray.Dataset` for dates with the same day | def reduce_on_day(ds, reduction_func=np.nanmean):
"""
Combine data in an `xarray.Dataset` for dates with the same day
Parameters
----------
ds: xr.Dataset
reduction_func: np.ufunc
Returns
-------
reduced_ds: xr.Dataset
"""
# Save dtypes to convert back to them.
dataset_in_dtypes = {}
for band in ds.data_vars:
dataset_in_dtypes[band] = ds[band].dtype
# Group dates by day into date_groups
day_groups = group_dates_by_day(ds.time.values)
# slice large dataset into many smaller datasets by date_group
group_chunks = (ds.sel(time=t) for t in day_groups)
# reduce each dataset using something like "average" or "median" such that many values for a day become one value
group_slices = (_ds.reduce(reduction_func, dim="time") for _ds in group_chunks if "time" in dict(ds.dims).keys())
# recombine slices into larger dataset
new_dataset = xr.concat(group_slices, dim="time")
# rename times values using the first time in each date_group
new_times = [day_group[0] for day_group in day_groups] # list(map(get_first, day_groups))
new_dataset = new_dataset.reindex(dict(time=np.array(new_times)))
restore_or_convert_dtypes(None, None, dataset_in_dtypes, new_dataset)
return new_dataset | [
"def",
"reduce_on_day",
"(",
"ds",
",",
"reduction_func",
"=",
"np",
".",
"nanmean",
")",
":",
"# Save dtypes to convert back to them.",
"dataset_in_dtypes",
"=",
"{",
"}",
"for",
"band",
"in",
"ds",
".",
"data_vars",
":",
"dataset_in_dtypes",
"[",
"band",
"]",
"=",
"ds",
"[",
"band",
"]",
".",
"dtype",
"# Group dates by day into date_groups",
"day_groups",
"=",
"group_dates_by_day",
"(",
"ds",
".",
"time",
".",
"values",
")",
"# slice large dataset into many smaller datasets by date_group",
"group_chunks",
"=",
"(",
"ds",
".",
"sel",
"(",
"time",
"=",
"t",
")",
"for",
"t",
"in",
"day_groups",
")",
"# reduce each dataset using something like \"average\" or \"median\" such that many values for a day become one value",
"group_slices",
"=",
"(",
"_ds",
".",
"reduce",
"(",
"reduction_func",
",",
"dim",
"=",
"\"time\"",
")",
"for",
"_ds",
"in",
"group_chunks",
"if",
"\"time\"",
"in",
"dict",
"(",
"ds",
".",
"dims",
")",
".",
"keys",
"(",
")",
")",
"# recombine slices into larger dataset",
"new_dataset",
"=",
"xr",
".",
"concat",
"(",
"group_slices",
",",
"dim",
"=",
"\"time\"",
")",
"# rename times values using the first time in each date_group",
"new_times",
"=",
"[",
"day_group",
"[",
"0",
"]",
"for",
"day_group",
"in",
"day_groups",
"]",
"# list(map(get_first, day_groups))",
"new_dataset",
"=",
"new_dataset",
".",
"reindex",
"(",
"dict",
"(",
"time",
"=",
"np",
".",
"array",
"(",
"new_times",
")",
")",
")",
"restore_or_convert_dtypes",
"(",
"None",
",",
"None",
",",
"dataset_in_dtypes",
",",
"new_dataset",
")",
"return",
"new_dataset"
] | [
597,
0
] | [
633,
22
] | python | en | ['en', 'error', 'th'] | False |
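For comparison only: the day-level mean that reduce_on_day() computes can also be expressed with xarray's own resample. This is not the function's implementation, and reduce_on_day() additionally labels each day with the first timestamp of that day's group rather than midnight.

import numpy as np
import xarray as xr

times = np.array(['2020-01-01T01:00', '2020-01-01T13:00', '2020-01-02T01:00'],
                 dtype='datetime64[ns]')
ds = xr.Dataset({'ndvi': ('time', [0.2, 0.4, 0.6])}, coords={'time': times})

daily = ds.resample(time='1D').mean()   # one value per calendar day
print(daily.ndvi.values)                # [0.3 0.6]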
SeBS.ignore_cache | (self) |
The cache will only store code packages,
and won't update new functions and storage.
|
The cache will only store code packages,
and won't update new functions and storage.
| def ignore_cache(self):
"""
The cache will only store code packages,
and won't update new functions and storage.
"""
self._cache_client.ignore_storage = True
self._cache_client.ignore_functions = True | [
"def",
"ignore_cache",
"(",
"self",
")",
":",
"self",
".",
"_cache_client",
".",
"ignore_storage",
"=",
"True",
"self",
".",
"_cache_client",
".",
"ignore_functions",
"=",
"True"
] | [
66,
4
] | [
72,
50
] | python | en | ['en', 'error', 'th'] | False |
StoreBackend.__init__ | (
self,
fixed_length_key=False,
suppress_store_backend_id=False,
manually_initialize_store_backend_id: str = "",
store_name="no_store_name",
) |
Initialize a StoreBackend
Args:
fixed_length_key:
suppress_store_backend_id: skip construction of a StoreBackend.store_backend_id
manually_initialize_store_backend_id: UUID as a string to use if the store_backend_id is not already set
store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
|
Initialize a StoreBackend
Args:
fixed_length_key:
suppress_store_backend_id: skip construction of a StoreBackend.store_backend_id
manually_initialize_store_backend_id: UUID as a string to use if the store_backend_id is not already set
store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
| def __init__(
self,
fixed_length_key=False,
suppress_store_backend_id=False,
manually_initialize_store_backend_id: str = "",
store_name="no_store_name",
):
"""
Initialize a StoreBackend
Args:
fixed_length_key:
suppress_store_backend_id: skip construction of a StoreBackend.store_backend_id
manually_initialize_store_backend_id: UUID as a string to use if the store_backend_id is not already set
store_name: store name given in the DataContextConfig (via either in-code or yaml configuration)
"""
self._fixed_length_key = fixed_length_key
self._suppress_store_backend_id = suppress_store_backend_id
self._manually_initialize_store_backend_id = (
manually_initialize_store_backend_id
)
self._store_name = store_name | [
"def",
"__init__",
"(",
"self",
",",
"fixed_length_key",
"=",
"False",
",",
"suppress_store_backend_id",
"=",
"False",
",",
"manually_initialize_store_backend_id",
":",
"str",
"=",
"\"\"",
",",
"store_name",
"=",
"\"no_store_name\"",
",",
")",
":",
"self",
".",
"_fixed_length_key",
"=",
"fixed_length_key",
"self",
".",
"_suppress_store_backend_id",
"=",
"suppress_store_backend_id",
"self",
".",
"_manually_initialize_store_backend_id",
"=",
"(",
"manually_initialize_store_backend_id",
")",
"self",
".",
"_store_name",
"=",
"store_name"
] | [
27,
4
] | [
47,
37
] | python | en | ['en', 'error', 'th'] | False |
StoreBackend._construct_store_backend_id | (
self, suppress_warning: bool = False
) |
Create a store_backend_id if one does not exist, and return it if it exists
If a valid UUID store_backend_id is passed in param manually_initialize_store_backend_id
and there is not already an existing store_backend_id then the store_backend_id
from param manually_initialize_store_backend_id is used to create it.
Args:
suppress_warning: boolean flag for whether warnings are logged
Returns:
store_backend_id which is a UUID(version=4)
|
Create a store_backend_id if one does not exist, and return it if it exists
If a valid UUID store_backend_id is passed in param manually_initialize_store_backend_id
and there is not already an existing store_backend_id then the store_backend_id
from param manually_initialize_store_backend_id is used to create it.
Args:
suppress_warning: boolean flag for whether warnings are logged | def _construct_store_backend_id(
self, suppress_warning: bool = False
) -> Optional[str]:
"""
Create a store_backend_id if one does not exist, and return it if it exists
If a valid UUID store_backend_id is passed in param manually_initialize_store_backend_id
and there is not already an existing store_backend_id then the store_backend_id
from param manually_initialize_store_backend_id is used to create it.
Args:
suppress_warning: boolean flag for whether warnings are logged
Returns:
store_backend_id which is a UUID(version=4)
"""
if self._suppress_store_backend_id:
if not suppress_warning:
logger.warning(
f"You are attempting to access the store_backend_id of a store or store_backend named {self.store_name} that has been explicitly suppressed."
)
return None
try:
try:
return self.get(key=self.STORE_BACKEND_ID_KEY).replace(
self.STORE_BACKEND_ID_PREFIX, ""
)
except InvalidKeyError:
store_id = (
self._manually_initialize_store_backend_id
if self._manually_initialize_store_backend_id
else str(uuid.uuid4())
)
self.set(
key=self.STORE_BACKEND_ID_KEY,
value=f"{self.STORE_BACKEND_ID_PREFIX}{store_id}",
)
return store_id
except Exception:
if not suppress_warning:
logger.warning(
f"Invalid store configuration: Please check the configuration of your {self.__class__.__name__} named {self.store_name}"
)
return self.STORE_BACKEND_INVALID_CONFIGURATION_ID | [
"def",
"_construct_store_backend_id",
"(",
"self",
",",
"suppress_warning",
":",
"bool",
"=",
"False",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"if",
"self",
".",
"_suppress_store_backend_id",
":",
"if",
"not",
"suppress_warning",
":",
"logger",
".",
"warning",
"(",
"f\"You are attempting to access the store_backend_id of a store or store_backend named {self.store_name} that has been explicitly suppressed.\"",
")",
"return",
"None",
"try",
":",
"try",
":",
"return",
"self",
".",
"get",
"(",
"key",
"=",
"self",
".",
"STORE_BACKEND_ID_KEY",
")",
".",
"replace",
"(",
"self",
".",
"STORE_BACKEND_ID_PREFIX",
",",
"\"\"",
")",
"except",
"InvalidKeyError",
":",
"store_id",
"=",
"(",
"self",
".",
"_manually_initialize_store_backend_id",
"if",
"self",
".",
"_manually_initialize_store_backend_id",
"else",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"self",
".",
"set",
"(",
"key",
"=",
"self",
".",
"STORE_BACKEND_ID_KEY",
",",
"value",
"=",
"f\"{self.STORE_BACKEND_ID_PREFIX}{store_id}\"",
",",
")",
"return",
"store_id",
"except",
"Exception",
":",
"if",
"not",
"suppress_warning",
":",
"logger",
".",
"warning",
"(",
"f\"Invalid store configuration: Please check the configuration of your {self.__class__.__name__} named {self.store_name}\"",
")",
"return",
"self",
".",
"STORE_BACKEND_INVALID_CONFIGURATION_ID"
] | [
57,
4
] | [
98,
62
] | python | en | ['en', 'error', 'th'] | False |
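A stripped-down illustration of the get-or-create logic described above, using a plain dict in place of a concrete StoreBackend. This is not the Great Expectations API: the key and prefix strings below are placeholders standing in for the class constants STORE_BACKEND_ID_KEY and STORE_BACKEND_ID_PREFIX, whose values are not shown here.

import uuid

backing_store = {}
KEY = ('store_backend_id',)       # placeholder for STORE_BACKEND_ID_KEY
PREFIX = 'store_backend_id = '    # placeholder for STORE_BACKEND_ID_PREFIX

def get_or_create_store_backend_id(manual_id: str = '') -> str:
    # Reuse an existing id if one is stored; otherwise persist either the
    # manually supplied UUID or a freshly generated uuid4, mirroring the method above.
    if KEY in backing_store:
        return backing_store[KEY].replace(PREFIX, '')
    store_id = manual_id if manual_id else str(uuid.uuid4())
    backing_store[KEY] = f'{PREFIX}{store_id}'
    return store_id

first = get_or_create_store_backend_id()
assert get_or_create_store_backend_id() == first   # later calls return the same id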
ColumnQuantileValues._pandas | (cls, column, quantiles, allow_relative_error, **kwargs) | Quantile Function | Quantile Function | def _pandas(cls, column, quantiles, allow_relative_error, **kwargs):
"""Quantile Function"""
interpolation_options = ("linear", "lower", "higher", "midpoint", "nearest")
if not allow_relative_error:
allow_relative_error = "nearest"
if allow_relative_error not in interpolation_options:
raise ValueError(
f"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'"
f"parameter of .quantile() (one of {interpolation_options})"
)
return column.quantile(quantiles, interpolation=allow_relative_error).tolist() | [
"def",
"_pandas",
"(",
"cls",
",",
"column",
",",
"quantiles",
",",
"allow_relative_error",
",",
"*",
"*",
"kwargs",
")",
":",
"interpolation_options",
"=",
"(",
"\"linear\"",
",",
"\"lower\"",
",",
"\"higher\"",
",",
"\"midpoint\"",
",",
"\"nearest\"",
")",
"if",
"not",
"allow_relative_error",
":",
"allow_relative_error",
"=",
"\"nearest\"",
"if",
"allow_relative_error",
"not",
"in",
"interpolation_options",
":",
"raise",
"ValueError",
"(",
"f\"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'\"",
"f\"parameter of .quantile() (one of {interpolation_options})\"",
")",
"return",
"column",
".",
"quantile",
"(",
"quantiles",
",",
"interpolation",
"=",
"allow_relative_error",
")",
".",
"tolist",
"(",
")"
] | [
62,
4
] | [
74,
86
] | python | en | ['en', 'la', 'en'] | False |
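The metric above is a thin wrapper over pandas' own quantile; a minimal standalone check of the call it makes internally:

import pandas as pd

s = pd.Series(range(1, 12))   # 1..11
# 'nearest' snaps each requested quantile to an actually observed value.
print(s.quantile([0.0, 0.5, 1.0], interpolation='nearest').tolist())
# [1.0, 6.0, 11.0]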
Config._reset | (self) |
Resets this Config to the empty, default state so it can load a new config.
|
Resets this Config to the empty, default state so it can load a new config.
| def _reset(self) -> None:
"""
Resets this Config to the empty, default state so it can load a new config.
"""
self.logger.debug("ACONF RESET")
self.current_resource = None
self.helm_chart = None
self.validators = {}
self.config = {}
self.breakers = {}
self.outliers = {}
self.counters = collections.defaultdict(lambda: 0)
self.sources = {}
# Save our magic internal sources.
self.save_source(ACResource.internal_resource())
self.save_source(ACResource.diagnostics_resource())
self.errors = {}
self.notices = {}
self.fatal_errors = 0
self.object_errors = 0
self.fast_validation_disagreements = {}
# Build up the Ambassador node name.
#
# XXX This should be overrideable by the Ambassador module.
self.ambassador_nodename = "%s-%s" % (os.environ.get('AMBASSADOR_ID', 'ambassador'),
Config.ambassador_namespace) | [
"def",
"_reset",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"ACONF RESET\"",
")",
"self",
".",
"current_resource",
"=",
"None",
"self",
".",
"helm_chart",
"=",
"None",
"self",
".",
"validators",
"=",
"{",
"}",
"self",
".",
"config",
"=",
"{",
"}",
"self",
".",
"breakers",
"=",
"{",
"}",
"self",
".",
"outliers",
"=",
"{",
"}",
"self",
".",
"counters",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"self",
".",
"sources",
"=",
"{",
"}",
"# Save our magic internal sources.",
"self",
".",
"save_source",
"(",
"ACResource",
".",
"internal_resource",
"(",
")",
")",
"self",
".",
"save_source",
"(",
"ACResource",
".",
"diagnostics_resource",
"(",
")",
")",
"self",
".",
"errors",
"=",
"{",
"}",
"self",
".",
"notices",
"=",
"{",
"}",
"self",
".",
"fatal_errors",
"=",
"0",
"self",
".",
"object_errors",
"=",
"0",
"self",
".",
"fast_validation_disagreements",
"=",
"{",
"}",
"# Build up the Ambassador node name.",
"#",
"# XXX This should be overrideable by the Ambassador module.",
"self",
".",
"ambassador_nodename",
"=",
"\"%s-%s\"",
"%",
"(",
"os",
".",
"environ",
".",
"get",
"(",
"'AMBASSADOR_ID'",
",",
"'ambassador'",
")",
",",
"Config",
".",
"ambassador_namespace",
")"
] | [
153,
4
] | [
188,
74
] | python | en | ['en', 'error', 'th'] | False |
Config.save_source | (self, resource: ACResource) |
Save a given ACResource as a source of Ambassador config information.
|
Save a given ACResource as a source of Ambassador config information.
| def save_source(self, resource: ACResource) -> None:
"""
Save a given ACResource as a source of Ambassador config information.
"""
self.sources[resource.rkey] = resource | [
"def",
"save_source",
"(",
"self",
",",
"resource",
":",
"ACResource",
")",
"->",
"None",
":",
"self",
".",
"sources",
"[",
"resource",
".",
"rkey",
"]",
"=",
"resource"
] | [
278,
4
] | [
282,
46
] | python | en | ['en', 'error', 'th'] | False |
Config.load_all | (self, resources: Iterable[ACResource]) |
Loads all of a set of ACResources. It is the caller's responsibility to arrange for
the set of ACResources to be sorted in some way that makes sense.
|
Loads all of a set of ACResources. It is the caller's responsibility to arrange for
the set of ACResources to be sorted in some way that makes sense.
| def load_all(self, resources: Iterable[ACResource]) -> None:
"""
Loads all of a set of ACResources. It is the caller's responsibility to arrange for
the set of ACResources to be sorted in some way that makes sense.
"""
self.logger.debug(f"Loading config; legacy mode is {'enabled' if Config.legacy_mode else 'disabled'}")
rcount = 0
for resource in resources:
self.logger.debug(f"Trying to parse resource: {resource}")
rcount += 1
if not self.good_ambassador_id(resource):
continue
self.logger.debug("LOAD_ALL: %s @ %s" % (resource, resource.location))
rc = self.process(resource)
if not rc:
# Object error. Not good but we'll allow the system to start.
self.post_error(rc, resource=resource)
self.logger.debug("LOAD_ALL: processed %d resource%s" % (rcount, "" if (rcount == 1) else "s"))
if self.fatal_errors:
# Kaboom.
raise Exception("ERROR ERROR ERROR Unparseable configuration; exiting")
if self.errors:
self.logger.error("ERROR ERROR ERROR Starting with configuration errors") | [
"def",
"load_all",
"(",
"self",
",",
"resources",
":",
"Iterable",
"[",
"ACResource",
"]",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Loading config; legacy mode is {'enabled' if Config.legacy_mode else 'disabled'}\"",
")",
"rcount",
"=",
"0",
"for",
"resource",
"in",
"resources",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Trying to parse resource: {resource}\"",
")",
"rcount",
"+=",
"1",
"if",
"not",
"self",
".",
"good_ambassador_id",
"(",
"resource",
")",
":",
"continue",
"self",
".",
"logger",
".",
"debug",
"(",
"\"LOAD_ALL: %s @ %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"location",
")",
")",
"rc",
"=",
"self",
".",
"process",
"(",
"resource",
")",
"if",
"not",
"rc",
":",
"# Object error. Not good but we'll allow the system to start.",
"self",
".",
"post_error",
"(",
"rc",
",",
"resource",
"=",
"resource",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"LOAD_ALL: processed %d resource%s\"",
"%",
"(",
"rcount",
",",
"\"\"",
"if",
"(",
"rcount",
"==",
"1",
")",
"else",
"\"s\"",
")",
")",
"if",
"self",
".",
"fatal_errors",
":",
"# Kaboom.",
"raise",
"Exception",
"(",
"\"ERROR ERROR ERROR Unparseable configuration; exiting\"",
")",
"if",
"self",
".",
"errors",
":",
"self",
".",
"logger",
".",
"error",
"(",
"\"ERROR ERROR ERROR Starting with configuration errors\"",
")"
] | [
284,
4
] | [
317,
85
] | python | en | ['en', 'error', 'th'] | False |
Config.safe_store | (self, storage_name: str, resource: ACResource, allow_log: bool=True) |
Safely store a ACResource under a given storage name. The storage_name is separate
because we may need to e.g. store a Module under the 'ratelimit' name or the like.
Within a storage_name bucket, the ACResource will be stored under its name.
:param storage_name: where shall we file this?
:param resource: what shall we file?
:param allow_log: if True, logs that we're saving this thing.
|
Safely store a ACResource under a given storage name. The storage_name is separate
because we may need to e.g. store a Module under the 'ratelimit' name or the like.
Within a storage_name bucket, the ACResource will be stored under its name. | def safe_store(self, storage_name: str, resource: ACResource, allow_log: bool=True) -> None:
"""
Safely store a ACResource under a given storage name. The storage_name is separate
because we may need to e.g. store a Module under the 'ratelimit' name or the like.
Within a storage_name bucket, the ACResource will be stored under its name.
:param storage_name: where shall we file this?
:param resource: what shall we file?
:param allow_log: if True, logs that we're saving this thing.
"""
storage = self.config.setdefault(storage_name, {})
if resource.name in storage:
if resource.namespace == storage[resource.name].get('namespace'):
# If the name and namespace, both match, then it's definitely an error.
# Oooops.
self.post_error("%s defines %s %s, which is already defined by %s" %
(resource, resource.kind, resource.name, storage[resource.name].location),
resource=resource)
else:
# Here, we deal with the case when multiple resources have the same name but they exist in different
# namespaces. Our current data structure to store resources is a flat string. Till we move to
# identifying resources with both, name and namespace, we change names of any subsequent resources with
# the same name here.
resource.name = f'{resource.name}.{resource.namespace}'
if allow_log:
self.logger.debug("%s: saving %s %s" %
(resource, resource.kind, resource.name))
storage[resource.name] = resource | [
"def",
"safe_store",
"(",
"self",
",",
"storage_name",
":",
"str",
",",
"resource",
":",
"ACResource",
",",
"allow_log",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"storage",
"=",
"self",
".",
"config",
".",
"setdefault",
"(",
"storage_name",
",",
"{",
"}",
")",
"if",
"resource",
".",
"name",
"in",
"storage",
":",
"if",
"resource",
".",
"namespace",
"==",
"storage",
"[",
"resource",
".",
"name",
"]",
".",
"get",
"(",
"'namespace'",
")",
":",
"# If the name and namespace, both match, then it's definitely an error.",
"# Oooops.",
"self",
".",
"post_error",
"(",
"\"%s defines %s %s, which is already defined by %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
",",
"storage",
"[",
"resource",
".",
"name",
"]",
".",
"location",
")",
",",
"resource",
"=",
"resource",
")",
"else",
":",
"# Here, we deal with the case when multiple resources have the same name but they exist in different",
"# namespaces. Our current data structure to store resources is a flat string. Till we move to",
"# identifying resources with both, name and namespace, we change names of any subsequent resources with",
"# the same name here.",
"resource",
".",
"name",
"=",
"f'{resource.name}.{resource.namespace}'",
"if",
"allow_log",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s: saving %s %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"kind",
",",
"resource",
".",
"name",
")",
")",
"storage",
"[",
"resource",
".",
"name",
"]",
"=",
"resource"
] | [
674,
4
] | [
705,
41
] | python | en | ['en', 'error', 'th'] | False |
Config.save_object | (self, resource: ACResource, allow_log: bool=False) |
Saves a ACResource using its kind as the storage class name. Sort of the
defaulted version of safe_store.
:param resource: what shall we file?
:param allow_log: if True, logs that we're saving this thing.
|
Saves a ACResource using its kind as the storage class name. Sort of the
defaulted version of safe_store. | def save_object(self, resource: ACResource, allow_log: bool=False) -> None:
"""
Saves a ACResource using its kind as the storage class name. Sort of the
defaulted version of safe_store.
:param resource: what shall we file?
:param allow_log: if True, logs that we're saving this thing.
"""
self.safe_store(resource.kind, resource, allow_log=allow_log) | [
"def",
"save_object",
"(",
"self",
",",
"resource",
":",
"ACResource",
",",
"allow_log",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"self",
".",
"safe_store",
"(",
"resource",
".",
"kind",
",",
"resource",
",",
"allow_log",
"=",
"allow_log",
")"
] | [
707,
4
] | [
716,
69
] | python | en | ['en', 'error', 'th'] | False |
Config.get_module | (self, module_name: str) |
Fetch a module from the module store. Can return None if no
such module exists.
:param module_name: name of the module you want.
|
Fetch a module from the module store. Can return None if no
such module exists. | def get_module(self, module_name: str) -> Optional[ACResource]:
"""
Fetch a module from the module store. Can return None if no
such module exists.
:param module_name: name of the module you want.
"""
modules = self.get_config("modules")
if modules:
return modules.get(module_name, None)
else:
return None | [
"def",
"get_module",
"(",
"self",
",",
"module_name",
":",
"str",
")",
"->",
"Optional",
"[",
"ACResource",
"]",
":",
"modules",
"=",
"self",
".",
"get_config",
"(",
"\"modules\"",
")",
"if",
"modules",
":",
"return",
"modules",
".",
"get",
"(",
"module_name",
",",
"None",
")",
"else",
":",
"return",
"None"
] | [
721,
4
] | [
734,
23
] | python | en | ['en', 'error', 'th'] | False |
Config.module_lookup | (self, module_name: str, key: str, default: Any=None) |
Look up a specific key in a given module. If the named module doesn't
exist, or if the key doesn't exist in the module, return the default.
:param module_name: name of the module you want.
:param key: key to look up within the module
:param default: default value if the module is missing or has no such key
|
Look up a specific key in a given module. If the named module doesn't
exist, or if the key doesn't exist in the module, return the default. | def module_lookup(self, module_name: str, key: str, default: Any=None) -> Any:
"""
Look up a specific key in a given module. If the named module doesn't
exist, or if the key doesn't exist in the module, return the default.
:param module_name: name of the module you want.
:param key: key to look up within the module
:param default: default value if the module is missing or has no such key
"""
module = self.get_module(module_name)
if module:
return module.get(key, default)
return default | [
"def",
"module_lookup",
"(",
"self",
",",
"module_name",
":",
"str",
",",
"key",
":",
"str",
",",
"default",
":",
"Any",
"=",
"None",
")",
"->",
"Any",
":",
"module",
"=",
"self",
".",
"get_module",
"(",
"module_name",
")",
"if",
"module",
":",
"return",
"module",
".",
"get",
"(",
"key",
",",
"default",
")",
"return",
"default"
] | [
736,
4
] | [
751,
22
] | python | en | ['en', 'error', 'th'] | False |
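A small sketch of module_lookup(). The import path, the no-argument Config() construction, and the 'service_port' key are all assumptions for illustration, not guaranteed details of the Ambassador package.

from ambassador import Config   # assumed import path

aconf = Config()
# Nothing has been loaded, so there is no 'ambassador' Module and the default is returned.
print(aconf.module_lookup('ambassador', 'service_port', 8080))   # 8080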
Config.handle_module | (self, resource: ACResource) |
Handles a Module resource.
|
Handles a Module resource.
| def handle_module(self, resource: ACResource) -> None:
"""
Handles a Module resource.
"""
# Make a new ACResource from the 'config' element of this ACResource
# Note that we leave the original serialization intact, since it will
# indeed show a human the YAML that defined this module.
#
# XXX This should be Module.from_resource()...
module_resource = ACResource.from_resource(resource, kind="Module", **resource.config)
self.safe_store("modules", module_resource) | [
"def",
"handle_module",
"(",
"self",
",",
"resource",
":",
"ACResource",
")",
"->",
"None",
":",
"# Make a new ACResource from the 'config' element of this ACResource",
"# Note that we leave the original serialization intact, since it will",
"# indeed show a human the YAML that defined this module.",
"#",
"# XXX This should be Module.from_resource()...",
"module_resource",
"=",
"ACResource",
".",
"from_resource",
"(",
"resource",
",",
"kind",
"=",
"\"Module\"",
",",
"*",
"*",
"resource",
".",
"config",
")",
"self",
".",
"safe_store",
"(",
"\"modules\"",
",",
"module_resource",
")"
] | [
753,
4
] | [
765,
51
] | python | en | ['en', 'error', 'th'] | False |
Config.handle_secret | (self, resource: ACResource) |
Handles a Secret resource. We need a handler for this because the key needs to be
the rkey, not the name.
|
Handles a Secret resource. We need a handler for this because the key needs to be
the rkey, not the name.
| def handle_secret(self, resource: ACResource) -> None:
"""
Handles a Secret resource. We need a handler for this because the key needs to be
the rkey, not the name.
"""
self.logger.debug(f"Handling secret resource {resource.as_dict()}")
storage = self.config.setdefault('secrets', {})
key = resource.rkey
if key in storage:
self.post_error("%s defines %s %s, which is already defined by %s" %
(resource, resource.kind, key, storage[key].location),
resource=resource)
storage[key] = resource | [
"def",
"handle_secret",
"(",
"self",
",",
"resource",
":",
"ACResource",
")",
"->",
"None",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"f\"Handling secret resource {resource.as_dict()}\"",
")",
"storage",
"=",
"self",
".",
"config",
".",
"setdefault",
"(",
"'secrets'",
",",
"{",
"}",
")",
"key",
"=",
"resource",
".",
"rkey",
"if",
"key",
"in",
"storage",
":",
"self",
".",
"post_error",
"(",
"\"%s defines %s %s, which is already defined by %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"kind",
",",
"key",
",",
"storage",
"[",
"key",
"]",
".",
"location",
")",
",",
"resource",
"=",
"resource",
")",
"storage",
"[",
"key",
"]",
"=",
"resource"
] | [
767,
4
] | [
783,
31
] | python | en | ['en', 'error', 'th'] | False |
Config.handle_service | (self, resource: ACResource) |
Handles a Service resource. We need a handler for this because the key needs to be
the rkey, not the name, and because we need to check the helm_chart attribute.
|
Handles a Service resource. We need a handler for this because the key needs to be
the rkey, not the name, and because we need to check the helm_chart attribute.
| def handle_service(self, resource: ACResource) -> None:
"""
Handles a Service resource. We need a handler for this because the key needs to be
the rkey, not the name, and because we need to check the helm_chart attribute.
"""
storage = self.config.setdefault('service', {})
key = resource.rkey
if key in storage:
self.post_error("%s defines %s %s, which is already defined by %s" %
(resource, resource.kind, key, storage[key].location),
resource=resource)
self.logger.debug("%s: saving %s %s" %
(resource, resource.kind, key))
storage[key] = resource
chart = resource.get('helm_chart', None)
if chart:
self.helm_chart = chart | [
"def",
"handle_service",
"(",
"self",
",",
"resource",
":",
"ACResource",
")",
"->",
"None",
":",
"storage",
"=",
"self",
".",
"config",
".",
"setdefault",
"(",
"'service'",
",",
"{",
"}",
")",
"key",
"=",
"resource",
".",
"rkey",
"if",
"key",
"in",
"storage",
":",
"self",
".",
"post_error",
"(",
"\"%s defines %s %s, which is already defined by %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"kind",
",",
"key",
",",
"storage",
"[",
"key",
"]",
".",
"location",
")",
",",
"resource",
"=",
"resource",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s: saving %s %s\"",
"%",
"(",
"resource",
",",
"resource",
".",
"kind",
",",
"key",
")",
")",
"storage",
"[",
"key",
"]",
"=",
"resource",
"chart",
"=",
"resource",
".",
"get",
"(",
"'helm_chart'",
",",
"None",
")",
"if",
"chart",
":",
"self",
".",
"helm_chart",
"=",
"chart"
] | [
796,
4
] | [
818,
35
] | python | en | ['en', 'error', 'th'] | False |
OrderedProfilerCardinality.get_basic_column_cardinality | (cls, num_unique=0, pct_unique=0) |
Takes the number and percentage of unique values in a column and returns the column cardinality.
If you are unexpectedly returning a cardinality of "None", ensure that you are passing in values for both
num_unique and pct_unique.
Args:
num_unique: The number of unique values in a column
pct_unique: The percentage of unique values in a column
Returns:
The column cardinality
|
Takes the number and percentage of unique values in a column and returns the column cardinality.
If you are unexpectedly returning a cardinality of "None", ensure that you are passing in values for both
num_unique and pct_unique.
Args:
num_unique: The number of unique values in a column
pct_unique: The percentage of unique values in a column | def get_basic_column_cardinality(cls, num_unique=0, pct_unique=0):
"""
Takes the number and percentage of unique values in a column and returns the column cardinality.
If you are unexpectedly returning a cardinality of "None", ensure that you are passing in values for both
num_unique and pct_unique.
Args:
num_unique: The number of unique values in a column
pct_unique: The percentage of unique values in a column
Returns:
The column cardinality
"""
if pct_unique == 1.0:
cardinality = cls.UNIQUE
elif num_unique == 1:
cardinality = cls.ONE
elif num_unique == 2:
cardinality = cls.TWO
elif 0 < num_unique < 20:
cardinality = cls.VERY_FEW
elif 0 < num_unique < 60:
cardinality = cls.FEW
elif num_unique is None or num_unique == 0 or pct_unique is None:
cardinality = cls.NONE
elif pct_unique > 0.1:
cardinality = cls.VERY_MANY
else:
cardinality = cls.MANY
return cardinality | [
"def",
"get_basic_column_cardinality",
"(",
"cls",
",",
"num_unique",
"=",
"0",
",",
"pct_unique",
"=",
"0",
")",
":",
"if",
"pct_unique",
"==",
"1.0",
":",
"cardinality",
"=",
"cls",
".",
"UNIQUE",
"elif",
"num_unique",
"==",
"1",
":",
"cardinality",
"=",
"cls",
".",
"ONE",
"elif",
"num_unique",
"==",
"2",
":",
"cardinality",
"=",
"cls",
".",
"TWO",
"elif",
"0",
"<",
"num_unique",
"<",
"20",
":",
"cardinality",
"=",
"cls",
".",
"VERY_FEW",
"elif",
"0",
"<",
"num_unique",
"<",
"60",
":",
"cardinality",
"=",
"cls",
".",
"FEW",
"elif",
"num_unique",
"is",
"None",
"or",
"num_unique",
"==",
"0",
"or",
"pct_unique",
"is",
"None",
":",
"cardinality",
"=",
"cls",
".",
"NONE",
"elif",
"pct_unique",
">",
"0.1",
":",
"cardinality",
"=",
"cls",
".",
"VERY_MANY",
"else",
":",
"cardinality",
"=",
"cls",
".",
"MANY",
"return",
"cardinality"
] | [
52,
4
] | [
80,
26
] | python | en | ['en', 'error', 'th'] | False |
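A usage sketch for get_basic_column_cardinality(); the import path below is an assumption, but the inputs and the expected bucket follow directly from the thresholds in the method.

from great_expectations.profile.base import OrderedProfilerCardinality   # assumed path
import pandas as pd

col = pd.Series([1, 1, 2, 3, 5, 8, 13])
num_unique = col.nunique()   # 6
cardinality = OrderedProfilerCardinality.get_basic_column_cardinality(
    num_unique=num_unique, pct_unique=num_unique / len(col))
print(cardinality)   # expected: VERY_FEW, since 0 < 6 < 20 and not every value is unique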
ErbLexer.get_tokens_unprocessed | (self, text) |
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
|
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
| def get_tokens_unprocessed(self, text):
"""
Since ERB doesn't allow "<%" and other tags inside of ruby
blocks we have to use a split approach here that fails for
that too.
"""
tokens = self._block_re.split(text)
tokens.reverse()
state = idx = 0
try:
while True:
# text
if state == 0:
val = tokens.pop()
yield idx, Other, val
idx += len(val)
state = 1
# block starts
elif state == 1:
tag = tokens.pop()
# literals
if tag in ('<%%', '%%>'):
yield idx, Other, tag
idx += 3
state = 0
# comment
elif tag == '<%#':
yield idx, Comment.Preproc, tag
val = tokens.pop()
yield idx + 3, Comment, val
idx += 3 + len(val)
state = 2
# blocks or output
elif tag in ('<%', '<%=', '<%-'):
yield idx, Comment.Preproc, tag
idx += len(tag)
data = tokens.pop()
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(data):
yield r_idx + idx, r_token, r_value
idx += len(data)
state = 2
elif tag in ('%>', '-%>'):
yield idx, Error, tag
idx += len(tag)
state = 0
# % raw ruby statements
else:
yield idx, Comment.Preproc, tag[0]
r_idx = 0
for r_idx, r_token, r_value in \
self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
yield idx + 1 + r_idx, r_token, r_value
idx += len(tag)
state = 0
# block ends
elif state == 2:
tag = tokens.pop()
if tag not in ('%>', '-%>'):
yield idx, Other, tag
else:
yield idx, Comment.Preproc, tag
idx += len(tag)
state = 0
except IndexError:
return | [
"def",
"get_tokens_unprocessed",
"(",
"self",
",",
"text",
")",
":",
"tokens",
"=",
"self",
".",
"_block_re",
".",
"split",
"(",
"text",
")",
"tokens",
".",
"reverse",
"(",
")",
"state",
"=",
"idx",
"=",
"0",
"try",
":",
"while",
"True",
":",
"# text",
"if",
"state",
"==",
"0",
":",
"val",
"=",
"tokens",
".",
"pop",
"(",
")",
"yield",
"idx",
",",
"Other",
",",
"val",
"idx",
"+=",
"len",
"(",
"val",
")",
"state",
"=",
"1",
"# block starts",
"elif",
"state",
"==",
"1",
":",
"tag",
"=",
"tokens",
".",
"pop",
"(",
")",
"# literals",
"if",
"tag",
"in",
"(",
"'<%%'",
",",
"'%%>'",
")",
":",
"yield",
"idx",
",",
"Other",
",",
"tag",
"idx",
"+=",
"3",
"state",
"=",
"0",
"# comment",
"elif",
"tag",
"==",
"'<%#'",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"val",
"=",
"tokens",
".",
"pop",
"(",
")",
"yield",
"idx",
"+",
"3",
",",
"Comment",
",",
"val",
"idx",
"+=",
"3",
"+",
"len",
"(",
"val",
")",
"state",
"=",
"2",
"# blocks or output",
"elif",
"tag",
"in",
"(",
"'<%'",
",",
"'<%='",
",",
"'<%-'",
")",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"data",
"=",
"tokens",
".",
"pop",
"(",
")",
"r_idx",
"=",
"0",
"for",
"r_idx",
",",
"r_token",
",",
"r_value",
"in",
"self",
".",
"ruby_lexer",
".",
"get_tokens_unprocessed",
"(",
"data",
")",
":",
"yield",
"r_idx",
"+",
"idx",
",",
"r_token",
",",
"r_value",
"idx",
"+=",
"len",
"(",
"data",
")",
"state",
"=",
"2",
"elif",
"tag",
"in",
"(",
"'%>'",
",",
"'-%>'",
")",
":",
"yield",
"idx",
",",
"Error",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"# % raw ruby statements",
"else",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"[",
"0",
"]",
"r_idx",
"=",
"0",
"for",
"r_idx",
",",
"r_token",
",",
"r_value",
"in",
"self",
".",
"ruby_lexer",
".",
"get_tokens_unprocessed",
"(",
"tag",
"[",
"1",
":",
"]",
")",
":",
"yield",
"idx",
"+",
"1",
"+",
"r_idx",
",",
"r_token",
",",
"r_value",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"# block ends",
"elif",
"state",
"==",
"2",
":",
"tag",
"=",
"tokens",
".",
"pop",
"(",
")",
"if",
"tag",
"not",
"in",
"(",
"'%>'",
",",
"'-%>'",
")",
":",
"yield",
"idx",
",",
"Other",
",",
"tag",
"else",
":",
"yield",
"idx",
",",
"Comment",
".",
"Preproc",
",",
"tag",
"idx",
"+=",
"len",
"(",
"tag",
")",
"state",
"=",
"0",
"except",
"IndexError",
":",
"return"
] | [
71,
4
] | [
137,
18
] | python | en | ['en', 'error', 'th'] | False |
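A short driver for get_tokens_unprocessed() on a small ERB template, showing the (index, token, value) triples the method yields:

from pygments.lexers.templates import ErbLexer

source = '<p><%= @user.name %></p>'
for index, token, value in ErbLexer().get_tokens_unprocessed(source):
    print(index, token, repr(value))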
TeamcityReport._get_capture_plugin | (self) |
:rtype: nose.plugins.capture.Capture
|
:rtype: nose.plugins.capture.Capture
| def _get_capture_plugin(self):
"""
:rtype: nose.plugins.capture.Capture
"""
for plugin in self.config.plugins.plugins:
if plugin.name == "capture":
return plugin
return None | [
"def",
"_get_capture_plugin",
"(",
"self",
")",
":",
"for",
"plugin",
"in",
"self",
".",
"config",
".",
"plugins",
".",
"plugins",
":",
"if",
"plugin",
".",
"name",
"==",
"\"capture\"",
":",
"return",
"plugin",
"return",
"None"
] | [
123,
4
] | [
130,
19
] | python | en | ['en', 'error', 'th'] | False |
TeamcityReport.prepareTestLoader | (self, loader) | Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkey-patch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
from https://github.com/erikrose/nose-progressive/
:type loader: nose.loader.TestLoader
| Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkey-patch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count. | def prepareTestLoader(self, loader):
"""Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkey-patch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
from https://github.com/erikrose/nose-progressive/
:type loader: nose.loader.TestLoader
"""
# TODO: If there's ever a practical need, also patch loader.suiteClass
# or even TestProgram.createTests. createTests seems to be main top-
# level caller of loader methods, and nose.core.collector() (which
# isn't even called in nose) is an alternate one.
#
# nose 1.3.4 contains required fix:
# Another fix for Python 3.4: Call super in LazySuite to access _removed_tests variable
if hasattr(loader, 'loadTestsFromNames') and nose.__versioninfo__ >= (1, 3, 4):
old_loadTestsFromNames = loader.loadTestsFromNames
def _loadTestsFromNames(*args, **kwargs):
suite = old_loadTestsFromNames(*args, **kwargs)
self.total_tests += suite.countTestCases()
# Clear out the loader's cache. Otherwise, it never finds any tests
# for the actual test run:
loader._visitedPaths = set()
return old_loadTestsFromNames(*args, **kwargs)
loader.loadTestsFromNames = _loadTestsFromNames | [
"def",
"prepareTestLoader",
"(",
"self",
",",
"loader",
")",
":",
"# TODO: If there's ever a practical need, also patch loader.suiteClass",
"# or even TestProgram.createTests. createTests seems to be main top-",
"# level caller of loader methods, and nose.core.collector() (which",
"# isn't even called in nose) is an alternate one.",
"#",
"# nose 1.3.4 contains required fix:",
"# Another fix for Python 3.4: Call super in LazySuite to access _removed_tests variable",
"if",
"hasattr",
"(",
"loader",
",",
"'loadTestsFromNames'",
")",
"and",
"nose",
".",
"__versioninfo__",
">=",
"(",
"1",
",",
"3",
",",
"4",
")",
":",
"old_loadTestsFromNames",
"=",
"loader",
".",
"loadTestsFromNames",
"def",
"_loadTestsFromNames",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"suite",
"=",
"old_loadTestsFromNames",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"total_tests",
"+=",
"suite",
".",
"countTestCases",
"(",
")",
"# Clear out the loader's cache. Otherwise, it never finds any tests",
"# for the actual test run:",
"loader",
".",
"_visitedPaths",
"=",
"set",
"(",
")",
"return",
"old_loadTestsFromNames",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"loader",
".",
"loadTestsFromNames",
"=",
"_loadTestsFromNames"
] | [
190,
4
] | [
221,
59
] | python | en | ['en', 'en', 'en'] | True |
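The docstring's double-load trick is easier to see outside of nose; here is a generic analogue with the standard library's unittest loader, counting the tests in a suite and then rebuilding the suite for the actual run:

import unittest

class _Example(unittest.TestCase):
    def test_one(self): pass
    def test_two(self): pass

loader = unittest.TestLoader()
counter = {'total': 0}
original = loader.loadTestsFromTestCase

def counting_loader(*args, **kwargs):
    # Count first, then load again so the returned suite is untouched by the count.
    counter['total'] += original(*args, **kwargs).countTestCases()
    return original(*args, **kwargs)

loader.loadTestsFromTestCase = counting_loader
suite = loader.loadTestsFromTestCase(_Example)
print(counter['total'])   # 2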
test_subclass_pandas_subset_retains_subclass | () | A subclass of PandasDataset should still be that subclass after a Pandas subsetting operation | A subclass of PandasDataset should still be that subclass after a Pandas subsetting operation | def test_subclass_pandas_subset_retains_subclass():
"""A subclass of PandasDataset should still be that subclass after a Pandas subsetting operation"""
class CustomPandasDataset(ge.dataset.PandasDataset):
@ge.dataset.MetaPandasDataset.column_map_expectation
def expect_column_values_to_be_odd(self, column):
return column.map(lambda x: x % 2)
@ge.dataset.MetaPandasDataset.column_map_expectation
def expectation_that_crashes_on_sixes(self, column):
return column.map(lambda x: (x - 6) / 0 != "duck")
df = CustomPandasDataset(
{
"all_odd": [1, 3, 5, 5, 5, 7, 9, 9, 9, 11],
"mostly_odd": [1, 3, 5, 7, 9, 2, 4, 1, 3, 5],
"all_even": [2, 4, 4, 6, 6, 6, 8, 8, 8, 8],
"odd_missing": [1, 3, 5, None, None, None, None, 1, 3, None],
"mixed_missing": [1, 3, 5, None, None, 2, 4, 1, 3, None],
"all_missing": [None, None, None, None, None, None, None, None, None, None],
}
)
df2 = df.sample(frac=0.5)
assert type(df2) == type(df)
def test_validate_map_expectation_on_categorical_column(self):
"""Map expectations should work on categorical columns"""
D = ge.dataset.PandasDataset(
{
"cat_column_1": [
"cat_one",
"cat_two",
"cat_one",
"cat_two",
"cat_one",
"cat_two",
"cat_one",
"cat_two",
],
}
)
D["cat_column_1"] = D["cat_column_1"].astype("category")
D.set_default_expectation_argument("result_format", "COMPLETE")
out = D.expect_column_value_lengths_to_equal("cat_column_1", 7)
self.assertEqual(out["success"], True) | [
"def",
"test_subclass_pandas_subset_retains_subclass",
"(",
")",
":",
"class",
"CustomPandasDataset",
"(",
"ge",
".",
"dataset",
".",
"PandasDataset",
")",
":",
"@",
"ge",
".",
"dataset",
".",
"MetaPandasDataset",
".",
"column_map_expectation",
"def",
"expect_column_values_to_be_odd",
"(",
"self",
",",
"column",
")",
":",
"return",
"column",
".",
"map",
"(",
"lambda",
"x",
":",
"x",
"%",
"2",
")",
"@",
"ge",
".",
"dataset",
".",
"MetaPandasDataset",
".",
"column_map_expectation",
"def",
"expectation_that_crashes_on_sixes",
"(",
"self",
",",
"column",
")",
":",
"return",
"column",
".",
"map",
"(",
"lambda",
"x",
":",
"(",
"x",
"-",
"6",
")",
"/",
"0",
"!=",
"\"duck\"",
")",
"df",
"=",
"CustomPandasDataset",
"(",
"{",
"\"all_odd\"",
":",
"[",
"1",
",",
"3",
",",
"5",
",",
"5",
",",
"5",
",",
"7",
",",
"9",
",",
"9",
",",
"9",
",",
"11",
"]",
",",
"\"mostly_odd\"",
":",
"[",
"1",
",",
"3",
",",
"5",
",",
"7",
",",
"9",
",",
"2",
",",
"4",
",",
"1",
",",
"3",
",",
"5",
"]",
",",
"\"all_even\"",
":",
"[",
"2",
",",
"4",
",",
"4",
",",
"6",
",",
"6",
",",
"6",
",",
"8",
",",
"8",
",",
"8",
",",
"8",
"]",
",",
"\"odd_missing\"",
":",
"[",
"1",
",",
"3",
",",
"5",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"1",
",",
"3",
",",
"None",
"]",
",",
"\"mixed_missing\"",
":",
"[",
"1",
",",
"3",
",",
"5",
",",
"None",
",",
"None",
",",
"2",
",",
"4",
",",
"1",
",",
"3",
",",
"None",
"]",
",",
"\"all_missing\"",
":",
"[",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
"]",
",",
"}",
")",
"df2",
"=",
"df",
".",
"sample",
"(",
"frac",
"=",
"0.5",
")",
"assert",
"type",
"(",
"df2",
")",
"==",
"type",
"(",
"df",
")",
"def",
"test_validate_map_expectation_on_categorical_column",
"(",
"self",
")",
":",
"\"\"\"Map expectations should work on categorical columns\"\"\"",
"D",
"=",
"ge",
".",
"dataset",
".",
"PandasDataset",
"(",
"{",
"\"cat_column_1\"",
":",
"[",
"\"cat_one\"",
",",
"\"cat_two\"",
",",
"\"cat_one\"",
",",
"\"cat_two\"",
",",
"\"cat_one\"",
",",
"\"cat_two\"",
",",
"\"cat_one\"",
",",
"\"cat_two\"",
",",
"]",
",",
"}",
")",
"D",
"[",
"\"cat_column_1\"",
"]",
"=",
"D",
"[",
"\"cat_column_1\"",
"]",
".",
"astype",
"(",
"\"category\"",
")",
"D",
".",
"set_default_expectation_argument",
"(",
"\"result_format\"",
",",
"\"COMPLETE\"",
")",
"out",
"=",
"D",
".",
"expect_column_value_lengths_to_equal",
"(",
"\"cat_column_1\"",
",",
"7",
")",
"self",
".",
"assertEqual",
"(",
"out",
"[",
"\"success\"",
"]",
",",
"True",
")"
] | [
801,
0
] | [
851,
46
] | python | en | ['en', 'en', 'en'] | True |
test_ge_value_count_of_object_dtype_column_with_mixed_types | () |
Having mixed type values in a object dtype column (e.g., strings and floats)
used to raise a TypeError when sorting value_counts. This test verifies
that the issue is fixed.
|
Having mixed type values in a object dtype column (e.g., strings and floats)
used to raise a TypeError when sorting value_counts. This test verifies
that the issue is fixed.
| def test_ge_value_count_of_object_dtype_column_with_mixed_types():
"""
Having mixed type values in a object dtype column (e.g., strings and floats)
used to raise a TypeError when sorting value_counts. This test verifies
that the issue is fixed.
"""
df = ge.dataset.PandasDataset(
{
"A": [1.5, 0.009, 0.5, "I am a string in an otherwise float column"],
}
)
value_counts = df.get_column_value_counts("A")
assert value_counts["I am a string in an otherwise float column"] == 1 | [
"def",
"test_ge_value_count_of_object_dtype_column_with_mixed_types",
"(",
")",
":",
"df",
"=",
"ge",
".",
"dataset",
".",
"PandasDataset",
"(",
"{",
"\"A\"",
":",
"[",
"1.5",
",",
"0.009",
",",
"0.5",
",",
"\"I am a string in an otherwise float column\"",
"]",
",",
"}",
")",
"value_counts",
"=",
"df",
".",
"get_column_value_counts",
"(",
"\"A\"",
")",
"assert",
"value_counts",
"[",
"\"I am a string in an otherwise float column\"",
"]",
"==",
"1"
] | [
868,
0
] | [
881,
74
] | python | en | ['en', 'error', 'th'] | False |
test_expect_values_to_be_of_type_list | () |
Having lists in a Pandas column used to raise a ValueError when parsing to
see if any rows had missing values. This test verifies that the issue is fixed.
|
Having lists in a Pandas column used to raise a ValueError when parsing to
see if any rows had missing values. This test verifies that the issue is fixed.
| def test_expect_values_to_be_of_type_list():
"""
Having lists in a Pandas column used to raise a ValueError when parsing to
see if any rows had missing values. This test verifies that the issue is fixed.
"""
df = ge.dataset.PandasDataset(
{
"A": [[1, 2], None, [4, 5], 6],
}
)
validation = df.expect_column_values_to_be_of_type("A", "list")
assert not validation.success | [
"def",
"test_expect_values_to_be_of_type_list",
"(",
")",
":",
"df",
"=",
"ge",
".",
"dataset",
".",
"PandasDataset",
"(",
"{",
"\"A\"",
":",
"[",
"[",
"1",
",",
"2",
"]",
",",
"None",
",",
"[",
"4",
",",
"5",
"]",
",",
"6",
"]",
",",
"}",
")",
"validation",
"=",
"df",
".",
"expect_column_values_to_be_of_type",
"(",
"\"A\"",
",",
"\"list\"",
")",
"assert",
"not",
"validation",
".",
"success"
] | [
884,
0
] | [
896,
33
] | python | en | ['en', 'error', 'th'] | False |
test_expect_values_quantiles_to_be_between | () |
Test that quantile bounds set to zero actually get interpreted as such. Zero
used to be interpreted as None (and thus +-inf) and we'd get false negatives.
|
Test that quantile bounds set to zero actually get interpreted as such. Zero
used to be interpreted as None (and thus +-inf) and we'd get false negatives.
| def test_expect_values_quantiles_to_be_between():
"""
Test that quantile bounds set to zero actually get interpreted as such. Zero
used to be interpreted as None (and thus +-inf) and we'd get false negatives.
"""
T = [
([1, 2, 3, 4, 5], [0.5], [[0, 0]], False),
([0, 0, 0, 0, 0], [0.5], [[0, 0]], True),
]
for data, quantiles, value_ranges, success in T:
df = ge.dataset.PandasDataset({"A": data})
validation = df.expect_column_quantile_values_to_be_between(
"A",
{
"quantiles": quantiles,
"value_ranges": value_ranges,
},
)
assert validation.success is success | [
"def",
"test_expect_values_quantiles_to_be_between",
"(",
")",
":",
"T",
"=",
"[",
"(",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
",",
"5",
"]",
",",
"[",
"0.5",
"]",
",",
"[",
"[",
"0",
",",
"0",
"]",
"]",
",",
"False",
")",
",",
"(",
"[",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
",",
"[",
"0.5",
"]",
",",
"[",
"[",
"0",
",",
"0",
"]",
"]",
",",
"True",
")",
",",
"]",
"for",
"data",
",",
"quantiles",
",",
"value_ranges",
",",
"success",
"in",
"T",
":",
"df",
"=",
"ge",
".",
"dataset",
".",
"PandasDataset",
"(",
"{",
"\"A\"",
":",
"data",
"}",
")",
"validation",
"=",
"df",
".",
"expect_column_quantile_values_to_be_between",
"(",
"\"A\"",
",",
"{",
"\"quantiles\"",
":",
"quantiles",
",",
"\"value_ranges\"",
":",
"value_ranges",
",",
"}",
",",
")",
"assert",
"validation",
".",
"success",
"is",
"success"
] | [
899,
0
] | [
919,
44
] | python | en | ['en', 'error', 'th'] | False |
Path.open | (self, *args, **kwargs) | Open the file pointed to by the path, like the :func:`trio.open_file`
function does.
| Open the file pointed to by the path, like the :func:`trio.open_file`
function does. | async def open(self, *args, **kwargs):
"""Open the file pointed to by the path, like the :func:`trio.open_file`
function does.
"""
func = partial(self._wrapped.open, *args, **kwargs)
value = await trio.to_thread.run_sync(func)
return trio.wrap_file(value) | [
"async",
"def",
"open",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"func",
"=",
"partial",
"(",
"self",
".",
"_wrapped",
".",
"open",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"value",
"=",
"await",
"trio",
".",
"to_thread",
".",
"run_sync",
"(",
"func",
")",
"return",
"trio",
".",
"wrap_file",
"(",
"value",
")"
] | [
174,
4
] | [
182,
36
] | python | en | ['en', 'en', 'en'] | True |
declaration_t.__str__ | (self) |
Default __str__ method.
This version just returns the decl_string and the class.
Derived classes may override this method to provide more detailed
information.
A __str__ method for a declaration should always provide enough
information so that it uniquely identifies the declaration and
the user is able to find the declaration in his source code.
|
Default __str__ method. | def __str__(self):
"""
Default __str__ method.
This version just returns the decl_string and the class.
Derived classes may override this method to provide more detailed
information.
A __str__ method for a declaration should always provide enough
information so that it uniquely identifies the declaration and
the user is able to find the declaration in his source code.
"""
name = self.decl_string
if name[:2] == "::":
name = name[2:]
# Append the declaration class
cls = self.__class__.__name__
if cls[-2:] == "_t":
cls = cls[:-2]
cls = cls.replace('_', ' ')
return "%s [%s]" % (name, cls) | [
"def",
"__str__",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"decl_string",
"if",
"name",
"[",
":",
"2",
"]",
"==",
"\"::\"",
":",
"name",
"=",
"name",
"[",
"2",
":",
"]",
"# Append the declaration class",
"cls",
"=",
"self",
".",
"__class__",
".",
"__name__",
"if",
"cls",
"[",
"-",
"2",
":",
"]",
"==",
"\"_t\"",
":",
"cls",
"=",
"cls",
"[",
":",
"-",
"2",
"]",
"cls",
"=",
"cls",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"return",
"\"%s [%s]\"",
"%",
"(",
"name",
",",
"cls",
")"
] | [
42,
4
] | [
65,
38
] | python | en | ['en', 'error', 'th'] | False |
declaration_t._get__cmp__items | (self) |
Implementation detail.
|
Implementation detail. | def _get__cmp__items(self):
"""
Implementation detail.
"""
# Every derived class should implement this method. This method should
# return a list of items, that should be compared.
print(
'_get__cmp__items not implemented for class ',
self.__class__.__name__)
raise NotImplementedError() | [
"def",
"_get__cmp__items",
"(",
"self",
")",
":",
"# Every derived class should implement this method. This method should",
"# return a list of items, that should be compared.",
"print",
"(",
"'_get__cmp__items not implemented for class '",
",",
"self",
".",
"__class__",
".",
"__name__",
")",
"raise",
"NotImplementedError",
"(",
")"
] | [
67,
4
] | [
79,
35
] | python | en | ['en', 'error', 'th'] | False |