Given a list of numbers, return the population standard deviation.
For a population x_1, x_2, ..., x_N with mean M, the standard deviation
is defined as
sqrt( 1/N * [ (x_1 - M)^2 + (x_2 - M)^2 + ... + (x_N - M)^2 ] ) | import math


def pstdev(sample):
    """Given a list of numbers, return the population standard deviation.
    For a population x_1, x_2, ..., x_N with mean M, the standard deviation
    is defined as
    sqrt( 1/N * [ (x_1 - M)^2 + (x_2 - M)^2 + ... + (x_N - M)^2 ] )
    """
    if len(sample) == 0:
        raise ValueError("Cannot calculate the standard deviation of an "
                         "empty list!")
    mean = sum(sample) / float(len(sample))
    inner = 1.0 / len(sample) * (sum((x - mean) ** 2 for x in sample))
    return math.sqrt(inner) |
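
A quick sanity check of the result (a sketch, assuming Python 3.4+ so the standard library's statistics module is available for comparison):

import statistics

# Classic textbook sample: the mean is 5.0 and the population variance is 4.0,
# so the population standard deviation is exactly 2.0.
data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
assert pstdev(data) == 2.0
assert statistics.pstdev(data) == 2.0
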
Hack: runs a command several times, clearing the module cache if we get
an error about header files being modified during the run.
This shouldn't be necessary (the cached PCM files should automatically be
regenerated) but there seems to still be a bug in Clang that we haven't
tracked down yet. | def run_with_module_cache_retry(command_args, module_cache_path, dry_run):
    """Hack: runs a command several times, clearing the module cache if we get
    an error about header files being modified during the run.
    This shouldn't be necessary (the cached PCM files should automatically be
    regenerated) but there seems to still be a bug in Clang that we haven't
    tracked down yet.
    """
    RETRIES = 3
    attempts_stderr = ""
    for r in range(RETRIES):
        status, stdout, stderr = run_command(command_args, dry_run)
        if status == 0:
            break
        if not should_retry_compilation(stderr):
            break
        if module_cache_path:
            shutil.rmtree(module_cache_path, ignore_errors=True)
        # If all retries fail, output information for each instance.
        attempts_stderr += (
            "\n*** Compilation attempt {}/{} failed with modules bugs. "
            "Error output:\n".format(r + 1, RETRIES))
        attempts_stderr += stderr
        stderr = attempts_stderr
    return (status, stdout, stderr) |
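
This wrapper relies on two helpers defined elsewhere in the same script. A minimal sketch of what they might look like (hypothetical signatures and heuristic, shown only to make the retry loop above self-contained):

import subprocess


def run_command(command_args, dry_run):
    # Hypothetical: run the command and return (exit status, stdout, stderr).
    if dry_run:
        return (0, '', '')
    proc = subprocess.Popen(command_args, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    stdout, stderr = proc.communicate()
    return (proc.returncode, stdout, stderr)


def should_retry_compilation(stderr):
    # Hypothetical heuristic: only retry when Clang reports that a header
    # changed while a cached module (PCM) built from it was in use.
    return 'module' in stderr and 'has been modified' in stderr
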
Execute 'adb shell' with the given arguments.
Raise an exception if 'adb shell' returns a non-zero exit code.
Note that this only occurs if communication with the connected device
fails, not if the command run on the device fails. | def shell(args):
    """
    Execute 'adb shell' with the given arguments.
    Raise an exception if 'adb shell' returns a non-zero exit code.
    Note that this only occurs if communication with the connected device
    fails, not if the command run on the device fails.
    """
    return subprocess.check_output(['adb', 'shell'] + args) |
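
The exception behavior described in the docstring comes from subprocess.check_output, which raises CalledProcessError when adb itself exits non-zero. A small usage sketch (assumes a device is attached; the path is illustrative):

from subprocess import CalledProcessError

try:
    listing = shell(['ls', '/data/local/tmp'])
    print(listing)
except CalledProcessError as e:
    # Reached only when 'adb shell' itself fails (e.g. no device attached);
    # a command that fails *on* the device still returns normally.
    print('adb exited with code {}: {}'.format(e.returncode, e.output))
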
Remove all files in the device directory at `path`. | def rmdir(path):
    """Remove all files in the device directory at `path`."""
    shell(['rm', '-rf', '{}/*'.format(path)]) |
Move the files at the given local paths to the path on the device. | def push(local_paths, device_path):
    """Move the files at the given local paths to the path on the device."""
    if isinstance(local_paths, str):
        local_paths = [local_paths]
    try:
        # In recent versions of ADB, push supports --sync, which checksums the
        # files to be transmitted and skips the ones that haven't changed,
        # improving the effective transfer speed.
        return subprocess.check_output(
            ['adb', 'push', '--sync'] + local_paths + [device_path],
            stderr=subprocess.STDOUT).strip()
    except subprocess.CalledProcessError as e:
        if "unrecognized option '--sync'" in e.output:
            return subprocess.check_output(
                ['adb', 'push'] + local_paths + [device_path],
                stderr=subprocess.STDOUT).strip()
        else:
            raise e |
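
Call sites can pass either a single path or a list, since a bare string is normalized to a one-element list first (paths below are purely illustrative):

# Push one shared library, then a batch of test inputs, to the device.
push('/path/to/libswiftCore.so', '/data/local/tmp')
push(['input1.txt', 'input2.txt'], '/data/local/tmp')
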
Reboot the connected Android device, waiting for it to return online. | def reboot():
    """Reboot the connected Android device, waiting for it to return online."""
    subprocess.check_call(['adb', 'reboot'])
    subprocess.check_call(['adb', 'wait-for-device']) |
Run an executable on an Android device.
Push an executable at the given 'executable_path' to an Android device,
then execute that executable on the device, passing any additional
'executable_arguments'. Return 0 if the executable succeeded when run on
device, and 1 otherwise.
This function is not as simple as calling 'adb shell', for two reasons:
1. 'adb shell' can only take input up to a certain length, so it fails for
long executable names or when a large number of arguments is passed to
the executable. This function attempts to limit the size of any string
passed to 'adb shell'.
2. 'adb shell' ignores the exit code of any command it runs. This function
therefore uses its own mechanisms to determine whether the executable
had a successful exit code when run on device. | def execute_on_device(executable_path, executable_arguments):
"""
Run an executable on an Android device.
Push an executable at the given 'executable_path' to an Android device,
then execute that executable on the device, passing any additional
'executable_arguments'. Return 0 if the executable succeeded when run on
device, and 1 otherwise.
This function is not as simple as calling 'adb shell', for two reasons:
1. 'adb shell' can only take input up to a certain length, so it fails for
long executable names or when a large number of arguments is passed to
the executable. This function attempts to limit the size of any string
passed to 'adb shell'.
2. 'adb shell' ignores the exit code of any command it runs. This function
therefore uses its own mechanisms to determine whether the executable
had a successful exit code when run on device.
"""
# We'll be running the executable in a temporary directory in
# /data/local/tmp. `adb shell` has trouble with commands that
# exceed a certain length, so to err on the safe side we only
# use the first 10 characters of the UUID.
uuid_dir = '{}/{}'.format(DEVICE_TEMP_DIR, str(uuid.uuid4())[:10])
shell(['mkdir', '-p', uuid_dir])
# `adb` can only handle commands under a certain length. That's why we
# hide the arguments and piping/status in executable files. However, at
# least one resilience test relies on checking the executable name, so we
# need to use the same name as the one provided.
executable_name = os.path.basename(executable_path)
executable = '{}/{}'.format(uuid_dir, executable_name)
push(executable_path, executable)
child_environment = ['{}="{}"'.format(k.replace(ENV_PREFIX, '', 1), v)
for (k, v) in os.environ.items()
if k.startswith(ENV_PREFIX)]
# The executables are sometimes passed arguments, and sometimes those
# arguments are files that have to be pushed, but also the argument values
# have to be changed to the new path in the Android device.
translated_executable_arguments = []
for executable_argument in executable_arguments:
# Currently we only support arguments that are file paths themselves.
# Things like `--foo=/path/to/file` or directories are not supported.
# Relative paths from the executable to the arguments are not kept.
if os.path.isfile(executable_argument):
final_path = '{}/{}'.format(uuid_dir,
os.path.basename(executable_argument))
push(executable_argument, final_path)
translated_executable_arguments.append(final_path)
else:
translated_executable_arguments.append(executable_argument)
# When running the executable on the device, we need to pass it the same
# arguments, as well as specify the correct LD_LIBRARY_PATH. Save these
# to a file we can easily call multiple times.
executable_with_args = '{}/__executable_with_args'.format(uuid_dir)
_create_executable_on_device(
executable_with_args,
'LD_LIBRARY_PATH={uuid_dir}:{tmp_dir} '
'{child_environment} {executable} {executable_arguments}'.format(
uuid_dir=uuid_dir,
tmp_dir=DEVICE_TEMP_DIR,
child_environment=' '.join(child_environment),
executable=executable,
executable_arguments=' '.join(translated_executable_arguments)))
# Write the output from the test executable to a file named '__stdout', and
# if the test executable succeeds, write 'SUCCEEDED' to a file
# named '__succeeded'. We do this because `adb shell` does not report
# the exit code of the command it executes on the device, so instead we
# check the '__succeeded' file for our string.
executable_stdout = '{}/__stdout'.format(uuid_dir)
succeeded_token = 'SUCCEEDED'
executable_succeeded = '{}/__succeeded'.format(uuid_dir)
executable_piped = '{}/__executable_piped'.format(uuid_dir)
_create_executable_on_device(
executable_piped,
'{executable_with_args} > {executable_stdout} && '
'echo "{succeeded_token}" > {executable_succeeded}'.format(
executable_with_args=executable_with_args,
executable_stdout=executable_stdout,
succeeded_token=succeeded_token,
executable_succeeded=executable_succeeded))
# We've pushed everything we need to the device.
# Now execute the wrapper script.
shell([executable_piped])
# Grab the results of running the executable on device.
stdout = shell(['cat', executable_stdout])
exitcode = shell(['cat', executable_succeeded])
if not exitcode.startswith(succeeded_token):
debug_command = '$ adb shell {}'.format(executable_with_args)
print('Executable exited with a non-zero code on the Android device.\n'
'Device stdout:\n'
'{stdout}\n'
'To debug, run:\n'
'{debug_command}\n'.format(
stdout=stdout,
debug_command=debug_command))
# Exit early so that the output isn't passed to FileCheck, nor are any
# temporary directories removed; this allows the user to re-run
# the executable on the device.
return 1
print(stdout, end='')
shell(['rm', '-rf', uuid_dir])
return 0 |
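
Two things used above are defined elsewhere in the module: the DEVICE_TEMP_DIR and ENV_PREFIX constants, and the _create_executable_on_device helper. A plausible sketch of the latter (an assumption, not the actual implementation) stages the script locally and reuses push/shell so each 'adb shell' invocation stays short:

import os
import tempfile


def _create_executable_on_device(device_path, contents):
    # Hypothetical sketch: write the command line to a local temp file, push
    # it to the device, and mark it executable so it can be invoked by name.
    fd, local_tmp = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as f:
        f.write(contents)
    push(local_tmp, device_path)
    shell(['chmod', '755', device_path])
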
Return an argument parser for this script. | def argument_parser():
    """Return an argument parser for this script."""
    parser = argparse.ArgumentParser(
        description='Convenience script for pushing Swift build products to '
                    'an Android device.')
    parser.add_argument(
        'paths',
        nargs='+',
        help='One or more paths to build products that should be pushed to '
             'the device. If you specify a directory, all files in the '
             'directory that end in ".so" will be pushed to the device.')
    parser.add_argument(
        '-d', '--destination',
        help='The directory on the device the files will be pushed to.',
        default=adb.commands.DEVICE_TEMP_DIR)
    parser.add_argument(
        '-n', '--ndk',
        help='The path to an Android NDK. If specified, the libc++ library '
             'in that NDK will be pushed to the device.',
        default=os.getenv('ANDROID_NDK_HOME', None))
    parser.add_argument(
        '-a', '--destination-arch',
        help='The architecture of the host device. Used to determine the '
             'right library versions to send to the device.',
        choices=['armv7', 'aarch64'],
        default='armv7')
    return parser |
The main entry point for adb_push_built_products.
Parse arguments and kick off the script. Return zero to indicate success.
Raises an exception otherwise. | def main():
    """
    The main entry point for adb_push_built_products.
    Parse arguments and kick off the script. Return zero to indicate success.
    Raises an exception otherwise.
    """
    parser = argument_parser()
    args = parser.parse_args()
    for path in args.paths:
        if os.path.isdir(path):
            full_paths = [
                os.path.join(path, basename)
                for basename in glob.glob(os.path.join(path, '*.so'))]
            _push(full_paths, args.destination)
        else:
            _push(path, args.destination)
    if args.ndk:
        libcpp = os.path.join(args.ndk,
                              'sources',
                              'cxx-stl',
                              'llvm-libc++',
                              'libs',
                              {
                                  'armv7': 'armeabi-v7a',
                                  'aarch64': 'arm64-v8a',
                              }[args.destination_arch],
                              'libc++_shared.so')
        _push(libcpp, args.destination)
    return 0 |
The main entry point for adb_test_runner.
Parse arguments and kick off the script. Return zero to indicate success,
a non-zero integer otherwise. | def main(args=sys.argv):
    """
    The main entry point for adb_test_runner.
    Parse arguments and kick off the script. Return zero to indicate success,
    a non-zero integer otherwise.
    """
    # We don't use argparse, because we need to be able to pass
    # --arbitrary -params --like=this to the executable we're running
    # on device.
    program_name = os.path.basename(args.pop(0))
    if len(args) == 1 and args[0] in ['-h', '--help']:
        print(_help(program_name))
        return 0
    try:
        executable_path, executable_arguments = args[0], args[1:]
    except IndexError:
        print(_usage(program_name))
        print('{}: error: argument "executable_path" is required'.format(
            program_name))
        return 1
    return execute_on_device(executable_path, executable_arguments) |
Given a path to a sib file with canonical sil, attempt to find a perturbed
list of functions given a specific pass that causes the perf pipeline to crash.
| def invoke_function_bug_reducer(args):
    """Given a path to a sib file with canonical sil, attempt to find a
    perturbed list of functions given a specific pass that causes the perf
    pipeline to crash.
    """
    tools = swift_tools.SwiftTools(args.swift_build_dir)
    config = swift_tools.SILToolInvokerConfig(args)
    nm = swift_tools.SILNMInvoker(config, tools)
    input_file = args.input_file
    extra_args = args.extra_args
    sil_opt_invoker = swift_tools.SILOptInvoker(config, tools,
                                                input_file,
                                                extra_args)
    # Make sure that the base case /does/ crash.
    filename = sil_opt_invoker.get_suffixed_filename('base_case')
    result = sil_opt_invoker.invoke_with_passlist(args.pass_list, filename)
    # If we succeed, there is no further work to do.
    if result['exit_code'] == 0:
        print("Success with PassList: %s" % (' '.join(args.pass_list)))
        return
    sil_extract_invoker = swift_tools.SILFuncExtractorInvoker(config,
                                                              tools,
                                                              input_file)
    function_bug_reducer(input_file, nm, sil_opt_invoker, sil_extract_invoker,
                         args.pass_list) |
Add parser arguments for func_bug_reducer | def add_parser_arguments(parser):
    """Add parser arguments for func_bug_reducer"""
    parser.set_defaults(func=invoke_function_bug_reducer)
    parser.add_argument('input_file', help='The input file to optimize')
    parser.add_argument('--module-cache', help='The module cache to use')
    parser.add_argument('--sdk', help='The sdk to pass to sil-func-extractor')
    parser.add_argument('--target',
                        help='The target to pass to sil-func-extractor')
    parser.add_argument('--resource-dir',
                        help='The resource-dir to pass to sil-func-extractor')
    parser.add_argument('--work-dir',
                        help='Working directory to use for temp files',
                        default='bug_reducer')
    parser.add_argument('--module-name',
                        help='The name of the module we are optimizing')
    parser.add_argument('--pass', help='pass to test', dest='pass_list',
                        action='append')
    parser.add_argument('--extra-silopt-arg',
                        help='extra argument to pass to sil-opt',
                        dest='extra_args', action='append') |
Given a path to a sib file with canonical sil, attempt to find a
perturbed list of passes that causes the perf pipeline to crash. | def invoke_pass_bug_reducer(args):
    """Given a path to a sib file with canonical sil, attempt to find a
    perturbed list of passes that causes the perf pipeline to crash.
    """
    tools = swift_tools.SwiftTools(args.swift_build_dir)
    config = swift_tools.SILToolInvokerConfig(args)
    passes = []
    if args.pass_list is None:
        json_data = json.loads(subprocess.check_output(
            [tools.sil_passpipeline_dumper, '-Performance']))
        passes = sum((p[1:] for p in json_data), [])
        passes = ['-' + x[1] for x in passes]
    else:
        passes = ['-' + x for x in args.pass_list]
    extra_args = []
    if args.extra_args is not None:
        extra_args = args.extra_args
    sil_opt_invoker = swift_tools.SILOptInvoker(config, tools,
                                                args.input_file,
                                                extra_args)
    pass_bug_reducer(tools, config, passes, sil_opt_invoker, args.reduce_sil) |
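
The two-step list manipulation above (slice off each pipeline's name, then take each entry's second element) implies a particular shape for the pass-pipeline dumper's JSON. A small illustration with made-up pipeline and pass names, just to show what the slicing produces:

# Hypothetical dumper output: each pipeline is [name, pass, pass, ...], and
# each pass entry's second element is the sil-opt flag spelling of the pass.
json_data = [
    ["Pipeline A", ["PassKindX", "first-pass"], ["PassKindY", "second-pass"]],
    ["Pipeline B", ["PassKindZ", "third-pass"]],
]
passes = sum((p[1:] for p in json_data), [])
passes = ['-' + x[1] for x in passes]
assert passes == ['-first-pass', '-second-pass', '-third-pass']
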
Add parser arguments for opt_bug_reducer | def add_parser_arguments(parser):
    """Add parser arguments for opt_bug_reducer"""
    parser.set_defaults(func=invoke_pass_bug_reducer)
    parser.add_argument('input_file', help='The input file to optimize')
    parser.add_argument('--module-cache', help='The module cache to use')
    parser.add_argument('--sdk', help='The sdk to pass to sil-opt')
    parser.add_argument('--target', help='The target to pass to sil-opt')
    parser.add_argument('--resource-dir',
                        help='The resource-dir to pass to sil-opt')
    parser.add_argument('--work-dir',
                        help='Working directory to use for temp files',
                        default='bug_reducer')
    parser.add_argument('--module-name',
                        help='The name of the module we are optimizing')
    parser.add_argument('--pass', help='pass to test', dest='pass_list',
                        action='append')
    parser.add_argument('--extra-arg',
                        help='extra argument to pass to sil-opt',
                        dest='extra_args', action='append')
    parser.add_argument('--reduce-sil',
                        help='After finding the relevant passes, try to '
                             'reduce the SIL by eliminating functions, '
                             'blocks, etc',
                        action='store_true') |
Given a path to a sib file with canonical sil, attempt to find a perturbed
list of passes that causes the perf pipeline to crash. | def random_bug_finder(args):
    """Given a path to a sib file with canonical sil, attempt to find a
    perturbed list of passes that causes the perf pipeline to crash.
    """
    tools = swift_tools.SwiftTools(args.swift_build_dir)
    config = swift_tools.SILToolInvokerConfig(args)
    json_data = json.loads(subprocess.check_output(
        [tools.sil_passpipeline_dumper, '-Performance']))
    passes = sum((p[1:] for p in json_data), [])
    passes = ['-' + x[1] for x in passes]
    extra_args = []
    if args.extra_args is not None:
        extra_args.extend(args.extra_args)
    sil_opt_invoker = swift_tools.SILOptInvoker(config, tools,
                                                args.input_file,
                                                extra_args)
    # Make sure that the base case /does/ crash.
    max_count = args.max_count
    for count in range(max_count):
        print("Running round %i/%i" % (count, max_count))
        random.shuffle(passes)
        filename = sil_opt_invoker.get_suffixed_filename(str(count))
        result = sil_opt_invoker.invoke_with_passlist(passes, filename)
        if result['exit_code'] == 0:
            print("*** Success with PassList: %s" % (' '.join(passes)))
            continue
        cmdline = sil_opt_invoker.cmdline_with_passlist(passes)
        print("*** Fail with PassList: %s" % (' '.join(passes)))
        print("*** Output File: %s" % filename)
        print("*** Reproducing commandline: %s" % ' '.join(cmdline))
        print("*** Trying to reduce pass list and function list")
        result = opt_bug_reducer.pass_bug_reducer(tools, config, passes,
                                                  sil_opt_invoker, True)
        if not result:
            sys.exit(-1) |
Add parser arguments for random_bug_reducer | def add_parser_arguments(parser):
    """Add parser arguments for random_bug_reducer"""
    parser.set_defaults(func=random_bug_finder)
    parser.add_argument('input_file', help='The input file to optimize')
    parser.add_argument('--module-cache', help='The module cache to use')
    parser.add_argument('--sdk', help='The sdk to pass to sil-opt')
    parser.add_argument('--target', help='The target to pass to sil-opt')
    parser.add_argument('--resource-dir',
                        help='The resource-dir to pass to sil-opt')
    parser.add_argument('--work-dir',
                        help='Working directory to use for temp files',
                        default='bug_reducer')
    parser.add_argument('--module-name',
                        help='The name of the module we are optimizing')
    parser.add_argument('--max-count',
                        help='Maximum number of permutations to try before'
                             ' exiting',
                        default=100)
    parser.add_argument('--extra-silopt-arg',
                        help='extra argument to pass to sil-opt',
                        dest='extra_args', action='append') |
Decorator that caches result of a function call.
NOTE: This decorator does not play nice with methods as the created cache
is not instance-local, rather it lives in the decorator.
NOTE: When running in Python 3.2 or newer this decorator is replaced with
the standard `functools.lru_cache` using a maxsize of None. | def cache(func):
    """Decorator that caches result of a function call.
    NOTE: This decorator does not play nice with methods as the created cache
    is not instance-local, rather it lives in the decorator.
    NOTE: When running in Python 3.2 or newer this decorator is replaced with
    the standard `functools.lru_cache` using a maxsize of None.
    """
    # Use the standard functools.lru_cache decorator for Python 3.2 and newer.
    if hasattr(functools, 'lru_cache'):
        return functools.lru_cache(maxsize=None)(func)

    # Otherwise use a naive caching strategy.
    _cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = tuple(args) + tuple(kwargs.items())
        if key not in _cache:
            result = func(*args, **kwargs)
            _cache[key] = result
            return result
        return _cache[key]
    return wrapper |
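
Usage looks the same on either code path. Note that the naive fallback keys on `tuple(args) + tuple(kwargs.items())`, so like `lru_cache` it requires hashable arguments (and it is additionally sensitive to keyword-argument order):

@cache
def fib(n):
    # Deliberately naive; memoization turns the exponential recursion linear.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

assert fib(30) == 832040
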
Decorator that replaces the wrapped method with the result after the
first call. Used to wrap property-like methods with no arguments. | def reify(func):
    """Decorator that replaces the wrapped method with the result after the
    first call. Used to wrap property-like methods with no arguments.
    """
    class wrapper(object):
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            result = func(obj)
            setattr(obj, func.__name__, result)
            return result

    return functools.update_wrapper(wrapper(), func) |
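
A short illustration of the descriptor trick: the first access computes the value and stores it in the instance dictionary, which then shadows the (non-data) descriptor on every later access:

class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    @reify
    def area(self):
        return 3.14159 * self.radius ** 2

c = Circle(2.0)
first = c.area               # invokes __get__, computes and caches the value
assert 'area' in c.__dict__  # now a plain instance attribute
assert c.area == first       # no recomputation on subsequent accesses
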
Generates a standardized __repr__ implementation for the decorated class
using the provided attributes and the class name. | def generate_repr(*attrs):
    """Generates a standardized __repr__ implementation for the decorated class
    using the provided attributes and the class name.
    """
    def _repr(self):
        args = []
        for attr in attrs:
            value = getattr(self, attr)
            args.append('{}={}'.format(attr, repr(value)))
        return '{}({})'.format(type(self).__name__, ', '.join(args))

    def decorator(cls):
        setattr(cls, '__repr__', _repr)
        return cls

    return decorator |
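
For example, the generated __repr__ lists the requested attributes in order, using repr() for each value:

@generate_repr('name', 'age')
class Person(object):
    def __init__(self, name, age):
        self.name = name
        self.age = age

assert repr(Person('Ada', 36)) == "Person(name='Ada', age=36)"
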
Returns true if the given llvm_path is a valid LLVM checkout, false otherwise.
NOTE: This is a very naive validation, checking only for the existence of a few
known files. | def _is_llvm_checkout(llvm_path):
    """Returns true if the given llvm_path is a valid LLVM checkout, false otherwise.
    NOTE: This is a very naive validation, checking only for the existence of a few
    known files.
    """
    if not os.path.exists(os.path.join(llvm_path, "tools")):
        return False
    if not os.path.exists(os.path.join(llvm_path, "CMakeLists.txt")):
        return False
    return True |
Returns true if the given swift_path is a valid Swift checkout, false otherwise.
NOTE: This is a very naive validation, checking only for the existence of a few
known files. | def _is_swift_checkout(swift_path):
    """Returns true if the given swift_path is a valid Swift checkout, false otherwise.
    NOTE: This is a very naive validation, checking only for the existence of a few
    known files.
    """
    if not os.path.exists(os.path.join(swift_path, "utils")):
        return False
    if not os.path.exists(os.path.join(swift_path, "CMakeLists.txt")):
        return False
    return True |
Returns the Swift source root or None if one cannot be determined.
Users are able to manually override the source root by setting the SWIFT_SOURCE_ROOT
environment variable. If that cannot be found then this function will check the
directory structure to infer if we are building as a standalone Swift build or if we
are building in the unified LLVM.
Building standalone means Swift will be checked out as a peer of LLVM and the
enclosing directory is the source root.
source-root/
|- llvm/
|- swift/
| ...
However the unified case means Swift will be checked out in the llvm/tools
directory, which means the directory containing LLVM is the source root.
source-root/
|- llvm/
| |- tools/
| | |- swift/
| | | ...
| | ...
| ...
In the case that this function is called with an invalid Swift checkout it returns
None as well.
FIXME: What about the new llvm-project monorepo? | def _get_swift_source_root(swift_path, env=None):
    """Returns the Swift source root or None if one cannot be determined.

    Users are able to manually override the source root by setting the
    SWIFT_SOURCE_ROOT environment variable. If that cannot be found then this
    function will check the directory structure to infer if we are building
    as a standalone Swift build or if we are building in the unified LLVM.

    Building standalone means Swift will be checked out as a peer of LLVM and
    the enclosing directory is the source root.

        source-root/
        |- llvm/
        |- swift/
        | ...

    However the unified case means Swift will be checked out in the llvm/tools
    directory, which means the directory containing LLVM is the source root.

        source-root/
        |- llvm/
        |   |- tools/
        |   |   |- swift/
        |   |   |   ...
        |   |   ...
        |   ...

    In the case that this function is called with an invalid Swift checkout it
    returns None as well.

    FIXME: What about the new llvm-project monorepo?
    """
    env = env or {}

    # Check the environment first.
    if "SWIFT_SOURCE_ROOT" in env:
        return env["SWIFT_SOURCE_ROOT"]

    # Assert we are in a valid Swift checkout.
    if not _is_swift_checkout(swift_path):
        return None

    source_root = os.path.dirname(swift_path)

    # Check if Swift is checked out as part of a unified build.
    if os.path.basename(source_root) != "tools":
        return source_root

    llvm_path = os.path.dirname(source_root)
    if not _is_llvm_checkout(llvm_path):
        return source_root

    # Return the directory containing LLVM.
    return os.path.dirname(llvm_path) |
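
A small self-check of the two layouts the docstring describes (illustration only; it fabricates minimal checkout markers in a temporary directory and relies on the naive _is_*_checkout tests above):

import os
import tempfile

root = tempfile.mkdtemp()

# Standalone layout: llvm and swift are peers under the source root.
standalone = os.path.join(root, 'standalone')
os.makedirs(os.path.join(standalone, 'swift', 'utils'))
open(os.path.join(standalone, 'swift', 'CMakeLists.txt'), 'w').close()
assert _get_swift_source_root(os.path.join(standalone, 'swift')) == standalone

# Unified layout: swift lives under llvm/tools, so the parent of llvm wins.
unified = os.path.join(root, 'unified')
swift_dir = os.path.join(unified, 'llvm', 'tools', 'swift')
os.makedirs(os.path.join(swift_dir, 'utils'))
open(os.path.join(unified, 'llvm', 'CMakeLists.txt'), 'w').close()
open(os.path.join(swift_dir, 'CMakeLists.txt'), 'w').close()
assert _get_swift_source_root(swift_dir) == unified

# The environment override always wins.
assert _get_swift_source_root(swift_dir,
                              env={'SWIFT_SOURCE_ROOT': '/custom'}) == '/custom'
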
Returns the Swift build root.
Users are able to manually override the build root by setting the SWIFT_BUILD_ROOT
environment variable. If that cannot be found then this function returns the path
to a directory named "build" in the given source root. | def _get_swift_build_root(source_root, env=None):
"""Returns the Swift build root.
Users are able to manually override the build root by setting the SWIFT_BUILD_ROOT
environment variable. If that cannot be found then this function returns the path
to a directory named "build" in the given source root.
"""
env = env or {}
if "SWIFT_BUILD_ROOT" in env:
return env["SWIFT_BUILD_ROOT"]
return os.path.join(source_root, "build") |
Returns the Swift repo name or None if it cannot be determined.
Users are able to manually override the repo name by setting the SWIFT_REPO_NAME
environment variable. If that cannot be found then this function returns the name
of the given swift path or None if it is not a valid Swift checkout. | def _get_swift_repo_name(swift_path, env=None):
    """Returns the Swift repo name or None if it cannot be determined.
    Users are able to manually override the repo name by setting the
    SWIFT_REPO_NAME environment variable. If that cannot be found then this
    function returns the name of the given swift path or None if it is not a
    valid Swift checkout.
    """
    env = env or {}
    if "SWIFT_REPO_NAME" in env:
        return env["SWIFT_REPO_NAME"]
    if not _is_swift_checkout(swift_path):
        return None
    return os.path.basename(swift_path) |
Returns the system memory as an int. None if the system memory cannot
be determined.
TODO: Support Linux and Windows platforms. | def _system_memory():
    """Returns the system memory as an int, or None if the system memory
    cannot be determined.
    TODO: Support Linux and Windows platforms.
    """
    # platform.system() reports 'Darwin' on macOS; platform.platform() would
    # include version and architecture details and never match this string.
    if platform.system() == 'Darwin':
        try:
            output = shell.check_output(['sysctl', 'hw.memsize']).strip()
            return int(output.split(' ')[1])
        except shell.CalledProcessError:
            return None
    return None |
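
The TODO above asks for Linux support; one way it could be added (a hedged sketch, not part of the build script) is to read MemTotal from /proc/meminfo, which the kernel reports in kilobytes:

def _linux_system_memory():
    # Hypothetical helper: parse 'MemTotal: 16384256 kB' and convert to bytes.
    try:
        with open('/proc/meminfo') as f:
            for line in f:
                if line.startswith('MemTotal:'):
                    return int(line.split()[1]) * 1024
    except (IOError, OSError):
        return None
    return None
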
Use the formula (GB Memory - 3)/6.0GB to get the number of parallel
link threads we can support. This gives the OS 3 GB of room to work with.
This is a bit conservative, but I have found that this heuristic prevents
me from swapping on my test machine. | def _default_llvm_lto_link_jobs():
    """Use the formula (GB Memory - 3)/6.0GB to get the number of parallel
    link threads we can support. This gives the OS 3 GB of room to work with.
    This is a bit conservative, but I have found that this heuristic prevents
    me from swapping on my test machine.
    """
    memory = _system_memory()
    if memory is None:
        return None
    return int((memory / 1000000000.0 - 3.0) / 6.0) |
Use the formula (GB Memory - 3)/8.0GB to get the number of parallel
link threads we can support. This gives the OS 3 GB of room to work with.
This is a bit conservative, but I have found that this heuristic prevents
me from swapping on my test machine. | def _default_swift_lto_link_jobs():
    """Use the formula (GB Memory - 3)/8.0GB to get the number of parallel
    link threads we can support. This gives the OS 3 GB of room to work with.
    This is a bit conservative, but I have found that this heuristic prevents
    me from swapping on my test machine.
    """
    memory = _system_memory()
    if memory is None:
        return None
    return int((memory / 1000000000.0 - 3.0) / 8.0) |
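
For concreteness, on a machine reporting 64 GB of RAM (the code treats a GB as 10**9 bytes) the two heuristics allow 10 LLVM link jobs and 7 Swift link jobs:

memory = 64 * 10**9
assert int((memory / 1000000000.0 - 3.0) / 6.0) == 10  # llvm LTO link jobs
assert int((memory / 1000000000.0 - 3.0) / 8.0) == 7   # swift LTO link jobs
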
Convenience function for getting the default llvm install components for
platforms. | def llvm_install_components():
    """Convenience function for getting the default llvm install components for
    platforms.
    """
    components = ['llvm-ar', 'llvm-cov', 'llvm-profdata', 'IndexStore', 'clang',
                  'clang-resource-headers', 'compiler-rt', 'clangd', 'LTO',
                  'lld']
    if os.sys.platform == 'darwin':
        components.extend(['dsymutil'])
    return ';'.join(components) |
Preprocess argument namespace to apply default behaviors.
| def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
elif args.cmake_generator == 'Xcode':
# Building with Xcode is deprecated.
args.skip_build = True
args.build_early_swift_driver = False
args.build_early_swiftsyntax = False
# Set the default build variant.
if args.build_variant is None:
args.build_variant = (
'MinSizeRel' if args.cmake_generator == 'Xcode' else 'Debug'
)
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
if args.libxml2_build_variant is None:
args.libxml2_build_variant = args.build_variant
if args.zlib_build_variant is None:
args.zlib_build_variant = args.build_variant
if args.curl_build_variant is None:
args.curl_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
if args.xros_all:
raise ValueError('error: --xros-all is unavailable in open-source '
'Swift.\nUse --xros to skip xrOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.xros or not args.build_xros:
args.build_xros_device = False
args.build_xros_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# By default use the same number of lit workers as build jobs.
if not args.lit_jobs:
args.lit_jobs = args.build_jobs
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_xros = False
args.test_android = False
args.test_cmark = False
args.test_swiftpm = False
args.test_swift_driver = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftformat = False
args.test_toolchainbenchmarks = False
args.test_swiftdocc = False
# --test implies --test-early-swift-driver
# (unless explicitly skipped with `--skip-test-early-swift-driver`)
if args.test and (args.build_early_swift_driver and
args.test_early_swift_driver is None):
args.test_early_swift_driver = True
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# --tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-test-xros is merely a shorthand for host and simulator
# --tests.
if not args.test_xros:
args.test_xros_host = False
args.test_xros_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_xros_device:
args.test_xros_host = False
if not args.build_xros_simulator:
args.test_xros_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_xros_host = False
args.test_android_host = False |
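
The body above is one long cascade of "fill in a project-specific setting from the top-level one unless the user set it explicitly". The core pattern, distilled to a runnable illustration (not the real build-script entry point, which operates on the full namespace produced by the parser below):

import argparse

# None means "the user did not pass the flag", so per-project settings
# inherit from the top-level ones only in that case.
args = argparse.Namespace(build_variant=None, llvm_build_variant=None,
                          assertions=None, llvm_assertions=None)

if args.build_variant is None:
    args.build_variant = 'Debug'
if args.llvm_build_variant is None:
    args.llvm_build_variant = args.build_variant
if args.assertions is None:
    args.assertions = True
if args.llvm_assertions is None:
    args.llvm_assertions = args.assertions

assert args.llvm_build_variant == 'Debug' and args.llvm_assertions is True
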
Return a configured argument parser. | def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option(['--reconfigure'], store_true,
help="Reconfigure all projects as we build")
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require a '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--xros', toggle_true,
help='also build for xrOS, but disallow tests that require an '
'xrOS device')
option('--xros-all', toggle_true('xros_all'),
help='also build for Apple xrOS, and allow all Apple xrOS '
'tests')
option('--skip-xros', store_false('xros'),
help='set to skip everything xrOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--swift-disable-dead-stripping', toggle_true,
help="Turn off Darwin-specific dead stripping for Swift host tools")
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--relocate-xdg-cache-home-under-build-subdir',
store_true,
help='relocate $XDG_CACHE_HOME to the same location '
'where build products will be placed; '
'this supports having multiple runs for different branches '
'in CI bots for Linux')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option(['--lit-jobs'], store_int('lit_jobs'),
help='the number of workers to use when testing with lit')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--native-swift-tools-path', store_path,
help='the path to a directory that contains prebuilt Swift tools '
'that are executable on the host platform')
option('--native-clang-tools-path', store_path,
help='the path to a directory that contains prebuilt Clang tools '
'that are executable on the host platform')
option('--native-llvm-tools-path', store_path,
help='the path to a directory that contains prebuilt LLVM tools '
'that are executable on the host platform')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--sccache', toggle_true,
default=os.environ.get('SWIFT_USE_SCCACHE') == '1',
help='use sccache')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--swift-enable-backtracing', toggle_true,
default=True,
help='enable backtracing support')
option('--swift-runtime-fixed-backtracer-path', store,
help='if set, provide a fixed path for the Swift backtracer')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--darwin-deployment-version-xros', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_XROS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for xrOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--swift-profile-instr-use', store_path,
help='profile file to use for clang PGO while building swift')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--swift-tools-ld64-lto-codegen-only-for-supporting-targets',
toggle_true,
default=False,
help='When building ThinLTO using ld64 on Darwin, controls whether '
'to opt out of LLVM IR optimizations when linking targets that '
'will get little benefit from it (e.g. tools for '
'bootstrapping or debugging Swift)')
option('--dsymutil-jobs', store_int,
default=defaults.DSYMUTIL_JOBS,
metavar='COUNT',
help='the maximum number of parallel dsymutil jobs to use when '
'extracting symbols. Tweak with caution, since dsymutil '
'is memory intensive.')
option('--extra-dsymutil-args', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to dsymutil when extracting '
'symbols, in the form of comma separated options '
'like "--verbose,--verify-dwarf=none". Can '
'be called multiple times to add multiple such options.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--color-in-tests', toggle_true, default=True,
help='Enable color output in lit tests')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semi-colon split list of llvm components to install')
option('--bootstrapping', store('bootstrapping_mode'),
choices=['off', 'hosttools', 'bootstrapping', 'bootstrapping-with-hostlibs'],
help='The bootstrapping build mode for swift compiler modules. '
'Available modes: `off`, `hosttools`, `bootstrapping`, '
'`bootstrapping-with-hostlibs`, `crosscompile`, and '
'`crosscompile-with-hostlibs`')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--infer-cross-compile-hosts-on-darwin', toggle_true,
help="When building on Darwin, automatically populate cross-compile-hosts "
"based on the architecture build-script is running on. "
"Has precedence over cross-compile-hosts")
option('--cross-compile-deps-path', store_path,
help='The path to a directory that contains prebuilt cross-compiled '
'library dependencies of the corelibs and other Swift repos, '
'such as the libcurl dependency of FoundationNetworking')
option('--cross-compile-append-host-target-to-destdir', toggle_true,
default=True,
help="Append each cross-compilation host target's name as a subdirectory "
"for each cross-compiled toolchain's destdir, useful when building "
"multiple toolchains and can be disabled if only cross-compiling one.")
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
option('--swift-freestanding-is-darwin', toggle_true,
help='True if the freestanding platform is a Darwin one.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', toggle_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], toggle_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], toggle_true('build_llbuild'),
help='build llbuild')
option('--install-llvm', toggle_true,
help='install llvm')
option(['--install-back-deploy-concurrency'],
toggle_true('install_backdeployconcurrency'),
help='install back-deployment support libraries for concurrency')
option(['--libcxx'], toggle_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], toggle_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skip-early-swiftsyntax'],
toggle_false('build_early_swiftsyntax'),
help='skip building early SwiftSyntax')
option(['--skstresstester'], toggle_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], toggle_true('build_swiftformat'),
help='build swift-format')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--swiftdocc'], toggle_true('build_swiftdocc'),
help='build Swift DocC')
option(['--skip-early-swift-driver'], toggle_false('build_early_swift_driver'),
help='skip building the early swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--sourcekit-lsp-lint',
toggle_true('sourcekitlsp_lint'),
help='verify that sourcekit-lsp Source code is formatted correctly')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree ' +
'match the ones that would be generated from current main')
option('--swiftsyntax-enable-test-fuzzing',
toggle_true('swiftsyntax_enable_test_fuzzing'),
help='set to modify test cases in SwiftParserTest to check for ' +
'round-trip failures and assertion failures')
option('--swiftsyntax-enable-rawsyntax-validation',
toggle_true('swiftsyntax_enable_rawsyntax_validation'),
help='set to validate that RawSyntax layout nodes contain children of ' +
'the expected types and that RawSyntax tokens have the expected ' +
'token kinds')
option('--swiftsyntax-lint',
toggle_true('swiftsyntax_lint'),
help='verify that swift-syntax Source code is formatted correctly')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-swiftformat'], toggle_true('install_swiftformat'),
help='install swift-format')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftdocc'], toggle_true('install_swiftdocc'),
help='install Swift DocC')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option(['--build-minimal-stdlib'], toggle_true('build_minimalstdlib'),
help='build the \'minimal\' freestanding stdlib variant into a '
'separate build directory ')
option(['--build-wasm-stdlib'], toggle_true('build_wasmstdlib'),
help='build the stdlib for WebAssembly target into a '
'separate build directory')
option(['--wasmkit'], toggle_true('build_wasmkit'),
help='build WasmKit')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--static-libxml2', toggle_true('build_libxml2'), default=False,
help='build static libxml2')
option('--static-zlib', toggle_true('build_zlib'), default=False,
help='build static zlib')
option('--static-curl', toggle_true('build_curl'), default=False,
help='build static curl libraries')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
toggle_true('install_playgroundsupport'),
help='install playground support')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-lld'], toggle_true('build_lld'),
help='build lld as part of llvm')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
option('--skip-build-compiler-rt',
toggle_false('build_compiler_rt'),
default=True,
help='skip building compiler-rt as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option(['--clean-install-destdir'], store_true,
help='Clean the install destroot before building.')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
option('--darwin-symroot-path-filters', append,
type=argparse.ShellSplitType(),
help='Space separated list of patterns used to match '
'a subset of files to generate symbols for. '
'Only supported on Darwin. Can be called multiple times '
'to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
option(['--min-size-release'], store('build_variant'),
const='MinSizeRel',
help='build the MinSizeRel variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
                'SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
option('--debug-libxml2', store('libxml2_build_variant'),
const='Debug',
help='build the Debug variant of libxml2')
option('--debug-zlib', store('zlib_build_variant'),
const='Debug',
help='build the Debug variant of zlib')
option('--debug-curl', store('curl_build_variant'),
const='Debug',
help='build the Debug variant of libcurl')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
           help='run the test suite in optimize none with implicit dynamic '
'mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
           help='Only run executable tests. Has no effect unless --host-test '
                'is also enabled')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option('--build-swift-stdlib-static-print', toggle_true,
help='Build constant-folding print() support')
option('--build-swift-stdlib-unicode-data', toggle_true,
default=True,
           help='Include Unicode data in the standard library. '
'Note: required for full String functionality')
option('--build-swift-clang-overlays', toggle_true,
default=True,
help='Build Swift overlays for the clang builtin modules')
option('--build-swift-remote-mirror', toggle_true,
default=True,
help='Build Remote Mirror')
option('--build-swift-libexec', toggle_true,
default=True,
help='build auxiliary executables')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-xros', toggle_false('build_xros'),
help='skip building Swift stdlibs for xrOS')
option('--skip-build-xros-device', toggle_false('build_xros_device'),
help='skip building Swift stdlibs for xrOS devices '
'(i.e. build simulators only)')
option('--skip-build-xros-simulator',
toggle_false('build_xros_simulator'),
help='skip building Swift stdlibs for xrOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
           help='build external benchmarks')
option('--build-swift-private-stdlib', toggle_true,
default=True,
help='build the private part of the Standard Library. '
'This can be useful to reduce build times when e.g. '
'tests do not need to run')
option('--build-toolchain-only', toggle_true,
help='only build the necessary tools to build an external toolchain')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-watchos-32bit-simulator',
toggle_false('test_watchos_32bit_simulator'),
default=False,
help='skip testing watchOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
           help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-xros',
toggle_false('test_xros'),
help='skip testing all xrOS targets. Equivalent to specifying both '
'--skip-test-xros-simulator and --skip-test-xros-host')
option('--skip-test-xros-simulator',
toggle_false('test_xros_simulator'),
help='skip testing xrOS simulator targets')
option('--skip-test-xros-host',
toggle_false('test_xros_host'),
help='skip testing xrOS device targets on the host machine')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-clean-libdispatch', toggle_false('clean_libdispatch'),
help='skip cleaning up libdispatch')
option('--skip-clean-foundation', toggle_false('clean_foundation'),
help='skip cleaning up foundation')
option('--skip-clean-xctest', toggle_false('clean_xctest'),
help='skip cleaning up xctest')
option('--skip-clean-llbuild', toggle_false('clean_llbuild'),
help='skip cleaning up llbuild')
option('--clean-early-swift-driver', toggle_true('clean_early_swift_driver'),
help='Clean up the early SwiftDriver')
option('--skip-test-early-swift-driver',
store('test_early_swift_driver', const=False),
           help='skip testing the early SwiftDriver against the host toolchain')
option('--skip-clean-swiftpm', toggle_false('clean_swiftpm'),
help='skip cleaning up swiftpm')
option('--skip-clean-swift-driver', toggle_false('clean_swift_driver'),
help='skip cleaning up Swift driver')
option('--skip-test-cmark', toggle_false('test_cmark'),
default=False,
help='skip testing cmark')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
option('--skip-test-swiftdocc', toggle_false('test_swiftdocc'),
help='skip testing swift-docc')
option('--skip-test-wasm-stdlib', toggle_false('test_wasmstdlib'),
help='skip testing stdlib for WebAssembly')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-enable-modules', toggle_true('llvm_enable_modules'),
help='enable building llvm using modules')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips;RISCV;WebAssembly',
help='LLVM target generators to build')
option('--llvm-ninja-targets', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'instead of the default ones. Only supported when using '
'ninja to build. Can be called multiple times '
'to add multiple such options.')
option('--llvm-ninja-targets-for-cross-compile-hosts', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'in cross compile hosts instead of the ones specified in '
'llvm-ninja-targets (or the default ones). '
'Can be called multiple times '
'to add multiple such options.')
option('--no-llvm-include-tests', toggle_false('llvm_include_tests'),
help='do not generate testing targets for LLVM')
option('--llvm-cmake-options', append,
type=argparse.ShellSplitType(),
help='CMake options used for llvm in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64', 'x86_64'],
default='armv7',
help='The target architecture when building for Android. '
'Currently, only armv7, aarch64, and x86_64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming.')
option('--enable-experimental-concurrency', toggle_true, default=True,
help='Enable experimental Swift concurrency model.')
option('--enable-experimental-cxx-interop', toggle_true,
default=True,
help='Enable experimental C++ interop.')
option('--enable-cxx-interop-swift-bridging-header', toggle_true,
default=True,
help='Ship the <swift/bridging> header for C++ interop')
option('--enable-experimental-distributed', toggle_true,
default=True,
help='Enable experimental Swift distributed actors.')
option('--enable-experimental-nonescapable-types', toggle_true,
default=False,
help='Enable experimental NonescapableTypes.')
option('--enable-experimental-string-processing', toggle_true,
default=True,
help='Enable experimental Swift string processing.')
option('--enable-experimental-observation', toggle_true,
default=True,
help='Enable experimental Swift observation.')
option('--enable-synchronization', toggle_true,
default=True,
help='Enable Swift Synchronization.')
option('--enable-experimental-parser-validation', toggle_true,
default=False,
help='Enable experimental Swift Parser validation by default.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--build-llvm', toggle_true('_build_llvm'),
default=True,
help='build llvm and clang')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
option('--skip-build-libxml2', toggle_false('build_libxml2'),
help='skip building libxml2')
option('--skip-build-zlib', toggle_false('build_zlib'),
help='skip building zlib')
option('--skip-build-curl', toggle_false('build_curl'),
help='skip building curl')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build() |
Migrate usages of the now deprecated `--swift-sdks` option to the new
`--stdlib-deployment-targets` option, converting Swift SDKs to the
corresponding targets.
This function is a stop-gap to replacing all instances of `--swift-sdks`. | def migrate_swift_sdks(args):
"""Migrate usages of the now deprecated `--swift-sdks` option to the new
`--stdlib-deployment-targets` option, converting Swift SDKs to the
corresponding targets.
This function is a stop-gap to replacing all instances of `--swift-sdks`.
"""
def _flatten(iterable):
return itertools.chain.from_iterable(iterable)
def _swift_sdk_to_stdlib_targets(sdk):
targets = StdlibDeploymentTarget.get_migrated_targets_for_sdk(sdk)
if targets is None:
raise UnknownSDKError(sdk)
return targets
def _migrate_swift_sdks_arg(arg):
if not arg.startswith('--swift-sdks'):
return arg
sdks = arg.split('=')[1]
sdk_list = [] if sdks == '' else sdks.split(';')
targets = _flatten(map(_swift_sdk_to_stdlib_targets, sdk_list))
target_names = [target.name for target in targets]
return '--stdlib-deployment-targets={}'.format(' '.join(target_names))
return list(map(_migrate_swift_sdks_arg, args)) |
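# Hedged usage sketch (not part of the source): how the migration rewrites an
# argument list. The exact stdlib target names come from
# StdlibDeploymentTarget.get_migrated_targets_for_sdk, so the output shown in
# the trailing comment is only illustrative.
args = ['--swift-sdks=OSX;IOS', '--verbose-build']
migrated = migrate_swift_sdks(args)
# migrated might look like:
#   ['--stdlib-deployment-targets=macosx-x86_64 iphoneos-arm64 ...',
#    '--verbose-build']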
These arguments are only listed in the driver arguments to stop argparse
from auto expanding arguments like --install-swift to the known argument
--install-swiftevolve. Remove them from args and add them to unknown_args
again. | def _process_disambiguation_arguments(args, unknown_args):
"""These arguments are only listed in the driver arguments to stop argparse
from auto expanding arguments like --install-swift to the known argument
--install-swiftevolve. Remove them from args and add them to unknown_args
again.
"""
if hasattr(args, 'impl_skip_test_swift'):
if args.impl_skip_test_swift:
unknown_args.append('--skip-test-swift')
del args.impl_skip_test_swift
if hasattr(args, 'impl_install_swift'):
if args.impl_install_swift:
unknown_args.append('--install-swift')
del args.impl_install_swift
return args, unknown_args |
Parses a given argument list with the given argparse.ArgumentParser.
Return a processed arguments object. Any unknown arguments are stored in
`build_script_impl_args` attribute as a list. Ignores '--' to be compatible
with old style argument list. | def parse_args(parser, args, namespace=None):
"""Parses a given argument list with the given argparse.ArgumentParser.
Return a processed arguments object. Any unknown arguments are stored in
`build_script_impl_args` attribute as a list. Ignores '--' to be compatible
with old style argument list.
"""
args = [arg for arg in args if arg != '--']
args, unknown_args = parser.parse_known_args(args, namespace)
args, unknown_args = _process_disambiguation_arguments(args, unknown_args)
args.build_script_impl_args = unknown_args
return args |
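# Usage sketch: known flags are parsed normally while unknown flags survive in
# `build_script_impl_args`. The `create_argument_parser` name is an assumption
# for the builder defined above.
parser = create_argument_parser()
parsed = parse_args(parser, ['--release', '--', '--some-impl-only-flag'])
assert parsed.build_variant == 'Release'
assert parsed.build_script_impl_args == ['--some-impl-only-flag']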
Check whether given argv are all known arguments for
`build-script-impl`.
Raises a ValueError if any invalid argument is found. Return nothing
otherwise. | def check_impl_args(build_script_impl, args):
"""Check whether given argv are all known arguments for
`build-script-impl`.
Raises a ValueError if any invalid argument is found. Return nothing
otherwise.
"""
pipe = subprocess.Popen(
[build_script_impl, '--check-args-only=1'] + args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, err = pipe.communicate()
if pipe.returncode != 0:
raise ValueError(str(err.splitlines()[0].decode())) |
Decorator used to catch and rethrow configparser's
DuplicateOptionError. | def _catch_duplicate_option_error(func):
"""Decorator used to catch and rethrowing configparser's
DuplicateOptionError.
"""
if not hasattr(configparser, 'DuplicateOptionError'):
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except configparser.DuplicateOptionError as e:
preset_name = _remove_prefix(e.section, _PRESET_PREFIX).strip()
raise DuplicateOptionError(preset_name, e.option)
return wrapper |
Decorator used to catch and rethrow configparser's
DuplicateSectionError. | def _catch_duplicate_section_error(func):
"""Decorator used to catch and rethrowing configparser's
DuplicateSectionError.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except configparser.DuplicateSectionError as e:
preset_name = _remove_prefix(e.section, _PRESET_PREFIX).strip()
raise DuplicatePresetError(preset_name)
return wrapper |
Helper function that maps the given func over the iterables and then
creates a single flat iterable from the results. | def _flatmap(func, *iterables):
"""Helper function that maps the given func over the iterables and then
creates a single flat iterable from the results.
"""
return itertools.chain.from_iterable(map(func, *iterables)) |
Helper function used to convert an instance of pathlib.Path into a
unicode string. | def _convert_pathlib_path(path):
"""Helper function used to convert an instance of pathlib.Path into a
unicode string.
"""
if Path is None:
return path
if isinstance(path, Path):
return str(path)
return path |
Helper function used to decode the standard PIPE and STDOUT constants
into actual file objects. | def _get_stream_file(stream):
"""Helper function used to decode the standard PIPE and STDOUT constants
into actual file objects.
"""
if stream == PIPE:
return sys.stdout
if stream == STDOUT:
return sys.stdout
if stream == DEVNULL:
raise ValueError('DEVNULL should be replaced by now!')
return stream |
Helper function used to echo a given command to some stream. An optional
command prefix can be provided. | def _echo_command(command, stream, prefix=ECHO_PREFIX):
"""Helper function used to echo a given command to some stream. An optional
command prefix can be provided.
"""
if stream == DEVNULL:
return
stream = _get_stream_file(stream)
stream.write('{}{}\n'.format(prefix, quote(command)))
stream.flush() |
Normalizes a list of arguments containing one or more strings and
CommandWrapper instances into a one-dimensional list of strings. | def _normalize_args(args):
"""Normalizes a list of arguments containing one or more strings and
CommandWrapper instances into a one-dimensional list of strings.
"""
if isinstance(args, (str,)):
return shlex.split(args)
def normalize_arg(arg):
arg = _convert_pathlib_path(arg)
if isinstance(arg, (str,)):
return [str(arg)]
if isinstance(arg, AbstractWrapper):
return list(map(_convert_pathlib_path, arg.command))
raise ValueError('Invalid argument type: {}'.format(
type(arg).__name__))
if isinstance(args, AbstractWrapper):
return normalize_arg(args)
return list(_flatmap(normalize_arg, args)) |
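# Minimal sketch of the normalization rules: a plain string is shlex-split,
# while lists may mix strings, pathlib paths and command wrappers.
from pathlib import Path

assert _normalize_args('echo "hello world"') == ['echo', 'hello world']
assert _normalize_args(['cp', Path('/tmp/a'), '/tmp/b']) == \
    ['cp', '/tmp/a', '/tmp/b']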
Decorator used to uniformly normalize the input command of the
subprocess wrappers. | def _normalize_command(func):
"""Decorator used to uniformly normalize the input command of the
subprocess wrappers.
"""
@functools.wraps(func)
def wrapper(command, **kwargs):
if not isinstance(command, (str,)):
command = _normalize_args(command)
return func(command, **kwargs)
return wrapper |
Decorator used to add the 'echo' keyword-only argument that echoes the
input command to whatever stdout the user passes (or sys.stdout if not
supplied). | def _add_echo_kwarg(func):
"""Decorator used to add the 'echo' keyword-only argument that echos the
input command to whatever stdout the user passes (or sys.stdout if not
supplied).
"""
@functools.wraps(func)
def wrapper(command, **kwargs):
if kwargs.pop('echo', False):
stdout = kwargs.get('stdout', sys.stdout)
_echo_command(command, stdout)
return func(command, **kwargs)
return wrapper |
Extension of the standard shlex.quote that handles both strings and
lists of strings. This mirrors how the subprocess package can handle
commands as both a standalone string or list of strings.
>>> quote('/Applications/App Store.app')
"'/Applications/App Store.app'"
>>> quote(['rm', '-rf', '~/Documents/My Homework'])
"rm -rf '~/Documents/My Homework'" | def quote(command):
"""Extension of the standard shutil.quote that handles both strings and
lists of strings. This mirrors how the subprocess package can handle
commands as both a standalone string or list of strings.
>>> quote('/Applications/App Store.app')
"'/Applications/App Store.app'"
>>> quote(['rm', '-rf', '~/Documents/My Homework'])
"rm -rf '~/Documents/My Homework'"
"""
if isinstance(command, (str,)):
return _quote(command)
if isinstance(command, collections.abc.Iterable):
return ' '.join([_quote(arg) for arg in _normalize_args(command)])
raise ValueError('Invalid command type: {}'.format(type(command).__name__)) |
Replace the current process with itself running with root permissions.
Prompt the user for their password to support this. | def rerun_as_root():
"""Replace the current process with itself running with root permissions.
Prompt the user for their password to support this.
"""
euid = os.geteuid()
if euid == 0:
return
args = ['sudo', sys.executable] + sys.argv + [os.environ]
os.execlpe('sudo', *args) |
Simple wrapper around subprocess.call which backports DEVNULL support
and adds support for the echo keyword-only argument. | def call(command, **kwargs):
"""Simple wrapper around subprocess.call which backports DEVNULL support
and adds support for the echo keyword-only argument.
"""
return subprocess.call(command, **kwargs) |
Simple wrapper around subprocess.check_call which backports DEVNULL
support and adds support for the echo keyword-only argument. | def check_call(command, **kwargs):
"""Simple wrapper around subprocess.check_call which backports DEVNULL
support and adds support for the echo keyword-only argument.
"""
return subprocess.check_call(command, **kwargs) |
Simple wrapper around subprocess.check_output which backports DEVNULL
support and adds support for the echo keyword-only argument.
Output is returned as a unicode string. | def check_output(command, **kwargs):
"""Simple wrapper around subprocess.check_output which backports DEVNULL
support and adds support for the echo keyword-only argument.
Output is returned as a unicode string.
"""
kwargs['encoding'] = 'utf-8'
output = subprocess.check_output(command, **kwargs)
return output |
Emulates the `cp` command to copy a file or directory.
| def copy(source, dest, echo=False):
"""Emulates the `cp` command to copy a file or directory.
"""
source = _convert_pathlib_path(source)
dest = _convert_pathlib_path(dest)
if os.path.isfile(source):
if echo:
_echo_command(['cp', source, dest], sys.stdout)
return shutil.copyfile(source, dest)
if os.path.isdir(source):
if echo:
_echo_command(['cp', '-R', source, dest], sys.stdout)
return shutil.copytree(source, dest) |
Emulates the `mkdir -p` command to recursively create directories for
the path given if it doesn't already exist. | def makedirs(path, echo=False):
"""Emulates the `mkdir -p` command to recursively create directories for
the path given if it doesn't already exist.
"""
path = _convert_pathlib_path(path)
if os.path.exists(path):
return
if echo:
_echo_command(['mkdir', '-p', path], sys.stdout)
os.makedirs(path) |
Emulates the `mv` command to move files or directories.
| def move(source, dest, echo=False):
"""Emulates the `mv` command to move files or directories.
"""
source = _convert_pathlib_path(source)
dest = _convert_pathlib_path(dest)
if echo:
_echo_command(['mv', source, dest], sys.stdout)
return shutil.move(source, dest) |
Emulates the `rm` command for both files and directories.
| def remove(path, echo=False):
"""Emulates the `rm` command for both files and directories.
"""
path = _convert_pathlib_path(path)
if os.path.isfile(path):
if echo:
_echo_command(['rm', path], sys.stdout)
return os.remove(path)
if os.path.isdir(path):
if echo:
_echo_command(['rm', '-rf', path], sys.stdout)
return shutil.rmtree(path, ignore_errors=True) |
Emulates the `ln` command to symlink a file or directory.
| def symlink(source, dest, echo=False):
"""Emulates the `ln` command to symlink a file or directory.
"""
source = _convert_pathlib_path(source)
dest = _convert_pathlib_path(dest)
if echo:
_echo_command(['ln', '-s', source, dest], sys.stdout)
return os.symlink(source, dest) |
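# Usage sketch for the shell-emulation helpers above; with echo=True each call
# first prints the equivalent shell command (prefixed by ECHO_PREFIX) and then
# performs the operation with the standard library.
makedirs('/tmp/demo/nested', echo=True)           # mkdir -p /tmp/demo/nested
copy('/etc/hosts', '/tmp/demo/hosts', echo=True)
move('/tmp/demo/hosts', '/tmp/demo/hosts.bak', echo=True)
symlink('/tmp/demo/hosts.bak', '/tmp/demo/hosts', echo=True)
remove('/tmp/demo', echo=True)                    # recursive rm -rf for a directory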
Polyfill for the Python 3 shutil.which function. Does not support
Windows platforms. | def which(command, mode=os.F_OK | os.X_OK, path=None):
"""Polyfill for the Python 3 shutil.which function. Does not support
Windows platforms.
"""
# Default to environment PATH or os.defpath
path = path or os.environ.get('PATH', os.defpath)
for location in path.split(os.pathsep):
# If command is a full path then candidate will be just command
candidate = os.path.join(location, command)
if os.path.isfile(candidate) and os.access(candidate, mode):
return candidate
return None |
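# Quick sketch: `which` behaves like shutil.which on POSIX hosts, returning
# either an absolute path or None.
clang = which('clang')
if clang is None:
    print('clang not found in PATH')
else:
    print('Found clang at', clang)
ninja = which('ninja', path='/usr/local/bin:/usr/bin')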
Simple utility function to instantiate a CommandWrapper instance in a
more fluent way. | def wraps(command):
"""Simple utility function to instantiate a CommandWrapper instance in a
more fluent way.
"""
return CommandWrapper(command) |
Classifies a component into one of the registered component types.
| def _get_component_type(component):
"""Classifies a component into one of the registered component types.
"""
if len(component) <= 0:
raise ValueError('Empty component')
if component == '.':
return _ComponentType.DOT
if component.isdigit():
return _ComponentType.NUMERIC
if component.isalpha():
if component.isupper():
return _ComponentType.ALPHA_UPPER
elif component.islower():
return _ComponentType.ALPHA_LOWER
else:
raise ValueError('Unknown component type for {!r}'.format(
component))
return _ComponentType.OTHER |
Attempts to cast an object to a class, returning the converted
object or the original object if the cast raises a ValueError. | def _try_cast(obj, cls):
"""Attempts to cast an object to a class, returning the resulting casted
object or the original object if the cast raises a ValueError.
"""
try:
return cls(obj)
except ValueError:
return obj |
Splits a version string into a tuple of components using similar rules
to distutils.version.LooseVersion. All version strings are valid, but the
outcome will only split on boundaries between:
* lowercase alpha characters
* uppercase alpha characters
* numeric characters
* the literal '.' (dot) character
All other characters are grouped into an "other" category.
Numeric components are converted into integers in the resulting tuple.
An empty tuple is returned for the empty string.
```
>>> _split_version('1000.2.108')
(1000, 2, 108)
>>> _split_version('10A23b')
(10, 'A', 23, 'b')
>>> _split_version('10.23-beta4')
(10, 23, '-', 'beta', 4)
>>> _split_version('FOObarBAZqux')
('FOO', 'bar', 'BAZ', 'qux')
``` | def _split_version(version):
"""Splits a version string into a tuple of components using similar rules
to distutils.version.LooseVersion. All version strings are valid, but the
outcome will only split on boundaries between:
* lowercase alpha characters
* uppercase alpha characters
* numeric characters
* the literal '.' (dot) character
All other characters are grouped into an "other" category.
Numeric components are converted into integers in the resulting tuple.
An empty tuple is returned for the empty string.
```
>>> _split_version('1000.2.108')
    (1000, 2, 108)
>>> _split_version('10A23b')
(10, 'A', 23, 'b')
>>> _split_version('10.23-beta4')
(10, 23, '-', 'beta', 4)
>>> _split_version('FOObarBAZqux')
('FOO', 'bar', 'BAZ', 'qux')
```
"""
if len(version) < 1:
return tuple()
components = []
part = version[0]
part_type = _get_component_type(part)
for char in version[1:]:
char_type = _get_component_type(char)
if part_type == char_type:
part += char
else:
components.append(part)
part = char
part_type = char_type
# Add last part
components.append(part)
# Remove '.' groups and try casting components to ints
components = (_try_cast(c, int) for c in components if c != '.')
return tuple(components) |
Helper function for implementing __repr__ methods on *Type classes.
| def _repr(cls, args):
"""Helper function for implementing __repr__ methods on *Type classes.
"""
_args = []
for key, value in args.items():
_args.append('{}={}'.format(key, repr(value)))
return '{}({})'.format(type(cls).__name__, ', '.join(_args)) |
Decorator used to catch exceptions and return None.
| def _catch_return_none(exceptions):
"""Decorator used to catch exceptions and return None.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions:
return None
return wrapper
return decorator |
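# Usage sketch with a hypothetical helper: convert a raising lookup into one
# that returns None on failure.
@_catch_return_none(KeyError)
def lookup(table, key):
    return table[key]

assert lookup({'a': 1}, 'a') == 1
assert lookup({'a': 1}, 'missing') is None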
Method decorator used to prepend the sdk and toolchain arguments to the
final command passed to xcrun. | def _prepend_sdk_and_toolchain(func):
"""Method decorator used to prepend the sdk and toolchain arguments to the
final command passed to xcrun.
"""
@functools.wraps(func)
def wrapper(self, args, sdk=None, toolchain=None, **kwargs):
if isinstance(args, (str,)):
args = shlex.split(args)
if toolchain:
args = ['--toolchain', toolchain] + args
if sdk:
args = ['--sdk', sdk] + args
return func(self, args, **kwargs)
return wrapper |
Decorator used to skip tests if an object does not have the required
attribute. | def requires_attr(obj, attr):
"""Decorator used to skip tests if an object does not have the required
attribute.
"""
try:
getattr(obj, attr)
return lambda func: func
except AttributeError:
return unittest.skip('Required attribute "{}" not found on {}'.format(
attr, obj)) |
Decorator used to skip tests if a module is not imported.
| def requires_module(fullname):
"""Decorator used to skip tests if a module is not imported.
"""
if _can_import(fullname):
return lambda func: func
return unittest.skip('Unable to import "{}"'.format(fullname)) |
Decorator used to skip tests if not running on the given platform.
| def requires_platform(name):
"""Decorator used to skip tests if not running on the given platform.
"""
if name == platform.system():
return lambda func: func
return unittest.skip(
'Required platform "{}" does not match system'.format(name)) |
Decorator used to skip tests if the running Python version is not
greater or equal to the required version. | def requires_python(version):
"""Decorator used to skip tests if the running Python version is not
greater or equal to the required version.
"""
if isinstance(version, str):
version = Version(version)
if _PYTHON_VERSION >= version:
return lambda func: func
return unittest.skip(
'Requires Python version {} or greater'.format(version)) |
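# Sketch of how the skip decorators compose in a unittest case; the module and
# attribute requirements below are only examples.
class ToolchainTestCase(unittest.TestCase):

    @requires_platform('Darwin')
    @requires_module('lldb')
    @requires_python('3.6')
    def test_darwin_only_feature(self):
        self.assertTrue(True)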
Finds all stats-profiles in path, returning list of JobProfs objects | def list_stats_dir_profiles(path, select_module=[], select_stat=[], **kwargs):
"""Finds all stats-profiles in path, returning list of JobProfs objects"""
jobprofs = []
for root, dirs, files in os.walk(path):
for d in dirs:
mg = match_profilepat(d)
if not mg:
continue
# NB: "pid" in fpat is a random number, not unix pid.
jobkind = mg['kind']
jobid = int(mg['pid'])
module = mg["module"]
if len(select_module) != 0 and module not in select_module:
continue
jobargs = [mg["input"], mg["triple"], mg["out"], mg["opt"]]
e = JobProfs(jobkind=jobkind, jobid=jobid,
module=module, jobargs=jobargs,
profiles=find_profiles_in(os.path.join(root, d),
select_stat))
jobprofs.append(e)
return jobprofs |
Loads all stats-files found in path into a list of JobStats objects | def load_stats_dir(path, select_module=[], select_stat=[],
exclude_timers=False, merge_timers=False, **kwargs):
"""Loads all stats-files found in path into a list of JobStats objects"""
jobstats = []
sre = re.compile('.*' if len(select_stat) == 0 else
'|'.join(select_stat))
for root, dirs, files in os.walk(path):
for f in files:
mg = match_filepat(f)
if not mg:
continue
# NB: "pid" in fpat is a random number, not unix pid.
jobkind = mg['kind']
jobid = int(mg['pid'])
start_usec = int(mg['start'])
module = mg["module"]
if len(select_module) != 0 and module not in select_module:
continue
jobargs = [mg["input"], mg["triple"], mg["out"], mg["opt"]]
if platform.system() == 'Windows':
p = str(u"\\\\?\\%s" % os.path.abspath(os.path.join(root, f)))
else:
p = os.path.join(root, f)
with open(p) as fp:
j = json.load(fp)
dur_usec = 1
stats = dict()
for (k, v) in j.items():
if sre.search(k) is None:
continue
if k.startswith('time.'):
v = int(1000000.0 * float(v))
if k.startswith('time.') and exclude_timers:
continue
tm = match_timerpat(k)
if tm:
if tm['jobkind'] == jobkind and \
tm['timerkind'] == 'wall':
dur_usec = v
if merge_timers:
k = "time.swift-%s.%s" % (tm['jobkind'],
tm['timerkind'])
stats[k] = v
e = JobStats(jobkind=jobkind, jobid=jobid,
module=module, start_usec=start_usec,
dur_usec=dur_usec, jobargs=jobargs,
stats=stats)
jobstats.append(e)
return jobstats |
Does a pairwise merge of the elements of list of jobs | def merge_all_jobstats(jobstats, select_module=[], group_by_module=False,
merge_by="sum", divide_by=1, **kwargs):
"""Does a pairwise merge of the elements of list of jobs"""
m = None
if len(select_module) > 0:
jobstats = filter(lambda j: j.module in select_module, jobstats)
if group_by_module:
def keyfunc(j):
return j.module
jobstats = list(jobstats)
jobstats.sort(key=keyfunc)
prefixed = []
for mod, group in itertools.groupby(jobstats, keyfunc):
groupmerge = merge_all_jobstats(group, merge_by=merge_by,
divide_by=divide_by)
prefixed.append(groupmerge.prefixed_by(mod))
jobstats = prefixed
for j in jobstats:
if m is None:
m = j
else:
m = m.merged_with(j, merge_by=merge_by)
if m is None:
return m
return m.divided_by(divide_by) |
Strip non-essential character sequences from a string.
>>> unwrap('(String) value = "42\\n"')
'42'
>>> unwrap('(Int) $R0 = 42')
'42'
>>> unwrap('\\"foo\"')
'foo'
>>> unwrap('foo\nbar')
'foo\nbar'
>>> unwrap(' foo ')
'foo' | def unwrap(s):
'''
Strip non-essential character sequences from a string.
>>> unwrap('(String) value = "42\\\\n"')
'42'
>>> unwrap('(Int) $R0 = 42')
'42'
>>> unwrap('\\\\"foo\\"')
'foo'
>>> unwrap('foo\\nbar')
'foo\\nbar'
>>> unwrap(' foo ')
'foo'
'''
s = s[s.find('=') + 1:]
s = s.lstrip(' "')
s = s.rstrip('"')
if s.endswith('\\n'):
s = s[:-2]
if s.endswith('\\"'):
s = s[:-2]
if s.startswith('\\"'):
s = s[2:]
s = s.replace('\\n', '\n')
s = s.strip()
return s |
This function disassembles the current assembly frame into a temporary file
and then uses that temporary file as input to blockifyasm | viewcfg. This
will cause a pdf of the cfg to be opened on Darwin. | def disassemble_asm_cfg(debugger, command, exec_ctx, result, internal_dict):
"""
This function disassembles the current assembly frame into a temporary file
and then uses that temporary file as input to blockifyasm | viewcfg. This
will cause a pdf of the cfg to be opened on Darwin.
"""
d = exec_ctx.frame.Disassemble()
with tempfile.TemporaryFile() as f:
f.write(bytes(d, 'utf-8'))
f.flush()
f.seek(0)
p1 = subprocess.Popen([BLOCKIFYASM_PATH], stdin=f,
stdout=subprocess.PIPE)
subprocess.Popen([VIEWCFG_PATH], stdin=p1.stdout)
p1.stdout.close() |
This function disassembles the current assembly frame into a file specified
by the user. | def disassemble_to_file(debugger, command, exec_ctx, result, internal_dict):
"""This function disassembles the current assembly frame into a file specified
by the user.
"""
parser = argparse.ArgumentParser(prog='disassemble-to-file', description="""
Dump the disassembly of the current frame or specified function to the
specified file.
""")
parser.add_argument('file', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-n', dest='func_name', help="""
    Function name to disassemble. The current frame is used if unset.""")
args = parser.parse_args(shlex.split(command))
if args.func_name is None:
args.file.write(exec_ctx.frame.disassembly)
else:
name = args.func_name
result = exec_ctx.target.FindFunctions(name)
if result is None:
raise RuntimeError('No function with name: {}'.format(name))
if len(result) > 1:
errorStr = 'Matched multiple functions to name: {}'
raise RuntimeError(errorStr.format(name))
f = result[0].GetFunction()
args.file.write(str(f.GetInstructions(exec_ctx.target)) + "\n") |
Combine multiple semicolon separated lldb commands into one command.
This command is particularly useful for defining aliases and breakpoint
commands. Some examples:
# Define an alias that prints rax and also steps one instruction.
command alias xs sequence p/x $rax; stepi
# Breakpoint command to show the frame's info and arguments.
breakpoint command add -o 'seq frame info; reg read arg1 arg2 arg3'
# Override `b` to allow a condition to be specified. For example:
# b someMethod if someVar > 2
command regex b
s/(.+) if (.+)/seq _regexp-break %1; break mod -c "%2"/
s/(.*)/_regexp-break %1/ | def sequence(debugger, command, exec_ctx, result, internal_dict):
"""
Combine multiple semicolon separated lldb commands into one command.
This command is particularly useful for defining aliases and breakpoint
commands. Some examples:
# Define an alias that prints rax and also steps one instruction.
command alias xs sequence p/x $rax; stepi
# Breakpoint command to show the frame's info and arguments.
breakpoint command add -o 'seq frame info; reg read arg1 arg2 arg3'
# Override `b` to allow a condition to be specified. For example:
# b someMethod if someVar > 2
command regex b
s/(.+) if (.+)/seq _regexp-break %1; break mod -c "%2"/
s/(.*)/_regexp-break %1/
"""
interpreter = debugger.GetCommandInterpreter()
for subcommand in command.split(';'):
subcommand = subcommand.strip()
if not subcommand:
continue # skip empty commands
ret = lldb.SBCommandReturnObject()
interpreter.HandleCommand(subcommand, exec_ctx, ret)
if ret.GetOutput():
print(ret.GetOutput().strip(), file=result)
if not ret.Succeeded():
result.SetError(ret.GetError())
result.SetStatus(ret.GetStatus())
return |
For a given subset input_product_classes of all product classes, compute a
topological ordering of input_product_classes together with their transitive
dependencies that respects the dependency graph. | def produce_scheduled_build(input_product_classes):
"""For a given a subset input_input_product_classes of
all_input_product_classes, compute a topological ordering of the
input_input_product_classes + topological closures that respects the
dependency graph.
"""
dag = BuildDAG()
worklist = list(input_product_classes)
visited = set(input_product_classes)
# Construct the DAG.
while len(worklist) > 0:
entry = worklist.pop()
deps = entry.get_dependencies()
if len(deps) == 0:
dag.set_root(entry)
for d in deps:
dag.add_edge(d, entry)
if d not in visited:
worklist.append(d)
visited = visited.union(deps)
# Then produce the schedule.
schedule = dag.produce_schedule()
# Finally check that all of our input_product_classes are in the schedule.
if len(set(input_product_classes) - set(schedule[0])) != 0:
raise RuntimeError('Found disconnected graph?!')
return schedule |
Raises a SystemExit error with the given message.
| def _fatal_error(message):
"""Raises a SystemExit error with the given message.
"""
raise SystemExit('ERROR: {}\n'.format(message)) |
quote_command(args) -> str
Quote the command for passing to a shell. | def quote_command(args):
"""
quote_command(args) -> str
Quote the command for passing to a shell.
"""
return ' '.join([_quote(a) for a in args]) |
call(command, ...) -> str
Execute the given command.
This function will raise an exception on any command failure. | def call(command, stderr=None, env=None, dry_run=None, echo=True):
"""
call(command, ...) -> str
Execute the given command.
This function will raise an exception on any command failure.
"""
dry_run = _coerce_dry_run(dry_run)
if dry_run or echo:
_echo_command(dry_run, command, env=env)
if dry_run:
return
_env = None
if env is not None:
_env = dict(os.environ)
_env.update(env)
try:
subprocess.check_call(command, env=_env, stderr=stderr)
except subprocess.CalledProcessError as e:
_fatal_error(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
except OSError as e:
_fatal_error(
"could not execute '" + quote_command(command) +
"': " + e.strerror) |
Execute a command during which system sleep is disabled.
By default, this ignores the state of the `shell.dry_run` flag. | def call_without_sleeping(command, env=None, dry_run=False, echo=False):
"""
Execute a command during which system sleep is disabled.
By default, this ignores the state of the `shell.dry_run` flag.
"""
# Disable system sleep, if possible.
if platform.system() == 'Darwin':
# Don't mutate the caller's copy of the arguments.
command = ["caffeinate"] + list(command)
call(command, env=env, dry_run=dry_run, echo=echo) |
capture(command, ...) -> str
Execute the given command and return the standard output.
This function will raise an exception on any command failure. | def capture(command, stderr=None, env=None, dry_run=None, echo=True,
optional=False, allow_non_zero_exit=False):
"""
capture(command, ...) -> str
Execute the given command and return the standard output.
This function will raise an exception on any command failure.
"""
dry_run = _coerce_dry_run(dry_run)
if dry_run or echo:
_echo_command(dry_run, command, env=env)
if dry_run:
return
_env = None
if env is not None:
_env = dict(os.environ)
_env.update(env)
try:
return subprocess.check_output(command, env=_env, stderr=stderr,
universal_newlines=True)
except subprocess.CalledProcessError as e:
if allow_non_zero_exit:
return e.output
if optional:
return None
_fatal_error(
"command terminated with a non-zero exit status " +
str(e.returncode) + ", aborting")
except OSError as e:
if optional:
return None
_fatal_error(
"could not execute '" + quote_command(command) +
"': " + e.strerror) |
Returns the default path at which built Swift products (like bin, lib,
and include) will be installed, based on the host machine's operating
system. | def install_prefix():
"""
Returns the default path at which built Swift products (like bin, lib,
and include) will be installed, based on the host machine's operating
system.
"""
if platform.system() == 'Darwin':
return '/Applications/Xcode.app/Contents/Developer/Toolchains/' + \
'XcodeDefault.xctoolchain/usr'
else:
return '/usr' |
Given the install prefix for a Darwin system, and assuming that path
is to a .xctoolchain directory, return the path to the .xctoolchain
directory. | def darwin_toolchain_prefix(darwin_install_prefix):
"""
Given the install prefix for a Darwin system, and assuming that path
is to a .xctoolchain directory, return the path to the .xctoolchain
directory.
"""
return os.path.split(darwin_install_prefix)[0] |
Given the install prefix for a Darwin system, and assuming that path
is to a .xctoolchain directory, return the path to the .xctoolchain
directory in the given install directory.
This toolchain is being populated during the build-script invocation.
Downstream products can use products that were previously installed into
this toolchain. | def toolchain_path(install_destdir, install_prefix):
"""
Given the install prefix for a Darwin system, and assuming that path
is to a .xctoolchain directory, return the path to the .xctoolchain
directory in the given install directory.
This toolchain is being populated during the build-script invocation.
Downstream products can use products that were previously installed into
this toolchain.
"""
built_toolchain_path = install_destdir
if platform.system() == 'Darwin':
# The prefix is an absolute path, so concatenate without os.path.
built_toolchain_path += darwin_toolchain_prefix(install_prefix) + "/usr"
else:
built_toolchain_path += install_prefix
return built_toolchain_path |
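# Worked sketch of the path arithmetic: on Darwin install_prefix() ends in
# '.../XcodeDefault.xctoolchain/usr', darwin_toolchain_prefix() strips the
# trailing '/usr', and toolchain_path() re-roots the whole prefix under the
# given destdir.
dest = '/tmp/swift-install'
built = toolchain_path(dest, install_prefix())
# Darwin: /tmp/swift-install/Applications/Xcode.app/Contents/Developer/
#         Toolchains/XcodeDefault.xctoolchain/usr
# Linux:  /tmp/swift-install/usr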
Writes a message to the given stream and exits. By default this
function outputs to stderr. | def fatal_error(message, stream=sys.stderr):
"""Writes a message to the given stream and exits. By default this
function outputs to stderr.
"""
stream.write('[{}] ERROR: {}\n'.format(sys.argv[0], message))
stream.flush()
sys.exit(1) |
Analyze .build_script_log and provide a summary of the time execution. | def log_analyzer():
"""
Analyze .build_script_log and provide a summary of the time execution.
"""
build_script_log_path = log_time_path()
print("--- Build Script Analyzer ---", file=sys.stderr)
build_events = []
total_duration = 0
if os.path.exists(build_script_log_path):
print("Build Script Log: {}".format(build_script_log_path), file=sys.stderr)
with open(build_script_log_path) as f:
for event in f:
build_event = json.loads(event)
build_event["duration"] = float(build_event["duration"])
total_duration += build_event["duration"]
build_events.append(build_event)
finish_events = [x for x in build_events if x["event"] == "end"]
finish_events.sort(key=lambda x: x["duration"], reverse=True)
print("Build Percentage \t Build Duration (sec) \t Build Phase",
file=sys.stderr)
print("================ \t ==================== \t ===========",
file=sys.stderr)
event_row = '{:<17.1%} \t {:<21} \t {}'
for build_event in finish_events:
duration_percentage = \
(float(build_event["duration"]) / float(total_duration))
print(event_row.format(duration_percentage,
build_event["duration"],
build_event["command"]), file=sys.stderr)
hours, remainder = divmod(total_duration, 3600)
minutes, seconds = divmod(remainder, 60)
if hours > 0:
formatted_duration = " ({}h {}m {}s)".format(
int(hours), int(minutes), int(seconds))
elif minutes > 0:
formatted_duration = " ({}m {}s)".format(int(minutes), int(seconds))
else:
formatted_duration = ""
print("Total Duration: {:.2f} seconds".format(
total_duration) + formatted_duration, file=sys.stderr)
else:
print("Skip build script analyzer", file=sys.stderr)
print(".build_script_log file not found at {}".format(build_script_log_path),
file=sys.stderr) |
On Linux, this relocates the default location of the module cache -- this can
be useful when there are a lot of invocations to touch or when some
invocations can't be easily configured (as is the case for Swift compiler
detection in CMake) | def relocate_xdg_cache_home_under(new_cache_location):
"""
    On Linux, this relocates the default location of the module cache -- this
    can be useful when there are a lot of invocations to touch or when some
    invocations can't be easily configured (as is the case for Swift compiler
    detection in CMake)
"""
os.environ['XDG_CACHE_HOME'] = new_cache_location |
Function used to run a given closure in parallel.
NOTE: This function was originally located in the shell module of
swift_build_support and should eventually be replaced with a better
parallel implementation. | def run_parallel(fn, pool_args, n_processes=0):
"""Function used to run a given closure in parallel.
NOTE: This function was originally located in the shell module of
swift_build_support and should eventually be replaced with a better
parallel implementation.
"""
if n_processes == 0:
n_processes = cpu_count() * 2
lk = Lock()
print("Running ``%s`` with up to %d processes." %
(fn.__name__, n_processes))
pool = Pool(processes=n_processes, initializer=child_init, initargs=(lk,))
results = pool.map_async(func=fn, iterable=pool_args).get(999999)
pool.close()
pool.join()
return results |
Function used to check the results of run_parallel.
NOTE: This function was originally located in the shell module of
swift_build_support and should eventually be replaced with a better
parallel implementation. | def check_parallel_results(results, op):
"""Function used to check the results of run_parallel.
NOTE: This function was originally located in the shell module of
swift_build_support and should eventually be replaced with a better
parallel implementation.
"""
fail_count = 0
if results is None:
return 0
for r in results:
if r is not None:
if fail_count == 0:
print("======%s FAILURES======" % op)
print("%s failed (ret=%d): %s" % (r.repo_path, r.ret, r))
fail_count += 1
if r.stderr:
print(r.stderr)
return fail_count |
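The result objects only need to expose repo_path, ret, and stderr; a hedged sketch of a compatible record, with the class name and values made up:

from collections import namedtuple

# Hypothetical failure record: only the attributes read above are required.
UpdateResult = namedtuple('UpdateResult', ['repo_path', 'ret', 'stderr'])

results = [
    None,  # None means the corresponding operation succeeded
    UpdateResult(repo_path='swift', ret=1, stderr='fatal: not a git repository'),
]
# check_parallel_results(results, 'UPDATE') would print the failure banner,
# the repo path and return code, then the stderr text, and return 1.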
Confirm that a given tag exists in a git repository. This function
assumes that the repository is already the current working directory before
it's called.
Args:
tag (str): tag to look up in the repository
repo_name (str): name of the repository for the lookup, used for logging
Returns:
str | None: returns `tag` argument value or `None` if the tag doesn't
exist. | def confirm_tag_in_repo(tag, repo_name):
# type: (str, str) -> str | None
"""Confirm that a given tag exists in a git repository. This function
    assumes that the repository is already the current working directory before
it's called.
Args:
tag (str): tag to look up in the repository
        repo_name (str): name of the repository for the lookup, used for logging
Returns:
str | None: returns `tag` argument value or `None` if the tag doesn't
exist.
"""
tag_exists = shell.capture(['git', 'ls-remote', '--tags',
'origin', tag], echo=False)
if not tag_exists:
print("Tag '" + tag + "' does not exist for '" +
repo_name + "', just updating regularly")
tag = None
return tag |
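The lookup boils down to whether `git ls-remote --tags origin <tag>` produces any output; a hedged standalone equivalent using subprocess, with the tag name and path below as placeholders:

import subprocess

def tag_exists_on_origin(tag, repo_dir):
    # Same query as the shell.capture() call above, via subprocess.
    out = subprocess.check_output(
        ['git', 'ls-remote', '--tags', 'origin', tag], cwd=repo_dir)
    return bool(out.strip())

# Hypothetical usage:
# tag_exists_on_origin('swift-5.9-RELEASE', '/path/to/swift')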
Infer, fetch, and return a branch corresponding to a given PR, otherwise
return a branch found in the config for this repository name.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
repo_name (str): name of the repository for checking out the branch
scheme_name (str): name of the scheme to look up in the config
scheme_map (Dict[str, str]): map of repo names to branches to check out
cross_repos_pr (Dict[str, str]): map of repo ids to PRs to check out
Returns:
Tuple[str, bool]: a pair of a checked out branch and a boolean
indicating whether this repo matched any `cross_repos_pr`. | def get_branch_for_repo(config, repo_name, scheme_name, scheme_map,
cross_repos_pr):
"""Infer, fetch, and return a branch corresponding to a given PR, otherwise
return a branch found in the config for this repository name.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
repo_name (str): name of the repository for checking out the branch
scheme_name (str): name of the scheme to look up in the config
scheme_map (Dict[str, str]): map of repo names to branches to check out
cross_repos_pr (Dict[str, str]): map of repo ids to PRs to check out
Returns:
Tuple[str, bool]: a pair of a checked out branch and a boolean
indicating whether this repo matched any `cross_repos_pr`.
"""
cross_repo = False
repo_branch = scheme_name
if scheme_map:
scheme_branch = scheme_map[repo_name]
repo_branch = scheme_branch
remote_repo_id = config['repos'][repo_name]['remote']['id']
if remote_repo_id in cross_repos_pr:
cross_repo = True
pr_id = cross_repos_pr[remote_repo_id]
repo_branch = "ci_pr_{0}".format(pr_id)
shell.run(["git", "checkout", scheme_branch],
echo=True)
shell.capture(["git", "branch", "-D", repo_branch],
echo=True, allow_non_zero_exit=True)
shell.run(["git", "fetch", "origin",
"pull/{0}/merge:{1}"
.format(pr_id, repo_branch), "--tags"], echo=True)
return repo_branch, cross_repo |
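A hedged sketch of the input shapes this function expects; the structure is read off the lookups above and every concrete value is made up:

config = {
    'repos': {
        'swift': {'remote': {'id': 'apple/swift'}},
    },
}
scheme_map = {'swift': 'release/5.9'}        # repo name -> branch to check out
cross_repos_pr = {'apple/swift': '12345'}    # remote id  -> PR number
# With these inputs the function would check out 'release/5.9', fetch the PR
# merge ref (pull/12345/merge) into a local branch named 'ci_pr_12345', and
# return ('ci_pr_12345', True).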
Computes a timestamp of the last commit on the current branch in
the `swift` repository.
Args:
match_timestamp (str | None): value of `--match-timestamp` to check.
source_root (str): directory that contains sources of the Swift project.
Returns:
str | None: a timestamp of the last commit of `swift` repository if
`match_timestamp` argument has a value, `None` if `match_timestamp` is
falsy. | def get_timestamp_to_match(match_timestamp, source_root):
# type: (str | None, str) -> str | None
"""Computes a timestamp of the last commit on the current branch in
the `swift` repository.
Args:
match_timestamp (str | None): value of `--match-timestamp` to check.
source_root (str): directory that contains sources of the Swift project.
Returns:
str | None: a timestamp of the last commit of `swift` repository if
`match_timestamp` argument has a value, `None` if `match_timestamp` is
falsy.
"""
if not match_timestamp:
return None
with shell.pushd(os.path.join(source_root, "swift"),
dry_run=False, echo=False):
return shell.capture(["git", "log", "-1", "--format=%cI"],
echo=False).strip() |
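`--format=%cI` is git's strict ISO 8601 committer date; a hedged standalone equivalent of the capture above, with the path and return value purely illustrative:

import subprocess

def last_commit_timestamp(repo_dir):
    # Same query as above, expressed with subprocess instead of shell.capture.
    return subprocess.check_output(
        ['git', 'log', '-1', '--format=%cI'], cwd=repo_dir).decode().strip()

# Hypothetical usage:
# last_commit_timestamp('/path/to/swift')  ->  '2023-06-01T12:34:56-07:00'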
Find a mapping from repository IDs to branches in the config.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
scheme_name (str): name of the scheme to look up in `config`
Returns:
Dict[str, str]: a mapping from repos to branches for the given scheme. | def get_scheme_map(config, scheme_name):
"""Find a mapping from repository IDs to branches in the config.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
scheme_name (str): name of the scheme to look up in `config`
Returns:
Dict[str, str]: a mapping from repos to branches for the given scheme.
"""
if scheme_name:
# This loop is only correct, since we know that each alias set has
# unique contents. This is checked by validate_config. Thus the first
# branch scheme data that has scheme_name as one of its aliases is
# the only possible correct answer.
for v in config['branch-schemes'].values():
if scheme_name in v['aliases']:
return v['repos']
return None |
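A hedged example of the branch-schemes layout the loop assumes; scheme, alias, and branch names are made up:

config = {
    'branch-schemes': {
        'main': {
            'aliases': ['main', 'master'],
            'repos': {'swift': 'main', 'llvm-project': 'next'},
        },
    },
}
# get_scheme_map(config, 'master')         -> {'swift': 'main', 'llvm-project': 'next'}
# get_scheme_map(config, 'unknown-scheme') -> None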
Dumps the current state of the repo into a new config file that contains a
main branch scheme with the relevant branches set to the appropriate
hashes. | def dump_repo_hashes(args, config, branch_scheme_name='repro'):
"""
Dumps the current state of the repo into a new config file that contains a
main branch scheme with the relevant branches set to the appropriate
hashes.
"""
new_config = {}
config_copy_keys = ['ssh-clone-pattern', 'https-clone-pattern', 'repos']
for config_copy_key in config_copy_keys:
new_config[config_copy_key] = config[config_copy_key]
repos = {}
repos = repo_hashes(args, config)
branch_scheme = {'aliases': [branch_scheme_name], 'repos': repos}
new_config['branch-schemes'] = {branch_scheme_name: branch_scheme}
json.dump(new_config, sys.stdout, indent=4) |
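A hedged sketch of the shape this writes to stdout; all values below are placeholders and repo_hashes() supplies the real commit hashes:

example_output = {
    "ssh-clone-pattern": "git@github.com:%s.git",
    "https-clone-pattern": "https://github.com/%s.git",
    "repos": {"swift": {"remote": {"id": "apple/swift"}}},
    "branch-schemes": {
        "repro": {
            "aliases": ["repro"],
            "repos": {"swift": "0123abcd"},
        },
    },
}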
Computes a list of repositories to skip when updating or cloning, if not
overridden by the `--all-repositories` CLI argument.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
all_repos (List[str]): repositories not required for current platform.
Returns:
List[str]: a resulting list of repositories to skip or empty list if
`all_repos` is not empty. | def skip_list_for_platform(config, all_repos):
"""Computes a list of repositories to skip when updating or cloning, if not
    overridden by the `--all-repositories` CLI argument.
Args:
config (Dict[str, Any]): deserialized `update-checkout-config.json`
all_repos (List[str]): repositories not required for current platform.
Returns:
List[str]: a resulting list of repositories to skip or empty list if
`all_repos` is not empty.
"""
if all_repos:
return [] # Do not skip any platform-specific repositories
# If there is a platforms key only include the repo if the
# platform is in the list
skip_list = []
platform_name = platform.system()
for repo_name, repo_info in config['repos'].items():
if 'platforms' in repo_info:
if platform_name not in repo_info['platforms']:
print("Skipping", repo_name, "on", platform_name)
skip_list.append(repo_name)
else:
print("Including", repo_name, "on", platform_name)
return skip_list |
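A hedged example of the optional platforms key the loop checks; repository names and the platform list are illustrative:

config = {
    'repos': {
        'linux-only-repo': {'platforms': ['Linux']},   # skipped off Linux
        'swift': {},                                   # no key: never skipped
    },
}
# On macOS, skip_list_for_platform(config, all_repos=[]) -> ['linux-only-repo'];
# with a non-empty all_repos (the --all-repositories case) it returns [].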
Returns a valid kernel code name (like `swift-for-tensorflow`)
from a kernel display name (like `Swift for TensorFlow`). | def get_kernel_code_name(kernel_name):
"""
Returns a valid kernel code name (like `swift-for-tensorflow`)
from a kernel display name (like `Swift for TensorFlow`).
"""
    kernel_code_name = kernel_name.lower().replace(
        " ", kernel_code_name_allowed_chars[0])
    kernel_code_name = "".join(
        c for c in kernel_code_name
        if c.isalnum() or c in kernel_code_name_allowed_chars)
return kernel_code_name |
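A self-contained sketch of the same transformation; the value of kernel_code_name_allowed_chars is an assumption (it is defined at module level in the original and not shown here), with '-' first since it is the character that replaces spaces:

# Assumed constant; only the first character's role is implied by the code above.
kernel_code_name_allowed_chars = '-.'

def get_kernel_code_name(kernel_name):
    code_name = kernel_name.lower().replace(' ', kernel_code_name_allowed_chars[0])
    return ''.join(c for c in code_name
                   if c.isalnum() or c in kernel_code_name_allowed_chars)

print(get_kernel_code_name('Swift for TensorFlow'))  # swift-for-tensorflow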
Returns environment variables that tell the kernel where things are. | def make_kernel_env(args):
"""Returns environment variables that tell the kernel where things are."""
kernel_env = {}
if args.swift_toolchain is not None:
# Use a prebuilt Swift toolchain.
if platform.system() == 'Linux':
kernel_env['PYTHONPATH'] = linux_pythonpath(args.swift_toolchain + '/usr')
kernel_env['LD_LIBRARY_PATH'] = '%s/usr/lib/swift/linux' % args.swift_toolchain
kernel_env['REPL_SWIFT_PATH'] = '%s/usr/bin/repl_swift' % args.swift_toolchain
kernel_env['SWIFT_BUILD_PATH'] = '%s/usr/bin/swift-build' % args.swift_toolchain
kernel_env['SWIFT_PACKAGE_PATH'] = '%s/usr/bin/swift-package' % args.swift_toolchain
elif platform.system() == 'Darwin':
kernel_env['PYTHONPATH'] = '%s/System/Library/PrivateFrameworks/LLDB.framework/Resources/Python' % args.swift_toolchain
kernel_env['LD_LIBRARY_PATH'] = '%s/usr/lib/swift/macosx' % args.swift_toolchain
kernel_env['REPL_SWIFT_PATH'] = '%s/System/Library/PrivateFrameworks/LLDB.framework/Resources/repl_swift' % args.swift_toolchain
elif platform.system() == 'Windows':
kernel_env['PYTHONPATH'] = os.path.join('%s','usr','lib','site-packages') % args.swift_toolchain
kernel_env['LD_LIBRARY_PATH'] = os.path.join(os.path.dirname(os.path.dirname(args.swift_toolchain)),
'Platforms','Windows.platform','Developer','Library','XCTest-development',
'usr','lib','swift')
kernel_env['REPL_SWIFT_PATH'] = os.path.join('%s','usr','bin','repl_swift.exe') % args.swift_toolchain
else:
raise Exception('Unknown system %s' % platform.system())
elif args.swift_build is not None:
# Use a build dir created by build-script.
# TODO: Make this work on macos
if platform.system() != 'Linux':
raise Exception('build-script build dir only implemented on Linux')
swift_build_dir = '%s/swift-linux-x86_64' % args.swift_build
lldb_build_dir = '%s/lldb-linux-x86_64' % args.swift_build
kernel_env['PYTHONPATH'] = linux_pythonpath(lldb_build_dir)
kernel_env['LD_LIBRARY_PATH'] = '%s/lib/swift/linux' % swift_build_dir
kernel_env['REPL_SWIFT_PATH'] = '%s/bin/repl_swift' % lldb_build_dir
elif args.xcode_path is not None:
# Use an Xcode provided Swift toolchain.
if platform.system() != 'Darwin':
raise Exception('Xcode support is only available on Darwin')
lldb_framework = '%s/Contents/SharedFrameworks/LLDB.framework' % args.xcode_path
xcode_toolchain = '%s/Contents/Developer/Toolchains/XcodeDefault.xctoolchain' % args.xcode_path
kernel_env['PYTHONPATH'] = '%s/Resources/Python' % lldb_framework
kernel_env['REPL_SWIFT_PATH'] = '%s/Resources/repl_swift' % lldb_framework
kernel_env['LD_LIBRARY_PATH'] = '%s/usr/lib/swift/macosx' % xcode_toolchain
if args.swift_python_version is not None:
kernel_env['PYTHON_VERSION'] = args.swift_python_version
if args.swift_python_library is not None:
kernel_env['PYTHON_LIBRARY'] = args.swift_python_library
if args.swift_python_use_conda:
if platform.system() == 'Darwin':
libpython = glob(sys.prefix+'/lib/libpython*.dylib')[0]
elif platform.system() == 'Linux':
libpython = glob(sys.prefix+'/lib/libpython*.so')[0]
elif platform.system() == 'Windows':
libpython = glob(sys.prefix+'/python*.dll')[0]
else:
raise Exception('Unable to find libpython for system %s' % platform.system())
kernel_env['PYTHON_LIBRARY'] = libpython
if args.use_conda_shared_libs:
        if platform.system() != 'Windows':  # on Windows ':' follows the drive letter, so ';' is used there
kernel_env['LD_LIBRARY_PATH'] += ':' + sys.prefix + '/lib'
else:
kernel_env['LD_LIBRARY_PATH'] += ';' + os.path.join(sys.prefix, 'lib')
return kernel_env |
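A hedged sketch of the argparse namespace this expects for the prebuilt-toolchain case on Linux; the attribute names are read off the accesses above and the toolchain path is a placeholder:

import argparse

# Hypothetical invocation: a prebuilt toolchain, every other option left unset.
args = argparse.Namespace(
    swift_toolchain='/opt/swift-toolchain',
    swift_build=None,
    xcode_path=None,
    swift_python_version=None,
    swift_python_library=None,
    swift_python_use_conda=False,
    use_conda_shared_libs=False,
)
# make_kernel_env(args) would then return, among others:
#   LD_LIBRARY_PATH  = /opt/swift-toolchain/usr/lib/swift/linux
#   REPL_SWIFT_PATH  = /opt/swift-toolchain/usr/bin/repl_swift
#   SWIFT_BUILD_PATH = /opt/swift-toolchain/usr/bin/swift-build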
Validates that the env vars refer to things that actually exist. | def validate_kernel_env(kernel_env):
"""Validates that the env vars refer to things that actually exist."""
# TODO: if not /lldb/_lldb.*
if platform.system() == 'Windows':
if not os.path.isfile(kernel_env['PYTHONPATH'] + '/lldb/_lldb.pyd'):
raise Exception('lldb python libs not found at %s' %
kernel_env['PYTHONPATH'])
else:
if not os.path.isfile(kernel_env['PYTHONPATH'] + '/lldb/_lldb.so'):
raise Exception('lldb python libs not found at %s' %
kernel_env['PYTHONPATH'])
if not os.path.isfile(kernel_env['REPL_SWIFT_PATH']):
raise Exception('repl_swift binary not found at %s' %
kernel_env['REPL_SWIFT_PATH'])
if 'SWIFT_BUILD_PATH' in kernel_env and \
not os.path.isfile(kernel_env['SWIFT_BUILD_PATH']):
raise Exception('swift-build binary not found at %s' %
kernel_env['SWIFT_BUILD_PATH'])
if 'SWIFT_PACKAGE_PATH' in kernel_env and \
not os.path.isfile(kernel_env['SWIFT_PACKAGE_PATH']):
raise Exception('swift-package binary not found at %s' %
kernel_env['SWIFT_PACKAGE_PATH'])
if 'PYTHON_LIBRARY' in kernel_env and \
not os.path.isfile(kernel_env['PYTHON_LIBRARY']):
raise Exception('python library not found at %s' %
kernel_env['PYTHON_LIBRARY'])
lib_paths = kernel_env['LD_LIBRARY_PATH'].split(':') if platform.system() != 'Windows' else \
        kernel_env['LD_LIBRARY_PATH'].split(';')  # ';' on Windows, since ':' follows the drive letter
for index, lib_path in enumerate(lib_paths):
if os.path.isdir(lib_path):
continue
# First LD_LIBRARY_PATH should contain the swift toolchain libs.
if index == 0:
raise Exception('swift libs not found at %s' % lib_path)
# Other LD_LIBRARY_PATHs may be appended for other libs.
raise Exception('shared lib dir not found at %s' % lib_path) |
Instantiates a CapturingSocket and SwiftShell and hooks them up.
After you call this, the returned CapturingSocket should capture all
IPython display messages. | def create_shell(username, session_id, key):
"""Instantiates a CapturingSocket and SwiftShell and hooks them up.
After you call this, the returned CapturingSocket should capture all
IPython display messages.
"""
socket = CapturingSocket()
session = Session(username=username, session=session_id, key=key)
shell = SwiftShell.instance()
shell.display_pub.session = session
shell.display_pub.pub_socket = socket
return (socket, shell) |
Return parsed command line arguments. | def parse_args():
"""Return parsed command line arguments."""
parser = argparse.ArgumentParser()
project.add_arguments(parser)
return parser.parse_args() |
Execute specified indexed project actions. | def main():
"""Execute specified indexed project actions."""
args = parse_args()
with open(args.projects) as projects:
index = json.loads(projects.read())
result = project.ProjectListBuilder(
args.include_repos,
args.exclude_repos,
args.verbose,
args.process_count,
project.ProjectBuilder.factory(
args.include_actions,
args.exclude_actions,
args.verbose,
project.ActionBuilder.factory(
args.swiftc,
args.swift_version,
args.swift_branch,
args.sandbox_profile_xcodebuild,
args.sandbox_profile_package,
args.add_swift_flags,
args.skip_clean,
args.build_config,
args.strip_resource_phases
),
),
index
).build()
common.debug_print(str(result))
return 0 if result.result in [project.ResultEnum.PASS,
project.ResultEnum.XFAIL] else 1 |
Return parsed command line arguments. | def parse_args():
"""Return parsed command line arguments."""
parser = argparse.ArgumentParser()
project.add_arguments(parser)
return parser.parse_args() |
Execute specified indexed project actions. | def main():
"""Execute specified indexed project actions."""
args = parse_args()
with open(args.projects) as projects:
index = json.loads(projects.read())
result = project.ProjectListBuilder(
args.include_repos,
args.exclude_repos,
args.verbose,
args.process_count,
project.ProjectBuilder.factory(
args.include_actions,
args.exclude_actions,
args.verbose,
project.IncrementalActionBuilder.factory(
args.swiftc,
args.swift_version,
args.swift_branch,
args.job_type,
args.sandbox_profile_xcodebuild,
args.sandbox_profile_package,
args.add_swift_flags,
args.build_config,
args.strip_resource_phases
),
),
index
).build()
common.debug_print(str(result))
return 0 if result.result in [project.ResultEnum.PASS,
project.ResultEnum.XFAIL] else 1 |