Page through text by invoking a program on a temporary file.
def _tempfilepager(
    generator: cabc.Iterable[str], cmd: str, color: bool | None
) -> None:
    """Page through text by invoking a program on a temporary file."""
    import tempfile

    fd, filename = tempfile.mkstemp()
    # TODO: This never terminates if the passed generator never terminates.
    text = "".join(generator)
    if not color:
        text = strip_ansi(text)
    encoding = get_best_encoding(sys.stdout)
    with open_stream(filename, "wb")[0] as f:
        f.write(text.encode(encoding))
    try:
        os.system(f'{cmd} "{filename}"')
    finally:
        os.close(fd)
        os.unlink(filename)
Simply print unformatted text. This is the ultimate fallback.
def _nullpager(
    stream: t.TextIO, generator: cabc.Iterable[str], color: bool | None
) -> None:
    """Simply print unformatted text. This is the ultimate fallback."""
    for text in generator:
        if not color:
            text = strip_ansi(text)
        stream.write(text)
A Group can be built with a dict of commands.
def test_group_commands_dict(runner):
    """A Group can be built with a dict of commands."""

    @click.command()
    def sub():
        click.echo("sub", nl=False)

    cli = click.Group(commands={"other": sub})
    result = runner.invoke(cli, ["other"])
    assert result.output == "sub"
A Group can be built with a list of commands.
def test_group_from_list(runner):
    """A Group can be built with a list of commands."""

    @click.command()
    def sub():
        click.echo("sub", nl=False)

    cli = click.Group(commands=[sub])
    result = runner.invoke(cli, ["sub"])
    assert result.output == "sub"
When a group has ``invoke_without_command=True``, the result callback is always invoked. A regular group invokes it with its return value, a chained group with ``[]``.
def test_no_command_result_callback(runner, chain, expect):
    """When a group has ``invoke_without_command=True``, the result
    callback is always invoked. A regular group invokes it with its
    return value, a chained group with ``[]``.
    """

    @click.group(invoke_without_command=True, chain=chain)
    def cli():
        return 1

    @cli.result_callback()
    def process_result(result):
        click.echo(result, nl=False)

    result = runner.invoke(cli, [])
    assert result.output == expect
Test to check that make_pass_decorator doesn't consume arguments based on invocation order.
def test_make_pass_decorator_args(runner):
    """Test to check that make_pass_decorator doesn't consume arguments
    based on invocation order.
    """

    class Foo:
        title = "foocmd"

    pass_foo = click.make_pass_decorator(Foo)

    @click.group()
    @click.pass_context
    def cli(ctx):
        ctx.obj = Foo()

    @cli.command()
    @click.pass_context
    @pass_foo
    def test1(foo, ctx):
        click.echo(foo.title)

    @cli.command()
    @pass_foo
    @click.pass_context
    def test2(ctx, foo):
        click.echo(foo.title)

    result = runner.invoke(cli, ["test1"])
    assert not result.exception
    assert result.output == "foocmd\n"

    result = runner.invoke(cli, ["test2"])
    assert not result.exception
    assert result.output == "foocmd\n"
A context's ``show_default`` setting defaults to the value from the parent context.
def test_propagate_show_default_setting(runner):
    """A context's ``show_default`` setting defaults to the value from
    the parent context.
    """
    group = click.Group(
        commands={
            "sub": click.Command("sub", params=[click.Option(["-a"], default="a")]),
        },
        context_settings={"show_default": True},
    )
    result = runner.invoke(group, ["sub", "--help"])
    assert "[default: a]" in result.output
A command with a custom ``context_class`` should produce a context using that type.
def test_command_context_class():
    """A command with a custom ``context_class`` should produce a context
    using that type.
    """

    class CustomContext(click.Context):
        pass

    class CustomCommand(click.Command):
        context_class = CustomContext

    command = CustomCommand("test")
    context = command.make_context("test", [])
    assert isinstance(context, CustomContext)
A command invoked from a custom context should have a new context with the same type.
def test_context_invoke_type(runner):
    """A command invoked from a custom context should have a new context
    with the same type.
    """

    class CustomContext(click.Context):
        pass

    class CustomCommand(click.Command):
        context_class = CustomContext

    @click.command()
    @click.argument("first_id", type=int)
    @click.pass_context
    def second(ctx, first_id):
        assert isinstance(ctx, CustomContext)
        assert id(ctx) != first_id

    @click.command(cls=CustomCommand)
    @click.pass_context
    def first(ctx):
        assert isinstance(ctx, CustomContext)
        ctx.invoke(second, first_id=id(ctx))

    assert not runner.invoke(first).exception
A context with a custom ``formatter_class`` should format help using that type.
def test_context_formatter_class():
    """A context with a custom ``formatter_class`` should format help
    using that type.
    """

    class CustomFormatter(click.HelpFormatter):
        def write_heading(self, heading):
            heading = click.style(heading, fg="yellow")
            return super().write_heading(heading)

    class CustomContext(click.Context):
        formatter_class = CustomFormatter

    context = CustomContext(
        click.Command("test", params=[click.Option(["--value"])]), color=True
    )
    assert "\x1b[33mOptions\x1b[0m:" in context.get_help()
A group with a custom ``command_class`` should create subcommands of that type by default.
def test_group_command_class(runner):
    """A group with a custom ``command_class`` should create subcommands
    of that type by default.
    """

    class CustomCommand(click.Command):
        pass

    class CustomGroup(click.Group):
        command_class = CustomCommand

    group = CustomGroup()
    subcommand = group.command()(lambda: None)
    assert type(subcommand) is CustomCommand
    subcommand = group.command(cls=click.Command)(lambda: None)
    assert type(subcommand) is click.Command
A group with a custom ``group_class`` should create subgroups of that type by default.
def test_group_group_class(runner):
    """A group with a custom ``group_class`` should create subgroups
    of that type by default.
    """

    class CustomSubGroup(click.Group):
        pass

    class CustomGroup(click.Group):
        group_class = CustomSubGroup

    group = CustomGroup()
    subgroup = group.group()(lambda: None)
    assert type(subgroup) is CustomSubGroup
    subgroup = group.command(cls=click.Group)(lambda: None)
    assert type(subgroup) is click.Group
A group with ``group_class = type`` should create subgroups of the same type as itself.
def test_group_group_class_self(runner):
    """A group with ``group_class = type`` should create subgroups of
    the same type as itself.
    """

    class CustomGroup(click.Group):
        group_class = type

    group = CustomGroup()
    subgroup = group.group()(lambda: None)
    assert type(subgroup) is CustomGroup
Default default for flags when multiple=True should be empty tuple.
def test_multiple_flag_default(runner):
    """Default default for flags when multiple=True should be empty tuple."""

    @click.command
    # flag due to secondary token
    @click.option("-y/-n", multiple=True)
    # flag due to is_flag
    @click.option("-f", is_flag=True, multiple=True)
    # flag due to flag_value
    @click.option("-v", "v", flag_value=1, multiple=True)
    @click.option("-q", "v", flag_value=-1, multiple=True)
    def cli(y, f, v):
        return y, f, v

    result = runner.invoke(cli, standalone_mode=False)
    assert result.return_value == ((), (), ())

    result = runner.invoke(cli, ["-y", "-n", "-f", "-v", "-q"], standalone_mode=False)
    assert result.return_value == ((True, False), (True,), (1, -1))
:author: @r-m-n Including attribution to #612
def test_formatting_usage_error_metavar_missing_arg(runner):
    """
    :author: @r-m-n
    Including attribution to #612
    """

    @click.command()
    @click.argument("arg", metavar="metavar")
    def cmd(arg):
        pass

    result = runner.invoke(cmd, [])
    assert result.exit_code == 2
    assert result.output.splitlines() == [
        "Usage: cmd [OPTIONS] metavar",
        "Try 'cmd --help' for help.",
        "",
        "Error: Missing argument 'metavar'.",
    ]
A count option with the default type should not show >=0 in help.
def test_count_default_type_help():
    """A count option with the default type should not show >=0 in help."""
    option = click.Option(["--count"], count=True, help="some words")
    context = click.Context(click.Command("test"))
    result = option.get_help_record(context)[1]
    assert result == "some words"
The default for a File type is a filename string. The string should be displayed in help, not an open file object. Type casting is only applied to defaults in processing, not when getting the default value.
def test_file_type_help_default():
    """The default for a File type is a filename string. The string
    should be displayed in help, not an open file object. Type casting
    is only applied to defaults in processing, not when getting the
    default value.
    """
    option = click.Option(
        ["--in"], type=click.File(), default=__file__, show_default=True
    )
    context = click.Context(click.Command("test"))
    result = option.get_help_record(context)[1]
    assert __file__ in result
Ensure we can reuse a custom class option. See Issue #926
def test_option_custom_class_reusable(runner):
    """Ensure we can reuse a custom class option. See Issue #926"""

    class CustomOption(click.Option):
        def get_help_record(self, ctx):
            """a dumb override of a help text for testing"""
            return ("--help", "I am a help text")

    # Assign to a variable to re-use the decorator.
    testoption = click.option("--testoption", cls=CustomOption, help="you wont see me")

    @click.command()
    @testoption
    def cmd1(testoption):
        click.echo(testoption)

    @click.command()
    @testoption
    def cmd2(testoption):
        click.echo(testoption)

    # Both of the commands should have the --help option now.
    for cmd in (cmd1, cmd2):
        result = runner.invoke(cmd, ["--help"])
        assert "I am a help text" in result.output
        assert "you wont see me" not in result.output
When a boolean flag has distinct True/False opts, it should show the default opt name instead of the default value. It should only show one name even if multiple are declared.
def test_show_default_boolean_flag_name(runner, default, expect):
    """When a boolean flag has distinct True/False opts, it should show
    the default opt name instead of the default value. It should only
    show one name even if multiple are declared.
    """
    opt = click.Option(
        ("--cache/--no-cache", "--c/--nc"),
        default=default,
        show_default=True,
        help="Enable/Disable the cache.",
    )
    ctx = click.Context(click.Command("test"))
    message = opt.get_help_record(ctx)[1]
    assert f"[default: {expect}]" in message
When a boolean flag only has one opt and its default is True, it will show the default value, not the opt name.
def test_show_true_default_boolean_flag_value(runner):
    """When a boolean flag only has one opt and its default is True,
    it will show the default value, not the opt name.
    """
    opt = click.Option(
        ("--cache",),
        is_flag=True,
        show_default=True,
        default=True,
        help="Enable the cache.",
    )
    ctx = click.Context(click.Command("test"))
    message = opt.get_help_record(ctx)[1]
    assert "[default: True]" in message
When a boolean flag only has one opt and its default is False or None, it will not show the default.
def test_hide_false_default_boolean_flag_value(runner, default):
    """When a boolean flag only has one opt and its default is False or
    None, it will not show the default.
    """
    opt = click.Option(
        ("--cache",),
        is_flag=True,
        show_default=True,
        default=default,
        help="Enable the cache.",
    )
    ctx = click.Context(click.Command("test"))
    message = opt.get_help_record(ctx)[1]
    assert "[default: " not in message
When show_default is a string show that value as default.
def test_show_default_string(runner):
    """When show_default is a string show that value as default."""
    opt = click.Option(["--limit"], show_default="unlimited")
    ctx = click.Context(click.Command("cli"))
    message = opt.get_help_record(ctx)[1]
    assert "[default: (unlimited)]" in message
When show_default is True and no default is set do not show None.
def test_do_not_show_no_default(runner):
    """When show_default is True and no default is set do not show None."""
    opt = click.Option(["--limit"], show_default=True)
    ctx = click.Context(click.Command("cli"))
    message = opt.get_help_record(ctx)[1]
    assert "[default: None]" not in message
When show_default is True and multiple=True is set, it should not print empty default value in --help output.
def test_do_not_show_default_empty_multiple():
    """When show_default is True and multiple=True is set, it should not
    print empty default value in --help output.
    """
    opt = click.Option(["-a"], multiple=True, help="values", show_default=True)
    ctx = click.Context(click.Command("cli"))
    message = opt.get_help_record(ctx)[1]
    assert message == "values"
item_show_func should show the current item being yielded.
def test_progressbar_item_show_func(runner, monkeypatch):
    """item_show_func should show the current item being yielded."""

    @click.command()
    def cli():
        with click.progressbar(range(3), item_show_func=lambda x: str(x)) as progress:
            for item in progress:
                click.echo(f" item {item}")

    monkeypatch.setattr(click._termui_impl, "isatty", lambda _: True)
    lines = runner.invoke(cli).output.splitlines()

    for i, line in enumerate(x for x in lines if "item" in x):
        assert f"{i} item {i}" in line
Writing to stderr should escape invalid characters instead of raising a UnicodeEncodeError.
def test_isolation_stderr_errors():
    """Writing to stderr should escape invalid characters instead of
    raising a UnicodeEncodeError.
    """
    runner = CliRunner(mix_stderr=False)

    with runner.isolation() as (_, err):
        click.echo("\udce2", err=True, nl=False)

    assert err.getvalue() == b"\\udce2"
echo should not fail when stdout and stderr are None with pythonw on Windows.
def test_echo_no_streams(monkeypatch, runner):
    """echo should not fail when stdout and stderr are None with pythonw
    on Windows.
    """
    with runner.isolation():
        sys.stdout = None
        sys.stderr = None
        click.echo("test")
        click.echo("test", err=True)
Simple program that greets NAME for a total of COUNT times.
def hello() -> None:
    """Simple program that greets NAME for a total of COUNT times."""
    click.echo("Hello!")
Simple program that greets NAME for a total of COUNT times.
def hello(count: int, name: str) -> None:
    """Simple program that greets NAME for a total of COUNT times."""
    for _ in range(count):
        click.echo(f"Hello, {name}!")
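As written, hello() never receives count or name from the command line; in the click quickstart this function is wired up with decorators. A minimal sketch (the option names, defaults, and prompt follow the click documentation's example, not anything shown above):

import click

@click.command()
@click.option("--count", default=1, help="Number of greetings.")
@click.option("--name", prompt="Your name", help="The person to greet.")
def hello(count: int, name: str) -> None:
    """Simple program that greets NAME for a total of COUNT times."""
    for _ in range(count):
        click.echo(f"Hello, {name}!")

if __name__ == "__main__":
    hello()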
Set up sub-parsers for the remote command.
def _setup_args_for_remote(parser):
  """Set up sub-parsers for the remote command."""
  parser.add_argument(
      '-i',
      '--instance-name',
      required=True,
      help='The instance name (e.g. clusterfuzz-linux-0005).')
  parser.add_argument('--project', help='The Project ID.')
  parser.add_argument('--zone', help='The Project Zone.')

  subparsers = parser.add_subparsers(dest='remote')

  parser_tail = subparsers.add_parser(
      'tail', help='Print the last `size` lines of log_name.')
  parser_tail.add_argument('log_name', help='The log file name (without .log).')
  parser_tail.add_argument(
      'line_count', type=int, help='The number of lines to be shown.')

  parser_tailf = subparsers.add_parser(
      'tailf',
      help=('Print the last lines of logs and wait for more. '
            'This is equivalent to tail -f.'))
  parser_tailf.add_argument(
      'log_names', nargs='+', help='The log file names (without .log).')

  stage = subparsers.add_parser(
      'stage',
      help=('Stage a zip file by '
            '(1) building a zip with `butler.py package`, '
            '(2) sending the zip to the instance, '
            '(3) unzipping it to the clusterfuzz path, and '
            '(4) restarting run_bot.py.'))
  stage.add_argument(
      '-c', '--config-dir', required=True, help='Path to application config.')

  parser_rdp = subparsers.add_parser(
      'rdp',
      help=('Launch Remmina with correct configuration (e.g. IP address for '
            'the instance).'))
  parser_rdp.add_argument(
      '--share-path',
      help=('The share path that is mounted on the remote instance. '
            'It is convenient for sending files to the remote instance.'))

  subparsers.add_parser('restart', help='Restart a bot by killing run_bot.py.')
  subparsers.add_parser('reboot', help='Reboot with `sudo reboot`.')
Adds a parser for the `weights fuzzer` command.
def _add_weights_fuzzer_subparser(weights_subparsers):
  """Adds a parser for the `weights fuzzer` command."""
  parser = weights_subparsers.add_parser(
      'fuzzer', help='Interact with FuzzerJob weights.')
  subparsers = parser.add_subparsers(dest='fuzzer_command')

  subparsers.add_parser(
      'platforms', help='List distinct platform field values.')

  list_parser = subparsers.add_parser('list', help='List FuzzerJob entries.')
  list_parser.add_argument(
      '-p',
      '--platforms',
      help='Which platforms to list entries for.',
      nargs='+')
  list_parser.add_argument(
      '-f', '--fuzzers', help='Which fuzzers to list entries for.', nargs='+')
  list_parser.add_argument(
      '-j', '--jobs', help='Which jobs to list entries for.', nargs='+')
  list_parser.add_argument(
      '--format', help='Output format.', choices=['text', 'csv'], default='text')

  aggregate_parser = subparsers.add_parser(
      'aggregate', help='Aggregate matching FuzzerJob entries.')
  aggregate_parser.add_argument(
      '-p', '--platform', help='Which platform to query.', required=True)
  aggregate_parser.add_argument(
      '-f', '--fuzzers', help='Which fuzzers to aggregate.', nargs='+')
  aggregate_parser.add_argument(
      '-j', '--jobs', help='Which jobs to aggregate.', nargs='+')
Adds a parser for the `weights fuzzer-batch` command.
def _add_weights_batches_subparser(weights_subparsers):
  """Adds a parser for the `weights fuzzer-batch` command."""
  parser = weights_subparsers.add_parser(
      'fuzzer-batch',
      help='Interact with FuzzerJobs weights. FuzzerJobs database entries '
      'consist of batches of FuzzerJob entries that share the same platform. '
      'These are periodically generated from FuzzerJob entries. Bots read '
      'from FuzzerJobs batches in production instead of reading directly '
      'from FuzzerJob entries.')
  subparsers = parser.add_subparsers(dest='fuzzer_batch_command')

  list_parser = subparsers.add_parser('list', help='List FuzzerJobs entries.')
  list_parser.add_argument(
      '-p',
      '--platforms',
      help='Which platforms to list entries for.',
      nargs='+')
  list_parser.add_argument(
      '--format', help='Output format.', choices=['text', 'csv'], default='text')
Adds a parser for the `weights fuzz-target` command.
def _add_weights_target_subparser(weights_subparsers):
  """Adds a parser for the `weights fuzz-target` command."""
  parser = weights_subparsers.add_parser(
      'fuzz-target', help='Interact with FuzzTargetJob weights.')
  subparsers = parser.add_subparsers(dest='fuzz_target_command')

  list_parser = subparsers.add_parser('list', help='List FuzzTargetJob entries.')
  list_parser.add_argument(
      '-t',
      '--targets',
      help='Which fuzz target names to list entries for.',
      nargs='+')
  list_parser.add_argument(
      '-j', '--jobs', help='Which jobs to list entries for.', nargs='+')
  list_parser.add_argument(
      '-e', '--engines', help='Which engines to list entries for.', nargs='+')
  list_parser.add_argument(
      '--format', help='Output format.', choices=['text', 'csv'], default='text')

  set_parser = subparsers.add_parser(
      'set', help='Set the weight of a FuzzTargetJob entry.')
  set_parser.add_argument(
      '-t',
      '--target',
      help='The fuzz_target_name field of the entry to modify.',
      required=True)
  set_parser.add_argument(
      '-j', '--job', help='The job field of the entry to modify.', required=True)
  set_parser.add_argument(
      '-w', '--weight', help='The new weight to set.', type=float, required=True)
Adds a parser for the `weights` command.
def _add_weights_subparser(toplevel_subparsers):
  """Adds a parser for the `weights` command."""
  parser = toplevel_subparsers.add_parser(
      'weights', help='Interact with fuzzer/job weights.')
  parser.add_argument(
      '-c', '--config-dir', required=True, help='Path to application config.')

  subparsers = parser.add_subparsers(dest='weights_command')
  _add_weights_fuzzer_subparser(subparsers)
  _add_weights_batches_subparser(subparsers)
  _add_weights_target_subparser(subparsers)
Parse the command-line args and invoke the right command.
def main():
  """Parse the command-line args and invoke the right command."""
  parser = _ArgumentParser(
      description='Butler is here to help you with command-line tasks.')
  subparsers = parser.add_subparsers(dest='command')

  subparsers.add_parser(
      'bootstrap',
      help=('Install all required dependencies for running an appengine, '
            'a bot, and a mapreduce locally.'))

  parser_py_unittest = subparsers.add_parser(
      'py_unittest', help='Run Python unit tests.')
  parser_py_unittest.add_argument(
      '-p', '--pattern', help='Pattern for test files. Default is *_test.py.')
  parser_py_unittest.add_argument(
      '-u',
      '--unsuppress-output',
      action='store_true',
      help='Unsuppress output from `print`. Good for debugging.')
  parser_py_unittest.add_argument(
      '-m', '--parallel', action='store_true', help='Run tests in parallel.')
  parser_py_unittest.add_argument(
      '-v', '--verbose', action='store_true', help='Print logs from tests.')
  parser_py_unittest.add_argument(
      '-t', '--target', required=True, choices=['appengine', 'core', 'modules'])
  parser_py_unittest.add_argument(
      '-c', '--config-dir', help='Config dir to use for module tests.')

  parser_js_unittest = subparsers.add_parser(
      'js_unittest', help='Run Javascript unit tests.')
  parser_js_unittest.add_argument(
      '-p',
      '--persist',
      action='store_true',
      help='Do not close browser when tests finish. Good for debugging.')

  subparsers.add_parser('format', help='Format changed code in current branch.')
  subparsers.add_parser('lint', help='Lint changed code in current branch.')

  parser_package = subparsers.add_parser(
      'package', help='Package clusterfuzz with a staging revision')
  parser_package.add_argument(
      '-p', '--platform', choices=['linux', 'macos', 'windows', 'all'])

  parser_deploy = subparsers.add_parser('deploy', help='Deploy to Appengine')
  parser_deploy.add_argument(
      '-f', '--force', action='store_true', help='Force deploy from any branch.')
  parser_deploy.add_argument(
      '-c', '--config-dir', required=True, help='Path to application config.')
  parser_deploy.add_argument(
      '--staging', action='store_true', help='Deploy to staging.')
  parser_deploy.add_argument(
      '--prod', action='store_true', help='Deploy to production.')
  parser_deploy.add_argument(
      '--targets', nargs='*', default=['appengine', 'k8s', 'zips'])

  parser_run_server = subparsers.add_parser(
      'run_server', help='Run the local Clusterfuzz server.')
  parser_run_server.add_argument(
      '-b',
      '--bootstrap',
      action='store_true',
      help='Bootstrap the local database.')
  parser_run_server.add_argument(
      '--storage-path',
      default='local/storage',
      help='Storage path for local database.')
  parser_run_server.add_argument(
      '--skip-install-deps',
      action='store_true',
      help=("Don't install dependencies before running this command (useful "
            "when you're restarting the server often)."))
  parser_run_server.add_argument(
      '--log-level', default='info', help='Logging level')
  parser_run_server.add_argument(
      '--clean', action='store_true', help='Clear existing database data.')

  parser_run = subparsers.add_parser(
      'run', help='Run a one-off script against a datastore (e.g. migration).')
  parser_run.add_argument(
      'script_name',
      help='The script module name under `./local/butler/scripts`.')
  parser_run.add_argument(
      '--script_args', action='append', help='Script specific arguments')
  parser_run.add_argument(
      '--non-dry-run',
      action='store_true',
      help='Run with actual datastore writes. Default to dry-run.')
  parser_run.add_argument(
      '-c', '--config-dir', required=True, help='Path to application config.')
  parser_run.add_argument(
      '--local', action='store_true', help='Run against local server instance.')

  parser_run_bot = subparsers.add_parser(
      'run_bot', help='Run a local clusterfuzz bot.')
  parser_run_bot.add_argument(
      '--name', default='test-bot', help='Name of the bot.')
  parser_run_bot.add_argument(
      '--server-storage-path',
      default='local/storage',
      help='Server storage path.')
  parser_run_bot.add_argument('directory', help='Directory to create bot in.')
  parser_run_bot.add_argument(
      '--android-serial',
      help='Serial number of an Android device to connect to instead of '
      'running normally.')

  parser_remote = subparsers.add_parser(
      'remote', help='Run command-line tasks on a remote bot.')
  _setup_args_for_remote(parser_remote)

  parser_clean_indexes = subparsers.add_parser(
      'clean_indexes', help='Clean up undefined indexes (in index.yaml).')
  parser_clean_indexes.add_argument(
      '-c', '--config-dir', required=True, help='Path to application config.')

  parser_create_config = subparsers.add_parser(
      'create_config', help='Create a new deployment config.')
  parser_create_config.add_argument(
      'new_config_dir', type=str, help='The new config directory to create.')
  parser_create_config.add_argument(
      '--project-id', type=str, required=True, help='Your Cloud Project ID.')
  parser_create_config.add_argument(
      '--firebase-api-key',
      type=str,
      required=True,
      help='Firebase web API key (for authentication).')
  parser_create_config.add_argument(
      '--oauth-client-secrets-path',
      type=str,
      required=True,
      help='Path to client_secrets.json.')
  parser_create_config.add_argument(
      '--gce-zone',
      type=str,
      default='us-central1-f',
      help='Region for GCE VMs.')
  parser_create_config.add_argument(
      '--appengine-location',
      type=str,
      default='us-central',
      help='Location for App Engine.')

  subparsers.add_parser(
      'integration_tests', help='Run end-to-end integration tests.')

  _add_weights_subparser(subparsers)

  args = parser.parse_args()
  if not args.command:
    parser.print_help()
    return 0

  _setup()
  command = importlib.import_module(f'local.butler.{args.command}')
  return command.execute(args)
Set up configs and import paths.
def _setup():
  """Set up configs and import paths."""
  os.environ['ROOT_DIR'] = os.path.abspath('.')
  os.environ['PYTHONIOENCODING'] = 'UTF-8'

  sys.path.insert(0, os.path.abspath(os.path.join('src')))
  from clusterfuzz._internal.base import modules
  modules.fix_module_search_paths()
Set up host environment.
def setup_environment():
  """Set up host environment."""
  os.environ['QUEUE_OVERRIDE'] = 'LINUX_UNTRUSTED'
  os.environ['WORKER_ROOT_DIR'] = os.path.join(MNT_DIR, 'clusterfuzz')
  os.environ['WORKER_BOT_TMPDIR'] = os.path.join(MNT_DIR, 'tmp')

  if not os.path.exists(BOT_BASEDIR):
    os.mkdir(BOT_BASEDIR)
Set up the bot directory and run the bot in a restart loop.
def start_bot_instance(instance_num):
  """Set up the bot directory and run the bot in a restart loop."""
  env = os.environ.copy()

  host_name = os.getenv('HOSTNAME', socket.gethostname())
  bot_name = '%s-%d' % (host_name, instance_num)
  env['BOT_NAME'] = bot_name
  env['HOST_INSTANCE_NAME'] = host_name
  env['HOST_INSTANCE_NUM'] = str(instance_num)

  bot_directory = os.path.join(BOT_BASEDIR, bot_name)
  bot_root_directory = os.path.join(bot_directory, 'clusterfuzz')
  tmp_directory = os.path.join(bot_directory, 'tmp')
  if not os.path.exists(bot_directory):
    os.mkdir(bot_directory)
    os.mkdir(tmp_directory)

  env['ROOT_DIR'] = bot_root_directory
  env['BOT_TMPDIR'] = tmp_directory
  env['PYTHONPATH'] = os.path.join(bot_root_directory, 'src')

  if os.path.exists(bot_root_directory):
    shutil.rmtree(bot_root_directory)
  shutil.copytree(SRC_DIR, bot_root_directory)

  while True:
    bot_proc = subprocess.Popen(
        sys.executable + ' src/python/bot/startup/run.py 2>&1 > console.txt',
        shell=True,
        env=env,
        cwd=bot_root_directory)
    bot_proc.wait()
    print('Instance %i exited.' % instance_num, file=sys.stderr)
Return a list of last modified times for the files in a directory.
def get_file_modified_times(directory):
  """Return a list of last modified times for the files in a directory."""
  modified_times = []
  for root, _, filenames in os.walk(directory):
    for filename in filenames:
      modified_times.append(os.path.getmtime(os.path.join(root, filename)))

  return modified_times
Build a single file using polymer-bundler.
def build_file(filename):
  """Build a single file using polymer-bundler."""
  input_filename = os.path.join('private', 'templates', filename)
  output_filename = os.path.join('templates', filename)

  os.system('polymer-bundler --inline-scripts --inline-css --strip-comments '
            '--out-file={output_filename} {input_filename}'.format(
                output_filename=output_filename,
                input_filename=input_filename))

  if os.path.exists(output_filename) and os.path.getsize(output_filename):
    return True

  print('Failed to build template: ' + output_filename)
  return False
Use polymer-bundler to compile templates.
def main():
  """Use polymer-bundler to compile templates."""
  os.chdir(APPENGINE_DIRECTORY)

  bundled_change_times = get_file_modified_times('templates')
  first_bundled_time = min(bundled_change_times) if bundled_change_times else 0
  latest_unbundled_time = max(get_file_modified_times('private'))
  if latest_unbundled_time < first_bundled_time:
    print('App Engine templates are up to date.')
    return

  print('Building templates for App Engine...')

  if not os.path.exists('templates'):
    os.mkdir('templates')

  template_names = os.listdir(os.path.join('private', 'templates'))
  pool = multiprocessing.Pool(max(multiprocessing.cpu_count() // 2, 1))
  result = pool.map(build_file, template_names)

  if not all(result):
    print('Failed to build App Engine templates.')
    sys.exit(1)

  print('App Engine templates built successfully.')
WSGI middleware for ndb_datastore context allocation to the app.
def ndb_wsgi_middleware(wsgi_app):
  """WSGI middleware for ndb_datastore context allocation to the app."""

  def middleware(environ, start_response):
    with ndb_client.context():
      return wsgi_app(environ, start_response)

  return middleware
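How this middleware is typically applied: a minimal usage sketch, assuming a Flask app object named flask_app and an ndb client created once at module load (both names are assumptions here, not taken from the code above):

import flask
from google.cloud import ndb

ndb_client = ndb.Client()  # assumed to be constructed once at startup
flask_app = flask.Flask(__name__)
# Wrap the WSGI callable so every request runs inside an NDB context.
flask_app.wsgi_app = ndb_wsgi_middleware(flask_app.wsgi_app)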
Utility function to register all routes to the flask app.
def register_routes(flask_app, routes):
  """Utility function to register all routes to the flask app."""
  for route, handler in routes:
    flask_app.add_url_rule(route, view_func=handler.as_view(route))
Redirection handler.
def redirect_handler():
  """Redirection handler."""
  if not redirect_domains:
    return None

  if request.host in redirect_domains:
    return redirect('https://' + main_domain + request.full_path)

  return None
Format datetime object for display.
def format_time(dt):
  """Format datetime object for display."""
  return '{t.day} {t:%b} {t:%y} {t:%X} PDT'.format(t=dt)
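For illustration, the format string renders like this. Note that %b and %X are locale-dependent (the output below assumes the default C locale), and the 'PDT' suffix is hardcoded regardless of the input's actual timezone:

import datetime

dt = datetime.datetime(2024, 3, 5, 13, 45, 7)
assert format_time(dt) == '5 Mar 24 13:45:07 PDT'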
Split text into lines.
def splitlines(text):
  """Split text into lines."""
  return text.splitlines()
Dump base64-encoded JSON string (to avoid XSS).
def encode_json(value):
  """Dump base64-encoded JSON string (to avoid XSS)."""
  return base64.b64encode(
      json.dumps(value, cls=JsonEncoder).encode('utf-8')).decode('utf-8')
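The reverse transformation is plain base64 plus JSON decoding. A sketch of a hypothetical decode_json helper (not part of the code above):

import base64
import json

def decode_json(encoded):
  # Undo encode_json: base64-decode, then parse the JSON payload.
  return json.loads(base64.b64decode(encoded).decode('utf-8'))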
Add menu item to the main navigation.
def add_menu(name, href):
  """Add menu item to the main navigation."""
  _MENU_ITEMS.append(_MenuItem(name, href))
Make the switch account url.
def make_login_url(dest_url):
  """Make the switch account url."""
  return '/login?' + urllib.parse.urlencode({'dest': dest_url})
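For illustration, urlencode percent-escapes the destination so it survives as a single query parameter:

import urllib.parse

assert make_login_url('/testcases?open=yes') == '/login?dest=%2Ftestcases%3Fopen%3Dyes'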
Make the logout url.
def make_logout_url(dest_url):
  """Make the logout url."""
  return '/logout?' + urllib.parse.urlencode({
      'csrf_token': form.generate_csrf_token(),
      'dest': dest_url,
  })
Check redirect URL is safe.
def check_redirect_url(url):
  """Check redirect URL is safe."""
  if not _SAFE_URL_PATTERN.match(url):
    raise helpers.EarlyExitError('Invalid redirect.', 403)
Get the time before which we consider bots to be dead.
def _get_alive_cutoff():
  """Get the time before which we consider bots to be dead."""
  seconds_to_wait_for_dead_bot = (
      tasks.TASK_LEASE_SECONDS + tasks.TASK_COMPLETION_BUFFER +
      data_types.HEARTBEAT_WAIT_INTERVAL)
  alive_cutoff = utils.utcnow() - datetime.timedelta(
      seconds=seconds_to_wait_for_dead_bot)
  return alive_cutoff
Format heartbeats for template.
def _convert_heartbeats_to_dicts(heartbeats):
  """Format heartbeats for template."""
  alive_cutoff = _get_alive_cutoff()

  result = []
  for heartbeat in heartbeats:
    result.append({
        'bot_name': heartbeat.bot_name,
        'source_version': heartbeat.source_version,
        'task_payload': heartbeat.task_payload,
        'platform_id': heartbeat.platform_id,
        'task_end_time':
            utils.utc_datetime_to_timestamp(heartbeat.task_end_time)
            if heartbeat.task_end_time else '',
        'last_beat_time':
            utils.utc_datetime_to_timestamp(heartbeat.last_beat_time)
            if heartbeat.last_beat_time else '',
        'alive': 'alive' if heartbeat.last_beat_time > alive_cutoff else 'dead'
    })

  return result
Get results for the bots page.
def get_results():
  """Get results for the bots page."""
  # Return bots sorted alphabetically by bot_name.
  query = datastore_query.Query(data_types.Heartbeat)
  query.order('bot_name', is_desc=False)
  params = dict(request.iterparams())
  filters.add(query, params, FILTERS)

  page = helpers.cast(request.get('page', 1), int, "'page' is not an int.")
  items, total_pages, total_items, has_more = query.fetch_page(
      page=page, page_size=PAGE_SIZE, projection=None, more_limit=MORE_LIMIT)
  items = _convert_heartbeats_to_dicts(items)
  helpers.log('Bots', helpers.VIEW_OPERATION)

  result = {
      'hasMore': has_more,
      'items': items,
      'page': page,
      'pageSize': PAGE_SIZE,
      'totalItems': total_items,
      'totalPages': total_pages,
  }
  return result, params
Get the data from BigQuery.
def get(params, query, offset, limit):
  """Get the data from BigQuery."""
  sql = SQL.format(
      table_id='%ss' % params['type'],
      where_clause=query.get_where_clause(),
      prefix=params['type'],
      offset=offset,
      limit=limit)
  client = big_query.Client()
  result = client.query(query=sql, offset=offset, limit=limit)
  return result.rows, result.total_count
Get the result for the crash stats page.
def get_result():
  """Get the result for the crash stats page."""
  params = dict(request.iterparams())
  params['type'] = params.get('type', 'regression')
  page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")

  is_revision_empty = 'revision' not in params

  query = big_query_query.Query()
  crash_access.add_scope(query, params, 'security_flag', 'job_type',
                         'fuzzer_name')

  if is_revision_empty:
    total_count = 0
    rows = []
  else:
    filters.add(query, params, FILTERS)
    rows, total_count = get(
        params=params,
        query=query,
        offset=(page - 1) * PAGE_SIZE,
        limit=PAGE_SIZE)

  helpers.log('Regression', helpers.VIEW_OPERATION)

  result = {
      'totalPages': (total_count // PAGE_SIZE) + 1,
      'page': page,
      'pageSize': PAGE_SIZE,
      'items': rows,
      'totalCount': total_count,
      'isRevisionEmpty': is_revision_empty
  }
  return result, params
Return the value for the entry in the item list whose name matches.
def get_value_by_name(item_list, name):
  """Return the value for the entry in the item list whose name matches."""
  for item in item_list:
    if item['name'] == name:
      return item['value']

  return None
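For illustration, with a hypothetical item list of name/value dicts:

stats = [{'name': 'count', 'value': 10}, {'name': 'speed', 'value': 2.5}]
assert get_value_by_name(stats, 'speed') == 2.5
assert get_value_by_name(stats, 'missing') is None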
Return url for the report requested.
def _get_project_report_url(job, date):
  """Return url for the report requested."""
  project = data_handler.get_project_name(job)
  if not project:
    return None

  if date == 'latest':
    date = None
  else:
    try:
      date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
    except ValueError:
      raise helpers.EarlyExitError('Invalid date.', 400)

  info = fuzzer_stats.get_coverage_info(project, date)
  if not info:
    return None

  return info.html_report_url
Get report url for a redirect from the coverage report handler.
def get_report_url(report_type, argument, date):
  """Get report url for a redirect from the coverage report handler."""
  # It's very easy to add support for per-fuzzer reports, but we don't need it.
  if report_type != 'job':
    raise helpers.EarlyExitError('Invalid report type.', 400)

  job = argument
  if not job:
    raise helpers.EarlyExitError('Job name cannot be empty.', 400)

  if not data_types.Job.VALID_NAME_REGEX.match(job):
    raise helpers.EarlyExitError('Invalid job name.', 400)

  if not date or not VALID_DATE_REGEX.match(date):
    raise helpers.EarlyExitError('Invalid date.', 400)

  return _get_project_report_url(job, date)
Start a query for an associated testcase.
def query_testcase(project_name, crash_type, crash_state, security_flag,
                   is_open):
  """Start a query for an associated testcase."""
  return data_types.Testcase.query(
      data_types.Testcase.project_name == project_name,
      data_types.Testcase.crash_type == crash_type,
      data_types.Testcase.crash_state == crash_state,
      data_types.Testcase.security_flag == security_flag,
      data_types.Testcase.open == is_open,
      ndb_utils.is_false(data_types.Testcase.is_a_duplicate_flag)).order(
          -data_types.Testcase.timestamp).iter(
              limit=1,
              projection=[
                  'bug_information',
                  'group_bug_information',
              ])
Attach testcase to each crash.
def attach_testcases(rows):
  """Attach testcase to each crash."""
  testcases = {}
  for index, row in enumerate(rows):
    testcases[index] = {
        'open_testcase':
            query_testcase(
                project_name=row['projectName'],
                crash_type=row['crashType'],
                crash_state=row['crashState'],
                security_flag=row['isSecurity'],
                is_open=True),
        'closed_testcase':
            query_testcase(
                project_name=row['projectName'],
                crash_type=row['crashType'],
                crash_state=row['crashState'],
                security_flag=row['isSecurity'],
                is_open=False)
    }

  for index, row in enumerate(rows):
    testcase = (list(testcases[index]['open_testcase']) or
                list(testcases[index]['closed_testcase']) or [None])[0]
    if testcase:
      testcase = {
          'id': testcase.key.id(),
          'issueNumber': testcase.bug_information,
          'groupIssueNumber': testcase.group_bug_information
      }
    row['testcase'] = testcase
Get the result for the crash stats page.
def get_result():
  """Get the result for the crash stats page."""
  params = dict(request.iterparams())
  page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")
  group_by = params.get('group', 'platform')
  params['group'] = group_by
  sort_by = params.get('sort', 'total_count')
  params['sort'] = sort_by
  params['number'] = params.get('number', 'count')

  # Conditions for individual records.
  query = crash_stats.Query()
  query.group_by = group_by
  query.sort_by = sort_by
  crash_access.add_scope(query, params, 'security_flag', 'job_type',
                         'fuzzer_name')
  filters.add(query, params, FILTERS)

  # Conditions after grouping.
  group_query = crash_stats.Query()
  filters.add(group_query, params, GROUP_FILTERS)

  try:
    total_count, rows = crash_stats.get(
        query=query,
        group_query=group_query,
        offset=(page - 1) * PAGE_SIZE,
        limit=PAGE_SIZE)
  except ValueError:
    raise helpers.EarlyExitError('Invalid filters', 400)

  attach_testcases(rows)

  helpers.log('CrashStats', helpers.VIEW_OPERATION)

  result = {
      'totalPages': (total_count // PAGE_SIZE) + 1,
      'page': page,
      'pageSize': PAGE_SIZE,
      'items': rows,
      'totalCount': total_count
  }
  return result, params
Get all platforms including parent platform.
def get_all_platforms():
  """Get all platforms including parent platform."""
  items = data_types.Testcase.query(
      projection=[data_types.Testcase.platform], distinct=True)
  return sorted(
      list(
          set([item.platform.lower() for item in items if item.platform] +
              ['android'])))
Mark bug as fixed.
def _mark_as_fixed(testcase, revision):
  """Mark bug as fixed."""
  testcase.open = False
  # Bisection not available for external reproduction infrastructure. Assume
  # range (crash revision : current revision).
  testcase.fixed = f'{testcase.crash_revision}:{revision}'
  data_handler.update_progression_completion_metadata(
      testcase, revision, message=f'fixed in r{revision}')
Mark testcase as errored out.
def _mark_errored(testcase, revision, error):
  """Mark testcase as errored out."""
  message = 'Received error from external infra, marking testcase as NA.'
  logs.log_warn(message, error=error, testcase_id=testcase.key.id())

  testcase.fixed = 'NA'
  testcase.open = False
  data_handler.update_progression_completion_metadata(
      testcase, revision, message=message)
Handle update.
def handle_update(testcase, revision, stacktraces, error, protocol_version):
  """Handle update."""

  def is_still_crashing(st_index, stacktrace):
    """Check if the given stacktrace indicates the testcase is still
    crashing."""
    state = stack_analyzer.get_crash_data(
        stacktrace,
        fuzz_target=fuzz_target_name,
        symbolize_flag=False,
        already_symbolized=True,
        detect_ooms_and_hangs=True)

    crash_comparer = CrashComparer(state.crash_state, testcase.crash_state)
    if not crash_comparer.is_similar():
      return False

    logs.log(f'State for trial {st_index} of {testcase_id} remains similar '
             f'(old_state={testcase.crash_state}, '
             f'new_state={state.crash_state}).')

    is_security = crash_analyzer.is_security_issue(
        state.crash_stacktrace, state.crash_type, state.crash_address)
    if is_security != testcase.security_flag:
      return False

    logs.log(f'Security flag for trial {st_index} of {testcase_id} '
             f'still matches ({testcase.security_flag}).')
    return True

  testcase_id = testcase.key.id()
  logs.log('Got external update for testcase.', testcase_id=testcase_id)
  if error:
    _mark_errored(testcase, revision, error)
    return

  last_tested_revision = (
      testcase.get_metadata('last_tested_revision') or testcase.crash_revision)

  if revision < last_tested_revision:
    logs.log_warn(f'Revision {revision} less than previously tested '
                  f'revision {last_tested_revision}.')
    return

  if protocol_version not in [OLD_PROTOCOL, NEW_PROTOCOL]:
    logs.log_error(f'Invalid protocol_version provided: {protocol_version} '
                   f'is not one of {{{OLD_PROTOCOL, NEW_PROTOCOL}}} '
                   f'(testcase_id={testcase_id}).')
    return

  if not stacktraces:
    logs.log_error(f'Empty JSON stacktrace list provided '
                   f'(testcase_id={testcase_id}).')
    return

  fuzz_target = testcase.get_fuzz_target()
  if fuzz_target:
    fuzz_target_name = fuzz_target.binary
    # Record use of fuzz target to avoid garbage collection (since fuzz_task
    # does not run).
    data_handler.record_fuzz_target(fuzz_target.engine, fuzz_target.binary,
                                    testcase.job_type)
  else:
    fuzz_target_name = None

  for st_index, stacktrace in enumerate(stacktraces):
    if is_still_crashing(st_index, stacktrace):
      logs.log(f'Stacktrace {st_index} of {testcase_id} still crashes.')
      testcase.last_tested_crash_stacktrace = stacktrace
      data_handler.update_progression_completion_metadata(
          testcase, revision, is_crash=True)
      return

  # All trials resulted in a non-crash. Close the testcase.
  logs.log(f'No matching crash detected in {testcase_id} over '
           f'{len(stacktraces)} trials, marking as fixed.')
  _mark_as_fixed(testcase, revision)
Convert bigquery type to charts type.
def _bigquery_type_to_charts_type(typename):
  """Convert bigquery type to charts type."""
  typename = typename.lower()
  if typename in ('integer', 'float'):
    return 'number'

  if typename == 'timestamp':
    return 'date'

  return 'string'
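A few representative mappings, for illustration (the function lowercases its input first, so BigQuery's uppercase type names work too):

assert _bigquery_type_to_charts_type('INTEGER') == 'number'
assert _bigquery_type_to_charts_type('float') == 'number'
assert _bigquery_type_to_charts_type('timestamp') == 'date'
assert _bigquery_type_to_charts_type('boolean') == 'string'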
Convert python type to charts type.
def _python_type_to_charts_type(type_value):
  """Convert python type to charts type."""
  if type_value in (int, float):
    return 'number'

  if type_value == datetime.date:
    return 'date'

  return 'string'
Parse YYYY-MM-DD.
def _parse_date(date_str):
  """Parse YYYY-MM-DD."""
  if not date_str:
    return None

  pattern = re.compile(r'^(\d{4})-(\d{2})-(\d{2})$')
  match = pattern.match(date_str)
  if not match:
    return None

  year, month, day = (int(val) for val in match.groups())
  return datetime.date(year, month, day)
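Behavior on a few illustrative inputs; anything that is not an exact four-digit-year, two-digit-month, two-digit-day string yields None:

import datetime

assert _parse_date('2023-01-31') == datetime.date(2023, 1, 31)
assert _parse_date('2023-1-31') is None  # two digits required for month/day
assert _parse_date('') is None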
Parse stats columns.
def _parse_stats_column_fields(results, stats_columns, group_by, fuzzer, jobs):
  """Parse stats columns."""
  result = []
  columns = fuzzer_stats.parse_stats_column_fields(stats_columns)

  # Insert the first column (group by).
  group_by_field_name = fuzzer_stats.group_by_to_field_name(group_by)
  columns.insert(0, fuzzer_stats.QueryField('j', group_by_field_name, None))

  contexts = {}

  for column in columns:
    if isinstance(column, fuzzer_stats.QueryField):
      key = '%s_%s' % (column.table_alias, column.select_alias)

      for i, field_info in enumerate(results['schema']['fields']):
        # The 'name' field could either be "prefix_fieldname" or simply
        # "fieldname".
        if (field_info['name'] == column.select_alias or
            field_info['name'] == key):
          result.append(
              QueryField(column, i,
                         _bigquery_type_to_charts_type(field_info['type']),
                         field_info['type']))
          break
    elif isinstance(column, fuzzer_stats.BuiltinFieldSpecifier):
      # Builtin field. Create a new context if it does not exist.
      field_class = column.field_class()
      if not field_class:
        continue

      context_class = field_class.CONTEXT_CLASS
      context = contexts.setdefault(context_class, context_class(fuzzer, jobs))
      result.append(BuiltinField(column, column.create(context)))

  return result
Parse group_by value.
def _parse_group_by(group_by):
  """Parse group_by value."""
  if group_by == 'by-day':
    return fuzzer_stats.QueryGroupBy.GROUP_BY_DAY
  if group_by == 'by-time':
    return fuzzer_stats.QueryGroupBy.GROUP_BY_TIME
  if group_by == 'by-revision':
    return fuzzer_stats.QueryGroupBy.GROUP_BY_REVISION
  if group_by == 'by-job':
    return fuzzer_stats.QueryGroupBy.GROUP_BY_JOB
  if group_by == 'by-fuzzer':
    return fuzzer_stats.QueryGroupBy.GROUP_BY_FUZZER

  return None
Return fuzzer entity, or engine this target is part of.
def _get_fuzzer_or_engine(name):
  """Return fuzzer entity, or engine this target is part of."""
  fuzz_target = data_handler.get_fuzz_target(name)
  if fuzz_target:
    name = fuzz_target.engine

  return data_types.Fuzzer.query(data_types.Fuzzer.name == name).get()
Return results from BigQuery.
def _do_bigquery_query(query):
  """Return results from BigQuery."""
  logs.log(query)
  client = big_query.Client()

  try:
    results = client.raw_query(query, max_results=10000)
  except HttpError as e:
    raise helpers.EarlyExitError(str(e), 500)

  if 'rows' not in results:
    raise helpers.EarlyExitError('No stats.', 404)

  return results
Parse stats column descriptions.
def _parse_stats_column_descriptions(stats_column_descriptions):
  """Parse stats column descriptions."""
  if not stats_column_descriptions:
    return {}

  try:
    result = yaml.safe_load(stats_column_descriptions)
    for key, value in result.items():
      result[key] = html.escape(value)

    return result
  except yaml.parser.ParserError:
    logs.log_error('Failed to parse stats column descriptions.')
    return {}
Build columns.
def _build_columns(result, columns):
  """Build columns."""
  for column in columns:
    if isinstance(column, QueryField):
      result['cols'].append({
          'label': column.field.select_alias,
          'type': column.field_type,
      })
    elif isinstance(column, BuiltinField):
      result['cols'].append({
          'label': column.spec.alias or column.spec.name,
          'type': _python_type_to_charts_type(column.field.VALUE_TYPE),
      })
Try casting value_str with cast_function, falling back to default_value.
def _try_cast(cell, value_str, cast_function, default_value):
  """Try casting value_str with cast_function, falling back to default_value."""
  try:
    cell['v'] = cast_function(value_str)
  except (ValueError, TypeError):
    cell['v'] = default_value
    cell['f'] = '--'
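A quick illustration of the fallback behavior (the cell dict follows the Google Charts DataTable cell shape used by _build_rows below: 'v' is the value, 'f' the formatted display string):

cell = {}
_try_cast(cell, '3.14159', lambda s: round(float(s), 1), 0.0)
assert cell == {'v': 3.1}

cell = {}
_try_cast(cell, 'not-a-number', int, 0)
assert cell == {'v': 0, 'f': '--'}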
Build rows.
def _build_rows(result, columns, rows, group_by):
  """Build rows."""
  for row in rows:
    row_data = []
    first_column_value = None
    for column in columns:
      cell = {}
      if isinstance(column, QueryField):
        value = row['f'][column.results_index]['v']

        if column.field.select_alias == 'time':
          timestamp = float(value)
          time = datetime.datetime.utcfromtimestamp(timestamp)
          first_column_value = first_column_value or time
          cell['v'] = 'Date(%d, %d, %d, %d, %d, %d)' % (
              time.year, time.month - 1, time.day, time.hour, time.minute,
              time.second)
        elif column.field.select_alias == 'date':
          timestamp = float(value)
          date = datetime.datetime.utcfromtimestamp(timestamp).date()
          first_column_value = first_column_value or date
          cell['v'] = 'Date(%d, %d, %d)' % (date.year, date.month - 1,
                                            date.day)
        elif column.bigquery_type == 'integer':
          _try_cast(cell, value, int, 0)
        elif column.bigquery_type == 'float':
          # Round all float values to single digits.
          _try_cast(cell, value, lambda s: round(float(s), 1), 0.0)
        else:
          cell['v'] = value
          first_column_value = first_column_value or cell['v']
      elif isinstance(column, BuiltinField):
        data = column.field.get(group_by, first_column_value)
        if data:
          formatted_value = data.value
          if data.link:
            link = (
                _get_cloud_storage_link(data.link)
                if data.link.startswith('gs://') else data.link)
            formatted_value = '<a href="%s">%s</a>' % (link, data.value)

          if data.sort_key is not None:
            cell['v'] = data.sort_key
          else:
            cell['v'] = data.value

          if data.sort_key is not None or data.link:
            cell['f'] = formatted_value
        else:
          cell['v'] = ''
          cell['f'] = '--'

      row_data.append(cell)

    result['rows'].append({'c': row_data})
Return a clickable link to a cloud storage file given the bucket path.
def _get_cloud_storage_link(bucket_path):
  """Return a clickable link to a cloud storage file given the bucket path."""
  return '/gcs-redirect?' + urllib.parse.urlencode({'path': bucket_path})
Creates a job filter from |job|.
def _get_filter_from_job(job):
  """Creates a job filter from |job|."""
  return [str(job)] if job else None
Wrapper around the caching wrappers for _build_results. Decides which of those wrappers to call based on how long the query should be cached for.
def build_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around the caching wrappers for _build_results. Decides which
  of those wrappers to call based on how long the query should be cached
  for."""
  datetime_end = _parse_date(date_end)
  if not datetime_end:
    raise helpers.EarlyExitError('Missing end date.', 400)

  if datetime_end < utils.utcnow().date():
    logs.log('Building results for older stats %s %s %s %s %s.' %
             (fuzzer, jobs, group_by, date_start, date_end))
    return _build_old_results(fuzzer, jobs, group_by, date_start, date_end)

  logs.log('Building results for stats including today %s %s %s %s %s.' %
           (fuzzer, jobs, group_by, date_start, date_end))
  return _build_todays_results(fuzzer, jobs, group_by, date_start, date_end)
Wrapper around _build_results that is intended for use by queries where date_end is today. Caches results for 15 minutes.
def _build_todays_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is today. Caches results for 15 minutes."""
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
Wrapper around _build_results that is intended for use by queries where date_end is before today. Caches results for 24 hours.
def _build_old_results(fuzzer, jobs, group_by, date_start, date_end):
  """Wrapper around _build_results that is intended for use by queries where
  date_end is before today. Caches results for 24 hours."""
  return _build_results(fuzzer, jobs, group_by, date_start, date_end)
Build results.
def _build_results(fuzzer, jobs, group_by, date_start, date_end):
  """Build results."""
  date_start = _parse_date(date_start)
  date_end = _parse_date(date_end)

  if not fuzzer or not group_by or not date_start or not date_end:
    raise helpers.EarlyExitError('Missing params.', 400)

  fuzzer_entity = _get_fuzzer_or_engine(fuzzer)
  if not fuzzer_entity:
    raise helpers.EarlyExitError('Fuzzer not found.', 404)

  if fuzzer_entity.stats_columns:
    stats_columns = fuzzer_entity.stats_columns
  else:
    stats_columns = fuzzer_stats.JobQuery.DEFAULT_FIELDS

  group_by = _parse_group_by(group_by)
  if group_by is None:
    raise helpers.EarlyExitError('Invalid grouping.', 400)

  table_query = fuzzer_stats.TableQuery(fuzzer, jobs, stats_columns, group_by,
                                        date_start, date_end)
  results = _do_bigquery_query(table_query.build())

  is_timeseries = group_by == fuzzer_stats.QueryGroupBy.GROUP_BY_TIME
  result = {
      'cols': [],
      'rows': [],
      'column_descriptions':
          _parse_stats_column_descriptions(
              fuzzer_entity.stats_column_descriptions),
      'is_timeseries': is_timeseries
  }

  columns = _parse_stats_column_fields(results, stats_columns, group_by,
                                       fuzzer, jobs)

  # If we are grouping by time and plotting graphs, skip builtin columns.
  if is_timeseries:
    columns = [c for c in columns if not isinstance(c, BuiltinField)]

  _build_columns(result, columns)
  _build_rows(result, columns, results['rows'], group_by)

  return result
Returns |date_value| if it is not empty, otherwise returns the date |days_ago| days ago.
def _get_date(date_value, days_ago):
  """Returns |date_value| if it is not empty, otherwise returns the date
  |days_ago| days ago."""
  if date_value:
    return date_value

  date_datetime = utils.utcnow() - datetime.timedelta(days=days_ago)
  return date_datetime.strftime('%Y-%m-%d')
Sort key function.
def _sort_by_name(item):
  """Sort key function."""
  return item['name']
Return the (engine display name, engine name) for the job.
def _get_engine_names(job_name):
  """Return the (engine display name, engine name) for the job."""
  if job_name.startswith('afl_'):
    return 'AFL', 'afl'
  if job_name.startswith('libfuzzer_'):
    return 'libFuzzer', 'libFuzzer'

  return 'Unknown', 'Unknown'
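Behavior on a few representative job names (the names themselves are made up for illustration):

assert _get_engine_names('libfuzzer_asan_zlib') == ('libFuzzer', 'libFuzzer')
assert _get_engine_names('afl_asan_zlib') == ('AFL', 'afl')
assert _get_engine_names('honggfuzz_asan_zlib') == ('Unknown', 'Unknown')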
Return the name of the single child fuzzer for the project, or None.
def get_single_fuzz_target_or_none(project, engine_name):
  """Return the name of the single child fuzzer for the project, or None."""
  fuzz_targets = data_handler.get_fuzz_targets(
      engine=engine_name, project=project)
  fuzz_target_name = None

  for fuzz_target in fuzz_targets:
    if fuzz_target_name:
      # More than one child fuzzer.
      return None

    fuzz_target_name = fuzz_target.fully_qualified_name()

  return fuzz_target_name
Return introspector projects status.
def get_introspector_index():
  """Return introspector projects status."""
  if storage.exists(INTROSPECTOR_INDEX_JSON_URL):
    introspector_index = json.loads(
        storage.read_data(INTROSPECTOR_INDEX_JSON_URL))
  else:
    introspector_index = {}

  logs.log('Loaded introspector status: %d' % len(introspector_index))
  return introspector_index
Return projects for jobs.
def _get_project_results_for_jobs(jobs):
  """Return projects for jobs."""
  projects = {}
  introspector_index = get_introspector_index()

  for job in sorted(jobs, key=lambda j: j.name):
    project_name = job.get_environment().get('PROJECT_NAME', job.name)
    if project_name not in projects:
      projects[project_name] = {'name': project_name, 'jobs': []}

    if utils.string_is_true(job.get_environment().get('CORPUS_PRUNE')):
      projects[project_name]['coverage_job'] = job.name

    projects[project_name]['has_introspector'] = False
    if project_name in introspector_index:
      projects[project_name]['has_introspector'] = True
      projects[project_name]['introspector_report'] = introspector_index[
          project_name]

    engine_display_name, engine_name = _get_engine_names(job.name)
    projects[project_name]['jobs'].append({
        'engine_display_name': engine_display_name,
        'engine_name': engine_name,
        'sanitizer_string':
            environment.get_memory_tool_display_string(job.name),
        'name': job.name,
        'single_target':
            get_single_fuzz_target_or_none(project_name, engine_name),
        'has_stats': True
    })

  projects = list(projects.values())
  projects.sort(key=_sort_by_name)

  for project in projects:
    project['jobs'].sort(key=_sort_by_name)

  return projects
Return all results.
def _get_all_project_results():
  """Return all results."""
  jobs = list(data_types.Job.query())
  return _get_project_results_for_jobs(jobs)
Return results for external user.
def _get_project_results_for_external_user(external_jobs):
  """Return results for external user."""
  jobs = list(data_types.Job.query())
  jobs = [job for job in jobs if job.name in external_jobs]
  return _get_project_results_for_jobs(jobs)
Return results.
def get_results():
  """Return results."""
  is_user = access.has_access()
  user_email = helpers.get_user_email()
  external_jobs = external_users.allowed_jobs_for_user(user_email)

  is_external_user = not is_user and external_jobs
  if not is_user and not is_external_user:
    raise helpers.AccessDeniedError()

  if is_user:
    projects = _get_all_project_results()
  else:
    projects = _get_project_results_for_external_user(external_jobs)

  results = {
      'info': {
          'projects': projects,
          'is_internal_user': is_user,
      },
  }
  return results
Return list of task queues.
def get_queues():
  """Return list of task queues."""
  queues = []
  for name, display_name in tasks.TASK_QUEUE_DISPLAY_NAMES.items():
    queue = {
        'name': name,
        'display_name': display_name,
    }
    queues.append(queue)

  queues.sort(key=lambda q: q['display_name'])
  return queues
Return a dict of job items along with associated fuzzers.
def _job_to_dict(job):
  """Return a dict of job items along with associated fuzzers."""
  result = job.to_dict()
  result['id'] = job.key.id()

  # Adding all associated fuzzers with each job.
  fuzzers = data_types.Fuzzer.query()
  result['fuzzers'] = [
      fuzzer.name for fuzzer in fuzzers if job.name in fuzzer.jobs
  ]
  return result
Get results for the jobs page.
def get_results():
  """Get results for the jobs page."""
  # Return jobs sorted alphabetically by name.
  query = datastore_query.Query(data_types.Job)
  query.order('name', is_desc=False)
  params = dict(request.iterparams())
  filters.add(query, params, FILTERS)

  page = helpers.cast(request.get('page', 1), int, "'page' is not an int.")
  items, total_pages, total_items, has_more = query.fetch_page(
      page=page, page_size=PAGE_SIZE, projection=None, more_limit=MORE_LIMIT)
  helpers.log('Jobs', helpers.VIEW_OPERATION)

  result = {
      'hasMore': has_more,
      'items': [_job_to_dict(item) for item in items],
      'page': page,
      'pageSize': PAGE_SIZE,
      'totalItems': total_items,
      'totalPages': total_pages,
  }
  return result, params
Add filters based on params.
def add_filters(query, params):
  """Add filters based on params."""
  if not filters.has_params(params, FILTERS) and not params.get('showall'):
    params['open'] = 'yes'

  query.filter('status', 'Processed')
  query.filter('is_a_duplicate_flag', False)

  # For queries that use inequality we need to order by that field. Otherwise,
  # use the timestamp.
  if 'revision_greater_than' in params:
    query.order('crash_revision', is_desc=True)
  else:
    query.order('timestamp', is_desc=True)

  filters.add(query, params, FILTERS)
Get the result for the testcase list page.
def get_result():
  """Get the result for the testcase list page."""
  params = dict(request.iterparams())
  page = helpers.cast(request.get('page') or 1, int, "'page' is not an int.")

  query = datastore_query.Query(data_types.Testcase)
  crash_access.add_scope(query, params, 'security_flag', 'job_type',
                         'fuzzer_name_indices')
  add_filters(query, params)

  testcases, total_pages, total_items, has_more = query.fetch_page(
      page=page, page_size=PAGE_SIZE, projection=FIELDS,
      more_limit=MORE_LIMIT)

  items = []
  for testcase in testcases:
    regression_range = ''
    fixed_range = ''

    if testcase.regression and testcase.regression != 'NA':
      regression_range = testcase.regression

    if testcase.fixed and testcase.fixed != 'NA':
      fixed_range = testcase.fixed

    item = {
        'id': testcase.key.id(),
        'crashType': ' '.join(testcase.crash_type.splitlines()),
        'crashStateLines': testcase.crash_state.strip().splitlines(),
        'jobType': testcase.job_type,
        'isClosed': not testcase.open,
        'isFixed': testcase.fixed and testcase.fixed != 'NA',
        'isReproducible': not testcase.one_time_crasher_flag,
        'isSecurity': testcase.security_flag,
        'isImpactSet': testcase.is_impact_set_flag,
        'impacts': {
            'extendedStable': testcase.impact_extended_stable_version,
            'stable': testcase.impact_stable_version,
            'beta': testcase.impact_beta_version,
            'head': testcase.impact_head_version,
        },
        'regressionRange': regression_range,
        'fixedRange': fixed_range,
        'groupId': testcase.group_id,
        'projectName': testcase.project_name,
        'platform': testcase.platform,
        'issueId': testcase.bug_information or testcase.group_bug_information,
        'showImpacts': testcase.has_impacts(),
        'impactsProduction': testcase.impacts_production()
    }
    if testcase.timestamp:
      item['timestamp'] = utils.utc_datetime_to_timestamp(testcase.timestamp)

    items.append(item)

  helpers.log('Testcases', helpers.VIEW_OPERATION)

  result = {
      'hasMore': has_more,
      'items': items,
      'page': page,
      'pageSize': PAGE_SIZE,
      'totalItems': total_items,
      'totalPages': total_pages,
  }
  return result, params
Return bool on whether user is allowed to upload to any job or fuzzer.
def _is_uploader_allowed(email):
  """Return bool on whether user is allowed to upload to any job or fuzzer."""
  return external_users.is_upload_allowed_for_user(email)
Return whether or not the uploader is allowed and trusted.
def _is_trusted_uploader_allowed(email):
  """Return whether or not the uploader is allowed and trusted."""
  return access.has_access(
      need_privileged_access=True) and _is_uploader_allowed(email)
Attach testcase to each crash.
def attach_testcases(rows):
  """Attach testcase to each crash."""
  testcases = {}
  for index, row in enumerate(rows):
    testcases[index] = query_testcase(row['testcaseId'])

  for index, row in enumerate(rows):
    testcase = (list(testcases[index]) or [None])[0]
    if testcase:
      testcase = {
          'crashType': testcase.crash_type,
          'crashStateLines': (testcase.crash_state or '').strip().splitlines(),
          'isSecurity': testcase.security_flag,
          'issueNumber': testcase.bug_information,
          'job': testcase.job_type,
          'fuzzerName': testcase.actual_fuzzer_name(),
          'projectName': testcase.project_name,
      }
    row['testcase'] = testcase