Clean up resources.
def cleanup_resources():
  """Clean up resources."""
  manager = bot_manager.BotManager(TEST_PROJECT, TEST_ZONE)

  try:
    manager.instance_group(test_instance_group_name()).delete()
  except bot_manager.NotFoundError:
    pass

  try:
    manager.instance_template(test_instance_template_name()).delete()
  except bot_manager.NotFoundError:
    pass
Return mocked version of local_config._load_yaml_file. Uses custom version of auth.yaml for tests in this file.
def mocked_load_yaml_file(yaml_file_path):
  """Return mocked version of local_config._load_yaml_file. Uses custom
  version of auth.yaml for tests in this file."""
  if os.path.basename(yaml_file_path) == 'auth.yaml':
    yaml_file_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'handler_data', 'auth.yaml'))

  return yaml.safe_load(open(yaml_file_path).read())
Create test data.
def _create_data(self):
  """Create test data."""
  self.mocks = []

  for i in range(30):
    m = TestDatastoreModel()
    if (i % 2) == 0:
      m.tokens = ['a']
      m.boolean_value = True
    else:
      m.tokens = ['b']
      m.boolean_value = False

    m.datetime_value = datetime.datetime.fromtimestamp(100 - i)
    m.put()
    self.mocks.append(m)
Generate function with memoization.
def gen_func():
  """Generate function with memoization."""
  del CALLED[:]

  @memoize.wrap(memoize.FifoInMemory(10))
  def func(a, b=2):
    CALLED.append((a, b))
    return a + b

  return func
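For readers unfamiliar with the pattern under test, here is a minimal, stdlib-only sketch of a FIFO-bounded memoization decorator. It is an illustrative stand-in only, not the project's memoize.wrap/memoize.FifoInMemory implementation; the fifo_memoize name is hypothetical.

import collections
import functools


def fifo_memoize(capacity):
  """Illustrative FIFO-bounded memoization decorator (hypothetical helper)."""

  def decorator(func):
    cache = collections.OrderedDict()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
      key = (args, tuple(sorted(kwargs.items())))
      if key in cache:
        return cache[key]

      result = func(*args, **kwargs)
      cache[key] = result
      if len(cache) > capacity:
        # Evict the oldest entry, FIFO-style.
        cache.popitem(last=False)
      return result

    return wrapper

  return decorator


@fifo_memoize(10)
def add(a, b=2):
  return a + b


assert add(1) == 3  # Computed on the first call.
assert add(1) == 3  # Served from the cache on the second call.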
Mock get_crash_data.
def mock_get_crash_data(output, symbolize_flag=True):  # pylint: disable=unused-argument
  """Mock get_crash_data."""
  if 'crash' in output:
    stack_analyzer_state = stacktraces.CrashInfo()
    stack_analyzer_state.crash_state = 'state'
    stack_analyzer_state.crash_type = 'Null-dereference'
    stack_analyzer_state.crash_stacktrace = output
    return stack_analyzer_state

  return stacktraces.CrashInfo()
Return timeout for fuzzing.
def get_fuzz_timeout(fuzz_time):
  """Return timeout for fuzzing."""
  return (fuzz_time + launcher.AflRunner.AFL_CLEAN_EXIT_TIME +
          launcher.AflRunner.SIGTERM_WAIT_TIME)
Decorator that asserts neither metrics.logs.log_error nor metrics.logs.log_fatal_and_exit were called.
def no_errors(f):
  """Decorator that asserts neither metrics.logs.log_error nor
  metrics.logs.log_fatal_and_exit were called."""

  def call_f(self, *args, **kwargs):
    test_helpers.patch(self, ['clusterfuzz._internal.metrics.logs.log_error'])
    result = f(self, *args, **kwargs)
    self.assertEqual(0, self.mock.log_error.call_count)
    return result

  return call_f
Run launcher.py.
def run_launcher(*args):
  """Run launcher.py."""
  mock_stdout = test_utils.MockStdout()
  os.environ['FUZZ_TARGET'] = args[1]
  with mock.patch('sys.stdout', mock_stdout):
    launcher.main(['launcher.py'] + list(args))

  return mock_stdout.getvalue()
Mocked version of AflFuzzOutputDirectory.is_testcase that looks for "COLON" instead of ":" because this repo cannot be used on windows if it has filenames with ":".
def mocked_is_testcase(path):
  """Mocked version of AflFuzzOutputDirectory.is_testcase that looks for
  "COLON" instead of ":" because this repo cannot be used on windows if it has
  filenames with ":"."""
  testcase_regex = re.compile(r'idCOLON\d{6},.+')
  return (os.path.isfile(path) and
          bool(re.match(testcase_regex, os.path.basename(path))))
Mocked version of AflRunner.fuzz.
def mocked_fuzz(runner):
  """Mocked version of AflRunner.fuzz."""
  fuzz_args = runner.generate_afl_args()
  runner._fuzz_args = fuzz_args  # pylint: disable=protected-access
  engine_common.recreate_directory(runner.afl_output.output_directory)
  runner._fuzzer_stderr = ''  # pylint: disable=protected-access

  # Create the queue directory within AFL's output directory.
  queue = runner.afl_output.queue
  engine_common.recreate_directory(queue)

  new_corpus_dir = os.path.join(DATA_DIR, 'merge_new_corpus')
  for filename in os.listdir(new_corpus_dir):
    src = os.path.join(new_corpus_dir, filename)
    dst = os.path.join(queue, filename)
    shutil.copy(src, dst)

  return new_process.ProcessResult(
      command=[], return_code=0, output='', time_executed=1)
Setup testcase and corpus.
def setup_testcase_and_corpus(unittest_testcase, testcase, corpus, fuzz=False):
  """Setup testcase and corpus."""
  copied_testcase_path = os.path.join(unittest_testcase.temp_dir, testcase)
  shutil.copy(os.path.join(DATA_DIR, testcase), copied_testcase_path)

  copied_corpus_path = os.path.join(unittest_testcase.temp_dir, corpus)
  src_corpus_path = os.path.join(DATA_DIR, corpus)

  if os.path.exists(src_corpus_path):
    shutil.copytree(src_corpus_path, copied_corpus_path)
  else:
    os.mkdir(copied_corpus_path)
    with open(os.path.join(copied_corpus_path, fuzzer.AFL_DUMMY_INPUT),
              'w') as f:
      f.write(' ')

  if fuzz:
    os.environ['FUZZ_CORPUS_DIR'] = copied_corpus_path

  return copied_testcase_path
Helper function to prevent using fuzzing strategies, unless asked for.
def dont_use_strategies(obj):
  """Helper function to prevent using fuzzing strategies, unless asked for."""
  test_helpers.patch(obj, [
      'clusterfuzz._internal.bot.fuzzers.engine_common.decide_with_probability',
  ])
  obj.mock.decide_with_probability.return_value = False
Used to patch environment.getEnv to set FAIL_RETRIES.
def override_fail_retries(env_var, default_value=None):
  """Used to patch environment.getEnv to set FAIL_RETRIES."""
  return os.getenv(
      env_var, default=default_value) if env_var != 'FAIL_RETRIES' else 1
Returns temporary test_paths that can be used for centipede.
def get_test_paths():
  """Returns temporary test_paths that can be used for centipede."""
  with tempfile.TemporaryDirectory() as temp_dir:
    temp_dir = pathlib.Path(temp_dir)
    data_dir = temp_dir / 'test_data'
    shutil.copytree(_TEST_DATA_SRC, data_dir)
    test_paths = TestPaths(data_dir, temp_dir / 'corpus', temp_dir / 'crashes',
                           str(data_dir / 'centipede'))
    os.mkdir(test_paths.corpus)
    os.mkdir(test_paths.crashes)
    yield test_paths
Sets up testcase and corpus.
def setup_testcase(testcase, test_paths):
  """Sets up testcase and corpus."""
  src_testcase_path = test_paths.data / testcase
  src_testcase_options_path = test_paths.data / f'{testcase}.options'
  copied_testcase_path = test_paths.corpus / testcase
  copied_testcase_options_path = test_paths.corpus / f'{testcase}.options'

  if os.path.isfile(src_testcase_path):
    shutil.copy(src_testcase_path, copied_testcase_path)
  if os.path.isdir(src_testcase_path):
    shutil.copytree(src_testcase_path, copied_testcase_path)

  if src_testcase_options_path.exists():
    shutil.copy(src_testcase_options_path, copied_testcase_options_path)

  return copied_testcase_path
Sets up Centipede for fuzzing.
def setup_centipede(target_name,
                    test_paths,
                    centipede_bin=None,
                    sanitized_target_dir=None):
  """Sets up Centipede for fuzzing."""
  # Setup Centipede's fuzz target.
  engine_impl = engine.Engine()
  target_path = engine_common.find_fuzzer_path(test_paths.data, target_name)
  if sanitized_target_dir is None:
    sanitized_target_dir = test_paths.data / fuzzer_utils.EXTRA_BUILD_DIR
  sanitized_target_path = sanitized_target_dir / target_name

  # Setup Centipede's binary.
  if centipede_bin and centipede_bin != test_paths.centipede:
    os.rename(centipede_bin, test_paths.centipede)

  return engine_impl, target_path, sanitized_target_path
Clear temp directory.
def clear_temp_dir():
  """Clear temp directory."""
  if os.path.exists(TEMP_DIR):
    shutil.rmtree(TEMP_DIR)

  os.mkdir(TEMP_DIR)
Setup testcase and corpus.
def setup_testcase_and_corpus(testcase, corpus):
  """Setup testcase and corpus."""
  clear_temp_dir()
  copied_testcase_path = os.path.join(TEMP_DIR, testcase)
  shutil.copy(os.path.join(DATA_DIR, testcase), copied_testcase_path)

  copied_corpus_path = os.path.join(TEMP_DIR, corpus)
  src_corpus_path = os.path.join(DATA_DIR, corpus)

  if os.path.exists(src_corpus_path):
    shutil.copytree(src_corpus_path, copied_corpus_path)
  else:
    os.mkdir(copied_corpus_path)

  return copied_testcase_path, copied_corpus_path
Helper method to create instances of strategy pools for patching use.
def set_strategy_pool(strategies=None):
  """Helper method to create instances of strategy pools for patching use."""
  strategy_pool = strategy_selection.StrategyPool()

  if strategies is not None:
    for strategy_tuple in strategies:
      strategy_pool.add_strategy(strategy_tuple)

  return strategy_pool
Always returns first element from the sequence.
def mock_random_choice(seq):
  """Always returns first element from the sequence."""
  # We could try to mock a particular |seq| to be a list with a single
  # element, but it does not work well, as random_choice returns a
  # 'mock.mock.MagicMock' object that behaves differently from the actual
  # type of |seq[0]|.
  return seq[0]
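A minimal, stdlib-only sketch of how such a deterministic stand-in is typically wired in. The module under test and its pick_flag function are hypothetical; the point is simply that patching the random call with "return the first element" makes the test reproducible.

import random
import unittest
from unittest import mock


def pick_flag():
  """Hypothetical code under test that makes a random choice."""
  return random.choice(['-flag_a', '-flag_b'])


class PickFlagTest(unittest.TestCase):

  def test_first_element_is_used(self):
    # Replace random.choice with the deterministic version for this test only.
    with mock.patch('random.choice', side_effect=lambda seq: seq[0]):
      self.assertEqual('-flag_a', pick_flag())


if __name__ == '__main__':
  unittest.main()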
Mocked version, always return 1 for new testcases directory.
def mock_get_directory_file_count(dir_path):
  """Mocked version, always return 1 for new testcases directory."""
  if dir_path == os.path.join(fuzzer_utils.get_temp_dir(), 'new'):
    return 1

  return _get_directory_file_count_orig(dir_path)
Read test data.
def _read_test_data(name):
  """Read test data."""
  data_path = os.path.join(TESTDATA_PATH, name)
  with open(data_path) as f:
    return f.read()
Reads data from file.
def read_data_from_file(file_path):
  """Reads data from file."""
  with open(file_path, 'rb') as file_handle:
    return file_handle.read().decode('utf-8')
Creates a mock subprocess.Popen.
def create_mock_popen(output,
                      corpus_path=None,
                      merge_corpus_path=None,
                      number_of_testcases=0,
                      return_code=0):
  """Creates a mock subprocess.Popen."""

  class MockPopen:
    """Mock subprocess.Popen."""
    commands = []
    testcases_written = []

    def __init__(self, command, *args, **kwargs):
      """Inits the MockPopen."""
      stdout = kwargs.pop('stdout', None)
      self.command = command
      self.commands.append(command)
      self.stdout = None
      self.return_code = return_code
      if hasattr(stdout, 'write'):
        self.stdout = stdout

    def _do_merge(self):
      """Mock merge."""
      if not corpus_path or not merge_corpus_path:
        return

      for filepath in os.listdir(corpus_path):
        shutil.copy(os.path.join(corpus_path, filepath), merge_corpus_path)

    def _write_fake_units(self):
      """Mock writing of new units."""
      for i in range(number_of_testcases):
        with open(os.path.join(corpus_path, str(i)), 'w') as f:
          f.write(str(i))

        self.testcases_written.append(str(i))

    def communicate(self, input_data=None):
      """Mock subprocess.Popen.communicate."""
      if '/fake/build_dir/fake_fuzzer' in self.command:
        if '-merge=1' in self.command:
          # Mock merge.
          self._do_merge()
        else:
          # Mock writing of new units.
          self._write_fake_units()

      if self.stdout:
        self.stdout.write(output)

      return None, None

    def poll(self, input_data=None):
      """Mock subprocess.Popen.poll."""
      return self.return_code

  return MockPopen
Mock minijail._create_tmp_mount.
def mock_create_tmp_mount(base_dir):
  """Mock minijail._create_tmp_mount."""
  path = os.path.join(base_dir, 'TEMP')
  os.mkdir(path)
  return path
Mock minijail._create_chroot_dir.
def mock_create_chroot_dir(base_dir):
  """Mock minijail._create_chroot_dir."""
  path = os.path.join(base_dir, 'CHROOT')
  os.mkdir(path)
  return path
A dummy function.
def dummy(*args, **kwargs):
  """A dummy function."""
  del args
  del kwargs
  return os.environ['TASK_PAYLOAD']
A dummy function that raises.
def dummy_exception(*args, **kwargs):
  """A dummy function that raises."""
  raise RuntimeError(os.environ['TASK_PAYLOAD'])
Helper function to deterministically sample a list.
def _sample(input_list, count):
  """Helper function to deterministically sample a list."""
  assert count <= len(input_list)
  return input_list[:count]
Mock build_manager.set_environment_vars.
def _mock_set_environment_vars(_):
  """Mock build_manager.set_environment_vars."""
  os.environ['APP_PATH'] = ''
  os.environ['APP_DIR'] = ''
Mock _unpack_build.
def _mock_unpack_build(
    self,
    _,
    build_dir,
    build_url,
    target_weights=None,
):
  """Mock _unpack_build."""
  if not shell.remove_directory(build_dir, recreate=True):
    return False

  with open(os.path.join(build_dir, FAKE_APP_NAME), 'w') as f:
    f.write('')

  with open(os.path.join(build_dir, 'args.gn'), 'w') as f:
    f.write('')

  with open(os.path.join(build_dir, 'llvm-symbolizer'), 'w') as f:
    f.write('')

  fuzz_target = os.environ.get('FUZZ_TARGET')
  if fuzz_target:
    with open(os.path.join(build_dir, '.partial_build'), 'w') as f:
      f.write('')

  return True
Return the timestamp, or None.
def _get_timestamp(base_build_dir):
  """Return the timestamp, or None."""
  return utils.read_data_from_file(
      os.path.join(base_build_dir, '.timestamp'), eval_data=True)
Helper function to return the mappings for a fuzzer as a list.
def _get_job_list_for_fuzzer(fuzzer):
  """Helper function to return the mappings for a fuzzer as a list."""
  query = data_types.FuzzerJob.query()
  query.filter(data_types.FuzzerJob.fuzzer == fuzzer.name)
  return [m.job for m in ndb_utils.get_all_from_query(query)]
Helper function to return the mappings for a job as a list.
def _get_fuzzer_list_for_job(job):
  """Helper function to return the mappings for a job as a list."""
  return [
      fuzzer.name
      for fuzzer in data_types.Fuzzer.query()
      if job.name in fuzzer.jobs
  ]
Mock random_weighted_choice.
def _mock_random_weighted_choice(items, weight_attribute='weight'):  # pylint: disable=unused-argument
  """Mock random_weighted_choice."""
  # Always select the first element rather than a random one for the sake of
  # determinism.
  return items[0]
Creates many jobs.
def _create_many():
  """Creates many jobs."""
  many = [None for _ in range(2000)]
  with multiprocessing.pool.Pool(120) as pool:
    pool.map(_send_test_job, many)
Creates a test batch job for local manual testing to ensure job creation actually works.
def _send_test_job(_=None, get_config_directory=None, get_job=None):
  """Creates a test batch job for local manual testing to ensure job creation
  actually works."""
  del _
  del get_config_directory
  del get_job
  tasks = [
      batch.BatchTask('variant', 'libfuzzer_chrome_asan', 'https://fake/')
      for _ in range(10)
  ]
  batch.create_uworker_main_batch_jobs(tasks)
Convert hour to index according to the SQL.
def bq_convert_hour_to_index(hour, time_span, remainder):
  """Convert hour to index according to the SQL."""
  return (hour - remainder) // time_span
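A quick worked example, assuming the helper above is in scope: with time_span=3 and remainder=2, hour 5 falls into index (5 - 2) // 3 = 1 and hour 8 into (8 - 2) // 3 = 2.

assert bq_convert_hour_to_index(hour=5, time_span=3, remainder=2) == 1
assert bq_convert_hour_to_index(hour=8, time_span=3, remainder=2) == 2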
Sanitize the sql by removing all new lines and surrounding whitespace.
def sanitize_sql(s):
  """Sanitize the sql by removing all new lines and surrounding whitespace."""
  s = re.sub('[ \\s\n\r]*\n[ \\s\n\r]*', ' ', s, flags=re.MULTILINE)
  s = re.sub('\\([ \t]+', '(', s)
  s = re.sub('[ \t]+\\)', ')', s)
  return s.strip()
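An illustrative before/after, assuming the sanitize_sql helper above (and its re import) is in scope; the query text is made up.

raw_sql = """
SELECT
  fuzzer,
  COUNT( * ) AS runs
FROM stats
GROUP BY fuzzer
"""

# Newline runs collapse to single spaces, and the padding just inside the
# parentheses is stripped.
assert sanitize_sql(raw_sql) == (
    'SELECT fuzzer, COUNT(*) AS runs FROM stats GROUP BY fuzzer')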
No-op mocked version of symbolize_stacktrace.
def _mock_symbolize_stacktrace(stacktrace, enable_inline_frames=True):
  """No-op mocked version of symbolize_stacktrace."""
  return stacktrace
Create a mock popen.
def mock_popen_factory(execute_time,
                       output,
                       sigterm_handler_time,
                       return_code=0):
  """Create a mock popen."""

  class MockPopen:
    """Mock subprocess.Popen."""
    received_signals = []

    def __init__(self, *args, **kwargs):
      """Inits the MockPopen."""
      self.start_time = time.time()
      self.signal_queue = queue.Queue()

    def poll(self):
      """Mock subprocess.Popen.poll."""
      if time.time() >= self.start_time + execute_time:
        return return_code
      return None

    def kill(self):
      """Mock subprocess.Popen.kill."""
      self.signal_queue.put(Signal.KILL)

    def terminate(self):
      """Mock subprocess.Popen.terminate."""
      self.signal_queue.put(Signal.TERM)

    def communicate(self, input_data=None):
      """Mock subprocess.Popen.communicate."""
      for i in range(2):
        timeout = execute_time if i == 0 else sigterm_handler_time
        try:
          received_signal = self.signal_queue.get(block=True, timeout=timeout)
        except queue.Empty:
          continue

        self.received_signals.append(
            (received_signal, time.time() - self.start_time))
        if received_signal == Signal.KILL:
          break

      return output, None

  return MockPopen
Returns a simple issue object for use in tests.
def create_generic_issue(created_days_ago=28):
  """Returns a simple issue object for use in tests."""
  issue = Issue()
  issue.cc = []
  issue.comment = ''
  issue.comments = []
  issue.components = []
  issue.labels = []
  issue.open = True
  issue.owner = '[email protected]'
  issue.status = 'Assigned'
  issue.id = 1
  issue.itm = create_issue_tracker_manager()

  # Test issue was created |created_days_ago| days before the current (mocked)
  # time.
  issue.created = (
      test_utils.CURRENT_TIME - datetime.timedelta(days=created_days_ago))

  return monorail.Issue(issue)
Return a simple comment used for testing.
def create_generic_issue_comment(comment_body='Comment.',
                                 author='[email protected]',
                                 days_ago=21,
                                 labels=None):
  """Return a simple comment used for testing."""
  comment = Comment()
  comment.comment = comment_body
  comment.author = author
  comment.created = test_utils.CURRENT_TIME - datetime.timedelta(days=days_ago)
  comment.labels = labels

  if comment.labels is None:
    comment.labels = []

  return comment
Create a fake issue tracker manager.
def create_issue_tracker_manager():
  """Create a fake issue tracker manager."""

  class FakeIssueTrackerManager:
    """Mock issue tracker manager."""

    def __init__(self):
      self.project_name = 'test-project'
      self.issues = {}
      self.next_id = 1

    def get_issue(self, issue_id):
      """Get original issue."""
      issue = self.issues.get(issue_id)
      if not issue:
        return None

      issue.itm = self
      return issue

    def get_comments(self, issue):  # pylint: disable=unused-argument
      """Return an empty comment list."""
      return []

    def save(self, issue, *args, **kwargs):  # pylint: disable=unused-argument
      """Save an issue."""
      if issue.new:
        issue.id = self.next_id
        issue.new = False
        self.next_id += 1

      self.issues[issue.id] = issue

  return FakeIssueTrackerManager()
Patch names and add them as attributes to testcase_obj. For example, `patch(obj, ['a.b.function', ('function2', 'c.d.method')])` adds the attributes `mock.function` and `mock.function2` to `obj`. To provide a replacement function for a mocked one, use `side_effect` attribute, for example: `self.mock.function.side_effect = replacementFunctionForTests.`
def patch(testcase_obj, names):
  """Patch names and add them as attributes to testcase_obj.

  For example, `patch(obj, ['a.b.function', ('function2', 'c.d.method')])`
  adds the attributes `mock.function` and `mock.function2` to `obj`.

  To provide a replacement function for a mocked one, use the `side_effect`
  attribute, for example:
  `self.mock.function.side_effect = replacementFunctionForTests.`
  """
  if not hasattr(testcase_obj, 'mock'):
    setattr(testcase_obj, 'mock', _Object())

  for name in names:
    if isinstance(name, tuple):
      attr_name = name[0]
      full_path = name[1]
    else:
      attr_name = name.split('.')[-1]
      full_path = name

    patcher = mock.patch(full_path, autospec=True)
    testcase_obj.addCleanup(patcher.stop)
    setattr(testcase_obj.mock, attr_name, patcher.start())

    patched = getattr(testcase_obj.mock, attr_name)
    # A class doesn't have __name__. We need to set __name__ for a method
    # because we use it when monkey-patching.
    if '__name__' in dir(patched):
      setattr(patched, '__name__', attr_name)
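Under the hood this helper boils down to the standard mock.patch(..., autospec=True) plus addCleanup pattern. A stdlib-only sketch of the equivalent for a single name; the patched target os.getcwd and the fake return value are just examples.

import os
import unittest
from unittest import mock


class ExampleTest(unittest.TestCase):

  def setUp(self):
    # Equivalent of patch(self, ['os.getcwd']): start an autospec patcher and
    # register its stop() so it is undone even if the test fails.
    patcher = mock.patch('os.getcwd', autospec=True)
    self.addCleanup(patcher.stop)
    self.mock_getcwd = patcher.start()

  def test_uses_mock(self):
    # Equivalent of setting self.mock.getcwd.return_value in the helper's API.
    self.mock_getcwd.return_value = '/fake/cwd'
    self.assertEqual('/fake/cwd', os.getcwd())


if __name__ == '__main__':
  unittest.main()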
Patch environment.
def patch_environ(testcase_obj, env=None):
  """Patch environment."""
  if env is None:
    env = {}

  patcher = mock.patch.dict(os.environ, env)
  testcase_obj.addCleanup(patcher.stop)
  patcher.start()
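A usage sketch of the same idea using only the standard library; the environment key and value are hypothetical.

import os
import unittest
from unittest import mock


class EnvTest(unittest.TestCase):

  def setUp(self):
    # Same pattern as patch_environ: patch os.environ for the duration of the
    # test and restore it automatically afterwards.
    patcher = mock.patch.dict(os.environ, {'FUZZ_TARGET': 'test_fuzzer'})
    self.addCleanup(patcher.stop)
    patcher.start()

  def test_env_is_patched(self):
    self.assertEqual('test_fuzzer', os.environ['FUZZ_TARGET'])


if __name__ == '__main__':
  unittest.main()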
Create a simple test case.
def create_generic_testcase(created_days_ago=28):
  """Create a simple test case."""
  testcase = data_types.Testcase()

  # Add more values here as needed. Intended to be the bare minimum for what
  # we need to simulate a test case.
  testcase.absolute_path = '/a/b/c/test.html'
  testcase.crash_address = '0xdeadbeef'
  testcase.crash_revision = 1
  testcase.crash_state = 'crashy_function()'
  testcase.crash_stacktrace = testcase.crash_state
  testcase.crash_type = 'fake type'
  testcase.comments = 'Fuzzer: test'
  testcase.fuzzed_keys = 'abcd'
  testcase.minimized_keys = 'efgh'
  testcase.fuzzer_name = 'fuzzer1'
  testcase.open = True
  testcase.one_time_crasher_flag = False
  testcase.job_type = 'test_content_shell_drt'
  testcase.status = 'Processed'
  testcase.timestamp = CURRENT_TIME - datetime.timedelta(days=created_days_ago)
  testcase.project_name = 'project'
  testcase.platform = 'linux'
  testcase.put()

  return testcase
Create a simple test case variant.
def create_generic_testcase_variant():
  """Create a simple test case variant."""
  variant = data_types.TestcaseVariant()
  variant.job_type = 'test_variant_job_type'
  variant.crash_type = 'crash type 1'
  variant.crash_state = 'variant_function()'
  variant.security_flag = True
  variant.put()
  return variant
Return a bool on whether two input entities are the same.
def entities_equal(entity_1, entity_2, check_key=True):
  """Return a bool on whether two input entities are the same."""
  if check_key:
    return entity_1.key == entity_2.key

  return entity_1.to_dict() == entity_2.to_dict()
Return a bool on whether the entity exists in datastore.
def entity_exists(entity):
  """Return a bool on whether the entity exists in datastore."""
  return entity.get_by_id(entity.key.id())
Mark the testcase as an adhoc. Adhoc tests are NOT expected to run before merging and are NOT counted toward test coverage; they are used to test tricky situations. Another way to think about it is that, if there was no adhoc test, we would write a Python script (which is not checked in) to test what we want anyway... so, it's better to check in the script. For example, downloading a chrome revision (10GB) and unpacking it. It can be enabled using the env ADHOC=1.
def adhoc(func):
  """Mark the testcase as an adhoc. Adhoc tests are NOT expected to run before
  merging and are NOT counted toward test coverage; they are used to test
  tricky situations.

  Another way to think about it is that, if there was no adhoc test, we would
  write a Python script (which is not checked in) to test what we want
  anyway... so, it's better to check in the script. For example, downloading a
  chrome revision (10GB) and unpacking it.

  It can be enabled using the env ADHOC=1."""
  return unittest.skipIf(not environment.get_value('ADHOC', False),
                         'Adhoc tests are not enabled.')(func)
Mark the testcase as integration because it depends on network resources and/or is slow. The integration tests should, at least, be run before merging and are counted toward test coverage. It can be enabled using the env INTEGRATION=1.
def integration(func):
  """Mark the testcase as integration because it depends on network resources
  and/or is slow. The integration tests should, at least, be run before
  merging and are counted toward test coverage.

  It can be enabled using the env INTEGRATION=1."""
  return unittest.skipIf(not environment.get_value('INTEGRATION', False),
                         'Integration tests are not enabled.')(func)
Slow tests which are skipped during presubmit.
def slow(func):
  """Slow tests which are skipped during presubmit."""
  return unittest.skipIf(not environment.get_value('SLOW_TESTS', True),
                         'Skipping slow tests.')(func)
Skip Android-specific tests if we cannot run them.
def android_device_required(func):
  """Skip Android-specific tests if we cannot run them."""
  reason = None
  if not environment.get_value('ANDROID_SERIAL'):
    reason = 'Android device tests require that ANDROID_SERIAL is set.'
  elif not environment.get_value('INTEGRATION'):
    reason = 'Integration tests are not enabled.'
  elif environment.platform() != 'LINUX':
    reason = 'Android device tests can only run on a Linux host.'

  return unittest.skipIf(reason is not None, reason)(func)
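A self-contained sketch of how these environment-gated skip decorators compose with a test case. The decorator below is an illustrative stand-in (the real ones read flags via environment.get_value); the test body is hypothetical.

import os
import unittest


def integration(func):
  """Illustrative stand-in for the decorator above, gated on INTEGRATION=1."""
  return unittest.skipIf(
      os.environ.get('INTEGRATION') != '1',
      'Integration tests are not enabled.')(func)


class NetworkDependentTest(unittest.TestCase):

  @integration
  def test_talks_to_network(self):
    # Only runs when the suite is invoked with INTEGRATION=1 in the env.
    self.assertTrue(True)


if __name__ == '__main__':
  unittest.main()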
Find a free port.
def _find_free_port():
  """Find a free port."""
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  sock.bind(('localhost', 0))
  _, port = sock.getsockname()
  sock.close()
  return port
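A brief usage note: the port is obtained by binding to port 0 and letting the OS choose, so there is an inherent (usually harmless in tests) race between closing the probe socket and the eventual consumer binding the port. A minimal sketch, assuming the helper above is in scope:

import socket

port = _find_free_port()

# Reuse the port right away, e.g. for a local emulator or test server.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('localhost', port))
server.listen(1)
server.close()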
Wait for emulator to be ready.
def wait_for_emulator_ready(proc,
                            emulator,
                            indicator,
                            timeout=EMULATOR_TIMEOUT,
                            output_lines=None):
  """Wait for emulator to be ready."""
  if output_lines is None:
    output_lines = []

  def _read_thread(proc, ready_event):
    """Thread to continuously read from the process stdout."""
    ready = False
    while True:
      line = proc.stdout.readline()
      if not line:
        break

      if output_lines is not None:
        output_lines.append(line)

      if not ready and indicator in line:
        ready = True
        ready_event.set()

  # Wait for process to become ready.
  ready_event = threading.Event()
  thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
  thread.daemon = True
  thread.start()

  if not ready_event.wait(timeout):
    output = b'\n'.join(output_lines).decode()
    raise RuntimeError(
        f'{emulator} emulator did not get ready in time: {output}.')

  return thread
Start a cloud emulator.
def start_cloud_emulator(emulator,
                         args=None,
                         data_dir=None,
                         store_on_disk=False):
  """Start a cloud emulator."""
  ready_indicators = {
      'datastore': b'is now running',
      'pubsub': b'Server started',
  }
  store_on_disk_flag = ('--store-on-disk'
                        if store_on_disk else '--no-store-on-disk')
  default_flags = {
      'datastore': [store_on_disk_flag, '--consistency=1'],
      'pubsub': [],
  }
  if emulator not in ready_indicators:
    raise RuntimeError('Unsupported emulator')

  if data_dir:
    cleanup_dir = None
  else:
    temp_dir = tempfile.mkdtemp()
    data_dir = temp_dir
    cleanup_dir = temp_dir

  port = _find_free_port()

  command = [
      'gcloud', 'beta', 'emulators', emulator, 'start',
      '--data-dir=' + data_dir, '--host-port=localhost:' + str(port),
      '--project=' + local_config.GAEConfig().get('application_id')
  ]
  if args:
    command.extend(args)
  command.extend(default_flags[emulator])

  # Start emulator.
  proc = subprocess.Popen(
      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
  thread = wait_for_emulator_ready(proc, emulator, ready_indicators[emulator])

  # Set env vars.
  env_vars = subprocess.check_output([
      'gcloud', 'beta', 'emulators', emulator, 'env-init',
      '--data-dir=' + data_dir
  ])
  for line in env_vars.splitlines():
    key, value = line.split()[1].split(b'=')
    os.environ[key.strip().decode('utf-8')] = value.strip().decode('utf-8')

  return EmulatorInstance(proc, port, thread, cleanup_dir)
Create topic if it doesn't exist.
def create_pubsub_topic(client, project, name):
  """Create topic if it doesn't exist."""
  full_name = pubsub.topic_name(project, name)
  if client.get_topic(full_name):
    return

  client.create_topic(full_name)
Create subscription if it doesn't exist.
def create_pubsub_subscription(client, project, topic, name):
  """Create subscription if it doesn't exist."""
  topic_name = pubsub.topic_name(project, topic)
  full_name = pubsub.subscription_name(project, name)
  if client.get_subscription(full_name):
    return

  client.create_subscription(full_name, topic_name)
Set up pubsub topics and subscriptions.
def setup_pubsub(project):
  """Set up pubsub topics and subscriptions."""
  config = local_config.Config('pubsub.queues')
  client = pubsub.PubSubClient()
  queues = config.get('resources')

  for queue in queues:
    create_pubsub_topic(client, project, queue['name'])
    create_pubsub_subscription(client, project, queue['name'], queue['name'])
Decorator for starting cloud emulators from a unittest.TestCase.
def with_cloud_emulators(*emulator_names):
  """Decorator for starting cloud emulators from a unittest.TestCase."""

  def decorator(cls):
    """Decorator."""

    class Wrapped(cls):
      """Wrapped class."""

      @classmethod
      def setUpClass(cls):
        """Class setup."""
        for emulator_name in emulator_names:
          if emulator_name not in _emulators:
            _emulators[emulator_name] = start_cloud_emulator(emulator_name)
            atexit.register(_emulators[emulator_name].cleanup)

          if emulator_name == 'datastore':
            cls._context_generator = ndb_init.context()
            cls._context_generator.__enter__()

        super().setUpClass()

      @classmethod
      def tearDownClass(cls):
        """Class teardown."""
        for emulator_name in emulator_names:
          if emulator_name == 'datastore':
            cls._context_generator.__exit__(None, None, None)

        super().tearDownClass()

      def setUp(self):
        for emulator in _emulators.values():
          emulator.reset()

        super().setUp()

    Wrapped.__module__ = cls.__module__
    Wrapped.__name__ = cls.__name__
    return Wrapped

  return decorator
Helper to set up Pyfakefs.
def set_up_pyfakefs(test_self, allow_root_user=True):
  """Helper to set up Pyfakefs."""
  real_cwd = os.path.realpath(os.getcwd())
  config_dir = os.path.realpath(environment.get_config_directory())
  test_self.setUpPyfakefs(allow_root_user=allow_root_user)
  test_self.fs.add_real_directory(config_dir, lazy_read=False)
  os.chdir(real_cwd)
Decorator for enabling tests only on certain platforms.
def supported_platforms(*platforms):
  """Decorator for enabling tests only on certain platforms."""

  def decorator(func):  # pylint: disable=unused-argument
    """Decorator."""
    return unittest.skipIf(environment.platform() not in platforms,
                           'Unsupported platform.')(func)

  return decorator
Start an untrusted process.
def untrusted_process():
  """Start an untrusted process."""
  os.environ['BOT_NAME'] = 'localhost'
  untrusted.start_server()
Return path to directory for bot and server data.
def _test_data_dir():
  """Return path to directory for bot and server data."""
  root_dir = os.environ['ROOT_DIR']
  return os.path.join(root_dir, '_test_data')
Start test bot.
def _create_test_bot():
  """Start test bot."""
  # TODO(ochang): Use Docker container instead.
  bot_path = os.path.join(_test_data_dir(), 'worker_bot')
  if os.path.exists(bot_path):
    shutil.rmtree(bot_path, ignore_errors=True)

  env = os.environ.copy()
  env['UNTRUSTED_WORKER'] = 'True'
  env['BOT_NAME'] = 'localhost'
  bot_proc = subprocess.Popen(
      ['python', 'butler.py', 'run_bot', bot_path], env=env)

  return bot_proc, os.path.join(bot_path, 'clusterfuzz')
Create test ROOT_DIR for the trusted host.
def _create_test_root():
  """Create test ROOT_DIR for the trusted host."""
  root_path = os.path.join(_test_data_dir(), 'test_root')
  if os.path.exists(root_path):
    shutil.rmtree(root_path, ignore_errors=True)

  real_root = os.environ['ROOT_DIR']
  os.makedirs(root_path)

  # TODO(ochang): Make sure we don't copy files that aren't tracked in git.
  shutil.copytree(
      os.path.join(real_root, 'bot'), os.path.join(root_path, 'bot'))
  shutil.copytree(
      os.path.join(real_root, 'resources'),
      os.path.join(root_path, 'resources'))

  os.mkdir(os.path.join(root_path, 'src'))
  shutil.copytree(
      os.path.join(real_root, 'src', 'appengine'),
      os.path.join(root_path, 'src', 'appengine'))
  shutil.copytree(
      os.path.join(real_root, 'src', 'python'),
      os.path.join(root_path, 'src', 'python'))
  shutil.copytree(
      os.path.join(real_root, 'src', 'clusterfuzz'),
      os.path.join(root_path, 'src', 'clusterfuzz'))
  shutil.copytree(
      os.path.join(real_root, 'src', 'third_party'),
      os.path.join(root_path, 'src', 'third_party'))

  return root_path
Return full path to |prog| (based on $PATH).
def _which(prog):
  """Return full path to |prog| (based on $PATH)."""
  for path in os.getenv('PATH', '').split(':'):
    candidate = os.path.join(path, prog)
    if os.path.exists(candidate):
      return candidate

  return None
Add environment variables to yaml file if necessary.
def _add_env_vars_if_needed(yaml_path, additional_env_vars):
  """Add environment variables to yaml file if necessary."""
  # Defer imports since our python paths have to be set up first.
  import yaml

  from src.clusterfuzz._internal.config import local_config

  env_values = local_config.ProjectConfig().get('env')
  if additional_env_vars:
    env_values.update(additional_env_vars)

  if not env_values:
    return

  with open(yaml_path) as f:
    data = yaml.safe_load(f)

  if not isinstance(data, dict) or 'service' not in data:
    # Not a service.
    return

  data.setdefault('env_variables', {}).update(env_values)

  with open(yaml_path, 'w') as f:
    yaml.safe_dump(data, f)
Copy paths to appengine source directories since they reference sources; otherwise, deployment fails.
def copy_yamls_and_preprocess(paths, additional_env_vars=None):
  """Copy paths to appengine source directories since they reference sources;
  otherwise, deployment fails."""
  rebased_paths = []
  for path in paths:
    target_filename = os.path.basename(path)
    rebased_path = os.path.join(SRC_DIR_PY, target_filename)

    # Remove the target in case it's a symlink, since shutil.copy follows
    # symlinks.
    if os.path.exists(rebased_path):
      os.remove(rebased_path)

    shutil.copy(path, rebased_path)
    os.chmod(rebased_path, 0o600)

    _add_env_vars_if_needed(rebased_path, additional_env_vars)
    rebased_paths.append(rebased_path)

  return rebased_paths
Find the App Engine SDK path.
def find_sdk_path():
  """Find the App Engine SDK path."""
  if common.get_platform() == 'windows':
    _, gcloud_path = common.execute('where gcloud.cmd', print_output=False)
  else:
    gcloud_path = shutil.which('gcloud')

  if not gcloud_path:
    print('Please install the Google Cloud SDK and set up PATH to point to '
          'it.')
    sys.exit(1)

  cloud_sdk_path = os.path.dirname(
      os.path.dirname(os.path.realpath(gcloud_path)))
  appengine_sdk_path = os.path.join(cloud_sdk_path, 'platform',
                                    'google_appengine')
  if not os.path.exists(appengine_sdk_path):
    print('App Engine SDK not found. Please run local/install_deps.bash')
    sys.exit(1)

  return appengine_sdk_path
Symlink folders for use on appengine.
def symlink_dirs():
  """Symlink folders for use on appengine."""
  symlink_config_dir()

  common.symlink(
      src=os.path.join('src', 'clusterfuzz'),
      target=os.path.join(SRC_DIR_PY, 'clusterfuzz'))

  # Remove existing local_gcs symlink (if any). This is important, as
  # otherwise we will try deploying the directory in production. This is only
  # needed for local development in run_server.
  local_gcs_symlink_path = os.path.join(SRC_DIR_PY, 'local_gcs')
  common.remove_symlink(local_gcs_symlink_path)
Build template files used in appengine.
def build_templates():
  """Build template files used in appengine."""
  common.execute('python polymer_bundler.py', cwd='local')
Symlink config directory in appengine directory.
def symlink_config_dir():
  """Symlink config directory in appengine directory."""
  config_dir = os.getenv('CONFIG_DIR_OVERRIDE', constants.TEST_CONFIG_DIR)
  common.symlink(src=config_dir, target=os.path.join(SRC_DIR_PY, 'config'))
Convert an app engine location ID to a region.
def region_from_location(location):
  """Convert an app engine location ID to a region."""
  if not location[-1].isdigit():
    # e.g. us-central -> us-central1.
    location += '1'

  return location
Get the App Engine region.
def region(project):
  """Get the App Engine region."""
  return_code, location = common.execute(
      'gcloud app describe --project={project} '
      '--format="value(locationId)"'.format(project=project))
  if return_code:
    raise RuntimeError('Could not get App Engine region')

  return region_from_location(location.strip().decode('utf-8'))
Install all required dependencies for running tests, the appengine, and the bot.
def execute(args):
  """Install all required dependencies for running tests, the appengine, and
  the bot."""
  del args
  common.install_dependencies()
  appengine.symlink_dirs()
  print('Bootstrap successfully finished.')
Cleanup indexes.
def _cleanup_indexes(project, index_yaml_path):
  """Cleanup indexes."""
  common.execute(('gcloud datastore indexes cleanup '
                  '--quiet --project {project} {index_yaml_path}').format(
                      project=project, index_yaml_path=index_yaml_path))
Clean indexes.
def execute(args):
  """Clean indexes."""
  if not os.path.exists(args.config_dir):
    print('Please provide a valid configuration directory.')
    sys.exit(1)

  os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir

  config = local_config.GAEConfig()
  application_id = config.get('application_id')
  _cleanup_indexes(application_id, INDEX_FILE_PATH)
Run a command and handle its error output.
def _run_and_handle_exception(arguments, exception_class):
  """Run a command and handle its error output."""
  print('Running:', ' '.join(shlex.quote(arg) for arg in arguments))
  try:
    return subprocess.check_output(arguments)
  except subprocess.CalledProcessError as e:
    raise exception_class(e.output)
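A short usage sketch, assuming the helper above is in scope; the DeployError class and the commands are hypothetical stand-ins.

class DeployError(Exception):
  """Hypothetical error type for a failed command."""


# Returns the command's stdout as bytes when it succeeds...
output = _run_and_handle_exception(['echo', 'hello'], DeployError)

# ...and converts a non-zero exit status into the caller-supplied exception.
try:
  _run_and_handle_exception(['false'], DeployError)
except DeployError:
  print('command failed')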
We need this method for mocking.
def _utcnow():
  """We need this method for mocking."""
  return datetime.datetime.utcnow()
Staging revision adds 2 days to the timestamp and appends 'staging'.
def compute_staging_revision():
  """Staging revision adds 2 days to the timestamp and appends 'staging'."""
  return _compute_revision(_utcnow() + datetime.timedelta(days=2), 'staging')
Get prod revision.
def compute_prod_revision():
  """Get prod revision."""
  return _compute_revision(_utcnow())
Return a revision that contains a timestamp, git-sha, user, and is_staging. The ordinality of revision is crucial for updating source code. Later revision *must* be greater than earlier revision. See: crbug.com/674173.
def _compute_revision(timestamp, *extras):
  """Return a revision that contains a timestamp, git-sha, user, and
  is_staging. The ordinality of revision is crucial for updating source code.
  Later revision *must* be greater than earlier revision. See:
  crbug.com/674173."""
  timestamp = timestamp.strftime('%Y%m%d%H%M%S-utc')
  _, git_sha = execute('git rev-parse --short HEAD')
  git_sha = git_sha.strip().decode('utf-8')

  components = [timestamp, git_sha, os.environ['USER']] + list(extras)
  return '-'.join(components)
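A small illustration of why this ordering property holds: the revision string starts with a fixed-width UTC timestamp, so lexicographic comparison of revisions matches chronological order. The sha and user in the comment below are made up.

import datetime

stamp = datetime.datetime(2024, 1, 1, 12, 0, 0).strftime('%Y%m%d%H%M%S-utc')
assert stamp == '20240101120000-utc'
# A full revision then looks like: '20240101120000-utc-abc1234-alice-staging'.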
Print output of process line by line. Returns the whole output.
def process_proc_output(proc, print_output=True):
  """Print output of process line by line. Returns the whole output."""

  def _print(s):
    if print_output:
      print(s)

  lines = []
  for line in iter(proc.stdout.readline, b''):
    _print(f'| {line.rstrip().decode("utf-8")}')
    lines.append(line)

  return b''.join(lines)
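A stdlib-only usage sketch, assuming the helper above is in scope; it expects a process whose stdout is a bytes pipe.

import subprocess

proc = subprocess.Popen(
    ['python', '-c', 'print("one"); print("two")'],
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT)
captured = process_proc_output(proc)  # Echoes '| one' and '| two' as it reads.
proc.wait()
assert b'one' in captured and b'two' in captured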
Execute a bash command asynchronously. Returns a subprocess.Popen.
def execute_async(command,
                  extra_environments=None,
                  cwd=None,
                  stderr=subprocess.STDOUT):
  """Execute a bash command asynchronously. Returns a subprocess.Popen."""
  environments = os.environ.copy()
  if extra_environments is not None:
    environments.update(extra_environments)

  return subprocess.Popen(
      command,
      shell=True,
      stdout=subprocess.PIPE,
      stderr=stderr,
      env=environments,
      cwd=cwd)
Execute a bash command.
def execute(command,
            print_output=True,
            exit_on_error=True,
            extra_environments=None,
            cwd=None,
            stderr=subprocess.STDOUT):
  """Execute a bash command."""

  def _print(s):
    if print_output:
      print(s)

  print_string = f'Running: {command}'
  if cwd:
    print_string += f' (cwd="{cwd}")'
  _print(print_string)

  proc = execute_async(command, extra_environments, cwd=cwd, stderr=stderr)
  output = process_proc_output(proc, print_output)

  proc.wait()
  if proc.returncode != 0:
    _print(f'| Return code is non-zero ({proc.returncode}).')
    if exit_on_error:
      _print('| Exit.')
      sys.exit(proc.returncode)

  return (proc.returncode, output)
Kill the process by its name.
def kill_process(name):
  """Kill the process by its name."""
  plt = get_platform()
  if plt == 'windows':
    execute(
        'wmic process where (commandline like "%%%s%%") delete' % name,
        exit_on_error=False)
  elif plt in ['linux', 'macos']:
    execute(f'pkill -KILL -f "{name}"', exit_on_error=False)
Check if git is dirty.
def is_git_dirty():
  """Check if git is dirty."""
  _, output = execute('git status --porcelain')
  return output
Return path to chromedriver binary.
def get_chromedriver_path():
  """Return path to chromedriver binary."""
  if get_platform() == 'windows':
    chromedriver_binary = 'chromedriver.exe'
    binary_directory = 'Scripts'
  else:
    chromedriver_binary = 'chromedriver'
    binary_directory = 'bin'

  return os.path.join(os.environ['ROOT_DIR'], 'ENV', binary_directory,
                      chromedriver_binary)
Install the latest chromedriver binary in the virtualenv.
def _install_chromedriver():
  """Install the latest chromedriver binary in the virtualenv."""
  # Download a file containing the version number of the latest release.
  version_request = urllib.request.urlopen(constants.CHROMEDRIVER_VERSION_URL)
  version = version_request.read().decode()

  plt = get_platform()
  if plt == 'linux':
    archive_name = 'chromedriver_linux64.zip'
  elif plt == 'macos':
    archive_name = 'chromedriver_mac64.zip'
  elif plt == 'windows':
    archive_name = 'chromedriver_win32.zip'

  archive_request = urllib.request.urlopen(
      constants.CHROMEDRIVER_DOWNLOAD_PATTERN.format(
          version=version, archive_name=archive_name))
  archive_io = io.BytesIO(archive_request.read())
  chromedriver_archive = zipfile.ZipFile(archive_io)

  chromedriver_path = get_chromedriver_path()
  output_directory = os.path.dirname(chromedriver_path)
  chromedriver_binary = os.path.basename(chromedriver_path)

  chromedriver_archive.extract(chromedriver_binary, output_directory)
  os.chmod(chromedriver_path, 0o750)
  print(f'Installed chromedriver at: {chromedriver_path}')
Get the pip binary name.
def _pip():
  """Get the pip binary name."""
  return 'pip3'
Output a requirements.txt given a locked Pipfile.
def _pipfile_to_requirements(pipfile_dir, requirements_path, dev=False):
  """Output a requirements.txt given a locked Pipfile."""
  dev_arg = ''
  if dev:
    dev_arg = '--dev'

  return_code, output = execute(
      f'python -m pipenv requirements {dev_arg}',
      exit_on_error=False,
      cwd=pipfile_dir,
      extra_environments={'PIPENV_IGNORE_VIRTUALENVS': '1'},
      stderr=subprocess.DEVNULL)
  if return_code != 0:
    # Older pipenv version.
    return_code, output = execute(
        f'python -m pipenv lock -r --no-header {dev_arg}',
        exit_on_error=False,
        cwd=pipfile_dir,
        extra_environments={'PIPENV_IGNORE_VIRTUALENVS': '1'},
        stderr=subprocess.DEVNULL)
    if return_code != 0:
      raise RuntimeError('Failed to generate requirements from Pipfile.')

  with open(requirements_path, 'wb') as f:
    f.write(output)
Perform pip install using requirements_path onto target_path.
def _install_pip(requirements_path, target_path):
  """Perform pip install using requirements_path onto target_path."""
  if os.path.exists(target_path):
    shutil.rmtree(target_path)

  execute(
      '{pip} install -r {requirements_path} --upgrade --target {target_path}'.
      format(
          pip=_pip(),
          requirements_path=requirements_path,
          target_path=target_path))
Install platform specific pip packages.
def _install_platform_pip(requirements_path, target_path, platform_name):
  """Install platform specific pip packages."""
  pip_platform = constants.PLATFORMS.get(platform_name)
  if not pip_platform:
    raise OSError(f'Unknown platform: {platform_name}.')

  # Some platforms can specify multiple pip platforms (e.g. macOS has multiple
  # SDK versions).
  if isinstance(pip_platform, str):
    pip_platforms = (pip_platform,)
  else:
    assert isinstance(pip_platform, tuple)
    pip_platforms = pip_platform

  pip_abi = constants.ABIS[platform_name]

  for pip_platform in pip_platforms:
    temp_dir = tempfile.mkdtemp()

    return_code, _ = execute(
        f'{_pip()} download --no-deps --only-binary=:all: '
        f'--platform={pip_platform} --abi={pip_abi} -r {requirements_path} -d '
        f'{temp_dir}',
        exit_on_error=False)
    if return_code != 0:
      print(f'Did not find package for platform: {pip_platform}')
      continue

    execute(f'unzip -o -d {target_path} "{temp_dir}/*.whl"')
    shutil.rmtree(temp_dir, ignore_errors=True)
    break

  if return_code != 0:
    raise RuntimeError(
        f'Failed to find package in supported platforms: {pip_platforms}')