Remove files whose filenames are invalid for App Engine.
def _remove_invalid_files():
  """Remove files whose filenames are invalid for App Engine."""
  for name in INVALID_FILENAMES:
    if os.path.exists(name):
      os.remove(name)
Install dependencies for bots.
def install_dependencies(platform_name=None):
  """Install dependencies for bots."""
  _pipfile_to_requirements('src', 'src/requirements.txt')
  # Hack: Use "dev-packages" to specify App Engine only packages.
  _pipfile_to_requirements('src', 'src/appengine/requirements.txt', dev=True)

  _install_pip('src/requirements.txt', 'src/third_party')
  if platform_name:
    _install_platform_pip(
        'src/platform_requirements.txt',
        'src/third_party',
        platform_name=platform_name)

  _install_pip('src/appengine/requirements.txt', 'src/appengine/third_party')
  _remove_invalid_files()

  execute('bower install --allow-root')
  _install_chromedriver()
Removes a symlink.
def remove_symlink(target):
  """Removes a symlink."""
  if not os.path.exists(target):
    return

  if os.path.isdir(target) and get_platform() == 'windows':
    os.rmdir(target)
  else:
    os.remove(target)
Create a symlink at target that points to src.
def symlink(src, target):
  """Create a symlink at target that points to src."""
  src = os.path.abspath(src)
  target = os.path.abspath(target)

  remove_symlink(target)
  if get_platform() == 'windows':
    execute(rf'cmd /c mklink /j {target} {src}')
  else:
    os.symlink(src, target)

  assert os.path.exists(target), f'Failed to create {target} symlink for {src}.'
  print(f'Created symlink: source: {src}, target {target}.')
Copy directory.
def copy_dir(src, target):
  """Copy directory."""
  if os.path.exists(target):
    shutil.rmtree(target, ignore_errors=True)

  shutil.copytree(src, target)
Check to see if filename exists in the user's PATH.
def has_file_in_path(filename):
  """Check to see if filename exists in the user's PATH."""
  path = os.getenv('PATH')
  for path_component in path.split(':'):
    if os.path.isfile(os.path.join(path_component, filename)):
      return True

  return False
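Splitting PATH on ':' works on POSIX systems but not on Windows, where the separator is ';'. A portable variant can rely on os.pathsep, or on shutil.which from the standard library. The sketch below is illustrative and is not part of this codebase.

import os
import shutil


def has_file_in_path_portable(filename):
  """Portable PATH lookup using os.pathsep instead of a hard-coded ':'."""
  for path_component in os.getenv('PATH', '').split(os.pathsep):
    if os.path.isfile(os.path.join(path_component, filename)):
      return True
  return False


# shutil.which performs the same search (and also checks executability).
print(has_file_in_path_portable('python'), shutil.which('python'))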
Returns a list of all files recursively under a given directory.
def get_all_files(directory):
  """Returns a list of all files recursively under a given directory."""
  all_files = []
  for root, _, files in os.walk(directory):
    for filename in files:
      filepath = os.path.join(root, filename)
      all_files.append(filepath)

  return all_files
Get the integration test bucket.
def test_bucket(env_var):
  """Get the integration test bucket."""
  bucket = os.getenv(env_var)
  if not bucket:
    raise RuntimeError(f'You need to specify {env_var} for integration testing')

  return bucket
Kill leftover instances of cloud emulators and dev_appserver.
def kill_leftover_emulators():
  """Kill leftover instances of cloud emulators and dev_appserver."""
  kill_process('dev_appserver.py')
  kill_process('CloudDatastore.jar')
  kill_process('pubsub-emulator')
  kill_process('run_bot')
Get the platform.
def get_platform():
  """Get the platform."""
  if platform.system() == 'Linux':
    return 'linux'

  if platform.system() == 'Darwin':
    return 'macos'

  if platform.system() == 'Windows':
    return 'windows'

  raise OSError(f'Unknown platform: {platform.system()}.')
Recursively copy from src_dir to dst_dir, replacing files but only if they're newer or don't exist.
def update_dir(src_dir, dst_dir):
  """Recursively copy from src_dir to dst_dir, replacing files but only if
  they're newer or don't exist."""
  # TODO(metzman): Replace this with
  # shutil.copytree(src_dir, dst_dir, copy_function=copy_if_newer)
  # after we migrate to Python 3.9.
  dir_util.copy_tree(src_dir, dst_dir, update=True)
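The TODO above points at a shutil-based replacement. A minimal sketch of that approach, assuming Python 3.8+ for dirs_exist_ok and sketching the copy_if_newer helper named in the TODO (its real definition is not shown here), could look like this:

import os
import shutil


def copy_if_newer(src, dst):
  """Copy src over dst only when dst is missing or older (sketched helper)."""
  if not os.path.exists(dst) or os.path.getmtime(src) > os.path.getmtime(dst):
    shutil.copy2(src, dst)


def update_dir_shutil(src_dir, dst_dir):
  """shutil-based equivalent of dir_util.copy_tree(..., update=True)."""
  # dirs_exist_ok requires Python 3.8+; copy_function is applied per file.
  shutil.copytree(src_dir, dst_dir, copy_function=copy_if_newer,
                  dirs_exist_ok=True)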
Get the numeric project ID.
def get_numeric_project_id(gcloud, project_id):
  """Get the numeric project ID."""
  project_info = json.loads(
      gcloud.run('projects', 'describe', project_id, '--format=json'))
  return project_info['projectNumber']
Get the default App Engine service account.
def app_engine_service_account(project_id):
  """Get the default App Engine service account."""
  return project_id + '@appspot.gserviceaccount.com'
Get the default compute engine service account.
def compute_engine_service_account(gcloud, project_id):
  """Get the default compute engine service account."""
  return (get_numeric_project_id(gcloud, project_id) +
          '[email protected]')
Enable required services.
def enable_services(gcloud):
  """Enable required services."""
  for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):
    end = i + _ENABLE_SERVICE_BATCH_SIZE
    gcloud.run('services', 'enable', *_REQUIRED_SERVICES[i:end])
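As a quick illustration of the batching slice: iterating with a step of the batch size and slicing up to i + batch_size yields non-overlapping chunks.

# Self-contained demo of the batching pattern with made-up items.
items = list(range(7))
batch_size = 3
for i in range(0, len(items), batch_size):
  end = i + batch_size
  print(items[i:end])
# -> [0, 1, 2], [3, 4, 5], [6]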
Replace contents of a file.
def replace_file_contents(file_path, replacements):
  """Replace contents of a file."""
  with open(file_path, encoding='utf-8') as f:
    old_contents = f.read()
    contents = old_contents
    for find, replace in replacements:
      contents = contents.replace(find, replace)

  if contents == old_contents:
    return

  with open(file_path, 'w', encoding='utf-8') as f:
    f.write(contents)
Return a project-specific bucket name.
def project_bucket(project_id, bucket_name):
  """Return a project-specific bucket name."""
  return f'{bucket_name}.{project_id}.appspot.com'
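For illustration, the naming scheme yields bucket names like the following. The function is repeated here so the demo runs on its own, and 'my-clusterfuzz' is a made-up project ID.

def project_bucket(project_id, bucket_name):
  """Return a project-specific bucket name (copied from above for the demo)."""
  return f'{bucket_name}.{project_id}.appspot.com'


print(project_bucket('my-clusterfuzz', 'blobs'))
# -> blobs.my-clusterfuzz.appspot.com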
Create a new config directory.
def create_new_config(gcloud, project_id, new_config_dir, domain_verification_tag, bucket_replacements, gae_location, gce_zone, firebase_api_key): """Create a new config directory.""" if os.path.exists(new_config_dir): print('Overwriting existing directory.') shutil.rmtree(new_config_dir) gae_region = appengine.region_from_location(gae_location) replacements = [ ('test-clusterfuzz-service-account-email', compute_engine_service_account(gcloud, project_id)), ('test-clusterfuzz', project_id), ('test-project', project_id), ('domain-verification-tag', domain_verification_tag), ('gae-region', gae_region), ('gce-zone', gce_zone), ('firebase-api-key', firebase_api_key), ] replacements.extend(bucket_replacements) shutil.copytree(os.path.join('configs', 'test'), new_config_dir) for root_dir, _, filenames in os.walk(new_config_dir): for filename in filenames: file_path = os.path.join(root_dir, filename) replace_file_contents(file_path, replacements)
Deploy to App Engine.
def deploy_appengine(gcloud, config_dir, appengine_location):
  """Deploy to App Engine."""
  try:
    gcloud.run('app', 'describe')
  except common.GcloudError:
    # Create new App Engine app if it does not exist.
    gcloud.run('app', 'create', '--region=' + appengine_location)

  subprocess.check_call([
      'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',
      '--prod', '--config-dir', config_dir
  ])
Deploy source zips.
def deploy_zips(config_dir):
  """Deploy source zips."""
  subprocess.check_call([
      'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',
      '--config-dir', config_dir
  ])
Create buckets.
def create_buckets(project_id, buckets):
  """Create buckets."""
  gsutil = common.Gsutil()
  for bucket in buckets:
    try:
      gsutil.run('defstorageclass', 'get', 'gs://' + bucket)
    except common.GsutilError:
      # Create the bucket if it does not exist.
      gsutil.run('mb', '-p', project_id, 'gs://' + bucket)
Set CORS settings on buckets.
def set_cors(config_dir, buckets):
  """Set CORS settings on buckets."""
  gsutil = common.Gsutil()
  cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')
  for bucket in buckets:
    gsutil.run('cors', 'set', cors_file_path, 'gs://' + bucket)
Add an IAM role to a service account.
def add_service_account_role(gcloud, project_id, service_account, role):
  """Add an IAM role to a service account."""
  gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',
             'serviceAccount:' + service_account, '--role', role)
Create a new config directory and deployment.
def execute(args): """Create a new config directory and deployment.""" # Check this early on, as the deployment at the end would fail otherwise. if common.is_git_dirty(): print('Your checkout contains uncommitted changes. Cannot proceed.') sys.exit(1) verifier = DomainVerifier(args.oauth_client_secrets_path) gcloud = common.Gcloud(args.project_id) enable_services(gcloud) # Get tag for domain verification. appspot_domain = 'https://' + args.project_id + '.appspot.com/' domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain) blobs_bucket = project_bucket(args.project_id, 'blobs') deployment_bucket = project_bucket(args.project_id, 'deployment') bucket_replacements = ( ('test-blobs-bucket', blobs_bucket), ('test-deployment-bucket', deployment_bucket), ('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')), ('test-backup-bucket', project_bucket(args.project_id, 'backup')), ('test-coverage-bucket', project_bucket(args.project_id, 'coverage')), ('test-fuzzer-logs-bucket', project_bucket(args.project_id, 'fuzzer-logs')), ('test-corpus-bucket', project_bucket(args.project_id, 'corpus')), ('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')), ('test-shared-corpus-bucket', project_bucket(args.project_id, 'shared-corpus')), ('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')), ) # Write new configs. create_new_config(gcloud, args.project_id, args.new_config_dir, domain_verification_tag, bucket_replacements, args.appengine_location, args.gce_zone, args.firebase_api_key) prev_dir = os.getcwd() os.chdir(args.new_config_dir) # Deploy App Engine and finish verification of domain. os.chdir(prev_dir) deploy_appengine( gcloud, args.new_config_dir, appengine_location=args.appengine_location) verifier.verify(appspot_domain) # App Engine service account requires: # - Domain ownership to create domain namespaced GCS buckets # - Datastore export permission for periodic backups. # - Service account signing permission for GCS uploads. service_account = app_engine_service_account(args.project_id) verifier.add_owner(appspot_domain, service_account) add_service_account_role(gcloud, args.project_id, service_account, 'roles/datastore.importExportAdmin') add_service_account_role(gcloud, args.project_id, service_account, 'roles/iam.serviceAccountTokenCreator') # Create buckets now that domain is verified. create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements]) # Set CORS settings on the buckets. set_cors(args.new_config_dir, [blobs_bucket]) # Set deployment bucket for the cloud project. gcloud.run('compute', 'project-info', 'add-metadata', '--metadata=deployment-bucket=' + deployment_bucket) # Deploy source zips. deploy_zips(args.new_config_dir)
Used for mocks.
def now():
  """Used for mocks."""
  return datetime.datetime.now()
Get list of services from deployment yamls.
def _get_services(paths):
  """Get list of services from deployment yamls."""
  services = []
  for path in paths:
    for line in open(path):
      match = SERVICE_REGEX.search(line)
      if match:
        matched_service = match.group(1)
        if matched_service not in services:
          services.append(matched_service)
        break

  return services
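SERVICE_REGEX is defined elsewhere in the module and is not shown in this excerpt. Assuming it matches "service: <name>" lines in an App Engine yaml, a self-contained sketch of the extraction looks like this:

import io
import re

# Plausible definition of SERVICE_REGEX for this demo (an assumption, not the
# module's actual constant).
SERVICE_REGEX = re.compile(r'service\s*:\s*(.*)')

sample_yaml = io.StringIO('runtime: python39\nservice: cron-service\n')
for line in sample_yaml:
  match = SERVICE_REGEX.search(line)
  if match:
    print(match.group(1))  # -> cron-service
    break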
Get the redis IP address.
def _get_redis_ip(project):
  """Get the redis IP address."""
  region = appengine.region(project)
  return_code, ip = common.execute(
      'gcloud redis instances describe redis-instance '
      '--project={project} --region={region} '
      '--format="value(host)"'.format(project=project, region=region))

  if return_code:
    raise RuntimeError('Failed to get redis IP.')

  return ip.decode('utf-8').strip()
Additional environment variables to include for App Engine.
def _additional_app_env_vars(project):
  """Additional environment variables to include for App Engine."""
  return {
      'REDIS_HOST': _get_redis_ip(project),
  }
Deploy app in production.
def _deploy_app_prod(project, deployment_bucket, yaml_paths, package_zip_paths, deploy_appengine=True, test_deployment=False): """Deploy app in production.""" if deploy_appengine: services = _get_services(yaml_paths) rebased_yaml_paths = appengine.copy_yamls_and_preprocess( yaml_paths, _additional_app_env_vars(project)) _deploy_appengine( project, [INDEX_YAML_PATH] + rebased_yaml_paths, stop_previous_version=False) for path in rebased_yaml_paths: os.remove(path) for service in services: _delete_old_versions(project, service, VERSION_DELETE_WINDOW_MINUTES) if package_zip_paths: for package_zip_path in package_zip_paths: _deploy_zip( deployment_bucket, package_zip_path, test_deployment=test_deployment) _deploy_manifest( deployment_bucket, constants.PACKAGE_TARGET_MANIFEST_PATH, test_deployment=test_deployment)
Deploy app in staging.
def _deploy_app_staging(project, yaml_paths):
  """Deploy app in staging."""
  services = _get_services(yaml_paths)

  rebased_yaml_paths = appengine.copy_yamls_and_preprocess(
      yaml_paths, _additional_app_env_vars(project))
  _deploy_appengine(project, rebased_yaml_paths, stop_previous_version=True)
  for path in rebased_yaml_paths:
    os.remove(path)

  for service in services:
    _delete_old_versions(project, service, 0)
Return the versions that should be deleted.
def _versions_to_delete(versions, window):
  """Return the versions that should be deleted."""
  # gcloud app versions list returns local time.
  cutoff = now() - datetime.timedelta(minutes=window)

  # Don't delete any versions that stopped serving within
  # |window| minutes before now (or the latest one, since that's definitely
  # still serving). This is so that cron jobs have a chance to finish.
  # Find the first version for which the deploy time of the next version is
  # after the cutoff. This is the first version that we do not delete, because
  # it was still serving after the cutoff.
  delete_end = 0
  while (delete_end < len(versions) - 1 and
         versions[delete_end + 1].deploy_time <= cutoff):
    delete_end += 1

  return versions[:delete_end]
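To make the cutoff logic concrete, here is a self-contained sketch with the clock passed in explicitly; the Version tuple is redefined locally for the demo (the module's own Version type is not shown in this excerpt) and the deploy times are fabricated.

import collections
import datetime

Version = collections.namedtuple('Version', ['id', 'deploy_time', 'traffic_split'])


def versions_to_delete(versions, window, current_time):
  """Same cutoff logic as above, with the clock passed in for easy testing."""
  cutoff = current_time - datetime.timedelta(minutes=window)
  delete_end = 0
  while (delete_end < len(versions) - 1 and
         versions[delete_end + 1].deploy_time <= cutoff):
    delete_end += 1
  return versions[:delete_end]


# v1 was replaced by v2 two hours before the cutoff, so it is deletable.
# v2 was replaced by v3 only 30 minutes ago (inside the window), so it stays.
now_ = datetime.datetime(2023, 1, 1, 12, 0)
versions = [
    Version('v1', now_ - datetime.timedelta(hours=5), 0.0),
    Version('v2', now_ - datetime.timedelta(hours=2), 0.0),
    Version('v3', now_ - datetime.timedelta(minutes=30), 1.0),
]
print([v.id for v in versions_to_delete(versions, window=60, current_time=now_)])
# -> ['v1']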
Delete old versions.
def _delete_old_versions(project, service, delete_window): """Delete old versions.""" def _to_datetime(entry): """Parse datetime entry.""" return datetime.datetime(entry['year'], entry['month'], entry['day'], entry['hour'], entry['minute'], entry['second']) _, versions = common.execute('gcloud app versions list --format=json ' '--project=%s --service=%s' % (project, service)) versions = [ Version(version['id'], _to_datetime(version['last_deployed_time']), version['traffic_split']) for version in json.loads(versions) ] versions.sort(key=lambda v: v.deploy_time) assert versions[-1].traffic_split == 1.0 to_delete = _versions_to_delete(versions, delete_window) if not to_delete: return versions = ' '.join(version.id for version in to_delete) common.execute('gcloud app versions delete --quiet ' '--project=%s --service=%s %s' % (project, service, versions))
Deploy to appengine using `yamls`.
def _deploy_appengine(project, yamls, stop_previous_version, version=None): """Deploy to appengine using `yamls`.""" stop_previous_version_arg = ('--stop-previous-version' if stop_previous_version else '--no-stop-previous-version') version_arg = '--version=' + version if version else '' for retry_num in range(DEPLOY_RETRIES + 1): return_code, _ = common.execute( 'gcloud app deploy %s --quiet ' '--project=%s %s %s' % (stop_previous_version_arg, project, version_arg, ' '.join(yamls)), exit_on_error=False) if return_code == 0: break if retry_num == DEPLOY_RETRIES: print('Failed to deploy after %d retries.' % DEPLOY_RETRIES) sys.exit(return_code) print('gcloud deployment failed, retrying...') time.sleep(RETRY_WAIT_SECONDS)
Find one individual file that exceeds limit within path (recursively).
def find_file_exceeding_limit(path, limit):
  """Find one individual file that exceeds limit within path (recursively)."""
  for root, _, filenames in os.walk(path):
    for filename in filenames:
      full_path = os.path.join(root, filename)
      if os.path.getsize(full_path) >= limit:
        return full_path

  return None
Deploy zip to GCS.
def _deploy_zip(bucket_name, zip_path, test_deployment=False):
  """Deploy zip to GCS."""
  if test_deployment:
    common.execute(f'gsutil cp {zip_path} gs://{bucket_name}/test-deployment/'
                   f'{os.path.basename(zip_path)}')
  else:
    common.execute('gsutil cp %s gs://%s/%s' %
                   (zip_path, bucket_name, os.path.basename(zip_path)))
Deploy source manifest to GCS.
def _deploy_manifest(bucket_name, manifest_path, test_deployment=False):
  """Deploy source manifest to GCS."""
  if sys.version_info.major == 3:
    manifest_suffix = '.3'
  else:
    manifest_suffix = ''

  if test_deployment:
    common.execute(f'gsutil cp {manifest_path} '
                   f'gs://{bucket_name}/test-deployment/'
                   f'clusterfuzz-source.manifest{manifest_suffix}')
  else:
    common.execute(f'gsutil cp {manifest_path} '
                   f'gs://{bucket_name}/'
                   f'clusterfuzz-source.manifest{manifest_suffix}')
Update deployment manager settings.
def _update_deployment_manager(project, name, config_path):
  """Update deployment manager settings."""
  if not os.path.exists(config_path):
    return

  gcloud = common.Gcloud(project)
  operation = 'update'
  try:
    gcloud.run('deployment-manager', 'deployments', 'describe', name)
  except common.GcloudError:
    # Does not exist.
    operation = 'create'

  for _ in range(DEPLOY_RETRIES + 1):
    try:
      gcloud.run('deployment-manager', 'deployments', operation, name,
                 '--config=' + config_path)
      break
    except common.GcloudError:
      time.sleep(RETRY_WAIT_SECONDS)
Update pubsub queues.
def _update_pubsub_queues(project):
  """Update pubsub queues."""
  _update_deployment_manager(
      project, 'pubsub',
      os.path.join(environment.get_config_directory(), 'pubsub', 'queues.yaml'))
Get region instance counts.
def _get_region_counts(): """Get region instance counts.""" counts = {} regions = local_config.MonitoringRegionsConfig() clusters = local_config.Config(local_config.GCE_CLUSTERS_PATH).get() def get_region(name): """Get the region.""" for pattern in regions.get('patterns'): if re.match(pattern['pattern'], name + '-0000'): return pattern['name'] return None # Compute expected bot counts per region. for config in clusters.values(): for name, cluster in config['clusters'].items(): region = get_region(name) if not region: continue counts.setdefault(region, 0) counts[region] += cluster['instance_count'] return counts
Preprocess alerts.
# Decorated so callers can use `with _preprocess_alerts(...)`; the decorator
# (and the contextlib import it needs) appears to have been dropped when this
# snippet was flattened.
@contextlib.contextmanager
def _preprocess_alerts(alerts_path):
  """Preprocess alerts."""
  with open(alerts_path) as f:
    alerts_data = f.read()

  counts = _get_region_counts()
  for region, count in counts.items():
    alerts_data = re.sub('BOT_COUNT:' + region + r'(?=\s|$)',
                         str(int(count * EXPECTED_BOT_COUNT_PERCENT)),
                         alerts_data)

  with tempfile.NamedTemporaryFile(mode='w') as f:
    f.write(alerts_data)
    f.flush()
    yield f.name
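The `with _preprocess_alerts(...)` usage in the next function relies on the contextlib.contextmanager pattern restored above: a generator that yields a temporary file's name and cleans the file up when the block exits. A minimal stand-alone sketch of that pattern (the uppercase transform is just a stand-in for the real substitution, and re-opening the file by name assumes a POSIX system):

import contextlib
import tempfile


@contextlib.contextmanager
def preprocessed_copy(text):
  """Yield the path of a temp file holding a transformed copy of `text`."""
  with tempfile.NamedTemporaryFile(mode='w') as f:
    f.write(text.upper())  # Stand-in for the real substitution step.
    f.flush()
    yield f.name  # File is deleted automatically when the block exits.


with preprocessed_copy('bot_count: 10') as path:
  print(open(path).read())  # -> BOT_COUNT: 10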
Update monitoring alerts.
def _update_alerts(project):
  """Update monitoring alerts."""
  if not local_config.ProjectConfig().get('monitoring.enabled'):
    return

  alerts_path = os.path.join(environment.get_config_directory(), 'monitoring',
                             'alerts.yaml')
  with _preprocess_alerts(alerts_path) as processed_alerts_path:
    _update_deployment_manager(project, 'alerts', processed_alerts_path)
Update bigquery datasets and tables.
def _update_bigquery(project):
  """Update bigquery datasets and tables."""
  _update_deployment_manager(
      project, 'bigquery',
      os.path.join(environment.get_config_directory(), 'bigquery',
                   'datasets.yaml'))
Update redis instance.
def _update_redis(project): """Update redis instance.""" _update_deployment_manager( project, 'redis', os.path.join(environment.get_config_directory(), 'redis', 'instance.yaml')) region = appengine.region(project) return_code, _ = common.execute( 'gcloud compute networks vpc-access connectors describe ' 'connector --region={region} ' '--project={project}'.format(project=project, region=region), exit_on_error=False) if return_code: # Does not exist. common.execute('gcloud compute networks vpc-access connectors create ' 'connector --network=default --region={region} ' '--range=10.8.0.0/28 ' '--project={project}'.format(project=project, region=region))
Get remote sha of origin/master.
def get_remote_sha():
  """Get remote sha of origin/master."""
  _, remote_sha_line = common.execute('git ls-remote origin refs/heads/master')
  return re.split(br'\s+', remote_sha_line)[0]
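For context, `git ls-remote` prints the SHA, whitespace, then the ref name, as bytes; that is why the output is split on whitespace and later decoded. A stand-alone sketch using subprocess instead of the project's common.execute helper:

import re
import subprocess

# Assumes this runs inside a git checkout with an `origin` remote.
output = subprocess.check_output(
    ['git', 'ls-remote', 'origin', 'refs/heads/master'])
remote_sha = re.split(br'\s+', output)[0]
print(remote_sha.decode('utf-8'))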
Check if the current state is different from origin/master.
def is_diff_origin_master():
  """Check if the current state is different from origin/master."""
  common.execute('git fetch')
  remote_sha = get_remote_sha()
  _, local_sha = common.execute('git rev-parse HEAD')
  _, diff_output = common.execute('git diff origin/master --stat')

  return diff_output.strip() or remote_sha.strip() != local_sha.strip()
Helper for staging deployment.
def _staging_deployment_helper(python3=True):
  """Helper for staging deployment."""
  config = local_config.Config(local_config.GAE_CONFIG_PATH)
  project = config.get('application_id')

  print('Deploying %s to staging.' % project)

  deployment_config = config.sub_config('deployment')
  if python3:
    path = 'staging3'
  else:
    path = 'staging'

  yaml_paths = deployment_config.get_absolute_path(path)

  _deploy_app_staging(project, yaml_paths)
  print('Staging deployment finished.')
Helper for production deployment.
def _prod_deployment_helper(config_dir, package_zip_paths, deploy_appengine=True, deploy_k8s=True, python3=True, test_deployment=False): """Helper for production deployment.""" config = local_config.Config() deployment_bucket = config.get('project.deployment.bucket') gae_config = config.sub_config(local_config.GAE_CONFIG_PATH) gae_deployment = gae_config.sub_config('deployment') project = gae_config.get('application_id') print('Deploying %s to prod.' % project) if python3: path = 'prod3' else: path = 'prod' yaml_paths = gae_deployment.get_absolute_path(path, default=[]) if not yaml_paths: deploy_appengine = False if deploy_appengine: _update_pubsub_queues(project) _update_alerts(project) _update_bigquery(project) _update_redis(project) _deploy_app_prod( project, deployment_bucket, yaml_paths, package_zip_paths, deploy_appengine=deploy_appengine, test_deployment=test_deployment) if deploy_appengine: common.execute( f'python butler.py run setup --config-dir {config_dir} --non-dry-run') if deploy_k8s: _deploy_terraform(config_dir) _deploy_k8s(config_dir) print('Production deployment finished.')
Deploys GKE cluster via terraform.
def _deploy_terraform(config_dir):
  """Deploys GKE cluster via terraform."""
  terraform_dir = os.path.join(config_dir, 'terraform')
  terraform = f'terraform -chdir={terraform_dir}'
  common.execute(f'{terraform} init')
  common.execute(f'{terraform} apply -target=module.clusterfuzz -auto-approve')
  common.execute(f'rm -rf {terraform_dir}/.terraform*')
Deploys all k8s workloads.
def _deploy_k8s(config_dir): """Deploys all k8s workloads.""" k8s_dir = os.path.join('infra', 'k8s') k8s_instance_dir = os.path.join(config_dir, 'k8s') k8s_project = local_config.ProjectConfig().get('env.K8S_PROJECT') redis_host = _get_redis_ip(k8s_project) os.environ['REDIS_HOST'] = redis_host common.execute(f'gcloud config set project {k8s_project}') common.execute( 'gcloud container clusters get-credentials clusterfuzz-cronjobs-gke ' f'--region={appengine.region(k8s_project)}') for workload in common.get_all_files(k8s_dir): # pylint:disable=anomalous-backslash-in-string common.execute(fr'envsubst \$REDIS_HOST < {workload} | kubectl apply -f -') # Deploys cron jobs that are defined in the current instance configuration. for workload in common.get_all_files(k8s_instance_dir): # pylint:disable=anomalous-backslash-in-string common.execute(fr'envsubst \$REDIS_HOST < {workload} | kubectl apply -f -')
Deploy ClusterFuzz to App Engine.
def execute(args): """Deploy Clusterfuzz to Appengine.""" if sys.version_info.major != 3 or sys.version_info.minor != 7: print('You can only deploy from Python 3.7. Install Python 3.7 and ' 'run: `PYTHON=python3.7 local/install_deps.bash`') sys.exit(1) os.environ['ROOT_DIR'] = '.' if not os.path.exists(args.config_dir): print('Please provide a valid configuration directory.') sys.exit(1) os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir if not common.has_file_in_path('gcloud'): print('Please install gcloud.') sys.exit(1) is_ci = os.getenv('TEST_BOT_ENVIRONMENT') if not is_ci and common.is_git_dirty(): print('Your branch is dirty. Please fix before deploying.') sys.exit(1) if not common.has_file_in_path('gsutil'): print('gsutil not found in PATH.') sys.exit(1) # Build templates before deployment. appengine.build_templates() if not is_ci and not args.staging: if is_diff_origin_master(): if args.force: print('You are not on origin/master. --force is used. Continue.') for _ in range(3): print('.') time.sleep(1) print() else: print('You are not on origin/master. Please fix or use --force.') sys.exit(1) if args.staging: revision = common.compute_staging_revision() platforms = ['linux'] # No other platforms required. elif args.prod: revision = common.compute_prod_revision() platforms = list(constants.PLATFORMS.keys()) else: print('Please specify either --prod or --staging. For production ' 'deployments, you probably want to use deploy.sh from your ' 'configs directory instead.') sys.exit(1) deploy_zips = 'zips' in args.targets deploy_appengine = 'appengine' in args.targets deploy_k8s = 'k8s' in args.targets test_deployment = 'test_deployment' in args.targets if test_deployment: deploy_appengine = False deploy_k8s = False deploy_zips = True is_python3 = sys.version_info.major == 3 package_zip_paths = [] if deploy_zips: for platform_name in platforms: package_zip_paths.append( package.package( revision, platform_name=platform_name, python3=is_python3)) else: # package.package calls these, so only set these up if we're not packaging, # since they can be fairly slow. appengine.symlink_dirs() common.install_dependencies('linux') with open(constants.PACKAGE_TARGET_MANIFEST_PATH, 'w') as f: f.write('%s\n' % revision) too_large_file_path = find_file_exceeding_limit('src/appengine', APPENGINE_FILESIZE_LIMIT) if too_large_file_path: print(("%s is larger than %d bytes. It wouldn't be deployed to appengine." ' Please fix.') % (too_large_file_path, APPENGINE_FILESIZE_LIMIT)) sys.exit(1) if args.staging: _staging_deployment_helper(python3=is_python3) else: _prod_deployment_helper( args.config_dir, package_zip_paths, deploy_appengine, deploy_k8s, python3=is_python3, test_deployment=test_deployment) with open(constants.PACKAGE_TARGET_MANIFEST_PATH) as f: print('Source updated to %s' % f.read()) if platforms[-1] != common.get_platform(): # Make sure the installed dependencies are for the current platform. common.install_dependencies()
Format changed code.
def execute(_): """Format changed code.""" _, output = common.execute('git diff --name-only FETCH_HEAD') file_paths = [ f.decode('utf-8') for f in output.splitlines() if os.path.exists(f) ] py_changed_file_paths = [ f for f in file_paths if f.endswith('.py') and # Exclude auto-generated files. not f.endswith('_pb2.py') and not f.endswith('_pb2_grpc.py') ] if py_changed_file_paths: common.execute(f'yapf -p -i {" ".join(py_changed_file_paths)}') common.execute(f'{ISORT_CMD} {" ".join(py_changed_file_paths)}') go_changed_file_paths = [f for f in file_paths if f.endswith('.go')] for file_path in go_changed_file_paths: common.execute('gofmt -w ' + file_path)
Check that we're in a virtualenv.
def check_virtualenv():
  """Check that we're in a virtualenv."""
  if sys.version_info.major != 3:
    raise RuntimeError('Python 2 is no longer supported!')

  is_in_virtualenv = bool(os.getenv('VIRTUAL_ENV'))
  if not is_in_virtualenv:
    raise RuntimeError(
        'You are not in a virtual env environment. Please install it with'
        ' `./local/install_deps.bash` or load it with'
        ' `pipenv shell`. Then, you can re-run this command.')
Check if we are in virtualenv and dev requirements are installed.
def check():
  """Check if we are in virtualenv and dev requirements are installed."""
  if os.getenv('TEST_BOT_ENVIRONMENT'):
    # Don't need to do these checks if we're in the bot environment.
    return

  check_virtualenv()
Run integration tests.
def execute(_): """Run integration tests.""" command = 'run_server' indicator = b'Booting worker' try: lines = [] server = common.execute_async( 'python -u butler.py {} --skip-install-deps'.format(command)) test_utils.wait_for_emulator_ready( server, command, indicator, timeout=RUN_SERVER_TIMEOUT, output_lines=lines) # Sleep a small amount of time to ensure the server is definitely ready. time.sleep(1) # Call setup ourselves instead of passing --bootstrap since we have no idea # when that finishes. # TODO(ochang): Make bootstrap a separate butler command and just call that. common.execute( ('python butler.py run setup ' '--non-dry-run --local --config-dir={config_dir}' ).format(config_dir=constants.TEST_CONFIG_DIR), exit_on_error=False) request = urllib.request.urlopen('http://' + constants.DEV_APPSERVER_HOST) request.read() # Raises exception on error except Exception: print('Error occurred:') print(b''.join(lines)) raise finally: server.terminate() # TODO(ochang): Test that bot runs, and do a basic fuzzing session to ensure # things work end to end. print('All end-to-end integration tests passed.')
Parse the failed test report from the Mocha HTML result.
def _parse_error_report(driver): """Parse failed test report from Mocha HTML result""" error_report = '' # Remove the replay buttons next to test names for elem in driver.find_elements_by_css_selector('#mocha-report .suite h2 a'): driver.execute_script('arguments[0].remove()', elem) suites = driver.find_elements_by_css_selector('#mocha-report .suite .suite') for suite in suites: failed_tests = suite.find_elements_by_css_selector('.test.fail') if not failed_tests: continue suite_name = suite.find_element_by_css_selector('h1').text.strip() error_report += '\n\n%s\n' % _SUITE_SEPARATOR error_report += '%s\n' % suite_name for failed_test in failed_tests: name = failed_test.find_element_by_css_selector('h2').text.strip() trace = failed_test.find_element_by_css_selector('.error').text.strip() trace = re.sub('^', '| ', trace) trace = re.sub('\n', '\n| ', trace) error_report += '%s\n' % _TEST_SEPARATOR error_report += 'Failed test: %s\n' % name error_report += '%s\n' % trace return error_report
Run Javascript unit tests. Here are the steps: 1. Execute the HTML with chromedriver. 2. Read the test result from the HTML.
def execute(args): """Run Javascript unit tests. Here are the steps: 1. Execute the HTML with chromedriver. 2. Read the test result from the HTML.""" test_filepath = os.path.join('src', 'appengine', 'private', 'test.html') print('Running chromedriver on %s' % test_filepath) chrome_options = webdriver.ChromeOptions() chrome_options.add_argument('--allow-file-access-from-files') is_ci = os.getenv('TEST_BOT_ENVIRONMENT') if is_ci: # Turn off sandbox since running under root, with trusted tests. chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--headless') driver = webdriver.Chrome( executable_path=common.get_chromedriver_path(), chrome_options=chrome_options) try: driver.get('file://%s' % os.path.abspath(test_filepath)) # Wait for tests to be completed. while True: success_count = driver.execute_script( 'return WCT._reporter.stats.passes;') failure_count = driver.execute_script( 'return WCT._reporter.stats.failures;') sys.stdout.write( '\rSuccess: %d, Failure: %d' % (success_count, failure_count)) sys.stdout.flush() is_complete = driver.execute_script('return WCT._reporter.complete;') if is_complete: break time.sleep(0.1) sys.stdout.write('\r' + (' ' * 70)) sys.stdout.flush() success_count = int( driver.find_element_by_css_selector('#mocha-stats .passes em').text) failure_count = int( driver.find_element_by_css_selector('#mocha-stats .failures em').text) error_report = _parse_error_report(driver) if error_report: print(error_report) print() print(_SUITE_SEPARATOR) print('Test results:') print('| Success: %d' % success_count) print('| Failure: %d' % failure_count) print(_SUITE_SEPARATOR) print() if args.persist: # pylint: disable=eval-used eval( input('--persist is used. Leave the browser open.' ' Press ENTER to close it:')) finally: driver.quit() if failure_count > 0: sys.exit(1)
Print error and track state via a global.
def _error(message=None):
  """Print error and track state via a global."""
  if message:
    print(message)

  global _error_occurred
  _error_occurred = True
Executes command, tracks error state.
def _execute_command_and_track_error(command):
  """Executes command, tracks error state."""
  returncode, output = common.execute(command, exit_on_error=False)
  if returncode != 0:
    _error()

  return output.decode('utf-8')
Run license header validation.
def license_validate(file_path): """Run license header validation.""" filename = os.path.basename(file_path) extension = os.path.splitext(file_path)[1] if (filename not in _LICENSE_CHECK_FILENAMES and extension not in _LICENSE_CHECK_EXTENSIONS): return path_directories = file_path.split(os.sep) if any(d in _LICENSE_CHECK_IGNORE_DIRECTORIES for d in path_directories): return source_filename = os.path.basename(file_path) if source_filename in _LICENSE_CHECK_IGNORE_FILENAMES: return with open(file_path) as f: data = f.read() if _LICENSE_CHECK_STRING in data or _LICENSE_CHECK_IGNORE in data: return _error('Failed: Missing license header for %s.' % file_path)
Validate that python imports are alphabetized.
def py_import_order(file_path):
  """Validate that python imports are alphabetized."""

  def _validate_block(import_block):
    """Ensure that a single block is ordered properly."""
    if not import_block:
      return []

    sorted_import_block = sorted(import_block, key=lambda i: i.lower())
    if sorted_import_block == import_block:
      return []

    return ['\n'.join(sorted_import_block)]

  with open(file_path) as f:
    file_content = f.read()

  imports = []
  corrected_import_blocks = []
  for line in file_content.splitlines():
    if line.startswith('import ') or line.startswith('from '):
      imports.append(line)
    else:
      corrected_import_blocks += _validate_block(imports)
      imports = []

  # Though rare, if a file ends with an import we must still validate them.
  corrected_import_blocks += _validate_block(imports)

  if not corrected_import_blocks:
    return

  suggestions = '\n\n--------\n\n'.join(corrected_import_blocks)
  _error(('Failed: File {filename} has non-alphabetized import blocks. '
          'Suggested order:\n\n{suggestions}').format(
              filename=file_path, suggestions=suggestions))
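A self-contained demo of the same block check on an in-memory snippet; the sample imports are arbitrary and only illustrate ordering.

sample = """import os
import collections

from foo import bar
"""

def sorted_blocks(text):
  """Return the suggested order for each import block that is not alphabetized."""
  bad_blocks, block = [], []
  for line in text.splitlines():
    if line.startswith(('import ', 'from ')):
      block.append(line)
    else:
      if block and sorted(block, key=str.lower) != block:
        bad_blocks.append(sorted(block, key=str.lower))
      block = []
  if block and sorted(block, key=str.lower) != block:
    bad_blocks.append(sorted(block, key=str.lower))
  return bad_blocks

print(sorted_blocks(sample))
# -> [['import collections', 'import os']]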
Check that the test directory has an __init__.py file. Otherwise, the tests do not execute at all.
def py_test_init_check(file_path):
  """Check that the test directory has an __init__.py file. Otherwise, the
  tests do not execute at all."""
  if not file_path.endswith(_PY_TEST_SUFFIX):
    return

  test_directory = os.path.dirname(file_path)
  if _PY_INIT_FILENAME not in os.listdir(test_directory):
    _error(f'Failed: Missing {_PY_INIT_FILENAME} file in test '
           f'directory {test_directory}.')
Run yaml validation.
def yaml_validate(file_path):
  """Run yaml validation."""
  if os.path.basename(file_path) in _YAML_EXCEPTIONS:
    return

  try:
    with open(file_path) as f:
      yaml.safe_load(f.read())
  except Exception as e:
    _error('Failed: Invalid yaml file %s.\n\n%s' % (file_path, e))
Check if a file is auto-generated so we don't lint it.
def is_auto_generated_file(filepath):
  """Check if a file is auto-generated so we don't lint it."""
  return (filepath.endswith('_pb2.py') or filepath.endswith('pb2_grpc.py') or
          os.path.dirname(filepath) == os.path.join(
              'src', 'clusterfuzz', '_internal', 'bot', 'tokenizer',
              'grammars'))
Lint changed code.
def execute(_): """Lint changed code.""" pythonpath = os.getenv('PYTHONPATH', '') module_parent_path = os.path.abspath(os.path.join(__file__, '..', '..', '..')) third_party_path = os.path.join(module_parent_path, 'third_party') os.environ['PYTHONPATH'] = ':'.join( [third_party_path, module_parent_path, pythonpath]) if 'GOOGLE_CLOUDBUILD' in os.environ: # Explicitly compare against master if we're running on the CI _, output = common.execute('git diff --name-only master FETCH_HEAD') else: _, output = common.execute('git diff --name-only FETCH_HEAD') file_paths = [ f.decode('utf-8') for f in output.splitlines() if os.path.exists(f) ] module_path = os.path.join(module_parent_path, 'clusterfuzz') py_changed_tests = [] py_changed_noncf_module = [] py_changed_nontests = [] go_changed_file_paths = [] yaml_changed_file_paths = [] for file_path in file_paths: if file_path.endswith('.go'): go_changed_file_paths.append(file_path) continue if file_path.endswith('.yaml'): yaml_changed_file_paths.append(file_path) continue if not file_path.endswith('.py') or is_auto_generated_file(file_path): continue if file_path.endswith('_test.py'): py_changed_tests.append(file_path) else: py_changed_nontests.append(file_path) if not os.path.abspath(file_path).startswith(module_path): py_changed_noncf_module.append(file_path) # Use --score no to make output less noisy. base_pylint_cmd = 'pylint --score=no --jobs=0' # Test for existence of files before running tools to avoid errors from # misusing the tools. if py_changed_nontests: _execute_command_and_track_error( f'{base_pylint_cmd} --ignore=protos,tests,grammars clusterfuzz ' + ' '.join(py_changed_noncf_module)) if py_changed_tests: _execute_command_and_track_error( f'{base_pylint_cmd} --ignore=protos,grammars --max-line-length=240 ' '--disable no-member clusterfuzz._internal.tests') py_changed_file_paths = py_changed_nontests + py_changed_tests if py_changed_file_paths: _execute_command_and_track_error( f'yapf -p -d {" ".join(py_changed_file_paths)}') _execute_command_and_track_error(f'{formatter.ISORT_CMD} -c ' f'{" ".join(py_changed_file_paths)}') for file_path in py_changed_file_paths: py_test_init_check(file_path) golint_path = os.path.join('local', 'bin', 'golint') for file_path in go_changed_file_paths: if not os.path.basename(file_path) in _GOLINT_EXCEPTIONS: _execute_command_and_track_error(golint_path + ' ' + file_path) output = _execute_command_and_track_error('gofmt -d ' + file_path) if output.strip(): _error() for file_path in yaml_changed_file_paths: yaml_validate(file_path) for file_path in file_paths: license_validate(file_path) if _error_occurred: print('Linting failed, see errors above.') sys.exit(1) else: print('Linting passed.')
Remove zip and manifest file.
def _clear_zip(target_zip_path):
  """Remove zip and manifest file."""
  if os.path.exists(constants.PACKAGE_TARGET_MANIFEST_PATH):
    os.remove(constants.PACKAGE_TARGET_MANIFEST_PATH)

  if os.path.exists(target_zip_path):
    os.remove(target_zip_path)
Add the src_file_path to the output_file with the right target path.
def _add_to_zip(output_file, src_file_path, dest_file_path=None):
  """Add the src_file_path to the output_file with the right target path."""
  if dest_file_path is None:
    dest_file_path = src_file_path

  output_file.write(src_file_path, os.path.join('clusterfuzz', dest_file_path))
Check if node is of version MINIMUM_NODEJS_VERSION.
def _is_nodejs_up_to_date():
  """Check if node is of version MINIMUM_NODEJS_VERSION."""
  return_code, output = common.execute('node -v')
  if return_code != 0:
    return False

  m = re.match(br'v([0-9]+)\..+', output.strip())
  if not m:
    return False

  major_version = int(m.group(1))
  return major_version >= MIN_SUPPORTED_NODEJS_VERSION
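For illustration, the same regex applied to a few sample `node -v` outputs; the version strings below are made up.

import re

for sample in (b'v18.17.1\n', b'v4.2.6\n', b'garbage\n'):
  m = re.match(br'v([0-9]+)\..+', sample.strip())
  print(int(m.group(1)) if m else None)
# -> 18, 4, None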
Iterate through files in path.
def _get_files(path):
  """Iterate through files in path."""
  for root, _, filenames in os.walk(path):
    for filename in filenames:
      if filename.endswith('.pyc') or (os.sep + '.git') in root:
        continue

      yield os.path.join(root, filename)
Prepare clusterfuzz-source.zip.
def package(revision, target_zip_dir=constants.PACKAGE_TARGET_ZIP_DIRECTORY, target_manifest_path=constants.PACKAGE_TARGET_MANIFEST_PATH, platform_name=None, python3=False): """Prepare clusterfuzz-source.zip.""" is_ci = os.getenv('TEST_BOT_ENVIRONMENT') if not is_ci and common.is_git_dirty(): print('Your branch is dirty. Please fix before packaging.') sys.exit(1) if not _is_nodejs_up_to_date(): print('You do not have nodejs, or your nodejs is not at least version 4.') sys.exit(1) common.install_dependencies(platform_name=platform_name) # This needs to be done before packaging step to let src/appengine/config be # archived for bot. appengine.symlink_dirs() _, ls_files_output = common.execute('git -C . ls-files', print_output=False) file_paths = [path.decode('utf-8') for path in ls_files_output.splitlines()] if not os.path.exists(target_zip_dir): os.makedirs(target_zip_dir) target_zip_name = constants.LEGACY_ZIP_NAME if platform_name: if python3: target_zip_name = platform_name + '-3.zip' else: target_zip_name = platform_name + '.zip' target_zip_path = os.path.join(target_zip_dir, target_zip_name) _clear_zip(target_zip_path) output_file = zipfile.ZipFile(target_zip_path, 'w', zipfile.ZIP_DEFLATED) # Add files from git. for file_path in file_paths: if (file_path.startswith('config') or file_path.startswith('local') or file_path.startswith(os.path.join('src', 'appengine')) or file_path.startswith(os.path.join('src', 'local')) or file_path.startswith( os.path.join('src', 'clusterfuzz', '_internal', 'tests'))): continue _add_to_zip(output_file, file_path) # These are project configuration yamls. for path in _get_files(os.path.join('src', 'appengine', 'config')): _add_to_zip(output_file, path) # These are third party dependencies. for path in _get_files(os.path.join('src', 'third_party')): _add_to_zip(output_file, path) output_file.close() with open(target_manifest_path, 'w') as f: f.write('%s\n' % revision) with zipfile.ZipFile(target_zip_path, 'a', zipfile.ZIP_DEFLATED) as f: _add_to_zip(f, target_manifest_path, constants.PACKAGE_TARGET_MANIFEST_PATH) print('Revision: %s' % revision) print() print('%s is ready.' % target_zip_path) return target_zip_path
Initialise test worker process.
def test_worker_init():
  """Initialise test worker process."""
  if platform.system() != 'Windows':
    # Prevent KeyboardInterrupt error output.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
Test worker.
def run_one_test_parallel(args): """Test worker.""" try: os.environ['PARALLEL_TESTS'] = '1' test_modules, suppress_output = args suite = unittest.loader.TestLoader().loadTestsFromNames(test_modules) stream = io.StringIO() # Verbosity=0 since we cannot see real-time test execution order when tests # are executed in parallel. tests = ', '.join(test_modules) print('Running', tests) result = unittest.TextTestRunner( stream=stream, verbosity=0, buffer=suppress_output).run(suite) print('Done running', tests) stream.flush() value = stream.getvalue() return TestResult(value, len(result.errors), len(result.failures), len(result.skipped), result.testsRun) except BaseException: # Print exception traceback here, as it will be lost otherwise. traceback.print_exc() raise
Run tests (single CPU).
def run_tests_single_core(args, test_directory, top_level_dir): """Run tests (single CPU).""" suites = unittest.loader.TestLoader().discover( test_directory, pattern=args.pattern, top_level_dir=top_level_dir) # TODO(mbarbella): Re-implement code coverage after migrating to Python 3. # Verbosity=2 since we want to see real-time test execution with test name # and result. result = TrackedTestRunner( verbosity=2, buffer=not args.unsuppress_output).run(suites) if result.errors or result.failures: sys.exit(1)
Run tests (multiple CPUs).
def run_tests_parallel(args, test_directory, top_level_dir): """Run tests (multiple CPUs).""" suites = unittest.loader.TestLoader().discover( test_directory, pattern=args.pattern, top_level_dir=top_level_dir) test_classes = [] # pylint: disable=protected-access for suite in suites: for subsuite in suite._tests: # pylint: disable=protected-access # According to: # https://github.com/python/cpython/blob/2.7/Lib/unittest/loader.py#L24, # this is how we can get a ModuleImportFailure error. if subsuite.__class__.__name__ == 'ModuleImportFailure': unittest.TextTestRunner(verbosity=1).run(subsuite) raise RuntimeError('A failure occurred while importing the module.') try: for test_class in subsuite._tests: # pylint: disable=protected-access test_classes.append((test_class.__module__, test_class.__class__.__name__)) except AttributeError: subsuite.debug() test_classes = sorted(test_classes) test_modules = [] for module_path, _ in itertools.groupby(test_classes, key=lambda k: k[0]): test_modules.append(module_path) test_modules = sorted(test_modules) cpu_count = multiprocessing.cpu_count() pool = multiprocessing.Pool(cpu_count, test_worker_init) total_result = TestResult('', 0, 0, 0, 0) # partition tests test_args = [] tests_per_cpu = max(1, len(test_modules) // cpu_count) for i in range(0, len(test_modules), tests_per_cpu): group = test_modules[i:i + tests_per_cpu] test_args.append((group, not args.unsuppress_output)) results = pool.map_async(run_one_test_parallel, test_args) while True: try: # KeyboardInterrupt never gets raised unless we pass a timeout. results = results.get(timeout=TESTS_TIMEOUT) break except KeyboardInterrupt: pool.terminate() pool.join() sys.exit(1) pool.close() pool.join() for result in results: if result.num_failures or result.num_errors: print(result.output) total_result.num_errors += result.num_errors total_result.num_failures += result.num_failures total_result.num_skipped += result.num_skipped total_result.total_run += result.total_run print('Ran %d tests (%d skipped, %d errors, %d failures).' % (total_result.total_run, total_result.num_skipped, total_result.num_errors, total_result.num_failures)) if total_result.num_errors or total_result.num_failures: sys.exit(1)
Run Python unit tests. For unit tests involving App Engine, sys.path needs certain modifications.
def execute(args): """Run Python unit tests. For unittests involved appengine, sys.path needs certain modification.""" os.environ['PY_UNITTESTS'] = 'True' if os.getenv('INTEGRATION') or os.getenv('UNTRUSTED_RUNNER_TESTS'): # Set up per-user buckets used by integration tests. os.environ['CORPUS_BUCKET'] = common.test_bucket('TEST_CORPUS_BUCKET') os.environ['QUARANTINE_BUCKET'] = common.test_bucket( 'TEST_QUARANTINE_BUCKET') os.environ['BACKUP_BUCKET'] = common.test_bucket('TEST_BACKUP_BUCKET') os.environ['COVERAGE_BUCKET'] = common.test_bucket('TEST_COVERAGE_BUCKET') # Kill leftover instances of emulators and dev appserver. common.kill_leftover_emulators() # Don't use absolute paths to make it easier to compare results in tests. os.environ['CONFIG_DIR_OVERRIDE'] = os.path.join('.', 'configs', 'test') top_level_dir = os.path.join('src', 'clusterfuzz', '_internal') if args.target == 'appengine': # Build template files. appengine.build_templates() test_directory = APPENGINE_TEST_DIRECTORY sys.path.insert(0, os.path.abspath(os.path.join('src', 'appengine'))) for i, path in enumerate(sys.path): if 'third_party' in path: # Replace third_party with App Engine third_party/. sys.path[i] = os.path.abspath( os.path.join('src', 'appengine', 'third_party')) elif args.target == 'core': test_directory = CORE_TEST_DIRECTORY else: # Config module tests. os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir test_directory = os.path.join(args.config_dir, 'modules') top_level_dir = None # Modules may use libs from our App Engine directory. sys.path.insert(0, os.path.abspath(os.path.join('src', 'appengine'))) # Fix paths again to get config modules added to the import path. from clusterfuzz._internal.base import modules modules.fix_module_search_paths() # Set expected environment variables. local_config.ProjectConfig().set_environment() # Needed for NDB to work with cloud datastore emulator. os.environ['DATASTORE_USE_PROJECT_ID_AS_APP_ID'] = 'true' if args.verbose: # Force logging to console for this process and child processes. os.environ['LOG_TO_CONSOLE'] = 'True' else: # Disable logging. logging.disable(logging.CRITICAL) if args.pattern is None: args.pattern = '*_test.py' if args.parallel: run_tests_parallel(args, test_directory, top_level_dir) else: run_tests_single_core(args, test_directory, top_level_dir)
Get a Handler class given arguments.
def _get_handler_ctor(args):
  """Get a Handler class given arguments."""
  if 'linux' in args.instance_name:
    return linux.Handler

  if 'windows' in args.instance_name:
    return windows.Handler

  if 'golo' in args.instance_name:
    return mac.Handler

  if 'android-build' in args.instance_name:
    return android_chrome_lab.Handler

  raise NotImplementedError('Unsupported platform.')
Convert args to a dict that is compatible with the method's arguments.
def _args_to_dict(args, method):
  """Convert args to a dict that is compatible with the method's arguments."""
  arg_names = inspect.getfullargspec(method).args[1:]
  args_dict = {
      k: v for k, v in vars(args).items() if k in arg_names and v is not None
  }
  return args_dict
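To see the filtering in action, here is a self-contained sketch using argparse.Namespace and a hypothetical Handler class (not one of the real handlers imported above):

import argparse
import inspect


class Handler:
  # Hypothetical handler, used only to show how arguments are filtered.
  def __init__(self, instance_name, zone=None):
    self.instance_name = instance_name
    self.zone = zone


args = argparse.Namespace(instance_name='linux-bot-1', zone=None, remote='tail')
arg_names = inspect.getfullargspec(Handler.__init__).args[1:]  # Skip `self`.
kwargs = {k: v for k, v in vars(args).items()
          if k in arg_names and v is not None}
print(kwargs)  # -> {'instance_name': 'linux-bot-1'}; zone is None, remote unknown.
print(Handler(**kwargs).instance_name)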
Run command-line tasks on a remote bot.
def execute(args):
  """Run command-line tasks on a remote bot."""
  handler_ctor = _get_handler_ctor(args)
  handler = handler_ctor(**_args_to_dict(args, handler_ctor.__init__))

  method = getattr(handler, args.remote)
  method(**_args_to_dict(args, method))
Run Python unit tests under v2. For unit tests involving App Engine, sys.path needs certain modifications.
def execute(args): """Run Python unit tests under v2. For unittests involved appengine, sys.path needs certain modification.""" print(args.script_args) sys.path.insert(0, os.path.abspath(os.path.join('src', 'appengine'))) sys.path.insert( 0, os.path.abspath(os.path.join('src', 'appengine', 'third_party'))) os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir local_config.ProjectConfig().set_environment() if args.local: os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST os.environ['DATASTORE_USE_PROJECT_ID_AS_APP_ID'] = 'true' os.environ['LOCAL_DEVELOPMENT'] = 'True' if not args.non_dry_run: print('Running in dry-run mode, no datastore writes are committed. ' 'For permanent modifications, re-run with --non-dry-run.') with ndb_init.context(): script = importlib.import_module(f'local.butler.scripts.{args.script_name}') script.execute(args) if not args.local: print() print('Please remember to run the migration individually on all projects.') print()
Set up the bot directory.
def _setup_bot_directory(args): """Set up the bot directory.""" appengine.symlink_config_dir() src_root_dir = os.path.abspath('.') if os.path.exists(args.directory): print('Bot directory already exists. Re-using...') else: print('Creating new CF bot directory...') os.makedirs(args.directory) clusterfuzz_dir = os.path.join(args.directory, 'clusterfuzz') bot_src_dir = os.path.join(clusterfuzz_dir, 'src') if not os.path.exists(clusterfuzz_dir): os.makedirs(clusterfuzz_dir) os.mkdir(bot_src_dir) common.update_dir( os.path.join(src_root_dir, 'src', 'appengine'), os.path.join(bot_src_dir, 'appengine')) common.update_dir( os.path.join(src_root_dir, 'src', 'python'), os.path.join(bot_src_dir, 'python')) common.update_dir( os.path.join(src_root_dir, 'src', 'clusterfuzz'), os.path.join(bot_src_dir, 'clusterfuzz')) common.update_dir( os.path.join(src_root_dir, 'src', 'third_party'), os.path.join(bot_src_dir, 'third_party')) common.update_dir( os.path.join(src_root_dir, 'resources'), os.path.join(clusterfuzz_dir, 'resources')) common.update_dir( os.path.join(src_root_dir, 'bot'), os.path.join(clusterfuzz_dir, 'bot'))
Set up environment variables and configuration files.
def _setup_environment_and_configs(args, appengine_path): """Set up environment variables and configuration files.""" clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz')) # Matches startup scripts. os.environ['PYTHONPATH'] = ':'.join([ os.getenv('PYTHONPATH', ''), appengine_path, os.path.join(clusterfuzz_dir, 'src'), ]) os.environ['ROOT_DIR'] = clusterfuzz_dir if not os.getenv('BOT_NAME'): os.environ['BOT_NAME'] = args.name os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format( os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal', 'scripts'), os.getenv('LD_LIBRARY_PATH', '')) tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir') if not os.path.exists(tmpdir): os.mkdir(tmpdir) os.environ['TMPDIR'] = tmpdir os.environ['BOT_TMPDIR'] = tmpdir os.environ['KILL_STALE_INSTANCES'] = 'False' os.environ['LOCAL_DEVELOPMENT'] = 'True' os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST os.environ['APPLICATION_ID'] = constants.TEST_APP_ID if not os.getenv('UNTRUSTED_WORKER'): local_gcs_buckets_path = os.path.abspath( os.path.join(args.server_storage_path, 'local_gcs')) assert os.path.exists(local_gcs_buckets_path), ( 'Server storage path not found, make sure to start run_server with ' 'the same storage path.') os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path if args.android_serial: if not os.getenv('OS_OVERRIDE'): os.environ['OS_OVERRIDE'] = 'ANDROID' os.environ['ANDROID_SERIAL'] = args.android_serial
Run the bot.
def execute(args): """Run the bot.""" appengine_path = appengine.find_sdk_path() _setup_bot_directory(args) _setup_environment_and_configs(args, appengine_path) exit_code = 0 try: os.chdir(os.path.join(args.directory, 'clusterfuzz')) proc = common.execute_async('python src/python/bot/startup/run_bot.py') def _stop_handler(*_): print('Bot has been stopped. Exit.') proc.kill() signal.signal(signal.SIGTERM, _stop_handler) common.process_proc_output(proc) exit_code = proc.wait() except KeyboardInterrupt: _stop_handler() # exit_code should be 0 when Ctrl-Ced. return exit_code
Bootstrap the DB.
def bootstrap_db():
  """Bootstrap the DB."""

  def bootstrap():
    # Wait for the server to run.
    time.sleep(10)
    print('Bootstrapping datastore...')
    common.execute(
        ('python butler.py run setup '
         '--non-dry-run --local --config-dir={config_dir}'
        ).format(config_dir=constants.TEST_CONFIG_DIR),
        exit_on_error=False)

  thread = threading.Thread(target=bootstrap)
  thread.start()
Create a local bucket.
def create_local_bucket(local_gcs_buckets_path, name):
  """Create a local bucket."""
  blobs_bucket = os.path.join(local_gcs_buckets_path, name)
  if not os.path.exists(blobs_bucket):
    os.mkdir(blobs_bucket)
Bootstrap GCS.
def bootstrap_gcs(storage_path): """Bootstrap GCS.""" local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs') if not os.path.exists(local_gcs_buckets_path): os.mkdir(local_gcs_buckets_path) config = local_config.ProjectConfig() test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET') if test_blobs_bucket: create_local_bucket(local_gcs_buckets_path, test_blobs_bucket) else: create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket')) create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket')) create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket')) create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket')) create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket')) create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET')) create_local_bucket(local_gcs_buckets_path, config.get('env.QUARANTINE_BUCKET')) create_local_bucket(local_gcs_buckets_path, config.get('env.FUZZ_LOGS_BUCKET')) # Symlink local GCS bucket path to appengine src dir to bypass sandboxing # issues. common.symlink( src=local_gcs_buckets_path, target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))
Start threads to trigger essential cron jobs.
def start_cron_threads(): """Start threads to trigger essential cron jobs.""" request_timeout = 10 * 60 # 10 minutes. def trigger(interval_seconds, target): """Trigger a cron job.""" while True: time.sleep(interval_seconds) try: url = 'http://{host}/{target}'.format( host=constants.CRON_SERVICE_HOST, target=target) request = urllib.request.Request(url) request.add_header('X-Appengine-Cron', 'true') response = urllib.request.urlopen(request, timeout=request_timeout) response.read(60) # wait for request to finish. except Exception: continue crons = ( (90, 'cleanup'), (60, 'triage'), (6 * 3600, 'schedule-progression-tasks'), (12 * 3600, 'schedule-corpus-pruning'), ) for interval, cron in crons: thread = threading.Thread(target=trigger, args=(interval, cron)) thread.daemon = True thread.start()
Run the server.
def execute(args): """Run the server.""" os.environ['LOCAL_DEVELOPMENT'] = 'True' common.kill_leftover_emulators() if not args.skip_install_deps: common.install_dependencies() # Do this everytime as a past deployment might have changed these. appengine.symlink_dirs() # Deploy all yaml files from test project for basic appengine deployment and # local testing to work. This needs to be called on every iteration as a past # deployment might have overwritten or deleted these config files. yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3') appengine.copy_yamls_and_preprocess(yaml_paths) # Build templates. appengine.build_templates() # Clean storage directory if needed. if args.bootstrap or args.clean: if os.path.exists(args.storage_path): print('Clearing local datastore by removing %s.' % args.storage_path) shutil.rmtree(args.storage_path) if not os.path.exists(args.storage_path): os.makedirs(args.storage_path) # Set up local GCS buckets and symlinks. bootstrap_gcs(args.storage_path) # Start pubsub emulator. pubsub_emulator = test_utils.start_cloud_emulator( 'pubsub', args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST], data_dir=args.storage_path) test_utils.setup_pubsub(constants.TEST_APP_ID) # Start Datastore emulator datastore_emulator = test_utils.start_cloud_emulator( 'datastore', args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST], data_dir=args.storage_path, store_on_disk=True) # Start our custom GCS emulator. local_gcs = common.execute_async( 'go run emulators/gcs.go -storage-path=' + os.path.join( os.path.abspath(args.storage_path), 'local_gcs'), cwd='local') if args.bootstrap: bootstrap_db() start_cron_threads() os.environ['APPLICATION_ID'] = constants.TEST_APP_ID os.environ['LOCAL_DEVELOPMENT'] = 'True' os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs' os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST os.environ['GAE_ENV'] = 'dev' cron_server = common.execute_async( f'gunicorn -b 127.0.0.1:{constants.CRON_SERVICE_PORT} main:app', cwd=os.path.join('src', 'appengine')) try: common.execute( f'gunicorn -b 127.0.0.1:{constants.DEV_APPSERVER_PORT} main:app', cwd=os.path.join('src', 'appengine')) except KeyboardInterrupt: print('Server has been stopped. Exit.') cron_server.terminate() datastore_emulator.cleanup() pubsub_emulator.cleanup() local_gcs.terminate()
Queries Datastore for matching FuzzerJob entries.
def _query_fuzzer_jobs(
    platforms: Optional[Sequence[str]] = None,
    fuzzers: Optional[Sequence[str]] = None,
    jobs: Optional[Sequence[str]] = None,
) -> Sequence[data_types.FuzzerJob]:
  """Queries Datastore for matching FuzzerJob entries."""
  query = data_types.FuzzerJob.query()

  if platforms:
    query = query.filter(
        data_types.FuzzerJob.platform.IN([p.upper() for p in platforms]))

  if fuzzers:
    query = query.filter(data_types.FuzzerJob.fuzzer.IN(fuzzers))

  if jobs:
    query = query.filter(data_types.FuzzerJob.job.IN(jobs))

  return query
Queries Datastore for matching FuzzTargetJob entries.
def _query_fuzz_target_jobs(
    targets: Optional[Sequence[str]] = None,
    jobs: Optional[Sequence[str]] = None,
    engines: Optional[Sequence[str]] = None,
) -> Sequence[data_types.FuzzTargetJob]:
  """Queries Datastore for matching FuzzTargetJob entries."""
  query = data_types.FuzzTargetJob.query()

  if targets:
    query = query.filter(data_types.FuzzTargetJob.fuzz_target_name.IN(targets))

  if jobs:
    query = query.filter(data_types.FuzzTargetJob.job.IN(jobs))

  if engines:
    query = query.filter(data_types.FuzzTargetJob.engine.IN(engines))

  return query
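Both query helpers return an ndb query that callers can iterate over directly. A minimal usage sketch, assuming a configured ndb context; the platform and fuzzer names below are purely illustrative:

# Illustrative only: 'linux' and 'libFuzzer' are hypothetical filter values.
for fuzzer_job in _query_fuzzer_jobs(platforms=['linux'],
                                     fuzzers=['libFuzzer']):
  print(fuzzer_job.fuzzer, fuzzer_job.job, fuzzer_job.actual_weight)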
Lists the given FuzzerJob entries on stdout.
def _display_fuzzer_jobs(fuzzer_jobs: Sequence[data_types.FuzzerJob],
                         prefix='') -> None:
  """Lists the given FuzzerJob entries on stdout."""
  printer = _print_with_prefix(prefix)

  fuzzer_jobs = list(fuzzer_jobs)
  fuzzer_jobs.sort(key=lambda fj: fj.actual_weight, reverse=True)
  total_weight = _sum_weights(fuzzer_jobs)

  for fuzzer_job in fuzzer_jobs:
    probability = fuzzer_job.actual_weight / total_weight
    printer('FuzzerJob:')
    printer(f'  Fuzzer: {fuzzer_job.fuzzer}')
    printer(f'  Job: {fuzzer_job.job}')
    printer(f'  Platform: {fuzzer_job.platform}')
    printer(f'  Weight: {fuzzer_job.actual_weight} = ' +
            f'{fuzzer_job.weight} * {fuzzer_job.multiplier}')
    printer(f'  Probability: {_display_prob(probability)}')

  printer(f'Count: {len(fuzzer_jobs)}')
  printer(f'Total weight: {total_weight}')
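The probability printed for each entry is just its actual weight normalized by the total weight of the listing. A minimal sketch of that computation, using hypothetical weights in place of FuzzerJob entities:

# Hypothetical weights standing in for FuzzerJob.actual_weight values.
weights = [5.0, 3.0, 2.0]
total_weight = sum(weights)                          # 10.0
probabilities = [w / total_weight for w in weights]  # [0.5, 0.3, 0.2]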
Lists the given FuzzerJobs entries on stdout.
def _display_fuzzer_jobs_batches(
    batches: Sequence[data_types.FuzzerJobs]) -> None:
  """Lists the given FuzzerJobs entries on stdout."""
  count = 0
  for batch in batches:
    count += 1
    print('FuzzerJobs:')
    print(f'  ID: {batch.key.id()}')
    print(f'  Platform: {batch.platform}')
    _display_fuzzer_jobs(batch.fuzzer_jobs, prefix='  ')

  print(f'Count: {count}')
Lists the given FuzzTargetJob entries on stdout.
def _display_fuzz_target_jobs(
    fuzz_target_jobs: Sequence[data_types.FuzzTargetJob]) -> None:
  """Lists the given FuzzTargetJob entries on stdout."""
  fuzz_target_jobs = list(fuzz_target_jobs)
  fuzz_target_jobs.sort(key=lambda ftj: ftj.weight, reverse=True)
  total_weight = sum(ftj.weight for ftj in fuzz_target_jobs)

  for ftj in fuzz_target_jobs:
    probability = ftj.weight / total_weight
    print('FuzzTargetJob:')
    print(f'  Fuzz target name: {ftj.fuzz_target_name}')
    print(f'  Job: {ftj.job}')
    print(f'  Engine: {ftj.engine}')
    print(f'  Weight: {ftj.weight}')
    print(f'  Relative probability: {_display_prob(probability)}')
    print(f'  Last run: {ftj.last_run}')

  print(f'Count: {len(fuzz_target_jobs)}')
  print(f'Total weight: {total_weight}')
Converts the given FuzzerJob to a dictionary of CSV column values.
def _fuzzer_job_to_dict(
    fuzzer_job: data_types.FuzzerJob) -> Dict[str, Union[str, float]]:
  """Converts the given FuzzerJob to a dictionary of CSV column values."""
  return {
      'fuzzer': fuzzer_job.fuzzer,
      'job': fuzzer_job.job,
      'platform': fuzzer_job.platform,
      'weight': fuzzer_job.weight,
      'multiplier': fuzzer_job.multiplier,
      'actual_weight': fuzzer_job.actual_weight,
  }
Dumps the provided FuzzerJob entries to stdout in CSV format.
def _dump_fuzzer_jobs(fuzzer_jobs: Sequence[data_types.FuzzerJob]) -> None:
  """Dumps the provided FuzzerJob entries to stdout in CSV format."""
  writer = csv.DictWriter(sys.stdout, fieldnames=_FUZZER_JOB_FIELDS)
  writer.writeheader()

  for fuzzer_job in fuzzer_jobs:
    writer.writerow(_fuzzer_job_to_dict(fuzzer_job))
Dumps the provided FuzzerJobs entries to stdout in CSV format.
def _dump_fuzzer_jobs_batches(batches: Sequence[data_types.FuzzerJobs]) -> None:
  """Dumps the provided FuzzerJobs entries to stdout in CSV format."""
  writer = csv.DictWriter(
      sys.stdout, fieldnames=['batch'] + _FUZZER_JOB_FIELDS)
  writer.writeheader()

  for batch in batches:
    for fuzzer_job in batch.fuzzer_jobs:
      fields = _fuzzer_job_to_dict(fuzzer_job)
      fields['batch'] = batch.key.id()
      writer.writerow(fields)
Dumps the provided FuzzTargetJob entries to stdout in CSV format.
def _dump_fuzz_target_jobs(
    fuzz_target_jobs: Sequence[data_types.FuzzTargetJob]) -> None:
  """Dumps the provided FuzzTargetJob entries to stdout in CSV format."""
  writer = csv.DictWriter(
      sys.stdout,
      fieldnames=[
          'fuzz_target_name',
          'job',
          'engine',
          'weight',
          'last_run',
      ])
  writer.writeheader()

  for entry in fuzz_target_jobs:
    writer.writerow({
        'fuzz_target_name': entry.fuzz_target_name,
        'job': entry.job,
        'engine': entry.engine,
        'weight': entry.weight,
        'last_run': entry.last_run,
    })
Returns whether the given FuzzerJob matches the given optional filters.
def _fuzzer_job_matches(
    fuzzer_job: data_types.FuzzerJob,
    fuzzers: Optional[Sequence[str]],
    jobs: Optional[Sequence[str]],
) -> bool:
  """Returns whether the given FuzzerJob matches the given optional filters."""
  if fuzzers and fuzzer_job.fuzzer not in fuzzers:
    return False

  if jobs and fuzzer_job.job not in jobs:
    return False

  return True
Helper for `_aggregate_fuzzer_jobs()`.
def _print_stats(fuzzer_jobs: List[data_types.FuzzerJob],
                 total_weight: float) -> None:
  """Helper for `_aggregate_fuzzer_jobs()`."""
  weight = _sum_weights(fuzzer_jobs)
  probability = weight / total_weight

  print(f'  Count: {len(fuzzer_jobs)}')
  print(f'  Total weight: {weight}')
  print(f'  Total probability: {_display_prob(probability)}')

  # New in Python 3.8. We appease the linter by disabling `no-member` below.
  if len(fuzzer_jobs) < 2 or not hasattr(statistics, 'quantiles'):
    return

  # `quantiles()` returns n-1 cut points between n quantiles.
  # `weight_deciles[0]` separates the first from the second decile, i.e. it is
  # the 10th percentile value. `weight_deciles[i]` is the (i+1)*10-th.
  weight_deciles = statistics.quantiles(_iter_weights(fuzzer_jobs), n=10)  # pylint: disable=no-member
  weight_median = weight_deciles[4]
  weight_90p = weight_deciles[8]
  prob_median = weight_median / total_weight
  prob_90p = weight_90p / total_weight

  print(f'  Median weight: {weight_median}')
  print(f'  Median probability: {_display_prob(prob_median)}')
  print(f'  90th percentile weight: {weight_90p}')
  print(f'  90th percentile probability: {_display_prob(prob_90p)}')
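The decile indexing above relies on statistics.quantiles() returning n - 1 cut points. A small self-contained illustration of that behavior; the data values are arbitrary:

import statistics

# With n=10, quantiles() returns 9 cut points splitting the data into deciles.
deciles = statistics.quantiles(range(1, 101), n=10)
assert len(deciles) == 9
print(deciles[4])  # Approximates the 50th percentile (median).
print(deciles[8])  # Approximates the 90th percentile.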
Aggregates statistics for matching and non-matching FuzzerJob entries.
def _aggregate_fuzzer_jobs(
    platform: str,
    fuzzers: Optional[Sequence[str]] = None,
    jobs: Optional[Sequence[str]] = None,
) -> None:
  """Aggregates statistics for matching and non-matching FuzzerJob entries."""
  fuzzer_jobs = list(_query_fuzzer_jobs(platforms=[platform.upper()]))
  total_weight = _sum_weights(fuzzer_jobs)

  matches = []
  others = []
  for fuzzer_job in fuzzer_jobs:
    if _fuzzer_job_matches(fuzzer_job, fuzzers, jobs):
      matches.append(fuzzer_job)
    else:
      others.append(fuzzer_job)

  print('Matching FuzzerJob entries:')
  _print_stats(matches, total_weight)

  print('Other FuzzerJob entries:')
  _print_stats(others, total_weight)
Sets the matching FuzzTargetJob's weight to the given value.
def _set_fuzz_target_job_weight(
    fuzz_target_name: str,
    job: str,
    weight: float,
) -> None:
  """Sets the matching FuzzTargetJob's weight to the given value."""
  key = ndb.Key(data_types.FuzzTargetJob,
                data_types.fuzz_target_job_key(fuzz_target_name, job))
  ftj = key.get()
  if ftj is None:
    print(f'No FuzzTargetJob entry found for key {key.id()}.')
    return

  print(f'Fuzz target name: {ftj.fuzz_target_name}')
  print(f'Job: {ftj.job}')
  print(f'Engine: {ftj.engine}')
  print(f'Last run: {ftj.last_run}')
  print(f'Old weight: {ftj.weight}')
  print(f'-> New weight: {weight}')

  answer = input('Do you want to apply this mutation? [y,n] ')
  if answer.lower() != 'y':
    print('Not applying mutation.')
    return

  ftj.weight = weight
  ftj.put()
  print('Mutation applied.')
Executes the `fuzzer` command.
def _execute_fuzzer_command(args) -> None:
  """Executes the `fuzzer` command."""
  cmd = args.fuzzer_command
  if cmd == 'platforms':
    _display_platforms()
  elif cmd == 'list':
    fuzzer_jobs = _query_fuzzer_jobs(
        platforms=args.platforms, fuzzers=args.fuzzers, jobs=args.jobs)
    if args.format == 'text':
      _display_fuzzer_jobs(fuzzer_jobs)
    elif args.format == 'csv':
      _dump_fuzzer_jobs(fuzzer_jobs)
    else:
      raise TypeError(f'--format {repr(args.format)} unrecognized')
  elif cmd == 'aggregate':
    _aggregate_fuzzer_jobs(args.platform, fuzzers=args.fuzzers, jobs=args.jobs)
  else:
    raise TypeError(f'weights fuzzer command {repr(cmd)} unrecognized')
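For illustration, the dispatcher can be exercised directly with a namespace object that mimics parsed arguments; the attribute names below mirror the ones read by _execute_fuzzer_command() above, while the real flag spellings live in the argument parser, which is not shown here:

import argparse

# Hypothetical parsed arguments; the values are illustrative only.
args = argparse.Namespace(
    fuzzer_command='list',
    platforms=['linux'],
    fuzzers=None,
    jobs=None,
    format='csv')
_execute_fuzzer_command(args)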