response | instruction |
---|---|
Executes the `fuzzer-batch` command. | def _execute_fuzzer_batch_command(args) -> None:
"""Executes the `fuzzer-batch` command."""
cmd = args.fuzzer_batch_command
if cmd == 'list':
batches = _query_fuzzer_jobs_batches(platforms=args.platforms)
if args.format == 'text':
_display_fuzzer_jobs_batches(batches)
elif args.format == 'csv':
_dump_fuzzer_jobs_batches(batches)
else:
raise TypeError(f'--format {repr(args.format)} unrecognized')
else:
raise TypeError(f'weights fuzzer-batch command {repr(cmd)} unrecognized') |
Executes the `fuzz-target` command. | def _execute_fuzz_target_command(args) -> None:
"""Executes the `fuzz-target` command."""
cmd = args.fuzz_target_command
if cmd == 'list':
fuzz_target_jobs = _query_fuzz_target_jobs(
targets=args.targets, jobs=args.jobs, engines=args.engines)
if args.format == 'text':
_display_fuzz_target_jobs(fuzz_target_jobs)
elif args.format == 'csv':
_dump_fuzz_target_jobs(fuzz_target_jobs)
else:
raise TypeError(f'--format {repr(args.format)} unrecognized')
elif cmd == 'set':
_set_fuzz_target_job_weight(args.target, args.job, args.weight)
else:
raise TypeError(f'weights fuzz-target command {repr(cmd)} unrecognized') |
Executes the `weights` command. | def _execute_command(args) -> None:
"""Executes the `weights` command."""
cmd = args.weights_command
if cmd == 'fuzzer':
_execute_fuzzer_command(args)
elif cmd == 'fuzzer-batch':
_execute_fuzzer_batch_command(args)
elif cmd == 'fuzz-target':
_execute_fuzz_target_command(args)
else:
raise TypeError(f'weights command {repr(cmd)} unrecognized') |
Entrypoint from butler.py. | def execute(args) -> None:
"""Entrypoint from butler.py."""
os.environ['CONFIG_DIR_OVERRIDE'] = args.config_dir
local_config.ProjectConfig().set_environment()
with ndb_init.context():
_execute_command(args) |
An example for changing testcase's attributes. | def populate_example(testcase): # pylint: disable=unused-argument
"""An example for changing testcase's attributes.""" |
Build attributes for one testcase. Return true if the entity is
modified. | def populate(testcase):
"""Build attributes for one testcase. Return true if the entity is
modified."""
populate_example(testcase)
testcase.populate_indices()
# The number of testcases is low enough; we can mark every entity as
# modified.
return True |
Query Testcases of the given projects,
and conditionally file them to the corresponding GitHub repo. | def execute(args):
"""Query Testcases of the given projects,
and conditionally file them to the corresponding GitHub repo."""
if not args.script_args:
print('Need at least one project name (with -p) '
'when running the backfiler.')
return
for project_name in args.script_args:
print(f'Back filing project {project_name}')
for testcase in data_types.Testcase.query(
ndb_utils.is_true(data_types.Testcase.open),
ndb_utils.is_false(data_types.Testcase.one_time_crasher_flag),
data_types.Testcase.status == 'Processed',
data_types.Testcase.project_name == project_name,
):
if not testcase.bug_information:
print(f'Skip testcase without bugs: {testcase.key.id()}')
continue
print(f'Back filing testcase id: {testcase.key.id()}')
if args.non_dry_run:
oss_fuzz_github.file_issue(testcase)
testcase.put() |
Query in batches. | def iterate(query, batch_size):
"""Query in batches."""
count = 0
batch = []
for item in query:
batch.append(item)
count += 1
if len(batch) >= batch_size:
yield batch
batch = []
if batch:
yield batch |
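A minimal usage sketch for the batcher above (not from the original source): a plain range stands in for a Datastore query, since iterate() only needs an iterable, and the batch size of 3 is arbitrary.

for batch in iterate(range(7), 3):
    print(batch)  # prints [0, 1, 2], then [3, 4, 5], then the partial batch [6]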
Convert a db.Model instance to a dict. | def to_dict(entity):
"""Convert a db.Model instance to a dict."""
entity_dict = entity.to_dict()
entity_dict['id'] = entity.key.id()
for k, v in entity_dict.items():
if isinstance(v, datetime.datetime):
entity_dict[k] = utils.utc_datetime_to_timestamp(v)
return entity_dict |
Return differences in string between the two dicts, before and after. | def get_diff(before, after):
"""Return differences in string between the two dicts, before and after."""
diffs = []
for k, v in before.items():
if k in after:
if v != after[k]:
diffs.append((k, (v, after[k])))
else:
diffs.append((k, (v, '<MISSING>')))
for k, v in after.items():
if k not in before:
diffs.append((k, ('<MISSING>', v)))
diffs.sort()
s = ''
for (key, (before_value, after_value)) in diffs:
s += '%s:\n' % key
s += '-%s\n' % before_value
s += '+%s\n\n' % after_value
return s |
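A usage sketch of get_diff() with invented values, showing the string format it returns (keys sorted, '<MISSING>' filling in absent keys):

before = {'status': 'Processed', 'keywords': []}
after = {'status': 'Processed', 'keywords': ['asan'], 'impact': 'high'}
print(get_diff(before, after))
# impact:
# -<MISSING>
# +high
#
# keywords:
# -[]
# +['asan']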
Build keywords. | def execute(args):
"""Build keywords."""
count_diff = 0
query = data_types.Testcase.query().order(-data_types.Testcase.timestamp)
for testcases in batcher.iterate(query, BATCH_SIZE):
for testcase in testcases:
before_testcase = to_dict(testcase)
attribute_builder.populate(testcase)
after_testcase = to_dict(testcase)
diff = get_diff(before_testcase, after_testcase)
if (count_diff % 10) == 0 and diff:
print('Migrate (dry=%s) id:%s\n%s' % (not args.non_dry_run,
testcase.key.id(), diff))
if diff:
count_diff += 1
if args.non_dry_run:
try:
ndb_utils.put_multi(testcases)
except Exception:
for testcase in testcases:
try:
testcase.put()
except Exception:
print('Error: %s %s' % (testcase.key.id(), sys.exc_info()))
print('Done (count_diff=%d)' % count_diff) |
Retrieve testcase based on its bug information. | def _get_testcase(bug_information):
"""Retrieve testcase based on its bug information."""
candidates = list(
data_types.Testcase.query(
data_types.Testcase.bug_information == bug_information))
if not candidates:
print('No candidate found when '
f'querying bug information {bug_information}\n')
elif len(candidates) > 1:
print('Multiple candidates found when '
f'querying bug information {bug_information}:\n')
for testcase in candidates:
print(f' {testcase.key.id()}')
else:
return candidates[0]
return None |
Given a GitHub issue, parse its corresponding bug information. | def _get_bug_information(issue):
"""Given a GitHub issue, parse its corresponding bug information."""
bug_information = issue.title[len(oss_fuzz_github.ISSUE_TITTLE_TEXT_PREFIX) +
1:]
if bug_information.isdigit():
return bug_information
return None |
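A small illustrative sketch of the title parsing above. The prefix value is an assumed stand-in; the real constant lives in oss_fuzz_github as ISSUE_TITTLE_TEXT_PREFIX and may differ.

PREFIX = 'OSS-Fuzz issue'  # assumed example value, not taken from the source
title = PREFIX + ' 40012345'
bug_information = title[len(PREFIX) + 1:]
print(bug_information, bug_information.isdigit())  # -> 40012345 True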
Verify if a testcase correctly stores its GitHub issue information. | def _testcase_information_verified(testcase, issue):
"""Verify if a testcase correctly stores its GitHub issue information."""
if testcase.github_repo_id == issue.repository.id and \
testcase.github_issue_num == issue.number:
print(f'Testcase {testcase.bug_information} was properly stored.')
return True
if testcase.github_repo_id is None and testcase.github_issue_num is None:
print(f'Testcase {testcase.bug_information} was not stored.')
else:
print(f'Testcase {testcase.bug_information} stored '
f'is inconsistent with GitHub:\n'
f' Issue number (Storage) {testcase.github_issue_num} '
f'!= {issue.number} (GitHub)\n'
f' Repository ID (Storage) {testcase.github_repo_id} '
f'!= {issue.repository.id} (GitHub).')
return False |
Backtrack GitHub issues filed in the past,
update their information in gcloud, and close them when necessary. | def execute(args):
"""Backtrack GitHub issues filed in the past,
update their information in gcloud, and close them when necessary."""
issue_tracker = issue_tracker_utils.get_issue_tracker('oss-fuzz')
for issue in oss_fuzz_github.get_my_issues():
print('========================================')
# Track testcase.
bug_information = _get_bug_information(issue)
if not bug_information:
print('Unable to extract bug information: '
f'Repo {issue.repository.id} Issue {issue.number}.\n'
f'Issue title: {issue.title}.\n'
f'Issue url: {issue.url}.')
continue
testcase = _get_testcase(bug_information)
# Update testcase.
if not _testcase_information_verified(testcase, issue):
print(
f'Updating testcase (bug information: {testcase.bug_information}):\n'
f' Issue number {issue.number}\n'
f' Repository ID {issue.repository.id}\n')
if args.non_dry_run:
oss_fuzz_github.update_testcase_properties(testcase, issue.repository,
issue)
testcase.put()
# Backclose issues.
if issue.state == 'closed':
continue
monorail_issue = issue_tracker.get_original_issue(bug_information)
if monorail_issue.is_open:
continue
print(f'Closing testcase (bug information: {testcase.bug_information}):\n'
f' Issue number {issue.number}\n'
f' Repository ID {issue.repository.id}\n')
if args.non_dry_run:
oss_fuzz_github.close_issue(testcase) |
Set up configuration. | def setup_config(non_dry_run):
"""Set up configuration."""
config = data_types.Config.query().get()
if not config:
config = data_types.Config()
if non_dry_run:
print('Creating config')
config.put()
else:
print('Skip creating config (dry-run mode)') |
Set up fuzzers. | def setup_fuzzers(non_dry_run):
"""Set up fuzzers."""
for fuzzer_defaults in [
AflDefaults(),
LibFuzzerDefaults(),
HonggfuzzDefaults(),
GoogleFuzzTestDefaults(),
SyzkallerDefaults(),
CentipedeDefaults(),
]:
fuzzer = data_types.Fuzzer.query(
data_types.Fuzzer.name == fuzzer_defaults.name).get()
if fuzzer:
print(fuzzer_defaults.name, 'fuzzer already exists')
if non_dry_run:
print('Updating stats metrics.')
fuzzer.stats_columns = fuzzer_defaults.stats_columns
fuzzer.stats_column_descriptions = (
fuzzer_defaults.stats_column_descriptions)
fuzzer.put()
continue
if non_dry_run:
print('Creating fuzzer', fuzzer_defaults.name)
fuzzer_defaults.create_fuzzer().put()
else:
print('Skip creating fuzzer', fuzzer_defaults.name, '(dry-run mode)') |
Set up templates. | def setup_templates(non_dry_run):
"""Set up templates."""
for name, template in TEMPLATES.items():
job = data_types.JobTemplate.query(
data_types.JobTemplate.name == name).get()
if job:
print('Template with name', name, 'already exists.')
continue
if non_dry_run:
print('Creating template', name)
data_types.JobTemplate(name=name, environment_string=template).put()
else:
print('Skip creating template', name, '(dry-run mode)') |
Set up metrics. | def setup_metrics(non_dry_run):
"""Set up metrics."""
client = monitoring_v3.MetricServiceClient()
project_name = utils.get_application_id()
project_path = client.project_path(project_name)
for name in dir(monitoring_metrics):
metric = getattr(monitoring_metrics, name)
if not isinstance(metric, monitor.Metric):
continue
descriptor = monitoring_v3.types.MetricDescriptor() # pylint: disable=no-member
metric.monitoring_v3_metric_descriptor(descriptor)
if non_dry_run:
print('Creating metric', descriptor)
try:
client.create_metric_descriptor(project_path, descriptor)
except exceptions.AlreadyExists:
client.delete_metric_descriptor(name=project_path +
'/metricDescriptors/' + descriptor.type)
client.create_metric_descriptor(project_path, descriptor)
else:
print('Skip creating metric', descriptor, '(dry-run mode)') |
Set up initial Datastore models. | def execute(args):
"""Set up initial Datastore models."""
setup_config(args.non_dry_run)
setup_fuzzers(args.non_dry_run)
setup_templates(args.non_dry_run)
if not args.local:
setup_metrics(args.non_dry_run)
print('Done') |
Build keywords for jobs. | def execute(args):
"""Build keywords for jobs."""
jobs = list(data_types.Job.query())
if args.non_dry_run:
ndb.put_multi(jobs)
print("Done building keywords for jobs.") |
Query Testcases of a project, and update the bug_information
and/or group_bug_information fields to reflect the Issue Tracker issue
id rather than the Monorail issue id. | def execute(args):
"""Query Testcases of a project, and update the bug_information
and/or group_bug_information fields to reflect the Issue Tracker issue
id rather than the Monorail issue id."""
# Read the required environment variables.
file_loc = os.environ.get('FILE_LOC')
if not file_loc:
raise ValueError('Must specify FILE_LOC env variable')
project_name = os.environ.get('PROJECT_NAME')
if not project_name:
raise ValueError('Must specify PROJECT_NAME env variable')
batch_size = int(os.environ.get('BATCH_SIZE', DEFAULT_BATCH_SIZE))
roll_back = os.environ.get('ROLL_BACK') == 'True'
issue_id_dict = get_monorail_issuetracker_issue_id_dictionary(
file_loc, roll_back)
print(f'Size of issue_id_dict: {len(issue_id_dict)}')
testcases = []
count_of_updated = 0
for testcase in data_types.Testcase.query(
# only target testcases in single project
data_types.Testcase.project_name == project_name,):
testcase_updated = False
if testcase.bug_information and issue_id_dict.get(testcase.bug_information):
testcase.bug_information = issue_id_dict[testcase.bug_information]
testcase_updated = True
if testcase.group_bug_information and issue_id_dict.get(
str(testcase.group_bug_information)):
# group_bug_information is an int unlike bug_information which is a str.
testcase.group_bug_information = int(issue_id_dict[str(
testcase.group_bug_information)])
testcase_updated = True
if testcase_updated:
print(f'We will update testcase id: {testcase.key.id()}')
testcases.append(testcase)
if args.non_dry_run and len(testcases) >= batch_size:
put_multi(testcases)
count_of_updated += len(testcases)
print(f'Updated {len(testcases)}. Total {count_of_updated}')
testcases = []
if args.non_dry_run and len(testcases) > 0:
put_multi(testcases)
count_of_updated += len(testcases)
print(f'Updated {len(testcases)}. Total {count_of_updated}') |
Attempts to batch put the specified slice of testcases.
If there is a 'payload size exceeds the limit' error then it will halve the
testcases and try again. If that does not work then will go into a debugger. | def put_multi(testcases):
"""Attempts to batch put the specified slice of testcases.
If there is a 'payload size exceeds the limit' error then it will halve the
testcases and try again. If that does not work then will go into a debugger.
"""
try:
ndb.put_multi(testcases)
except Exception as e:
if PAYLOAD_SIZE_ERROR in str(e) and len(testcases) > 1:
half_batch_size = len(testcases) // 2
print('Reached payload size limit. Retrying batch put with half the '
f'specified batch size: {half_batch_size}')
try:
ndb.put_multi(testcases[:half_batch_size])
ndb.put_multi(testcases[half_batch_size:])
except Exception as ie:
if PAYLOAD_SIZE_ERROR in str(ie):
print(f'Got exception: {e}')
print('Opening debugger to investigate further:')
# pylint: disable=forgotten-debug-statement
import pdb
pdb.set_trace()
else:
raise |
Creates a mapping of monorail/issuetracker issue ids. | def get_monorail_issuetracker_issue_id_dictionary(file_loc, roll_back):
"""Creates a mapping of monorail/issuetracker issue ids."""
issue_id_dictionary = {}
# csv should be structured with no headers and contain two columns:
# a monorail issue id, and a issuetracker issue id
# (ex. row: "600469, 40003765")
with open(file_loc, 'r') as csvfile:
reader = csv.reader(csvfile)
for monorail_id, issuetracker_id in reader:
if roll_back:
issue_id_dictionary[issuetracker_id] = monorail_id
else:
issue_id_dictionary[monorail_id] = issuetracker_id
return issue_id_dictionary |
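A usage sketch with a throwaway CSV file and invented ids, showing how roll_back flips the direction of the mapping:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
    f.write('600469,40003765\n600470,40003766\n')
forward = get_monorail_issuetracker_issue_id_dictionary(f.name, roll_back=False)
backward = get_monorail_issuetracker_issue_id_dictionary(f.name, roll_back=True)
print(forward['600469'])     # -> 40003765
print(backward['40003765'])  # -> 600469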
Return content of a file. | def get_file_content(file_path):
"""Return content of a file."""
return open(file_path).read().strip() |
Return a tuple of (hostname, username and ssh_key_path). | def get_host_user_and_ssh_key_path(instance_name, project, zone):
"""Return a tuple of (hostname, username and ssh_key_path)."""
output = api.local(
f'gcloud compute ssh --project "{project}" --zone "{zone}" '
f'{instance_name} --dry-run',
capture=True)
print(output)
m = re.match('/usr/bin/ssh .*-i ([^ ]+)(?: -o [^ ]+)* ([^ ]+)@([^ ]+)',
output)
return (m.group(3), m.group(2), m.group(1)) |
Return password from |PASSWORD_FILE_PATH| environment variable. | def get_password():
"""Return password from |PASSWORD_FILE_PATH| environment variable."""
password_file_path = os.getenv('PASSWORD_FILE_PATH')
if not password_file_path:
raise RuntimeError('Please set PASSWORD_FILE_PATH in environment.')
return get_file_content(password_file_path) |
Run a cycle of heartbeat checks to ensure Android device is running. | def main():
"""Run a cycle of heartbeat checks to ensure Android device is running."""
logs.configure('android_heartbeat')
dates.initialize_timezone_from_environment()
environment.set_bot_environment()
monitor.initialize()
if environment.is_android_cuttlefish():
android.adb.set_cuttlefish_device_serial()
device_serial = environment.get_value('ANDROID_SERIAL')
while True:
state = android.adb.get_device_state()
if state == android.adb.DEVICE_NOT_FOUND_STRING.format(
serial=device_serial) and environment.is_android_cuttlefish():
android.adb.connect_to_cuttlefish_device()
state = android.adb.get_device_state()
logs.log('Android device %s state: %s' % (device_serial, state))
monitoring_metrics.ANDROID_UPTIME.increment_by(
int(state == 'device'), {
'serial': device_serial or '',
'platform': environment.get_platform_group() or '',
})
time.sleep(data_types.ANDROID_HEARTBEAT_WAIT_INTERVAL)
if data_handler.bot_run_timed_out():
break |
Start a HTTP server to respond to the health checker. | def run_server():
"""Start a HTTP server to respond to the health checker."""
if utils.is_oss_fuzz() or environment.is_android_real_device():
# OSS-Fuzz & Android multiple instances per host model isn't supported
# yet.
return
health_check_responder_server = HTTPServer((RESPONDER_IP, RESPONDER_PORT),
RequestHandler)
server_thread = threading.Thread(
target=health_check_responder_server.serve_forever)
server_thread.start() |
Run a cycle of heartbeat checks to ensure bot is running. | def beat(previous_state, log_filename):
"""Run a cycle of heartbeat checks to ensure bot is running."""
# Handle case when run_bot.py script is stuck. If yes, kill its process.
task_end_time = tasks.get_task_end_time()
if psutil and task_end_time and dates.time_has_expired(
task_end_time, seconds=tasks.TASK_COMPLETION_BUFFER):
# Get absolute path to |run_bot| script. We use this to identify unique
# instances of bot running on a particular host.
startup_scripts_directory = environment.get_startup_scripts_directory()
bot_file_path = os.path.join(startup_scripts_directory, 'run_bot')
for process in psutil.process_iter():
try:
command_line = ' '.join(process.cmdline())
except (psutil.AccessDenied, psutil.NoSuchProcess, OSError):
continue
# Find the process running the main bot script.
if bot_file_path not in command_line:
continue
process_id = process.pid
logs.log(
'Killing stale bot (pid %d) which seems to be stuck.' % process_id)
try:
process_handler.terminate_root_and_child_processes(process_id)
except Exception:
logs.log_error('Failed to terminate stale bot processes.')
# Minor cleanup to avoid disk space issues on bot restart.
process_handler.terminate_stale_application_instances()
shell.clear_temp_directory()
shell.clear_testcase_directories()
# The stale processes should be killed by now. Now, delete the stale task.
tasks.track_task_end()
# Figure out when the log file was last modified.
try:
current_state = str(os.path.getmtime(log_filename))
except Exception:
current_state = None
# Only update the heartbeat if the log file was modified.
if current_state and current_state != previous_state:
# Try updating the heartbeat. If an error occurs, just
# wait and return None.
if not data_handler.update_heartbeat():
return None
# Heartbeat is successfully updated.
return current_state |
Start the bot process. | def start_bot(bot_command):
"""Start the bot process."""
command = shell.get_command(bot_command)
# Wait until the process terminates or until run timed out.
run_timeout = environment.get_value('RUN_TIMEOUT')
if run_timeout and run_timeout > MAX_SUBPROCESS_TIMEOUT:
logs.log_error(
'Capping RUN_TIMEOUT to max allowed value: %d' % MAX_SUBPROCESS_TIMEOUT)
run_timeout = MAX_SUBPROCESS_TIMEOUT
try:
result = subprocess.run(
command,
timeout=run_timeout,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=False)
exit_code = result.returncode
output = result.stdout
except subprocess.TimeoutExpired as e:
exit_code = 0
output = e.stdout
except Exception:
logs.log_error('Unable to start bot process (%s).' % bot_command)
return 1
if output:
output = output.decode('utf-8', errors='ignore')
log_message = f'Command: {command} (exit={exit_code})\n{output}'
if exit_code == 0:
logs.log(log_message)
elif exit_code == 1:
# Anecdotally, exit=1 means there's a fatal Python exception.
logs.log_error(log_message)
else:
logs.log_warn(log_message)
return exit_code |
time.sleep wrapper for mocking. | def sleep(seconds):
"""time.sleep wrapper for mocking."""
time.sleep(seconds) |
Start the heartbeat (in another process). | def start_heartbeat(heartbeat_command):
"""Start the heartbeat (in another process)."""
global _heartbeat_handle
if _heartbeat_handle:
# If heartbeat is already started, no work to do. Bail out.
return
try:
command = shell.get_command(heartbeat_command)
process_handle = subprocess.Popen(command) # pylint: disable=consider-using-with
except Exception:
logs.log_error(
'Unable to start heartbeat process (%s).' % heartbeat_command)
return
# If heartbeat is successfully started, set its handle now.
_heartbeat_handle = process_handle
# Artificial delay to let heartbeat's start time update first.
sleep(HEARTBEAT_START_WAIT_TIME) |
Stop the heartbeat process. | def stop_heartbeat():
"""Stop the heartbeat process."""
global _heartbeat_handle
if not _heartbeat_handle:
# If there is no heartbeat started yet, no work to do. Bail out.
return
try:
_heartbeat_handle.kill()
except Exception:
pass
_heartbeat_handle = None |
Start the android heartbeat (in another process). | def start_android_heartbeat():
"""Start the android heartbeat (in another process)."""
global _android_heartbeat_handle
if _android_heartbeat_handle:
# If heartbeat is already started, no work to do. Bail out.
return
base_directory = environment.get_startup_scripts_directory()
android_beat_script_path = os.path.join(base_directory,
ANDROID_HEARTBEAT_SCRIPT)
android_beat_interpreter = shell.get_interpreter(android_beat_script_path)
assert android_beat_interpreter
android_beat_command = [android_beat_interpreter, android_beat_script_path]
try:
process_handle = subprocess.Popen(android_beat_command)
except Exception:
logs.log_error('Unable to start android heartbeat process (%s).' %
android_beat_command)
return
# If heartbeat is successfully started, set its handle now.
_android_heartbeat_handle = process_handle |
Stop the android heartbeat process. | def stop_android_heartbeat():
"""Stop the android heartbeat process."""
global _android_heartbeat_handle
if not _android_heartbeat_handle:
# If there is no heartbeat started yet, no work to do. Bail out.
return
try:
_android_heartbeat_handle.kill()
except Exception as e:
logs.log_error('Unable to stop android heartbeat process: %s' % str(e))
_android_heartbeat_handle = None |
Update source code if needed. | def update_source_code_if_needed():
"""Update source code if needed."""
try:
# Update the bot source, if there's a newer version.
newer_source_revision = update_task.get_newer_source_revision()
if newer_source_revision is not None:
# If source code needs update, stop the heartbeat first. As otherwise,
# we can run into exceptions if source code changed from underneath
# a running process.
stop_heartbeat()
update_task.update_source_code()
except Exception:
logs.log_error('Failed to update source.') |
Run infinite loop with bot's command. | def run_loop(bot_command, heartbeat_command):
"""Run infinite loop with bot's command."""
atexit.register(stop_heartbeat)
if environment.is_android():
atexit.register(stop_android_heartbeat)
while True:
update_source_code_if_needed()
if not environment.is_uworker():
if environment.is_android():
start_android_heartbeat()
start_heartbeat(heartbeat_command)
exit_code = start_bot(bot_command)
if environment.is_uworker():
logs.log(f'Batch job exited with code: {exit_code}. Exiting.')
sys.exit(exit_code)
# See if our run timed out, if yes bail out.
try:
if data_handler.bot_run_timed_out():
break
except Exception:
logs.log_error('Failed to check for bot run timeout.')
sleep(LOOP_SLEEP_INTERVAL) |
Set START_TIME. | def set_start_time():
"""Set START_TIME."""
environment.set_value('START_TIME', time.time()) |
Creates a context manager that leases every task in tasks_list. | def lease_all_tasks(task_list):
"""Creates a context manager that leases every task in tasks_list."""
with contextlib.ExitStack() as exit_stack:
for task in task_list:
monitoring_metrics.TASK_COUNT.increment({
'task': task.command or '',
'job': task.job or '',
})
exit_stack.enter_context(task.lease())
yield |
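A generic, self-contained sketch of the contextlib.ExitStack pattern used above; plain print-based context managers stand in for task leases, and nothing here is specific to the task queue.

import contextlib

@contextlib.contextmanager
def _fake_lease(name):
    print('lease', name)
    try:
        yield
    finally:
        print('release', name)

with contextlib.ExitStack() as stack:
    for name in ('task-a', 'task-b'):
        stack.enter_context(_fake_lease(name))
    print('all leases held')
# Leases are released in reverse order as the with-block exits.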
Schedules utask_mains from preprocessed utasks on Google Cloud Batch. | def schedule_utask_mains():
"""Schedules utask_mains from preprocessed utasks on Google Cloud Batch."""
from clusterfuzz._internal.google_cloud_utils import batch
logs.log('Attempting to combine batch tasks.')
utask_mains = taskslib.get_utask_mains()
if not utask_mains:
logs.log('No utask mains.')
return
logs.log(f'Combining {len(utask_mains)} batch tasks.')
batch_tasks = []
with lease_all_tasks(utask_mains):
batch_tasks = [
batch.BatchTask(task.command, task.job, task.argument)
for task in utask_mains
]
batch.create_uworker_main_batch_jobs(batch_tasks) |
Executes tasks indefinitely. | def task_loop():
"""Executes tasks indefinitely."""
# Defer heavy task imports to prevent issues with multiprocessing.Process
from clusterfuzz._internal.bot.tasks import commands
clean_exit = False
while True:
stacktrace = ''
exception_occurred = False
task = None
# This caches the current environment on first run. Don't move this.
environment.reset_environment()
try:
# Run regular updates.
# TODO(metzman): Move this after utask_main execution so that utasks can't
# be updated on subsequent attempts.
update_task.run()
update_task.track_revision()
if environment.is_uworker():
# Batch tasks only run one at a time.
sys.exit(utasks.uworker_bot_main())
if environment.get_value('SCHEDULE_UTASK_MAINS'):
# If the bot is configured to schedule utask_mains, don't run any other
# tasks because scheduling these tasks is more important than executing
# any one other task.
# TODO(metzman): Convert this to a k8s cron.
schedule_utask_mains()
continue
task = taskslib.get_task()
if not task:
continue
with _Monitor(task):
with task.lease():
# Execute the command and delete the task.
commands.process_command(task)
except SystemExit as e:
exception_occurred = True
clean_exit = e.code == 0
if not clean_exit and not isinstance(e, untrusted.HostError):
logs.log_error('SystemExit occurred while working on task.')
stacktrace = traceback.format_exc()
except commands.AlreadyRunningError:
exception_occurred = False
except task_utils.UworkerMsgParseError:
logs.log_error('Task cannot be retried because of utask parse error.')
task.dont_retry()
exception_occurred = True
stacktrace = traceback.format_exc()
except Exception:
logs.log_error('Error occurred while working on task.')
exception_occurred = True
stacktrace = traceback.format_exc()
if exception_occurred:
# Prevent looping too quickly. See: crbug.com/644830
failure_wait_interval = environment.get_value('FAIL_WAIT')
time.sleep(utils.random_number(1, failure_wait_interval))
break
task_payload = task.payload() if task else None
return stacktrace, clean_exit, task_payload |
Prepare the configuration options and start requesting tasks. | def main():
"""Prepare the configuration options and start requesting tasks."""
logs.configure('run_bot')
root_directory = environment.get_value('ROOT_DIR')
if not root_directory:
print('Please set ROOT_DIR environment variable to the root of the source '
'checkout before running. Exiting.')
print('For an example, check init.bash in the local directory.')
return
dates.initialize_timezone_from_environment()
environment.set_bot_environment()
monitor.initialize()
if not profiler.start_if_needed('python_profiler_bot'):
sys.exit(-1)
fuzzers_init.run()
if environment.is_trusted_host(ensure_connected=False):
from clusterfuzz._internal.bot.untrusted_runner import host
host.init()
if environment.is_untrusted_worker():
# Track revision since we won't go into the task_loop.
update_task.track_revision()
from clusterfuzz._internal.bot.untrusted_runner import \
untrusted as untrusted_worker
untrusted_worker.start_server()
assert False, 'Unreachable code'
while True:
# task_loop should be an infinite loop,
# unless we run into an exception.
error_stacktrace, clean_exit, task_payload = task_loop()
# Print the error trace to the console.
if not clean_exit:
print('Exception occurred while running "%s".' % task_payload)
print('-' * 80)
print(error_stacktrace)
print('-' * 80)
should_terminate = (
clean_exit or errors.error_in_list(error_stacktrace,
errors.BOT_ERROR_TERMINATION_LIST))
if should_terminate:
return
logs.log_error(
'Task exited with exception (payload="%s").' % task_payload,
error_stacktrace=error_stacktrace)
should_hang = errors.error_in_list(error_stacktrace,
errors.BOT_ERROR_HANG_LIST)
if should_hang:
logs.log('Start hanging forever.')
while True:
# Sleep to avoid consuming 100% of CPU.
time.sleep(60)
# See if our run timed out, if yes bail out.
if data_handler.bot_run_timed_out():
return |
Runs the cron jobs | def main():
"""Runs the cron jobs"""
logs.configure('run_cron')
root_directory = environment.get_value('ROOT_DIR')
local_config.ProjectConfig().set_environment()
if not root_directory:
print('Please set ROOT_DIR environment variable to the root of the source '
'checkout before running. Exiting.')
print('For an example, check init.bash in the local directory.')
return 1
config_modules_path = os.path.join(root_directory, 'src', 'appengine',
'config', 'modules')
if os.path.exists(config_modules_path):
sys.path.append(config_modules_path)
try:
# Run any module initialization code.
import module_init
module_init.init()
except ImportError:
pass
task = sys.argv[1]
task_module_name = f'clusterfuzz._internal.cron.{task}'
with ndb_init.context():
task_module = importlib.import_module(task_module_name)
return 0 if task_module.main() else 1 |
Update the heartbeat if there is bot activity. | def main():
"""Update the heartbeat if there is bot activity."""
if len(sys.argv) < 2:
print('Usage: %s <log file>' % sys.argv[0])
return
environment.set_bot_environment()
logs.configure('run_heartbeat')
log_filename = sys.argv[1]
previous_state = None
# Get absolute path to heartbeat script and interpreter needed to execute it.
startup_scripts_directory = environment.get_startup_scripts_directory()
beat_script_path = os.path.join(startup_scripts_directory, BEAT_SCRIPT)
beat_interpreter = shell.get_interpreter(beat_script_path)
assert beat_interpreter
run_health_responser_server()
while True:
beat_command = [
beat_interpreter, beat_script_path,
str(previous_state), log_filename
]
try:
previous_state = subprocess.check_output(
beat_command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
logs.log_error('Failed to beat.', output=e.output)
except Exception:
logs.log_error('Failed to beat.')
# See if our run timed out, if yes bail out.
if data_handler.bot_run_timed_out():
break |
Unpacks the old crash testcases in the provided directory. | def unpack_crash_testcases(crash_testcases_directory):
"""Unpacks the old crash testcases in the provided directory."""
for testcase in ndb_utils.get_all_from_model(data_types.Testcase):
testcase_id = testcase.key.id()
# 1. If we have already stored the testcase, then just skip.
if testcase_id in STORED_TESTCASES_LIST:
continue
# 2. Make sure that it is a unique crash testcase. Ignore duplicates,
# uploaded repros.
if testcase.status != 'Processed':
continue
# 3. Check if the testcase is fixed. If not, skip.
if testcase.open:
continue
# 4. Check if the testcase has a minimized repro. If not, skip.
if not testcase.minimized_keys or testcase.minimized_keys == 'NA':
continue
# 5. Only use testcases that have bugs associated with them.
if not testcase.bug_information:
continue
# 6. Existing IPC testcases are un-interesting and unused in further
# mutations. Due to size bloat, ignoring these for now.
if testcase.absolute_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
continue
# 7. Ignore testcases that are archives (e.g. Langfuzz fuzzer tests).
if archive.get_archive_type(testcase.absolute_path):
continue
# 8. Skip in-process fuzzer testcases, since these are only applicable to
# fuzz targets and don't run with blackbox binaries.
if testcase.fuzzer_name and testcase.fuzzer_name in ['afl', 'libFuzzer']:
continue
# Un-pack testcase.
try:
setup.unpack_testcase(testcase)
except Exception:
logs.log_error('Failed to unpack testcase %d.' % testcase.key.id())
continue
# Move this to our crash testcases directory.
crash_testcase_directory = os.path.join(crash_testcases_directory,
str(testcase_id))
input_directory = environment.get_value('FUZZ_INPUTS')
shell.move(input_directory, crash_testcase_directory)
# Re-create input directory for unpacking testcase in next iteration.
shell.create_directory(input_directory)
STORED_TESTCASES_LIST.append(testcase_id)
# Remove testcase directories that exceed the max size limit.
for directory_name in os.listdir(crash_testcases_directory):
directory_path = os.path.join(crash_testcases_directory, directory_name)
if not os.path.isdir(directory_path):
continue
if shell.get_directory_size(directory_path) <= MAX_TESTCASE_DIRECTORY_SIZE:
continue
shell.remove_directory(directory_path)
# Rename all fuzzed testcase files as regular files.
for root, _, files in os.walk(crash_testcases_directory):
for filename in files:
if not filename.startswith(testcase_manager.FUZZ_PREFIX):
continue
file_path = os.path.join(root, filename)
stripped_file_name = os.path.basename(file_path)[len(
testcase_manager.FUZZ_PREFIX):]
stripped_file_path = os.path.join(
os.path.dirname(file_path), stripped_file_name)
try:
os.rename(file_path, stripped_file_path)
except:
raise Exception('Failed to rename testcase %s.' % file_path)
# Remove empty files and dirs to avoid the case where a fuzzer randomly
# chooses an empty dir/file and generates zero testcases.
shell.remove_empty_files(crash_testcases_directory)
shell.remove_empty_directories(crash_testcases_directory) |
Clone a git repo. | def clone_git_repository(tests_directory, name, repo_url):
"""Clone a git repo."""
logs.log('Syncing %s tests.' % name)
directory = os.path.join(tests_directory, name)
if not os.path.exists(directory):
subprocess.check_call(
['git', 'clone', '--depth=1', repo_url, name], cwd=tests_directory)
if os.path.exists(directory):
subprocess.check_call(['git', 'pull'], cwd=directory)
else:
raise Exception('Unable to checkout %s tests.' % name) |
Checkout a SVN repo. | def checkout_svn_repository(tests_directory, name, repo_url):
"""Checkout a SVN repo."""
logs.log('Syncing %s tests.' % name)
directory = os.path.join(tests_directory, name)
if not os.path.exists(directory):
subprocess.check_call(
['svn', 'checkout', repo_url, directory], cwd=tests_directory)
if os.path.exists(directory):
subprocess.check_call(['svn', 'update', directory], cwd=tests_directory)
else:
raise Exception('Unable to checkout %s tests.' % name) |
Create symbolic link. | def create_symbolic_link(tests_directory, source_subdirectory,
target_subdirectory):
"""Create symbolic link."""
source_directory = os.path.join(tests_directory, source_subdirectory)
target_directory = os.path.join(tests_directory, target_subdirectory)
if not os.path.exists(source_directory):
raise Exception('Unable to find source directory %s for symbolic link.' %
source_directory)
if os.path.exists(target_directory):
# Symbolic link already exists, bail out.
return
target_parent_directory = os.path.dirname(target_directory)
if not os.path.exists(target_parent_directory):
# Create parent dirs if needed, otherwise symbolic link creation will fail.
os.makedirs(target_parent_directory)
subprocess.check_call(['ln', '-s', source_directory, target_directory]) |
Create Gecko tests directory from a Gecko source checkout using links. | def create_gecko_tests_directory(tests_directory, gecko_checkout_subdirectory,
gecko_tests_subdirectory):
"""Create Gecko tests directory from a Gecko source checkout using links."""
gecko_checkout_directory = os.path.join(tests_directory,
gecko_checkout_subdirectory)
if not os.path.exists(gecko_checkout_directory):
raise Exception(
'Unable to find Gecko source directory %s.' % gecko_checkout_directory)
web_platform_sub_directory = 'testing%sweb-platform%s' % (os.sep, os.sep)
for root, directories, _ in os.walk(gecko_checkout_directory):
for directory in directories:
if not re.match('.*tests?$', directory):
continue
directory_absolute_path = os.path.join(root, directory)
sub_directory = utils.strip_from_left(directory_absolute_path,
gecko_checkout_directory + os.sep)
source_subdirectory = gecko_checkout_subdirectory + os.sep + sub_directory
target_subdirectory = gecko_tests_subdirectory + os.sep + sub_directory
if sub_directory.startswith(web_platform_sub_directory):
# Exclude web-platform tests already included in blink layout tests.
continue
create_symbolic_link(tests_directory, source_subdirectory,
target_subdirectory) |
Main sync routine. | def main():
"""Main sync routine."""
tests_archive_bucket = environment.get_value('TESTS_ARCHIVE_BUCKET')
tests_archive_name = environment.get_value('TESTS_ARCHIVE_NAME')
tests_directory = environment.get_value('TESTS_DIR')
sync_interval = environment.get_value('SYNC_INTERVAL') # in seconds.
shell.create_directory(tests_directory)
# Sync old crash tests.
logs.log('Syncing old crash tests.')
crash_testcases_directory = os.path.join(tests_directory, 'CrashTests')
shell.create_directory(crash_testcases_directory)
unpack_crash_testcases(crash_testcases_directory)
# Sync web tests.
logs.log('Syncing web tests.')
src_directory = os.path.join(tests_directory, 'src')
gclient_file_path = os.path.join(tests_directory, '.gclient')
if not os.path.exists(gclient_file_path):
subprocess.check_call(
['fetch', '--no-history', 'chromium', '--nosvn=True'],
cwd=tests_directory)
if os.path.exists(src_directory):
subprocess.check_call(['gclient', 'revert'], cwd=src_directory)
subprocess.check_call(['git', 'pull'], cwd=src_directory)
subprocess.check_call(['gclient', 'sync'], cwd=src_directory)
else:
raise Exception('Unable to checkout web tests.')
clone_git_repository(tests_directory, 'v8',
'https://chromium.googlesource.com/v8/v8')
clone_git_repository(tests_directory, 'ChakraCore',
'https://github.com/Microsoft/ChakraCore.git')
clone_git_repository(tests_directory, 'gecko-dev',
'https://github.com/mozilla/gecko-dev.git')
clone_git_repository(tests_directory, 'webgl-conformance-tests',
'https://github.com/KhronosGroup/WebGL.git')
checkout_svn_repository(
tests_directory, 'WebKit/LayoutTests',
'http://svn.webkit.org/repository/webkit/trunk/LayoutTests')
checkout_svn_repository(
tests_directory, 'WebKit/JSTests/stress',
'http://svn.webkit.org/repository/webkit/trunk/JSTests/stress')
checkout_svn_repository(
tests_directory, 'WebKit/JSTests/es6',
'http://svn.webkit.org/repository/webkit/trunk/JSTests/es6')
create_gecko_tests_directory(tests_directory, 'gecko-dev', 'gecko-tests')
# Upload tests archive to google cloud storage.
logs.log('Uploading tests archive to cloud.')
tests_archive_local = os.path.join(tests_directory, tests_archive_name)
tests_archive_remote = 'gs://{bucket_name}/{archive_name}'.format(
bucket_name=tests_archive_bucket, archive_name=tests_archive_name)
shell.remove_file(tests_archive_local)
create_symbolic_link(tests_directory, 'gecko-dev/js/src/tests',
'spidermonkey')
create_symbolic_link(tests_directory, 'ChakraCore/test', 'chakra')
# FIXME: Find a way to rename LayoutTests to web_tests without breaking
# compatibility with older testcases.
create_symbolic_link(tests_directory, 'src/third_party/blink/web_tests',
'LayoutTests')
subprocess.check_call(
[
'zip',
'-r',
tests_archive_local,
'CrashTests',
'LayoutTests',
'WebKit',
'gecko-tests',
'v8/test/mjsunit',
'spidermonkey',
'chakra',
'webgl-conformance-tests',
'-x',
'*.cc',
'-x',
'*.cpp',
'-x',
'*.py',
'-x',
'*.txt',
'-x',
'*-expected.*',
'-x',
'*.git*',
'-x',
'*.svn*',
],
cwd=tests_directory)
subprocess.check_call(
['gsutil', 'cp', tests_archive_local, tests_archive_remote])
logs.log('Completed cycle, sleeping for %s seconds.' % sync_interval)
time.sleep(sync_interval) |
Read in dataset for variable var
:param varin: Variable for which to read in data. | def read(varin, fname='MS2_L10.mat.txt'):
'''Read in dataset for variable var
:param varin: Variable for which to read in data.
'''
# # fname = 'MS09_L10.mat.txt'
# # fname = 'MS09_L05.mat.txt' # has PAR
# fname = 'MS2_L10.mat.txt' # empty PAR
d = np.loadtxt(fname, comments='*')
if fname == 'MS2_L10.mat.txt':
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
elif (fname == 'MS09_L05.mat.txt') or (fname == 'MS09_L10.mat.txt') or (fname == 'MS08_L12.mat.txt'):
var = ['lat', 'lon', 'depth', 'temp', 'density', 'sigma', 'oxygen',
'voltage 2', 'voltage 3', 'voltage 4', 'fluorescence-CDOM', 'fluorescence-ECO',
'turbidity', 'pressure', 'salinity', 'RINKO temperature',
'RINKO DO - CTD temp', 'RINKO DO - RINKO temp', 'bottom', 'PAR']
# return data for variable varin
return d[:, 0], d[:, 1], d[:, 2], d[:, var.index(varin)] |
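A usage sketch of read(), assuming the default data file MS2_L10.mat.txt is available in the working directory:

lat, lon, depth, temp = read('temp')
print(lat.shape, temp.min(), temp.max())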
Show a colormap for a chosen input variable var side by side with
black and white and jet colormaps.
:param cmap: Colormap instance
:param var: Variable to plot.
:param vmin=None: Min plot value.
:param vmax=None: Max plot value. | def show(cmap, var, vmin=None, vmax=None):
'''Show a colormap for a chosen input variable var side by side with
black and white and jet colormaps.
:param cmap: Colormap instance
:param var: Variable to plot.
:param vmin=None: Min plot value.
:param vmax=None: Max plot value.
'''
# get variable data
lat, lon, z, data = read(var)
fig = plt.figure(figsize=(16, 12))
# Plot with grayscale
ax = fig.add_subplot(3, 1, 1)
map1 = ax.scatter(lon, -z, c=data, cmap='gray', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with jet
ax = fig.add_subplot(3, 1, 2)
map1 = ax.scatter(lon, -z, c=data, cmap='jet', s=10, linewidths=0., vmin=vmin, vmax=vmax)
plt.colorbar(map1, ax=ax)
# Plot with cmap
ax = fig.add_subplot(3, 1, 3)
map1 = ax.scatter(lon, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=vmin, vmax=vmax)
ax.set_xlabel('Longitude [degrees]')
ax.set_ylabel('Depth [m]')
plt.colorbar(map1, ax=ax)
plt.suptitle(var) |
Plot sample data up with the fancy colormaps.
| def plot_data():
'''Plot sample data up with the fancy colormaps.
'''
var = ['temp', 'oxygen', 'salinity', 'fluorescence-ECO', 'density', 'PAR', 'turbidity', 'fluorescence-CDOM']
# colorbar limits for each property
lims = np.array([[26, 33], [0, 10], [0, 36], [0, 6], [1005, 1025], [0, 0.6], [0, 2], [0, 9]]) # reasonable values
# lims = np.array([[20,36], [26,33], [1.5,5.6], [0,4], [0,9], [0,1.5]]) # values to show colormaps
for fname in fnames:
fig, axes = plt.subplots(nrows=4, ncols=2)
fig.set_size_inches(20, 10)
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.0, hspace=0.07)
i = 0
for ax, Var, cmap in zip(axes.flat, var, cmaps): # loop through data to plot up
# get variable data
lat, lon, z, data = test.read(Var, fname)
map1 = ax.scatter(lat, -z, c=data, cmap=cmap, s=10, linewidths=0., vmin=lims[i, 0], vmax=lims[i, 1])
# no stupid offset
y_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
ax.xaxis.set_major_formatter(y_formatter)
if i == 6:
ax.set_xlabel('Latitude [degrees]')
ax.set_ylabel('Depth [m]')
else:
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_ylim(-z.max(), 0)
ax.set_xlim(lat.min(), lat.max())
cb = plt.colorbar(map1, ax=ax, pad=0.02)
cb.set_label(cmap.name + ' [' + '$' + cmap.units + '$]')
i += 1
fig.savefig('figures/' + fname.split('.')[0] + '.png', bbox_inches='tight') |
Plot lightness of colormaps together.
| def plot_lightness(saveplot=False):
'''Plot lightness of colormaps together.
'''
from colorspacious import cspace_converter
dc = 1.
x = np.linspace(0.0, 1.0, 256)
locs = [] # locations for text labels
fig = plt.figure(figsize=(16, 5))
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.03, right=0.97)
ax.set_xlim(-0.1, len(cm.cmap_d)/5. + 0.1)
ax.set_ylim(0, 100)
ax.set_xlabel('Lightness for each colormap', fontsize=14)
for j, cmapname in enumerate(cm.cmapnames):
if '_r' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
lab = cspace_converter("sRGB1", "CAM02-UCS")(rgb)
L = lab[0, :, 0]
if L[-1] > L[0]:
ax.scatter(x+j*dc, L, c=x, cmap=cmap, s=200, linewidths=0.)
else:
ax.scatter(x+j*dc, L[::-1], c=x[::-1], cmap=cmap, s=200, linewidths=0.)
locs.append(x[-1]+j*dc) # store locations for colormap labels
# Set up labels for colormaps
ax.xaxis.set_ticks_position('top')
ticker = mpl.ticker.FixedLocator(locs)
ax.xaxis.set_major_locator(ticker)
formatter = mpl.ticker.FixedFormatter([cmapname for cmapname in cm.cmapnames])
ax.xaxis.set_major_formatter(formatter)
labels = ax.get_xticklabels()
for label in labels:
label.set_rotation(60)
if saveplot:
fig.savefig('cmocean_lightness.png', bbox_inches='tight')
fig.savefig('cmocean_lightness.pdf', bbox_inches='tight')
plt.show() |
Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not. | def plot_gallery(saveplot=False):
'''Make plot of colormaps and labels, like in the matplotlib
gallery.
:param saveplot=False: Whether to save the plot or not.
'''
from colorspacious import cspace_converter
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
x = np.linspace(0.0, 1.0, 256)
fig, axes = plt.subplots(nrows=int(len(cm.cmap_d)/5), ncols=1, figsize=(6, 12))
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05)
for ax, cmapname in zip(axes, cm.cmapnames):
if '_r' in cmapname or '_i' in cmapname: # skip reversed versions for plot
continue
cmap = cm.cmap_d[cmapname] # get the colormap instance
rgb = cmap(x)[np.newaxis, :, :3]
# Find a good conversion to grayscale
jch = cspace_converter("sRGB1", "CAM02-UCS")(rgb) # Not sure why to use JCh instead so using this.
L = jch[0, :, 0]
L = np.float32(np.vstack((L, L, L)))
ax.imshow(gradient, aspect='auto', cmap=cmap)
pos1 = ax.get_position() # get the original position
pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0]
axbw = fig.add_axes(pos2) # colorbar axes
axbw.set_axis_off()
axbw.imshow(L, aspect='auto', cmap=cm.gray, vmin=0, vmax=100.)
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, cmap.name, va='center', ha='right')
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
if saveplot:
fig.savefig('cmocean_gallery.pdf', bbox_inches='tight')
fig.savefig('cmocean_gallery.png', bbox_inches='tight')
plt.show() |
Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not. | def wrap_viscm(cmap, dpi=100, saveplot=False):
'''Evaluate goodness of colormap using perceptual deltas.
:param cmap: Colormap instance.
:param dpi=100: dpi for saved image.
:param saveplot=False: Whether to save the plot or not.
'''
from viscm import viscm
viscm(cmap)
fig = plt.gcf()
fig.set_size_inches(22, 10)
plt.show()
if saveplot:
fig.savefig('cmocean_eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)
fig.savefig('cmocean_eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi) |
Test colormap by plotting.
:param cmap: A colormap instance. Use a named one with cm.get_cmap(colormap) | def test(cmap, fig=None, ax=None):
'''Test colormap by plotting.
:param cmap: A colormap instance. Use a named one with cm.get_cmap(colormap)
'''
from colorspacious import cspace_converter
# indices to step through colormap
x = np.linspace(0.0, 1.0, 100)
# will plot colormap and lightness
rgb = cmap(x)[np.newaxis, :, :3]
lab = cspace_converter("sRGB1", "CAM02-UCS")(rgb)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(x, lab[0, :, 0], c=x, cmap=cmap, s=300, linewidths=0.)
ax.set_title(cmap.name, fontsize=14)
ax.set_ylabel('Lightness', fontsize=14)
ax.set_xticks([]) |
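A quick usage sketch for test() (requires matplotlib and colorspacious); any named matplotlib colormap instance can be passed in:

import matplotlib.pyplot as plt

test(plt.get_cmap('viridis'))
plt.show()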
Show quick test of a colormap.
| def quick_plot(cmap, fname=None, fig=None, ax=None, N=10):
'''Show quick test of a colormap.
'''
x = np.linspace(0, 10, N)
X, _ = np.meshgrid(x, x)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
mappable = ax.pcolor(X, cmap=cmap)
ax.set_title(cmap.name, fontsize=14)
ax.set_xticks([])
ax.set_yticks([])
plt.colorbar(mappable)
plt.show()
if fname is not None:
plt.savefig(fname + '.png', bbox_inches='tight') |
Print colormaps in 256 RGB colors to text files.
:param returnrgb=True: Whether or not to return the rgb array. Only makes sense when printing a single colormap's rgb. | def print_colormaps(cmaps, N=256, returnrgb=True, savefiles=False):
'''Print colormaps in 256 RGB colors to text files.
:param returnrgb=True: Whether or not to return the rgb array. Only makes sense when printing a single colormap's rgb.
'''
rgb = []
for cmap in cmaps:
rgbtemp = cmap(np.linspace(0, 1, N))[np.newaxis, :, :3][0]
if savefiles:
np.savetxt(cmap.name + '-rgb.txt', rgbtemp)
rgb.append(rgbtemp)
if returnrgb:
return rgb |
Change from rgb to dictionary that LinearSegmentedColormap expects.
Code from https://mycarta.wordpress.com/2014/04/25/convert-color-palettes-to-python-matplotlib-colormaps/
and http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb | def get_dict(cmap, N=256):
'''Change from rgb to dictionary that LinearSegmentedColormap expects.
Code from https://mycarta.wordpress.com/2014/04/25/convert-color-palettes-to-python-matplotlib-colormaps/
and http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
'''
x = np.linspace(0, 1, N) # position of sample n - ranges from 0 to 1
rgb = cmap(x)
# flip colormap to follow matplotlib standard
if rgb[0, :].sum() < rgb[-1, :].sum():
rgb = np.flipud(rgb)
b3 = rgb[:, 2] # value of blue at sample n
b2 = rgb[:, 2] # value of blue at sample n
# Setting up columns for tuples
g3 = rgb[:, 1]
g2 = rgb[:, 1]
r3 = rgb[:, 0]
r2 = rgb[:, 0]
# Creating tuples
R = list(zip(x, r2, r3))
G = list(zip(x, g2, g3))
B = list(zip(x, b2, b3))
# Creating dictionary
k = ['red', 'green', 'blue']
LinearL = dict(zip(k, [R, G, B]))
return LinearL |
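A usage sketch: the segment dictionary returned by get_dict() can be handed straight to LinearSegmentedColormap. viridis is only an example source colormap here; note that get_dict() may flip the map's orientation, per the comment in the code above.

import matplotlib as mpl
import matplotlib.pyplot as plt

segmentdata = get_dict(plt.get_cmap('viridis'), N=256)
rebuilt = mpl.colors.LinearSegmentedColormap('viridis_dict', segmentdata, N=256)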
Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to. | def cmap(rgbin, N=256):
'''Input an array of rgb values to generate a colormap.
:param rgbin: An [mx3] array, where m is the number of input color triplets which
are interpolated between to make the colormap that is returned. hex values
can be input instead, as [mx1] in single quotes with a #.
:param N=256: The number of levels to be interpolated to.
'''
# rgb inputs here
if not isinstance(rgbin[0], _string_types):
# normalize to be out of 1 if out of 256 instead
if rgbin.max() > 1:
rgbin = rgbin/256.
cmap = mpl.colors.LinearSegmentedColormap.from_list('mycmap', rgbin, N=N)
return cmap |
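A usage sketch of cmap() with three invented RGB triplets; values given in the 0-255 range are normalized inside the function before being passed to LinearSegmentedColormap.from_list:

import numpy as np

rgb_in = np.array([[0, 0, 128], [255, 255, 255], [128, 0, 0]])
my_cmap = cmap(rgb_in, N=64)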
Lighten a colormap by adding alpha < 1.
:param cmap: A colormap object, like cmocean.cm.matter.
:param alpha: An alpha or transparency value to assign the colormap. Alpha
of 1 is opaque and of 0 is fully transparent.
Outputs resultant colormap object.
This will lighten the appearance of a plot you make using the output
colormap object. It is also possible to lighten many plots in the
plotting function itself (e.g. pcolormesh or contourf). | def lighten(cmapin, alpha):
'''Lighten a colormap by adding alpha < 1.
:param cmap: A colormap object, like cmocean.cm.matter.
:param alpha: An alpha or transparency value to assign the colormap. Alpha
of 1 is opaque and of 0 is fully transparent.
Outputs resultant colormap object.
This will lighten the appearance of a plot you make using the output
colormap object. It is also possible to lighten many plots in the
plotting function itself (e.g. pcolormesh or contourf).
'''
# set the alpha value while retaining the number of rows in original cmap
return cmap(cmapin(np.linspace(0,1,cmapin.N), alpha)) |
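A usage sketch for lighten(), mirroring the docstring's note about plotting functions; viridis stands in for a cmocean map and the data are random:

import numpy as np
import matplotlib.pyplot as plt

light = lighten(plt.get_cmap('viridis'), 0.5)
plt.pcolormesh(np.random.rand(5, 5), cmap=light)
plt.colorbar()
plt.show()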
Crop end or ends of a diverging colormap by vmin/vmax values.
:param cmap: A colormap object, like cmocean.cm.matter.
:param vmin/vmax: vmin/vmax for use in plot with colormap.
:param pivot: center point to be used in plot with diverging colormap.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
:param dmax=None: dmax is the highest number to be included in a plot with
the colormap; values higher in magnitude than dmax are removed from both
ends of colormap. It should be less than abs(vmin) and abs(vmax), which
should be equal for this parameter to be used.
Outputs resultant colormap object.
This function can be used for sequential and other non-diverging colormaps
but it is easier to use that way through crop_by_percent().
This should be useful for plotting bathymetry and topography data with the
topo colormap when max bathymetry value is different from max topography.
Example usage:
# example for crop on min end of diverging colormap
vmin = -2; vmax = 5; pivot = 0
newcmap = crop(cmocean.cm.curl, vmin, vmax, pivot)
A = np.random.randint(vmin, vmax, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example for crop on max end of diverging colormap
vmin = -10; vmax = 8; pivot = 0
newcmap = crop(cmocean.cm.delta, vmin, vmax, pivot)
A = np.random.randint(vmin, vmax, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar() | def crop(cmapin, vmin, vmax, pivot, N=None, dmax=None):
'''Crop end or ends of a diverging colormap by vmin/vmax values.
:param cmap: A colormap object, like cmocean.cm.matter.
:param vmin/vmax: vmin/vmax for use in plot with colormap.
:param pivot: center point to be used in plot with diverging colormap.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
:param dmax=None: dmax is the highest number to be included in a plot with
the colormap; values higher in magnitude than dmax are removed from both
ends of colormap. It should be less than abs(vmin) and abs(vmax), which
should be equal for this parameter to be used.
Outputs resultant colormap object.
This function can be used for sequential and other non-diverging colormaps
but it is easier to use that way through crop_by_percent().
This should be useful for plotting bathymetry and topography data with the
topo colormap when max bathymetry value is different from max topography.
Example usage:
# example for crop on min end of diverging colormap
vmin = -2; vmax = 5; pivot = 0
newcmap = crop(cmocean.cm.curl, vmin, vmax, pivot)
A = np.random.randint(vmin, vmax, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example for crop on max end of diverging colormap
vmin = -10; vmax = 8; pivot = 0
newcmap = crop(cmocean.cm.delta, vmin, vmax, pivot)
A = np.random.randint(vmin, vmax, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
'''
assert pivot >= vmin and pivot <= vmax
# dmax used if and only if ends are equal
if vmax-pivot == pivot-vmin:
assert dmax is not None
# allow user to input N, but otherwise use N for incoming colormap
if N is None:
N = cmapin.N
else:
N = N
# ratio of the colormap to remove
below = pivot - vmin # below pivot
above = vmax - pivot # above pivot
ranges = (above, below)
half_range = max(ranges)
full_range = half_range*2
reduced_range = min(ranges)
range_to_keep = half_range + reduced_range
ratio = (full_range-range_to_keep)/full_range
if below < above: # reducing colormap on side below pivot
# start colormap partway through
shortcmap = cmapin(np.linspace(0,1,N))[int(np.ceil(N*ratio)):]
elif above < below: # reducing colormap on side above pivot
# end colormap early
shortcmap = cmapin(np.linspace(0,1,N))[:-int(np.ceil(N*ratio))]
elif (below == above) and (dmax is not None): # equal
ratio = dmax/full_range
shortcmap = cmapin(np.linspace(0,1,N))[int(np.ceil(N*ratio)):-int(np.ceil(N*ratio))]
# interpolate to original number of rows in colormap
newrgb = np.zeros((N, 4))
shnum = shortcmap.shape[0]
for i in range(4): # loop through each column of cmap
newrgb[:,i] = np.interp(np.linspace(0,shnum,N), np.arange(0,shnum), shortcmap[:,i])
newcmap = cmap(newrgb)
return newcmap |
Crop end or ends of a colormap by per percent.
:param cmap: A colormap object, like cmocean.cm.matter.
:param per: Percent of colormap to remove. If which=='both', take this
percent off both ends of colormap. If which=='min' or which=='max',
take percent only off the specified end of colormap.
:param which='both': which end or ends of colormap to cut off. which='both'
removes from both ends, which='min' from bottom end, and which='max'
from top end.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
Outputs resultant colormap object.
This is a wrapper around crop() to make it easier to use for cropping
based on percent.
Examples:
# example with oxy map: cut off yellow part which is top 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 0; vmax = 8; pivot = 5
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example with oxy map: cut off red part which is bottom 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 2; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# crop both dark ends off colormap to reduce range
newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
plt.figure()
A = np.random.randint(-5, 5, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar() | def crop_by_percent(cmap, per, which='both', N=None):
'''Crop end or ends of a colormap by per percent.
:param cmap: A colormap object, like cmocean.cm.matter.
:param per: Percent of colormap to remove. If which=='both', take this
percent off both ends of colormap. If which=='min' or which=='max',
take percent only off the specified end of colormap.
:param which='both': which end or ends of colormap to cut off. which='both'
removes from both ends, which='min' from bottom end, and which='max'
from top end.
:param N=None: User can specify the number of rows for the outgoing colormap.
If unspecified, N from incoming colormap will be used and values will
be interpolated as needed to fill in rows.
Outputs resultant colormap object.
This is a wrapper around crop() to make it easier to use for cropping
based on percent.
Examples:
# example with oxy map: cut off yellow part which is top 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 0; vmax = 8; pivot = 5
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='max', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# example with oxy map: cut off red part which is bottom 20%
# compare with full colormap
vmin = 0; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
fig, axes = plt.subplots(1, 2)
mappable = axes[0].pcolormesh(A, vmin=vmin, vmax=vmax, cmap=cmocean.cm.oxy)
fig.colorbar(mappable, ax=axes[0])
vmin = 2; vmax = 10; pivot = 5
A = np.random.randint(vmin, vmax, (5,5))
newcmap = crop_by_percent(cmocean.cm.oxy, 20, which='min', N=None)
plt.figure()
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
# crop both dark ends off colormap to reduce range
newcmap = crop_by_percent(cmocean.cm.balance, 10, which='both', N=None)
plt.figure()
A = np.random.randint(-5, 5, (5,5))
plt.pcolormesh(A, vmin=vmin, vmax=vmax, cmap=newcmap)
plt.colorbar()
'''
if which == 'both': # take percent off both ends of cmap
vmin = -100; vmax = 100; pivot = 0
dmax = per
elif which == 'min': # take percent off bottom of cmap
vmax = 10; pivot = 5
vmin = (0 + per/100)*2*pivot
dmax = None
elif which == 'max': # take percent off top of cmap
vmin = 0; pivot = 5
vmax = (1 - per/100)*2*pivot
dmax = None
newcmap = crop(cmap, vmin, vmax, pivot, dmax=dmax, N=N)
return newcmap |
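The branches above only synthesize vmin/vmax/pivot (and dmax) values and hand them to crop(). A short, hedged sketch of that mapping for per = 20, calling the crop() defined earlier (cmocean ships it as cmocean.tools.crop):
import cmocean

per = 20

# which='max': remove the top 20% of the colormap
vmin, pivot = 0, 5
vmax = (1 - per / 100) * 2 * pivot       # 8.0 -> equivalent to crop(cmap, 0, 8, 5)

# which='min': remove the bottom 20% of the colormap
vmax_b, pivot_b = 10, 5
vmin_b = (0 + per / 100) * 2 * pivot_b   # 2.0 -> equivalent to crop(cmap, 2, 10, 5)

# which='both': symmetric limits plus dmax -> crop(cmap, -100, 100, 0, dmax=per)

newcmap = crop(cmocean.cm.oxy, vmin, vmax, pivot)   # same result as crop_by_percent(cmocean.cm.oxy, 20, which='max')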
Can we import colormaps and make basic plot.
| def test_cmap_import():
'''Can we import colormaps and make basic plot.
'''
from cmocean import cm
# Loop through all methods in cmocean.
for name, cmap in vars(cm).items():
# See if it is a colormap.
if isinstance(cmap, matplotlib.colors.LinearSegmentedColormap):
print(name)
x = np.linspace(0, 10)
X, _ = np.meshgrid(x, x)
plt.figure()
plt.pcolor(X, cmap=cmap)
plt.title(name)
plt.close(plt.gcf()) |
Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl. | def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
    s = xml.sax.saxutils.unescape(s, {'&quot;':'"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split() |
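normalize() depends on module-level tables normalize1/normalize2 and the flags nonorm and preserve_case (and, later, eff_ref_len) that are defined elsewhere in this BLEU script. A hedged sketch of how they typically look in the mteval-v11a-derived bleu.py scripts is shown below so the function can be read standalone; treat the exact regex patterns as representative rather than authoritative.
import re

nonorm = 0                 # set non-zero to skip normalization entirely
preserve_case = False
eff_ref_len = "shortest"   # consumed later by cook_test()

# language-independent substitutions (applied before entity unescaping)
normalize1 = [(re.compile(p), r) for (p, r) in [
    ('<skipped>', ''),     # strip "skipped" tags
    (r'-\n', ''),          # strip end-of-line hyphenation and join lines
    (r'\n', ' '),          # join lines
]]

# language-dependent substitutions (rough Western-language tokenization)
normalize2 = [(re.compile(p), r) for (p, r) in [
    (r'([\{-\~\[-\` -\&\(-\+\:-\@\/])', r' \1 '),  # split most punctuation
    (r'([^0-9])([\.,])', r'\1 \2 '),               # period/comma not preceded by a digit
    (r'([\.,])([^0-9])', r' \1 \2'),               # period/comma not followed by a digit
    (r'([0-9])(-)', r'\1 \2 '),                    # dash preceded by a digit
]]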
Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them. | def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
return ([len(ref) for ref in refs], maxcounts) |
Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it. | def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts)=item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens))/len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen-len(test)) < min_diff:
min_diff = abs(reflen-len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test)-k+1,0) for k in range(1,n+1)]
result['correct'] = [0]*n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result |
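A small worked example of how cook_refs() and cook_test() fit together; it assumes count_ngrams() (returning a dict keyed by n-gram tuples) and the eff_ref_len global are defined in this module, as in the surrounding script.
refs = ["the cat is on the mat", "there is a cat on the mat"]
hyp = "the cat sat on the mat"

cooked_refs = cook_refs(refs, n=4)            # ([6, 7], {('the',): 2, ('cat',): 1, ...})
cooked_hyp = cook_test(hyp, cooked_refs, n=4)
print(cooked_hyp["testlen"], cooked_hyp["reflen"])  # 6 and 6 (with eff_ref_len == "shortest")
print(cooked_hyp["guess"])                    # candidate n-grams per order: [6, 5, 4, 3]
print(cooked_hyp["correct"])                  # clipped n-gram matches per order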
Read examples from filename. | def read_examples(filename):
"""Read examples from filename."""
examples=[]
with open(filename,encoding="utf-8") as f:
for idx, line in enumerate(f):
line=line.strip()
js=json.loads(line)
if 'idx' not in js:
js['idx']=idx
code=' '.join(js['code_tokens']).replace('\n',' ')
code=' '.join(code.strip().split())
nl=' '.join(js['docstring_tokens']).replace('\n','')
nl=' '.join(nl.strip().split())
examples.append(
Example(
idx = idx,
source=code,
target = nl,
)
)
return examples |
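read_examples() expects one JSON object per line with code_tokens and docstring_tokens fields (CodeSearchNet-style); the Example class it builds is defined elsewhere in this script. A hedged sketch of a minimal input line and the resulting fields (the file name is just an example):
import json

sample = {
    "idx": 0,
    "code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
    "docstring_tokens": ["Add", "two", "numbers", "."],
}
with open("train.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(sample) + "\n")

examples = read_examples("train.jsonl")
print(examples[0].source)  # "def add ( a , b ) : return a + b"
print(examples[0].target)  # "Add two numbers ."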
set random seed. | def set_seed(args):
"""set random seed."""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed) |
Train the model | def train(args, train_dataset, model, tokenizer, optimizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
scheduler = get_linear_schedule_with_warmup(optimizer, args.warmup_steps, t_total)
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last))
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = args.start_step
tr_loss, logging_loss = 0.0, 0.0
best_acc = 0.0
model.zero_grad()
train_iterator = trange(args.start_epoch, int(args.num_train_epochs), desc="Epoch",
disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
model.train()
for idx, _ in enumerate(train_iterator):
tr_loss = 0.0
for step, batch in enumerate(train_dataloader):
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, checkpoint=str(global_step))
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
logger.info('loss %s', str(tr_loss - logging_loss))
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.max_steps > 0 and global_step > args.max_steps:
# epoch_iterator.close()
break
if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
results = evaluate(args, model, tokenizer, checkpoint=str(args.start_epoch + idx))
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(last_output_dir)
logger.info("Saving model checkpoint to %s", last_output_dir)
idx_file = os.path.join(last_output_dir, 'idx_file.txt')
with open(idx_file, 'w', encoding='utf-8') as idxf:
idxf.write(str(args.start_epoch + idx) + '\n')
torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", last_output_dir)
step_file = os.path.join(last_output_dir, 'step_file.txt')
with open(step_file, 'w', encoding='utf-8') as stepf:
stepf.write(str(global_step) + '\n')
if (results['acc'] > best_acc):
best_acc = results['acc']
output_dir = os.path.join(args.output_dir, 'checkpoint-best')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,
'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_{}.bin'.format(idx)))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step |
Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) | def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)[:50]
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features |
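A toy illustration of the single-sequence layout and right-padding the loop above produces (BERT-style: CLS in front, padding on the right); the token ids are made up for illustration, except that 101/102 mimic the conventional [CLS]/[SEP] ids.
max_seq_length = 8
tokens      = ['[CLS]', 'the', 'dog', 'is', 'hairy', '.', '[SEP]']
segment_ids = [0, 0, 0, 0, 0, 0, 0]
input_ids   = [101, 11, 12, 13, 14, 15, 102]   # made-up ids for illustration
input_mask  = [1] * len(input_ids)             # 1 = real token, 0 = padding

padding_length = max_seq_length - len(input_ids)   # 1 position left to fill
input_ids   += [0] * padding_length                # pad_token = 0
input_mask  += [0] * padding_length                # padded positions are not attended to
segment_ids += [0] * padding_length                # pad_token_segment_id = 0

assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length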
Truncates a sequence pair in place to the maximum length. | def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() |
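A quick worked example of the longer-first heuristic: tokens are popped from whichever side is currently longer, so the pair ends up roughly balanced.
a = ['q1', 'q2', 'q3', 'q4', 'q5', 'q6']
b = ['d1', 'd2', 'd3']
_truncate_seq_pair(a, b, max_length=6)
print(a, b)   # ['q1', 'q2', 'q3'] ['d1', 'd2', 'd3'] -> lengths 3 + 3 = 6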
convert examples to token ids | def convert_examples_to_features(js,tokenizer,args,lang):
"""convert examples to token ids"""
code = " ".join(remove_comments_and_docstrings(js['original_code'],lang).split())
code_tokens = tokenizer.tokenize(code)[:args.code_length-4]
code_tokens =[tokenizer.cls_token,"<encoder-only>",tokenizer.sep_token]+code_tokens+[tokenizer.sep_token]
code_ids = tokenizer.convert_tokens_to_ids(code_tokens)
padding_length = args.code_length - len(code_ids)
code_ids += [tokenizer.pad_token_id]*padding_length
return InputFeatures(code_tokens,code_ids,js["code_id"],int(js['problem_id'])) |
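The layout this produces is cls_token, a literal "<encoder-only>" marker, sep_token, the truncated code tokens, and a closing sep_token, all right-padded to args.code_length. A hedged sketch of the length bookkeeping, assuming RoBERTa-style special tokens (<s>, </s>) and pad id 1; the real values come from the tokenizer.
code_length = 16                              # stands in for args.code_length
code_tokens = ['def', 'add', '(', 'a', ',', 'b', ')', ':'][:code_length - 4]
tokens = ['<s>', '<encoder-only>', '</s>'] + code_tokens + ['</s>']
ids = list(range(len(tokens)))                # made-up ids for illustration
ids += [1] * (code_length - len(ids))         # pad_token_id (commonly 1 for RoBERTa vocabularies)
assert len(ids) == code_length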
Truncates a sequence pair in place to the maximum length. | def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop() |
Truncates a sequence pair in place to the maximum length. | def _truncate_seq_pair_two_length(tokens_a, tokens_b, max_length_a, max_length_b):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length_a + max_length_b:
break
if len(tokens_b) > max_length_b:
tokens_b.pop()
else: # len(tokens_a) > max_length_a
tokens_a.pop() |
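Unlike the equal-length heuristic above, this variant trims each side toward its own budget. A quick worked example with max_length_a=4 and max_length_b=2:
a = ['a1', 'a2', 'a3', 'a4', 'a5']
b = ['b1', 'b2', 'b3', 'b4']
_truncate_seq_pair_two_length(a, b, max_length_a=4, max_length_b=2)
print(a, b)   # ['a1', 'a2', 'a3', 'a4'] ['b1', 'b2']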
Train the model | def train(args, train_datasets, eval_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_samplers = [RandomSampler(train_dataset) for train_dataset in train_datasets]
train_dataloaders = [cycle(DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size,drop_last = True,num_workers = 0)) for train_dataset,train_sampler in zip(train_datasets,train_samplers)]
model.to(args.device)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr = args.learning_rate, eps = args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = args.warmup_steps,
num_training_steps = args.max_steps)
    # actually use the reinitialized scheduler and optimizer
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
if os.path.exists(scheduler_last):
scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
if os.path.exists(optimizer_last):
optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
if args.local_rank == 0:
torch.distributed.barrier()
# using fp16 to accelerate training
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.local_rank%args.gpu_per_node],
output_device = args.local_rank%args.gpu_per_node,
find_unused_parameters = True)
# Train!
logger.warning("***** Running training *****")
logger.warning(" Num examples = %d",sum([len(train_dataset) for train_dataset in train_datasets]) * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.warning(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.warning(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.warning(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.warning(" Total optimization steps = %d", args.max_steps)
global_step = args.start_step
losses, contras_losses, align_losses, dual_losses, step = [], [], [], [], 0
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
probs = [0.34,0.33,0.33]
while True:
train_dataloader = np.random.choice(train_dataloaders, 1, p=probs)[0]
batch = next(train_dataloader)
model.train()
step+=1
# forward
dual_gen_ids, dual_gen_type_ids =[x.to(args.device) for x in batch]
loss, dual_loss, align_loss, contras_loss = model(dual_gen_ids, dual_gen_type_ids)
# store loss
losses.append(loss.item())
if contras_loss != 0:
contras_losses.append(contras_loss)
if align_loss != 0:
align_losses.append(align_loss)
if dual_loss != 0:
dual_losses.append(dual_loss)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
# backward
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# update model
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if global_step %100 == 0:
logger.warning("steps: %s dual: %s", global_step,
round(np.mean(dual_losses),3),
)
losses, contras_losses, align_losses, dual_losses = [], [], [], []
# evaluate model and save model
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = 'checkpoint'
results = evaluate(args, model, tokenizer,eval_dataset)
for key, value in results.items():
logger.warning(" %s = %s", key, round(value,6))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix,
global_step,
round(results['loss'], 6)))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module.encoder if hasattr(model,'module') else model.encoder
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.warning("Saving model checkpoint to %s", output_dir)
last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
if not os.path.exists(last_output_dir):
os.makedirs(last_output_dir)
model_to_save.save_pretrained(last_output_dir)
tokenizer.save_pretrained(last_output_dir)
idx_file = os.path.join(last_output_dir, 'idx_file.txt')
with open(idx_file, 'w', encoding='utf-8') as idxf:
idxf.write(str(0) + '\n')
torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
logger.warning("Saving optimizer and scheduler states to %s", last_output_dir)
step_file = os.path.join(last_output_dir, 'step_file.txt')
with open(step_file, 'w', encoding='utf-8') as stepf:
stepf.write(str(global_step) + '\n')
if args.max_steps > 0 and global_step > args.max_steps:
break |
Evaluate the model | def evaluate(args, model, tokenizer, eval_dataset,prefix=""):
""" Evaluate the model """
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_dataloader = DataLoader(eval_dataset,
sampler = SequentialSampler(eval_dataset),
batch_size = args.eval_batch_size,
num_workers = 4,
drop_last = True)
# Eval!
logger.warning("***** Running evaluation *****")
logger.warning(" Num examples = %d", len(eval_dataset))
logger.warning(" Batch size = %d", args.eval_batch_size)
model.eval()
losses, contras_losses, align_losses, dual_losses = [], [], [], []
for batch in eval_dataloader:
dual_gen_ids, dual_gen_type_ids =[x.to(args.device) for x in batch]
with torch.no_grad():
loss, dual_loss, align_loss, contras_loss = model(dual_gen_ids, dual_gen_type_ids)
losses.append(loss.item())
if contras_loss != 0:
contras_losses.append(contras_loss)
if align_loss != 0:
align_losses.append(align_loss)
if dual_loss != 0:
dual_losses.append(dual_loss)
result = {
"loss": round(np.mean(losses),4),
"dual": round(np.mean(dual_losses),4),
}
return result |
set random seed. | def set_seed(args):
"""set random seed."""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed) |
Read examples from filename. | def read_review_examples(filename, data_num=-1, tokenizer=None):
"""Read examples from filename."""
examples = []
idx = 0
with open(filename) as f:
for line in f:
try:
js = json.loads(line.strip())
except:
print("Error during reading json data.")
continue
maxl = 200
if "y" not in js:
js["y"] = 0
if "msg" in js and len(js["msg"]) > 0:
js["y"] = 1
example = ReviewExample(
idx=idx,
oldf=js["oldf"],
diff=js["patch"],
msg=js["msg"] if "msg" in js else "",
cmtid=js["cmtid"] if "cmtid" in js else "",
max_len=maxl,
y=js["y"]
)
if example.avail:
examples.append(example)
idx += 1
if idx == data_num:
break
else:
# print(f"Passing {idx} because of invalid diff.")
idx += 1
if idx == data_num:
break
return examples |
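A hedged sketch of one input line for read_review_examples(); the field names match the keys read above, the contents are made up, and whether an example is kept depends on ReviewExample's own diff validation (not shown here).
import json

line = json.dumps({
    "oldf": "def add(a, b):\n    return a + b\n",
    "patch": "@@ -1,2 +1,2 @@\n-def add(a, b):\n+def add(a, b=0):\n     return a + b\n",
    "msg": "Default the second argument to 0?",
    "cmtid": "abc123",
})
with open("review.jsonl", "w", encoding="utf-8") as f:
    f.write(line + "\n")

examples = read_review_examples("review.jsonl", data_num=-1)
# y is set to 1 for this line because a non-empty "msg" is present.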
Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred. | def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts |
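A quick worked example of the counter this returns (dictionary order may differ):
print(_get_ngrams("the cat the cat".split(), 2))
# -> Counter({('the',): 2, ('cat',): 2, ('the', 'cat'): 2, ('cat', 'the'): 1})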
Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty. | def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus,
translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length) |
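Minimal usage example on a toy corpus; reference_corpus is a list (one entry per translation) of lists of tokenized references.
references = [["the cat is on the mat".split()]]
translations = ["the cat sat on the mat".split()]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(
    references, translations, max_order=4, smooth=True)
print(round(bleu, 4), bp, trans_len, ref_len)   # brevity penalty is 1.0 here (equal lengths)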
Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl. | def normalize(s):
'''Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'''
# Added to bypass NIST-style pre-processing of hyp and ref files -- wade
if (nonorm):
return s.split()
if type(s) is not str:
s = " ".join(s)
# language-independent part:
for (pattern, replace) in normalize1:
s = re.sub(pattern, replace, s)
    s = xml.sax.saxutils.unescape(s, {'&quot;': '"'})
# language-dependent part (assuming Western languages):
s = " %s " % s
if not preserve_case:
s = s.lower() # this might not be identical to the original
for (pattern, replace) in normalize2:
s = re.sub(pattern, replace, s)
return s.split() |
Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them. | def cook_refs(refs, n=4):
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
refs = [normalize(ref) for ref in refs]
maxcounts = {}
for ref in refs:
counts = count_ngrams(ref, n)
for (ngram, count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
return ([len(ref) for ref in refs], maxcounts) |
Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it. | def cook_test(test, item, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflens, refmaxcounts) = item
test = normalize(test)
result = {}
result["testlen"] = len(test)
# Calculate effective reference sentence length.
if eff_ref_len == "shortest":
result["reflen"] = min(reflens)
elif eff_ref_len == "average":
result["reflen"] = float(sum(reflens)) / len(reflens)
elif eff_ref_len == "closest":
min_diff = None
for reflen in reflens:
if min_diff is None or abs(reflen - len(test)) < min_diff:
min_diff = abs(reflen - len(test))
result['reflen'] = reflen
result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)]
result['correct'] = [0] * n
counts = count_ngrams(test, n)
for (ngram, count) in counts.items():
result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
return result |
Calculate BLEU score (Bilingual Evaluation Understudy) from
Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
"BLEU: a method for automatic evaluation of machine translation."
In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
0.5045...
If there is no ngrams overlap for any order of n-grams, BLEU returns the
value 0. This is because the precision for the order of n-grams without
overlap is 0, and the geometric mean in the final BLEU score computation
multiplies the 0 with the precision of other n-grams. This results in 0
(independently of the precision of the other n-gram orders). The following
example has zero 3-gram and 4-gram overlaps:
>>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
0.0
To avoid this harsh behaviour when no ngram overlaps are found a smoothing
function can be used.
>>> chencherry = SmoothingFunction()
>>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
0.0370...
The default BLEU calculates a score for up to 4-grams using uniform
weights (this is called BLEU-4). To evaluate your translations with
higher/lower order ngrams, use customized weights. E.g. when accounting
for up to 5-grams with uniform weights (this is called BLEU-5) use:
>>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
0.3920...
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The sentence-level BLEU score.
:rtype: float | def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate BLEU score (Bilingual Evaluation Understudy) from
Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
"BLEU: a method for automatic evaluation of machine translation."
In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
0.5045...
If there is no ngrams overlap for any order of n-grams, BLEU returns the
value 0. This is because the precision for the order of n-grams without
overlap is 0, and the geometric mean in the final BLEU score computation
multiplies the 0 with the precision of other n-grams. This results in 0
    (independently of the precision of the other n-gram orders). The following
example has zero 3-gram and 4-gram overlaps:
>>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
0.0
To avoid this harsh behaviour when no ngram overlaps are found a smoothing
function can be used.
>>> chencherry = SmoothingFunction()
>>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
0.0370...
The default BLEU calculates a score for up to 4-grams using uniform
weights (this is called BLEU-4). To evaluate your translations with
higher/lower order ngrams, use customized weights. E.g. when accounting
for up to 5-grams with uniform weights (this is called BLEU-5) use:
>>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
0.3920...
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The sentence-level BLEU score.
:rtype: float
"""
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh
) |
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence level BLEU scores (i.e. macro-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below shows that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float | def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
    Instead of averaging the sentence level BLEU scores (i.e. macro-average
    precision), the original BLEU metric (Papineni et al. 2002) accounts for
    the micro-average precision (i.e. summing the numerators and denominators
    for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
    The example below shows that corpus_bleu() is different from averaging
sentence_bleu() for hypotheses
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i = modified_precision(references, hypothesis, i)
p_numerators[i] += p_i.numerator
p_denominators[i] += p_i.denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various precision values for the different ngram orders.
p_n = [
Fraction(p_numerators[i], p_denominators[i], _normalize=False)
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there's no matching n-grams
# We only need to check for p_numerators[1] == 0, since if there's
# no unigrams, there won't be any higher order ngrams.
if p_numerators[1] == 0:
return 0
    # If no smoothing function is given, fall back to SmoothingFunction().method1.
if not smoothing_function:
smoothing_function = SmoothingFunction().method1
# Smoothen the modified precision.
# Note: smoothing_function() may convert values into floats;
# it tries to retain the Fraction object as much as the
# smoothing method allows.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s |
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction | def modified_precision(references, hypothesis, n):
"""
Calculate modified ngram precision.
The normal precision method may lead to some wrong translations with
high-precision, e.g., the translation, in which a word of reference
repeats several times, has very high precision.
This function only returns the Fraction object that contains the numerator
and denominator necessary to calculate the corpus-level precision.
To calculate the modified precision for a single pair of hypothesis and
references, cast the Fraction object into a float.
The famous "the the the ... " example shows that you can get BLEU precision
by duplicating high frequency words.
>>> reference1 = 'the cat is on the mat'.split()
>>> reference2 = 'there is a cat on the mat'.split()
>>> hypothesis1 = 'the the the the the the the'.split()
>>> references = [reference1, reference2]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.2857...
In the modified n-gram precision, a reference word will be considered
exhausted after a matching hypothesis word is identified, e.g.
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hypothesis = 'of the'.split()
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis, n=1))
1.0
>>> float(modified_precision(references, hypothesis, n=2))
1.0
An example of a normal machine translation hypothesis:
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will',
... 'forever', 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> references = [reference1, reference2, reference3]
>>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
0.9444...
>>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
0.5714...
>>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
0.5882352941176471
>>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
0.07692...
:param references: A list of reference translations.
:type references: list(list(str))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: BLEU's modified precision for the nth order ngram.
:rtype: Fraction
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Extract a union of references' counts.
# max_counts = reduce(or_, [Counter(ngrams(ref, n)) for ref in references])
max_counts = {}
for reference in references:
reference_counts = (
Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
)
for ngram in counts:
max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
# Assigns the intersection between hypothesis and references' counts.
clipped_counts = {
ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()
}
numerator = sum(clipped_counts.values())
# Ensures that denominator is minimum 1 to avoid ZeroDivisionError.
# Usually this happens when the ngram order is > len(reference).
denominator = max(1, sum(counts.values()))
return Fraction(numerator, denominator, _normalize=False) |
This function finds the reference that is the closest length to the
hypothesis. The closest reference length is referred to as the *r* variable
from the brevity penalty formula in Papineni et al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int | def closest_ref_length(references, hyp_len):
"""
This function finds the reference that is the closest length to the
    hypothesis. The closest reference length is referred to as the *r* variable
    from the brevity penalty formula in Papineni et al. (2002)
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len |
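Quick example: ties are broken toward the shorter reference because the sort key compares (distance, length).
refs = [['a'] * 13, ['a'] * 11]
print(closest_ref_length(refs, hyp_len=12))   # 11 (both are one away; the shorter one wins)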
Calculate brevity penalty.
As the modified n-gram precision still has the problem from the short
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
An example from the paper. There are three references with length 12, 15
and 17. And a concise hypothesis of the length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less than the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest reference lengths for every hypothesis.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float | def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
Since the modified n-gram precision alone still favours overly short
hypotheses, a brevity penalty is used to adjust the overall BLEU
score according to length.
An example from the paper: there are three references with lengths 12, 15
and 17, and a concise hypothesis of length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less than the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest reference lengths for every hypothesis.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len) |
Returns a padded sequence of items before ngram extraction.
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
['<s>', 1, 2, 3, 4, 5, '</s>']
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
['<s>', 1, 2, 3, 4, 5]
>>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[1, 2, 3, 4, 5, '</s>']
:param sequence: the source data to be padded
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter | def pad_sequence(
sequence,
n,
pad_left=False,
pad_right=False,
left_pad_symbol=None,
right_pad_symbol=None,
):
"""
Returns a padded sequence of items before ngram extraction.
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
['<s>', 1, 2, 3, 4, 5, '</s>']
>>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
['<s>', 1, 2, 3, 4, 5]
>>> list(pad_sequence([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[1, 2, 3, 4, 5, '</s>']
:param sequence: the source data to be padded
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = iter(sequence)
if pad_left:
sequence = chain((left_pad_symbol,) * (n - 1), sequence)
if pad_right:
sequence = chain(sequence, (right_pad_symbol,) * (n - 1))
return sequence |
Return the ngrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import ngrams
>>> list(ngrams([1,2,3,4,5], 3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
Wrap with list for a list version of this function. Set pad_left
or pad_right to true in order to get additional ngrams:
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
:param sequence: the source data to be converted into ngrams
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter | def ngrams(
sequence,
n,
pad_left=False,
pad_right=False,
left_pad_symbol=None,
right_pad_symbol=None,
):
"""
Return the ngrams generated from a sequence of items, as an iterator.
For example:
>>> from nltk.util import ngrams
>>> list(ngrams([1,2,3,4,5], 3))
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
Wrap with list for a list version of this function. Set pad_left
or pad_right to true in order to get additional ngrams:
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, None)]
>>> list(ngrams([1,2,3,4,5], 2, pad_right=True, right_pad_symbol='</s>'))
[(1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, left_pad_symbol='<s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 2, pad_left=True, pad_right=True, left_pad_symbol='<s>', right_pad_symbol='</s>'))
[('<s>', 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, '</s>')]
:param sequence: the source data to be converted into ngrams
:type sequence: sequence or iter
:param n: the degree of the ngrams
:type n: int
:param pad_left: whether the ngrams should be left-padded
:type pad_left: bool
:param pad_right: whether the ngrams should be right-padded
:type pad_right: bool
:param left_pad_symbol: the symbol to use for left padding (default is None)
:type left_pad_symbol: any
:param right_pad_symbol: the symbol to use for right padding (default is None)
:type right_pad_symbol: any
:rtype: sequence or iter
"""
sequence = pad_sequence(
sequence, n, pad_left, pad_right, left_pad_symbol, right_pad_symbol
)
history = []
while n > 1:
# PEP 479, prevent RuntimeError from being raised when StopIteration bubbles out of generator
try:
next_item = next(sequence)
except StopIteration:
# no more data, terminate the generator
return
history.append(next_item)
n -= 1
for item in sequence:
history.append(item)
yield tuple(history)
del history[0] |
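A small sketch of the edge case that the StopIteration handling above guards against (assuming ngrams from this module is in scope): a sequence shorter than n yields no n-grams at all unless padding is requested.

print(list(ngrams(['only', 'two'], 3)))                  # []
print(list(ngrams(['only', 'two'], 3, pad_right=True)))  # [('only', 'two', None), ('two', None, None)]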
Calculate BLEU score (Bilingual Evaluation Understudy) from
Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
"BLEU: a method for automatic evaluation of machine translation."
In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
0.5045...
If there is no ngrams overlap for any order of n-grams, BLEU returns the
value 0. This is because the precision for the order of n-grams without
overlap is 0, and the geometric mean in the final BLEU score computation
multiplies the 0 with the precision of other n-grams. This results in 0
(independently of the precision of the other n-gram orders). The following
example has zero 3-gram and 4-gram overlaps:
>>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
0.0
To avoid this harsh behaviour when no ngram overlaps are found a smoothing
function can be used.
>>> chencherry = SmoothingFunction()
>>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
0.0370...
The default BLEU calculates a score for up to 4-grams using uniform
weights (this is called BLEU-4). To evaluate your translations with
higher/lower order ngrams, use customized weights. E.g. when accounting
for up to 5-grams with uniform weights (this is called BLEU-5) use:
>>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
0.3920...
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The sentence-level BLEU score.
:rtype: float | def sentence_bleu(
references,
hypothesis,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate BLEU score (Bilingual Evaluation Understudy) from
Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
"BLEU: a method for automatic evaluation of machine translation."
In Proceedings of ACL. http://www.aclweb.org/anthology/P02-1040.pdf
>>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
... 'forever', 'hearing', 'the', 'activity', 'guidebook',
... 'that', 'party', 'direct']
>>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the',
... 'Party']
>>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
0.5045...
If there is no ngrams overlap for any order of n-grams, BLEU returns the
value 0. This is because the precision for the order of n-grams without
overlap is 0, and the geometric mean in the final BLEU score computation
multiplies the 0 with the precision of other n-grams. This results in 0
(independently of the precision of the other n-gram orders). The following
example has zero 3-gram and 4-gram overlaps:
>>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
0.0
To avoid this harsh behaviour when no ngram overlaps are found a smoothing
function can be used.
>>> chencherry = SmoothingFunction()
>>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
... smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
0.0370...
The default BLEU calculates a score for up to 4-grams using uniform
weights (this is called BLEU-4). To evaluate your translations with
higher/lower order ngrams, use customized weights. E.g. when accounting
for up to 5-grams with uniform weights (this is called BLEU-5) use:
>>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
>>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
0.3920...
:param references: reference sentences
:type references: list(list(str))
:param hypothesis: a hypothesis sentence
:type hypothesis: list(str)
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The sentence-level BLEU score.
:rtype: float
"""
return corpus_bleu(
[references], [hypothesis], weights, smoothing_function, auto_reweigh
) |
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence-level BLEU scores (i.e. macro-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below shows that corpus_bleu() is different from averaging
sentence_bleu() over the hypotheses:
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float | def corpus_bleu(
list_of_references,
hypotheses,
weights=(0.25, 0.25, 0.25, 0.25),
smoothing_function=None,
auto_reweigh=False,
):
"""
Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
the hypotheses and their respective references.
Instead of averaging the sentence-level BLEU scores (i.e. macro-average
precision), the original BLEU metric (Papineni et al. 2002) accounts for
the micro-average precision (i.e. summing the numerators and denominators
for each hypothesis-reference(s) pair before the division).
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'military', 'always',
... 'obeys', 'the', 'commands', 'of', 'the', 'party']
>>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'military', 'will', 'forever',
... 'heed', 'Party', 'commands']
>>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'military', 'forces', 'always',
... 'being', 'under', 'the', 'command', 'of', 'the', 'Party']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'army', 'always', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'party']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
0.5920...
The example below shows that corpus_bleu() is different from averaging
sentence_bleu() over the hypotheses:
>>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
>>> score2 = sentence_bleu([ref2a], hyp2)
>>> (score1 + score2) / 2 # doctest: +ELLIPSIS
0.6223...
:param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
:type list_of_references: list(list(list(str)))
:param hypotheses: a list of hypothesis sentences
:type hypotheses: list(list(str))
:param weights: weights for unigrams, bigrams, trigrams and so on
:type weights: list(float)
:param smoothing_function:
:type smoothing_function: SmoothingFunction
:param auto_reweigh: Option to re-normalize the weights uniformly.
:type auto_reweigh: bool
:return: The corpus-level BLEU score.
:rtype: float
"""
# Before proceeding to compute BLEU, perform sanity checks.
p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.
p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.
hyp_lengths, ref_lengths = 0, 0
assert len(list_of_references) == len(hypotheses), (
"The number of hypotheses and their reference(s) should be the " "same "
)
# Iterate through each hypothesis and their corresponding references.
for references, hypothesis in zip(list_of_references, hypotheses):
# For each order of ngram, calculate the numerator and
# denominator for the corpus-level modified precision.
for i, _ in enumerate(weights, start=1):
p_i_numerator, p_i_denominator = modified_recall(references, hypothesis, i)
p_numerators[i] += p_i_numerator
p_denominators[i] += p_i_denominator
# Calculate the hypothesis length and the closest reference length.
# Adds them to the corpus-level hypothesis and reference counts.
hyp_len = len(hypothesis)
hyp_lengths += hyp_len
ref_lengths += closest_ref_length(references, hyp_len)
# Calculate corpus-level brevity penalty.
bp = brevity_penalty(ref_lengths, hyp_lengths)
# Uniformly re-weighting based on maximum hypothesis lengths if largest
# order of n-grams < 4 and weights is set at default.
if auto_reweigh:
if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):
weights = (1 / hyp_lengths,) * hyp_lengths
# Collects the various recall values for the different ngram orders.
p_n = [
(p_numerators[i], p_denominators[i])
for i, _ in enumerate(weights, start=1)
]
# Returns 0 if there are no matching n-grams.
# We only need to check p_numerators[1] == 0, since if there are no
# matching unigrams there cannot be any higher-order n-gram matches.
if p_numerators[1] == 0:
return 0
# If no smoothing function is given, use method1 from the SmoothingFunction class.
if not smoothing_function:
smoothing_function = SmoothingFunction().method1
# Smooth the modified precision values.
# Note: the smoothing function operates on the (numerator, denominator)
# pairs collected above.
p_n = smoothing_function(
p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
)
s = (w_i * math.log(p_i[0]/p_i[1]) for w_i, p_i in zip(weights, p_n))
s = bp * math.exp(math.fsum(s))
return s |
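Unlike the doctests above, which follow the original NLTK call signature, this weighted variant expects each reference to be a (token list, token-to-weight dict) pair, because modified_recall below unpacks it that way. A hedged usage sketch follows; the tokens and the keyword-boosting weights are illustrative assumptions, and the default smoothing relies on the SmoothingFunction class referenced above being defined elsewhere in this module.

hypothesis = ['int', 'x', '=', '0', ';']
reference = ['int', 'x', '=', '1', ';']
# Assumed weighting scheme: boost the keyword 'int'. Every distinct reference
# token needs a weight so that the n == 1 reweighting branch is taken.
token_weights = {tok: (4.0 if tok == 'int' else 1.0) for tok in set(reference)}
list_of_references = [[(reference, token_weights)]]  # one hypothesis, one weighted reference
hypotheses = [hypothesis]
print(corpus_bleu(list_of_references, hypotheses))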
Calculate modified ngram recall.
:param references: A list of (reference translation, token-weight dict) pairs.
:type references: list(tuple(list(str), dict(str: float)))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: The numerator and denominator of the modified recall for the nth order ngram.
:rtype: Fraction | def modified_recall(references, hypothesis, n):
"""
Calculate modified ngram recall.
:param references: A list of (reference translation, token-weight dict) pairs.
:type references: list(tuple(list(str), dict(str: float)))
:param hypothesis: A hypothesis translation.
:type hypothesis: list(str)
:param n: The ngram order.
:type n: int
:return: The numerator and denominator of the modified recall for the nth order ngram.
:rtype: tuple
"""
# Extracts all ngrams in hypothesis
# Set an empty Counter if hypothesis is empty.
numerator = 0
denominator = 0
counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
# Each reference is paired with a dict mapping its tokens to weights.
for reference_and_weights in references:
reference = reference_and_weights[0]
weights = reference_and_weights[1]
reference_counts = (
Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
)
clipped_counts = {
ngram: min(count, counts[ngram]) for ngram, count in reference_counts.items()
}
# Reweight unigram counts with the per-token weights when the weights dict covers every distinct reference unigram.
if n == 1 and len(weights) == len(reference_counts):
def weighted_sum(weights, counts):
sum_counts = 0
for ngram, count in counts.items():
sum_counts += count * (weights[ngram[0]] if ngram[0] in weights else 1)
return sum_counts
numerator += weighted_sum(weights, clipped_counts)
denominator += max(1, weighted_sum(weights, reference_counts))
else:
numerator += sum(clipped_counts.values())
denominator += max(1, sum(reference_counts.values()))
return numerator, denominator |
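A minimal worked example of the weighted unigram branch above (illustrative tokens and weight values; modified_recall and its helpers from this module are assumed to be in scope):

reference = ['return', 'x', '+', '1']
hypothesis = ['return', 'x', '-', '1']
# One weight per distinct reference token, so the n == 1 branch applies.
weights = {'return': 5.0, 'x': 1.0, '+': 1.0, '1': 1.0}
num, den = modified_recall([(reference, weights)], hypothesis, n=1)
# Matched: 'return' (5.0), 'x' (1.0), '1' (1.0) -> numerator 7.0; the fully
# weighted reference gives 5.0 + 1.0 + 1.0 + 1.0 -> denominator 8.0.
print(num / den)  # 0.875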
This function finds the reference that is closest in length to the
hypothesis. The closest reference length is referred to as the *r* variable
from the brevity penalty formula in Papineni et al. (2002).
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int | def closest_ref_length(references, hyp_len):
"""
This function finds the reference that is closest in length to the
hypothesis. The closest reference length is referred to as the *r* variable
from the brevity penalty formula in Papineni et al. (2002).
:param references: A list of reference translations.
:type references: list(list(str))
:param hyp_len: The length of the hypothesis.
:type hyp_len: int
:return: The length of the reference that's closest to the hypothesis.
:rtype: int
"""
ref_lens = (len(reference) for reference in references)
closest_ref_len = min(
ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
)
return closest_ref_len |
Calculate brevity penalty.
Since the modified n-gram precision alone still favours overly short
hypotheses, a brevity penalty is used to adjust the overall BLEU
score according to length.
An example from the paper: there are three references with lengths 12, 15
and 17, and a concise hypothesis of length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less than the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest reference lengths for every hypothesis.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float | def brevity_penalty(closest_ref_len, hyp_len):
"""
Calculate brevity penalty.
Since the modified n-gram precision alone still favours overly short
hypotheses, a brevity penalty is used to adjust the overall BLEU
score according to length.
An example from the paper: there are three references with lengths 12, 15
and 17, and a concise hypothesis of length 12. The brevity penalty is 1.
>>> reference1 = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> reference2 = list('aaaaaaaaaaaaaaa') # i.e. ['a'] * 15
>>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
>>> hypothesis = list('aaaaaaaaaaaa') # i.e. ['a'] * 12
>>> references = [reference1, reference2, reference3]
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
In case a hypothesis translation is shorter than the references, penalty is
applied.
>>> references = [['a'] * 28, ['a'] * 28]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
0.2635971381157267
The length of the closest reference is used to compute the penalty. If the
length of a hypothesis is 12, and the reference lengths are 13 and 2, the
penalty is applied because the hypothesis length (12) is less than the
closest reference length (13).
>>> references = [['a'] * 13, ['a'] * 2]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.9200...
The brevity penalty doesn't depend on reference order. More importantly,
when two reference sentences are at the same distance, the shortest
reference sentence length is used.
>>> references = [['a'] * 13, ['a'] * 11]
>>> hypothesis = ['a'] * 12
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(reversed(references), hyp_len)
>>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
>>> bp1 == bp2 == 1
True
A test example from mteval-v13a.pl (starting from the line 705):
>>> references = [['a'] * 11, ['a'] * 8]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
0.8668...
>>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
>>> hypothesis = ['a'] * 7
>>> hyp_len = len(hypothesis)
>>> closest_ref_len = closest_ref_length(references, hyp_len)
>>> brevity_penalty(closest_ref_len, hyp_len)
1.0
:param hyp_len: The length of the hypothesis for a single sentence OR the
sum of all the hypotheses' lengths for a corpus
:type hyp_len: int
:param closest_ref_len: The length of the closest reference for a single
hypothesis OR the sum of all the closest reference lengths for every hypothesis.
:type closest_ref_len: int
:return: BLEU's brevity penalty.
:rtype: float
"""
if hyp_len > closest_ref_len:
return 1
# If hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
elif hyp_len == 0:
return 0
else:
return math.exp(1 - closest_ref_len / hyp_len) |