repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py) |
---|---|---|---|---|---|---|
LowFat | LowFat-master/llvm-4.0.0.src/utils/prepare-code-coverage-artifact.py | #!/usr/bin/env python
from __future__ import print_function
'''Prepare a code coverage artifact.
- Collate raw profiles into one indexed profile.
- Generate html reports for the given binaries.
'''
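# Example invocation (an illustrative sketch; the tool and directory paths
# below are hypothetical placeholders):
#
#   prepare-code-coverage-artifact.py /path/to/llvm-profdata /path/to/llvm-cov \
#       ./profiles ./coverage-report ./bin/mytool --unified-report
#
# This merges ./profiles/*.profraw into ./profiles/Coverage.profdata and then
# writes an html report (plus summary.txt) for ./bin/mytool under
# ./coverage-report.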
import argparse
import glob
import os
import subprocess
import sys
def merge_raw_profiles(host_llvm_profdata, profile_data_dir, preserve_profiles):
print(':: Merging raw profiles...', end='')
sys.stdout.flush()
raw_profiles = glob.glob(os.path.join(profile_data_dir, '*.profraw'))
manifest_path = os.path.join(profile_data_dir, 'profiles.manifest')
profdata_path = os.path.join(profile_data_dir, 'Coverage.profdata')
with open(manifest_path, 'w') as manifest:
manifest.write('\n'.join(raw_profiles))
subprocess.check_call([host_llvm_profdata, 'merge', '-sparse', '-f',
manifest_path, '-o', profdata_path])
if not preserve_profiles:
for raw_profile in raw_profiles:
os.remove(raw_profile)
os.remove(manifest_path)
print('Done!')
return profdata_path
def prepare_html_report(host_llvm_cov, profile, report_dir, binaries,
restricted_dirs):
print(':: Preparing html report for {0}...'.format(binaries), end='')
sys.stdout.flush()
objects = []
for i, binary in enumerate(binaries):
if i == 0:
objects.append(binary)
else:
objects.extend(('-object', binary))
invocation = [host_llvm_cov, 'show'] + objects + ['-format', 'html',
'-instr-profile', profile, '-o', report_dir,
'-show-line-counts-or-regions', '-Xdemangler', 'c++filt',
'-Xdemangler', '-n'] + restricted_dirs
subprocess.check_call(invocation)
with open(os.path.join(report_dir, 'summary.txt'), 'wb') as Summary:
subprocess.check_call([host_llvm_cov, 'report'] + objects +
['-instr-profile', profile], stdout=Summary)
print('Done!')
def prepare_html_reports(host_llvm_cov, profdata_path, report_dir, binaries,
unified_report, restricted_dirs):
if unified_report:
prepare_html_report(host_llvm_cov, profdata_path, report_dir, binaries,
restricted_dirs)
else:
for binary in binaries:
binary_report_dir = os.path.join(report_dir,
os.path.basename(binary))
prepare_html_report(host_llvm_cov, profdata_path, binary_report_dir,
[binary], restricted_dirs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('host_llvm_profdata', help='Path to llvm-profdata')
parser.add_argument('host_llvm_cov', help='Path to llvm-cov')
parser.add_argument('profile_data_dir',
help='Path to the directory containing the raw profiles')
parser.add_argument('report_dir',
help='Path to the output directory for html reports')
parser.add_argument('binaries', metavar='B', type=str, nargs='*',
help='Path to an instrumented binary')
parser.add_argument('--only-merge', action='store_true',
help='Only merge raw profiles together, skip report '
'generation')
parser.add_argument('--preserve-profiles',
help='Do not delete raw profiles', action='store_true')
parser.add_argument('--use-existing-profdata',
help='Specify an existing indexed profile to use')
parser.add_argument('--unified-report', action='store_true',
help='Emit a unified report for all binaries')
parser.add_argument('--restrict', metavar='R', type=str, nargs='*',
default=[],
help='Restrict the reporting to the given source paths')
args = parser.parse_args()
if args.use_existing_profdata and args.only_merge:
print('--use-existing-profdata and --only-merge are incompatible')
exit(1)
if args.use_existing_profdata:
profdata_path = args.use_existing_profdata
else:
profdata_path = merge_raw_profiles(args.host_llvm_profdata,
args.profile_data_dir,
args.preserve_profiles)
if not len(args.binaries):
print('No binaries specified, no work to do!')
exit(1)
if not args.only_merge:
prepare_html_reports(args.host_llvm_cov, profdata_path, args.report_dir,
args.binaries, args.unified_report, args.restrict)
| 4,697 | 42.5 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/update_llc_test_checks.py | #!/usr/bin/env python2.7
"""A test case update script.
This script is a utility to update LLVM X86 'llc' based test cases with new
FileCheck patterns. It can either update all of the tests in the file or
a single test function.
"""
import argparse
import os # Used to advertise this file's name ("autogenerated_note").
import string
import subprocess
import sys
import re
# Invoke the tool that is being tested.
def llc(args, cmd_args, ir):
with open(ir) as ir_file:
stdout = subprocess.check_output(args.llc_binary + ' ' + cmd_args,
shell=True, stdin=ir_file)
# Fix line endings to unix (LF) style.
stdout = stdout.replace('\r\n', '\n')
return stdout
# RegEx: this is where the magic happens.
SCRUB_WHITESPACE_RE = re.compile(r'(?!^(| \w))[ \t]+', flags=re.M)
SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
ASM_FUNCTION_X86_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
flags=(re.M | re.S))
SCRUB_X86_SHUFFLES_RE = (
re.compile(
r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
flags=re.M))
SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')
ASM_FUNCTION_ARM_RE = re.compile(
r'^(?P<func>[0-9a-zA-Z_]+):\n' # f: (name of function)
r'\s+\.fnstart\n' # .fnstart
r'(?P<body>.*?)\n' # (body of the function)
r'.Lfunc_end[0-9]+:\n', # .Lfunc_end0:
flags=(re.M | re.S))
RUN_LINE_RE = re.compile('^\s*;\s*RUN:\s*(.*)$')
TRIPLE_ARG_RE = re.compile(r'-mtriple=([^ ]+)')
TRIPLE_IR_RE = re.compile(r'^target\s+triple\s*=\s*"([^"]+)"$')
IR_FUNCTION_RE = re.compile('^\s*define\s+(?:internal\s+)?[^@]*@(\w+)\s*\(')
CHECK_PREFIX_RE = re.compile('--check-prefix=(\S+)')
CHECK_RE = re.compile(r'^\s*;\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL)?:')
ASM_FUNCTION_PPC_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n'
r'\.Lfunc_begin[0-9]+:\n'
r'[ \t]+.cfi_startproc\n'
r'(?:\.Lfunc_[gl]ep[0-9]+:\n(?:[ \t]+.*?\n)*)*'
r'(?P<body>.*?)\n'
# This list is incomplete
r'(?:^[ \t]*(?:\.long[ \t]+[^\n]+|\.quad[ \t]+[^\n]+)\n)*'
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
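# Illustrative examples of lines some of the regexes above are intended to
# match (the sample lines are made up, not taken from a real test):
#   RUN_LINE_RE: ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE
#   TRIPLE_ARG_RE: -mtriple=x86_64-unknown-unknown (captures the triple)
#   IR_FUNCTION_RE: define i32 @foo(i32 %a) { (captures 'foo')
#   CHECK_PREFIX_RE: --check-prefix=SSE (captures 'SSE')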
def scrub_asm_x86(asm):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Detect shuffle asm comments and hide the operands in favor of the comments.
asm = SCRUB_X86_SHUFFLES_RE.sub(r'\1 {{.*#+}} \2', asm)
# Generically match the stack offset of a memory operand.
asm = SCRUB_X86_SP_RE.sub(r'{{[0-9]+}}(%\1)', asm)
# Generically match a RIP-relative memory operand.
asm = SCRUB_X86_RIP_RE.sub(r'{{.*}}(%rip)', asm)
# Generically match a LCP symbol.
asm = SCRUB_X86_LCP_RE.sub(r'{{\.LCPI.*}}', asm)
# Strip kill operands inserted into the asm.
asm = SCRUB_KILL_COMMENT_RE.sub('', asm)
# Strip trailing whitespace.
asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_arm_eabi(asm):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip kill operands inserted into the asm.
asm = SCRUB_KILL_COMMENT_RE.sub('', asm)
# Strip trailing whitespace.
asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_powerpc64le(asm):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
# Expand the tabs used for indentation.
asm = string.expandtabs(asm, 2)
# Strip trailing whitespace.
asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
# Build up a dictionary of all the function bodies.
def build_function_body_dictionary(raw_tool_output, triple, prefixes, func_dict,
verbose):
target_handlers = {
'x86_64': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'i686': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'x86': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'arm-eabi': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),
'powerpc64le': (scrub_asm_powerpc64le, ASM_FUNCTION_PPC_RE),
}
handlers = None
for prefix, s in target_handlers.items():
if triple.startswith(prefix):
handlers = s
break
else:
raise KeyError('Triple %r is not supported' % (triple))
scrubber, function_re = handlers
for m in function_re.finditer(raw_tool_output):
if not m:
continue
func = m.group('func')
scrubbed_body = scrubber(m.group('body'))
if func.startswith('stress'):
# We only use the last line of the function body for stress tests.
scrubbed_body = '\n'.join(scrubbed_body.splitlines()[-1:])
if verbose:
print >>sys.stderr, 'Processing function: ' + func
for l in scrubbed_body.splitlines():
print >>sys.stderr, ' ' + l
for prefix in prefixes:
if func in func_dict[prefix] and func_dict[prefix][func] != scrubbed_body:
if prefix == prefixes[-1]:
print >>sys.stderr, ('WARNING: Found conflicting asm under the '
'same prefix: %r!' % (prefix,))
else:
func_dict[prefix][func] = None
continue
func_dict[prefix][func] = scrubbed_body
def add_checks(output_lines, run_list, func_dict, func_name):
printed_prefixes = []
for p in run_list:
checkprefixes = p[0]
for checkprefix in checkprefixes:
if checkprefix in printed_prefixes:
break
if not func_dict[checkprefix][func_name]:
continue
# Add some space between different check prefixes.
if len(printed_prefixes) != 0:
output_lines.append(';')
printed_prefixes.append(checkprefix)
output_lines.append('; %s-LABEL: %s:' % (checkprefix, func_name))
func_body = func_dict[checkprefix][func_name].splitlines()
output_lines.append('; %s: %s' % (checkprefix, func_body[0]))
for func_line in func_body[1:]:
output_lines.append('; %s-NEXT: %s' % (checkprefix, func_line))
# Add space between different check prefixes and the first line of code.
# output_lines.append(';')
break
return output_lines
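# For a single 'CHECK' prefix and a two-line scrubbed body, add_checks() above
# appends lines of roughly this shape (the asm itself is a made-up example):
#   ; CHECK-LABEL: foo:
#   ; CHECK: movl %edi, %eax
#   ; CHECK-NEXT: retq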
def should_add_line_to_output(input_line, prefix_set):
# Skip any blank comment lines in the IR.
if input_line.strip() == ';':
return False
# Skip any blank lines in the IR.
#if input_line.strip() == '':
# return False
# And skip any CHECK lines. We're building our own.
m = CHECK_RE.match(input_line)
if m and m.group(1) in prefix_set:
return False
return True
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--llc-binary', default='llc',
help='The "llc" binary to use to generate the test case')
parser.add_argument(
'--function', help='The function in the test file to update')
parser.add_argument('tests', nargs='+')
args = parser.parse_args()
autogenerated_note = ('; NOTE: Assertions have been autogenerated by '
'utils/' + os.path.basename(__file__))
for test in args.tests:
if args.verbose:
print >>sys.stderr, 'Scanning for RUN lines in test file: %s' % (test,)
with open(test) as f:
input_lines = [l.rstrip() for l in f]
triple_in_ir = None
for l in input_lines:
m = TRIPLE_IR_RE.match(l)
if m:
triple_in_ir = m.groups()[0]
break
raw_lines = [m.group(1)
for m in [RUN_LINE_RE.match(l) for l in input_lines] if m]
run_lines = [raw_lines[0]] if len(raw_lines) > 0 else []
for l in raw_lines[1:]:
if run_lines[-1].endswith("\\"):
run_lines[-1] = run_lines[-1].rstrip("\\") + " " + l
else:
run_lines.append(l)
if args.verbose:
print >>sys.stderr, 'Found %d RUN lines:' % (len(run_lines),)
for l in run_lines:
print >>sys.stderr, ' RUN: ' + l
run_list = []
for l in run_lines:
commands = [cmd.strip() for cmd in l.split('|', 1)]
llc_cmd = commands[0]
triple_in_cmd = None
m = TRIPLE_ARG_RE.search(llc_cmd)
if m:
triple_in_cmd = m.groups()[0]
filecheck_cmd = ''
if len(commands) > 1:
filecheck_cmd = commands[1]
if not llc_cmd.startswith('llc '):
print >>sys.stderr, 'WARNING: Skipping non-llc RUN line: ' + l
continue
if not filecheck_cmd.startswith('FileCheck '):
print >>sys.stderr, 'WARNING: Skipping non-FileChecked RUN line: ' + l
continue
llc_cmd_args = llc_cmd[len('llc'):].strip()
llc_cmd_args = llc_cmd_args.replace('< %s', '').replace('%s', '').strip()
check_prefixes = [m.group(1)
for m in CHECK_PREFIX_RE.finditer(filecheck_cmd)]
if not check_prefixes:
check_prefixes = ['CHECK']
# FIXME: We should use multiple check prefixes to common check lines. For
# now, we just ignore all but the last.
run_list.append((check_prefixes, llc_cmd_args, triple_in_cmd))
func_dict = {}
for p in run_list:
prefixes = p[0]
for prefix in prefixes:
func_dict.update({prefix: dict()})
for prefixes, llc_args, triple_in_cmd in run_list:
if args.verbose:
print >>sys.stderr, 'Extracted LLC cmd: llc ' + llc_args
print >>sys.stderr, 'Extracted FileCheck prefixes: ' + str(prefixes)
raw_tool_output = llc(args, llc_args, test)
if not (triple_in_cmd or triple_in_ir):
print >>sys.stderr, "Cannot find a triple. Assume 'x86'"
build_function_body_dictionary(raw_tool_output,
triple_in_cmd or triple_in_ir or 'x86', prefixes, func_dict, args.verbose)
is_in_function = False
is_in_function_start = False
func_name = None
prefix_set = set([prefix for p in run_list for prefix in p[0]])
if args.verbose:
print >>sys.stderr, 'Rewriting FileCheck prefixes: %s' % (prefix_set,)
output_lines = []
output_lines.append(autogenerated_note)
for input_line in input_lines:
if is_in_function_start:
if input_line == '':
continue
if input_line.lstrip().startswith(';'):
m = CHECK_RE.match(input_line)
if not m or m.group(1) not in prefix_set:
output_lines.append(input_line)
continue
# Print out the various check lines here.
output_lines = add_checks(output_lines, run_list, func_dict, func_name)
is_in_function_start = False
if is_in_function:
if should_add_line_to_output(input_line, prefix_set) == True:
# This input line of the function body will go as-is into the output.
output_lines.append(input_line)
else:
continue
if input_line.strip() == '}':
is_in_function = False
continue
if input_line == autogenerated_note:
continue
# If it's outside a function, it just gets copied to the output.
output_lines.append(input_line)
m = IR_FUNCTION_RE.match(input_line)
if not m:
continue
func_name = m.group(1)
if args.function is not None and func_name != args.function:
# When filtering on a specific function, skip all others.
continue
is_in_function = is_in_function_start = True
if args.verbose:
print>>sys.stderr, 'Writing %d lines to %s...' % (len(output_lines), test)
with open(test, 'wb') as f:
f.writelines([l + '\n' for l in output_lines])
if __name__ == '__main__':
main()
| 12,178 | 34.199422 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/DSAclean.py | #! /usr/bin/python
#changelog:
#10/13/2005b: replaced the # in tmp(.#*)* with alphanumeric and _, this will then remove
#nodes such as %tmp.1.i and %tmp._i.3
#10/13/2005: extended to remove variables of the form %tmp(.#)* rather than just
#%tmp.#, i.e. it now will remove %tmp.12.3.15 etc, additionally fixed a spelling error in
#the comments
#10/12/2005: now it only removes nodes and edges for which the label is %tmp.# rather
#than removing all lines for which the label CONTAINS %tmp.#
import re
import sys
if( len(sys.argv) < 3 ):
print 'usage is: ./DSAclean <dot_file_to_be_cleaned> <out_put_file>'
sys.exit(1)
#get a file object
input = open(sys.argv[1], 'r')
output = open(sys.argv[2], 'w')
#we'll get this one line at a time...while we could just put the whole thing in a string
#it would kill old computers
buffer = input.readline()
while buffer != '':
if re.compile("label(\s*)=(\s*)\"\s%tmp(.\w*)*(\s*)\"").search(buffer):
#skip next line, write neither this line nor the next
buffer = input.readline()
else:
#this isn't a tmp Node, we can write it
output.write(buffer)
#prepare for the next iteration
buffer = input.readline()
input.close()
output.close()
| 1,187 | 35 | 89 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/create_ladder_graph.py | #!/usr/bin/env python
"""A ladder graph creation program.
This is a python program that creates c source code that will generate
CFGs that are ladder graphs. Ladder graphs are generally the worst case
for a lot of dominance related algorithms (Dominance frontiers, etc),
and often generate N^2 or worse behavior.
One good use of this program is to test whether your linear time algorithm is
really behaving linearly.
"""
import argparse
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('rungs', type=int,
help="Number of ladder rungs. Must be a multiple of 2")
args = parser.parse_args()
if (args.rungs % 2) != 0:
print "Rungs must be a multiple of 2"
return
print "int ladder(int *foo, int *bar, int x) {"
rung1 = xrange(0, args.rungs, 2)
rung2 = xrange(1, args.rungs, 2)
for i in rung1:
print "rung1%d:" % i
print "*foo = x++;"
if i != rung1[-1]:
print "if (*bar) goto rung1%d;" % (i+2)
print "else goto rung2%d;" % (i+1)
else:
print "goto rung2%d;" % (i+1)
for i in rung2:
print "rung2%d:" % i
print "*foo = x++;"
if i != rung2[-1]:
print "goto rung2%d;" % (i+2)
else:
print "return *foo;"
print "}"
if __name__ == '__main__':
main()
| 1,293 | 28.409091 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/shuffle_fuzz.py | #!/usr/bin/env python
"""A shuffle vector fuzz tester.
This is a python program to fuzz test the LLVM shufflevector instruction. It
generates a function with a random sequence of shufflevectors, maintaining the
element mapping accumulated across the function. It then generates a main
function which calls it with a different value in each element and checks that
the result matches the expected mapping.
Take the output IR printed to stdout, compile it to an executable using whatever
set of transforms you want to test, and run the program. If it crashes, it found
a bug.
"""
import argparse
import itertools
import random
import sys
import uuid
def main():
element_types=['i8', 'i16', 'i32', 'i64', 'f32', 'f64']
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true',
help='Show verbose output')
parser.add_argument('--seed', default=str(uuid.uuid4()),
help='A string used to seed the RNG')
parser.add_argument('--max-shuffle-height', type=int, default=16,
help='Specify a fixed height of shuffle tree to test')
parser.add_argument('--no-blends', dest='blends', action='store_false',
help='Include blends of two input vectors')
parser.add_argument('--fixed-bit-width', type=int, choices=[128, 256],
help='Specify a fixed bit width of vector to test')
parser.add_argument('--fixed-element-type', choices=element_types,
help='Specify a fixed element type to test')
parser.add_argument('--triple',
help='Specify a triple string to include in the IR')
args = parser.parse_args()
random.seed(args.seed)
if args.fixed_element_type is not None:
element_types=[args.fixed_element_type]
if args.fixed_bit_width is not None:
if args.fixed_bit_width == 128:
width_map={'i64': 2, 'i32': 4, 'i16': 8, 'i8': 16, 'f64': 2, 'f32': 4}
(width, element_type) = random.choice(
[(width_map[t], t) for t in element_types])
elif args.fixed_bit_width == 256:
width_map={'i64': 4, 'i32': 8, 'i16': 16, 'i8': 32, 'f64': 4, 'f32': 8}
(width, element_type) = random.choice(
[(width_map[t], t) for t in element_types])
else:
sys.exit(1) # Checked above by argument parsing.
else:
width = random.choice([2, 4, 8, 16, 32, 64])
element_type = random.choice(element_types)
element_modulus = {
'i8': 1 << 8, 'i16': 1 << 16, 'i32': 1 << 32, 'i64': 1 << 64,
'f32': 1 << 32, 'f64': 1 << 64}[element_type]
shuffle_range = (2 * width) if args.blends else width
# Because undef (-1) saturates and is indistinguishable when testing the
# correctness of a shuffle, we want to bias our fuzz toward having a decent
# mixture of non-undef lanes in the end. With a deep shuffle tree, the
# probabilities aren't good so we need to bias things. The math here is that if
# we uniformly select between -1 and the other inputs, each element of the
# result will have the following probability of being undef:
#
# 1 - (shuffle_range/(shuffle_range+1))^max_shuffle_height
#
# More generally, for any probability P of selecting a defined element in
# a single shuffle, the end result is:
#
# 1 - P^max_shuffle_height
#
# The power of the shuffle height is the real problem, as we want:
#
# 1 - shuffle_range/(shuffle_range+1)
#
# So we bias the selection of undef at any given node based on the tree
# height. Below, let 'A' be 'shuffle_range', 'C' be 'max_shuffle_height',
# and 'B' be the bias we use to compensate for
# C '((A+1)*A^(1/C))/(A*(A+1)^(1/C))':
#
# 1 - (B * A)/(A + 1)^C = 1 - A/(A + 1)
#
# So at each node we use:
#
# 1 - (B * A)/(A + 1)
# = 1 - ((A + 1) * A * A^(1/C))/(A * (A + 1) * (A + 1)^(1/C))
# = 1 - ((A + 1) * A^((C + 1)/C))/(A * (A + 1)^((C + 1)/C))
#
# This is the formula we use to select undef lanes in the shuffle.
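# As a concrete illustration (numbers are approximate): with shuffle_range = 8
# and max_shuffle_height = 16 the formula below yields undef_prob ~= 0.0073,
# and (1 - 0.0073)^16 ~= 8/9, i.e. each result lane is undef with probability
# ~= 1/(shuffle_range + 1), which is the target stated above.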
A = float(shuffle_range)
C = float(args.max_shuffle_height)
undef_prob = 1.0 - (((A + 1.0) * pow(A, (C + 1.0)/C)) /
(A * pow(A + 1.0, (C + 1.0)/C)))
shuffle_tree = [[[-1 if random.random() <= undef_prob
else random.choice(range(shuffle_range))
for _ in itertools.repeat(None, width)]
for _ in itertools.repeat(None, args.max_shuffle_height - i)]
for i in xrange(args.max_shuffle_height)]
if args.verbose:
# Print out the shuffle sequence in a compact form.
print >>sys.stderr, ('Testing shuffle sequence "%s" (v%d%s):' %
(args.seed, width, element_type))
for i, shuffles in enumerate(shuffle_tree):
print >>sys.stderr, ' tree level %d:' % (i,)
for j, s in enumerate(shuffles):
print >>sys.stderr, ' shuffle %d: %s' % (j, s)
print >>sys.stderr, ''
# Symbolically evaluate the shuffle tree.
inputs = [[int(j % element_modulus)
for j in xrange(i * width + 1, (i + 1) * width + 1)]
for i in xrange(args.max_shuffle_height + 1)]
results = inputs
for shuffles in shuffle_tree:
results = [[((results[i] if j < width else results[i + 1])[j % width]
if j != -1 else -1)
for j in s]
for i, s in enumerate(shuffles)]
if len(results) != 1:
print >>sys.stderr, 'ERROR: Bad results: %s' % (results,)
sys.exit(1)
result = results[0]
if args.verbose:
print >>sys.stderr, 'Which transforms:'
print >>sys.stderr, ' from: %s' % (inputs,)
print >>sys.stderr, ' into: %s' % (result,)
print >>sys.stderr, ''
# The IR uses silly names for floating point types. We also need a same-size
# integer type.
integral_element_type = element_type
if element_type == 'f32':
integral_element_type = 'i32'
element_type = 'float'
elif element_type == 'f64':
integral_element_type = 'i64'
element_type = 'double'
# Now we need to generate IR for the shuffle function.
subst = {'N': width, 'T': element_type, 'IT': integral_element_type}
print """
define internal fastcc <%(N)d x %(T)s> @test(%(arguments)s) noinline nounwind {
entry:""" % dict(subst,
arguments=', '.join(
['<%(N)d x %(T)s> %%s.0.%(i)d' % dict(subst, i=i)
for i in xrange(args.max_shuffle_height + 1)]))
for i, shuffles in enumerate(shuffle_tree):
for j, s in enumerate(shuffles):
print """
%%s.%(next_i)d.%(j)d = shufflevector <%(N)d x %(T)s> %%s.%(i)d.%(j)d, <%(N)d x %(T)s> %%s.%(i)d.%(next_j)d, <%(N)d x i32> <%(S)s>
""".strip('\n') % dict(subst, i=i, next_i=i + 1, j=j, next_j=j + 1,
S=', '.join(['i32 ' + (str(si) if si != -1 else 'undef')
for si in s]))
print """
ret <%(N)d x %(T)s> %%s.%(i)d.0
}
""" % dict(subst, i=len(shuffle_tree))
# Generate some string constants that we can use to report errors.
for i, r in enumerate(result):
if r != -1:
s = ('FAIL(%(seed)s): lane %(lane)d, expected %(result)d, found %%d\n\\0A' %
{'seed': args.seed, 'lane': i, 'result': r})
s += ''.join(['\\00' for _ in itertools.repeat(None, 128 - len(s) + 2)])
print """
@error.%(i)d = private unnamed_addr global [128 x i8] c"%(s)s"
""".strip() % {'i': i, 's': s}
# Define a wrapper function which is marked 'optnone' to prevent
# interprocedural optimizations from deleting the test.
print """
define internal fastcc <%(N)d x %(T)s> @test_wrapper(%(arguments)s) optnone noinline {
%%result = call fastcc <%(N)d x %(T)s> @test(%(arguments)s)
ret <%(N)d x %(T)s> %%result
}
""" % dict(subst,
arguments=', '.join(['<%(N)d x %(T)s> %%s.%(i)d' % dict(subst, i=i)
for i in xrange(args.max_shuffle_height + 1)]))
# Finally, generate a main function which will trap if any lanes are mapped
# incorrectly (in an observable way).
print """
define i32 @main() {
entry:
; Create a scratch space to print error messages.
%%str = alloca [128 x i8]
%%str.ptr = getelementptr inbounds [128 x i8], [128 x i8]* %%str, i32 0, i32 0
; Build the input vector and call the test function.
%%v = call fastcc <%(N)d x %(T)s> @test_wrapper(%(inputs)s)
; We need to cast this back to an integer type vector to easily check the
; result.
%%v.cast = bitcast <%(N)d x %(T)s> %%v to <%(N)d x %(IT)s>
br label %%test.0
""" % dict(subst,
inputs=', '.join(
[('<%(N)d x %(T)s> bitcast '
'(<%(N)d x %(IT)s> <%(input)s> to <%(N)d x %(T)s>)' %
dict(subst, input=', '.join(['%(IT)s %(i)d' % dict(subst, i=i)
for i in input])))
for input in inputs]))
# Test that each non-undef result lane contains the expected value.
for i, r in enumerate(result):
if r == -1:
print """
test.%(i)d:
; Skip this lane, its value is undef.
br label %%test.%(next_i)d
""" % dict(subst, i=i, next_i=i + 1)
else:
print """
test.%(i)d:
%%v.%(i)d = extractelement <%(N)d x %(IT)s> %%v.cast, i32 %(i)d
%%cmp.%(i)d = icmp ne %(IT)s %%v.%(i)d, %(r)d
br i1 %%cmp.%(i)d, label %%die.%(i)d, label %%test.%(next_i)d
die.%(i)d:
; Capture the actual value and print an error message.
%%tmp.%(i)d = zext %(IT)s %%v.%(i)d to i2048
%%bad.%(i)d = trunc i2048 %%tmp.%(i)d to i32
call i32 (i8*, i8*, ...) @sprintf(i8* %%str.ptr, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @error.%(i)d, i32 0, i32 0), i32 %%bad.%(i)d)
%%length.%(i)d = call i32 @strlen(i8* %%str.ptr)
call i32 @write(i32 2, i8* %%str.ptr, i32 %%length.%(i)d)
call void @llvm.trap()
unreachable
""" % dict(subst, i=i, next_i=i + 1, r=r)
print """
test.%d:
ret i32 0
}
declare i32 @strlen(i8*)
declare i32 @write(i32, i8*, i32)
declare i32 @sprintf(i8*, i8*, ...)
declare void @llvm.trap() noreturn nounwind
""" % (len(result),)
if __name__ == '__main__':
main()
| 10,060 | 38.300781 | 148 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/DSAextract.py | #! /usr/bin/python
#this is a script to extract given named nodes from a dot file, with
#the associated edges. An edge is kept iff for edge x -> y
# x and y are both nodes specified to be kept.
#known issues: if a line contains '->' and is not an edge line
#problems will occur. If node labels do not begin with
#Node this also will not work. Since this is designed to work
#on DSA dot output and not general dot files this is ok.
#If you want to use this on other files rename the node labels
#to Node[.*] with a script or something. This also relies on
#the length of a node name being 13 characters (as it is in all
#DSA dot output files)
#Note that the name of the node can be any substring of the actual
#name in the dot file. Thus if you, say, specify COLLAPSED
#as a parameter this script will pull out all COLLAPSED
#nodes in the file
#Specifying escape characters in the name like \n also will not work,
#as Python
#will make it \\n, I'm not really sure how to fix this
#currently the script prints the names it is searching for
#to STDOUT, so you can check to see if they are what you intend
import re
import string
import sys
if len(sys.argv) < 3:
print 'usage is ./DSAextract <dot_file_to_modify> \
<output_file> [list of nodes to extract]'
#open the input file
input = open(sys.argv[1], 'r')
#construct a set of node names
node_name_set = set()
for name in sys.argv[3:]:
node_name_set |= set([name])
#construct a list of compiled regular expressions from the
#node_name_set
regexp_list = []
for name in node_name_set:
regexp_list.append(re.compile(name))
#used to see what kind of line we are on
nodeexp = re.compile('Node')
#used to check to see if the current line is an edge line
arrowexp = re.compile('->')
node_set = set()
#read the file one line at a time
buffer = input.readline()
while buffer != '':
#filter out the unnecessary checks on all the edge lines
if not arrowexp.search(buffer):
#check to see if this is a node we are looking for
for regexp in regexp_list:
#if this name is for the current node, add the dot variable name
#for the node (it will be Node(hex number)) to our set of nodes
if regexp.search(buffer):
node_set |= set([re.split('\s+',buffer,2)[1]])
break
buffer = input.readline()
#test code
#print '\n'
print node_name_set
#print node_set
#open the output file
output = open(sys.argv[2], 'w')
#start the second pass over the file
input = open(sys.argv[1], 'r')
buffer = input.readline()
while buffer != '':
#there are three types of lines we are looking for
#1) node lines, 2) edge lines 3) support lines (like page size, etc)
#is this an edge line?
#note that this is not completely robust: if a non-edge line
#for some reason contains -> it will be misidentified
#hand edit the file if this happens
if arrowexp.search(buffer):
#check to make sure that both nodes are in the node list
#if they are print this to output
nodes = arrowexp.split(buffer)
nodes[0] = string.strip(nodes[0])
nodes[1] = string.strip(nodes[1])
if nodes[0][:13] in node_set and \
nodes[1][:13] in node_set:
output.write(buffer)
elif nodeexp.search(buffer): #this is a node line
node = re.split('\s+', buffer,2)[1]
if node in node_set:
output.write(buffer)
else: #this is a support line
output.write(buffer)
buffer = input.readline()
| 3,350 | 28.919643 | 70 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/wciia.py | #!/usr/bin/env python
"""
wciia - Whose Code Is It Anyway
Determines code owner of the file/folder relative to the llvm source root.
Code owner is determined from the content of the CODE_OWNERS.TXT
by parsing the D: field
usage:
utils/wciia.py path
limitations:
- must be run from llvm source root
- very simplistic algorithm
- only handles * as a wildcard
- not very user friendly
- does not handle the proposed F: field
"""
import os
code_owners = {}
def process_files_and_folders(owner):
filesfolders = owner['filesfolders']
# paths must be in ( ... ) so strip them
lpar = filesfolders.find('(')
rpar = filesfolders.rfind(')')
if rpar <= lpar:
# give up
return
paths = filesfolders[lpar+1:rpar]
# split paths
owner['paths'] = []
for path in paths.split():
owner['paths'].append(path)
def process_code_owner(owner):
if 'filesfolders' in owner:
filesfolders = owner['filesfolders']
else:
# print "F: field missing, using D: field"
owner['filesfolders'] = owner['description']
process_files_and_folders(owner)
code_owners[owner['name']] = owner
# process CODE_OWNERS.TXT first
code_owners_file = open("CODE_OWNERS.TXT", "r").readlines()
code_owner = {}
for line in code_owners_file:
for word in line.split():
if word == "N:":
name = line[2:].strip()
if code_owner:
process_code_owner(code_owner)
code_owner = {}
# reset the values
code_owner['name'] = name
if word == "E:":
email = line[2:].strip()
code_owner['email'] = email
if word == "D:":
description = line[2:].strip()
code_owner['description'] = description
if word == "F:":
filesfolders = line[2:].strip()
code_owner['filesfolders'].append(filesfolders)
def find_owners(fpath):
onames = []
lmatch = -1
# very simplistic way of finding the best match
for name in code_owners:
owner = code_owners[name]
if 'paths' in owner:
for path in owner['paths']:
# print "searching (" + path + ")"
# try exact match
if fpath == path:
return name
# see if path ends with a *
rstar = path.rfind('*')
if rstar>0:
# try the longest match,
rpos = -1
if len(fpath) < len(path):
rpos = path.find(fpath)
if rpos == 0:
onames.append(name)
onames.append('Chris Lattner')
return onames
# now let's try to find the owner of the file or folder
import sys
if len(sys.argv) < 2:
print "usage " + sys.argv[0] + " file_or_folder"
exit(-1)
# the path we are checking
path = str(sys.argv[1])
# check if this is real path
if not os.path.exists(path):
print "path (" + path + ") does not exist"
exit(-1)
owners_name = find_owners(path)
# be grammatically correct
print "The owner(s) of the (" + path + ") is(are) : " + str(owners_name)
exit(0)
# bottom up walk of the current .
# not yet used
root = "."
for dir,subdirList,fileList in os.walk( root , topdown=False ) :
print "dir :" , dir
for fname in fileList :
print "-" , fname
print
| 2,944 | 22.373016 | 74 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/sort_includes.py | #!/usr/bin/env python
"""Script to sort the top-most block of #include lines.
Assumes the LLVM coding conventions.
Currently, this script only bothers sorting the llvm/... headers. Patches
welcome for more functionality, and sorting other header groups.
"""
import argparse
import os
def sort_includes(f):
"""Sort the #include lines of a specific file."""
# Skip files which are under INPUTS trees or test trees.
if 'INPUTS/' in f.name or 'test/' in f.name:
return
ext = os.path.splitext(f.name)[1]
if ext not in ['.cpp', '.c', '.h', '.inc', '.def']:
return
lines = f.readlines()
look_for_api_header = ext in ['.cpp', '.c']
found_headers = False
headers_begin = 0
headers_end = 0
api_headers = []
local_headers = []
subproject_headers = []
llvm_headers = []
system_headers = []
for (i, l) in enumerate(lines):
if l.strip() == '':
continue
if l.startswith('#include'):
if not found_headers:
headers_begin = i
found_headers = True
headers_end = i
header = l[len('#include'):].lstrip()
if look_for_api_header and header.startswith('"'):
api_headers.append(header)
look_for_api_header = False
continue
if (header.startswith('<') or header.startswith('"gtest/') or
header.startswith('"isl/') or header.startswith('"json/')):
system_headers.append(header)
continue
if (header.startswith('"clang/') or header.startswith('"clang-c/') or
header.startswith('"polly/')):
subproject_headers.append(header)
continue
if (header.startswith('"llvm/') or header.startswith('"llvm-c/')):
llvm_headers.append(header)
continue
local_headers.append(header)
continue
# Only allow comments and #defines prior to any includes. If either are
# mixed with includes, the order might be sensitive.
if found_headers:
break
if l.startswith('//') or l.startswith('#define') or l.startswith('#ifndef'):
continue
break
if not found_headers:
return
local_headers = sorted(set(local_headers))
subproject_headers = sorted(set(subproject_headers))
llvm_headers = sorted(set(llvm_headers))
system_headers = sorted(set(system_headers))
headers = api_headers + local_headers + subproject_headers + llvm_headers + system_headers
header_lines = ['#include ' + h for h in headers]
lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:]
f.seek(0)
f.truncate()
f.writelines(lines)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('files', nargs='+', type=argparse.FileType('r+'),
help='the source files to sort includes within')
args = parser.parse_args()
for f in args.files:
sort_includes(f)
if __name__ == '__main__':
main()
| 2,859 | 29.425532 | 92 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/schedcover.py | #!/usr/bin/python
# This creates a CSV file from the debug output of the subtarget emitter:
# llvm-tblgen --gen-subtarget --debug-only=subtarget-emitter
# With thanks to Dave Estes for mentioning the idea at the 2014 LLVM Developers' Meeting
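# Typical use (a sketch; the file name and regex are placeholders): capture
# the debug output of the llvm-tblgen command above into a file, then run
#   schedcover.py subtarget-debug.log             # all scheduling models
#   schedcover.py subtarget-debug.log "cortex"    # only models matching the regex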
import os;
import sys;
import re;
import operator;
table = {}
models = set()
filt = None
def add(instr, model, resource=None):
global table, models
entry = table.setdefault(instr, dict())
entry[model] = resource
models.add(model)
def filter_model(m):
global filt
if m and filt:
return filt.search(m) != None
else:
return True
def display():
global table, models
ordered_table = sorted(table.items(), key=operator.itemgetter(0))
ordered_models = filter(filter_model, sorted(models))
# print header
sys.stdout.write("instruction")
for model in ordered_models:
if not model: model = "default"
sys.stdout.write(", {}".format(model))
sys.stdout.write(os.linesep)
for (instr, mapping) in ordered_table:
sys.stdout.write(instr)
for model in ordered_models:
if model in mapping:
sys.stdout.write(", {}".format(mapping[model]))
else:
sys.stdout.write(", ")
sys.stdout.write(os.linesep)
def machineModelCover(path):
# The interesting bits
re_sched_default = re.compile("SchedRW machine model for ([^ ]*) (.*)\n");
re_sched_no_default = re.compile("No machine model for ([^ ]*)\n");
re_sched_spec = re.compile("InstRW on ([^ ]*) for ([^ ]*) (.*)\n");
re_sched_no_spec = re.compile("No machine model for ([^ ]*) on processor (.*)\n");
# scan the file
with open(path, 'r') as f:
for line in f.readlines():
match = re_sched_default.match(line)
if match: add(match.group(1), None, match.group(2))
match = re_sched_no_default.match(line)
if match: add(match.group(1), None)
match = re_sched_spec.match(line)
if match: add(match.group(2), match.group(1), match.group(3))
match = re_sched_no_spec.match(line)
if match: add(match.group(1), match.group(2))
display()
if len(sys.argv) > 2:
filt = re.compile(sys.argv[2], re.IGNORECASE)
machineModelCover(sys.argv[1])
| 2,311 | 28.641026 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/gdb-scripts/prettyprinters.py | import gdb.printing
class SmallStringPrinter:
"""Print an llvm::SmallString object."""
def __init__(self, val):
self.val = val
def to_string(self):
begin = self.val['BeginX']
end = self.val['EndX']
return begin.cast(gdb.lookup_type("char").pointer()).string(length = end - begin)
def display_hint (self):
return 'string'
class StringRefPrinter:
"""Print an llvm::StringRef object."""
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['Data'].string(length = self.val['Length'])
def display_hint (self):
return 'string'
class SmallVectorPrinter:
"""Print an llvm::SmallVector object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
return self._iterator(begin, end)
def to_string(self):
t = self.val.type.template_argument(0).pointer()
begin = self.val['BeginX'].cast(t)
end = self.val['EndX'].cast(t)
capacity = self.val['CapacityX'].cast(t)
return 'llvm::SmallVector of length %d, capacity %d' % (end - begin, capacity - begin)
def display_hint (self):
return 'array'
class ArrayRefPrinter:
"""Print an llvm::ArrayRef object."""
class _iterator:
def __init__(self, begin, end):
self.cur = begin
self.end = end
self.count = 0
def __iter__(self):
return self
def next(self):
if self.cur == self.end:
raise StopIteration
count = self.count
self.count = self.count + 1
cur = self.cur
self.cur = self.cur + 1
return '[%d]' % count, cur.dereference()
__next__ = next
def __init__(self, val):
self.val = val
def children(self):
data = self.val['Data']
return self._iterator(data, data + self.val['Length'])
def to_string(self):
return 'llvm::ArrayRef of length %d' % (self.val['Length'])
def display_hint (self):
return 'array'
class OptionalPrinter:
"""Print an llvm::Optional object."""
def __init__(self, value):
self.value = value
class _iterator:
def __init__(self, member, empty):
self.member = member
self.done = empty
def __iter__(self):
return self
def next(self):
if self.done:
raise StopIteration
self.done = True
return ('value', self.member.dereference())
def children(self):
if not self.value['hasVal']:
return self._iterator('', True)
return self._iterator(self.value['storage']['buffer'].address.cast(self.value.type.template_argument(0).pointer()), False)
def to_string(self):
return 'llvm::Optional is %sinitialized' % ('' if self.value['hasVal'] else 'not ')
class DenseMapPrinter:
"Print a DenseMap"
class _iterator:
def __init__(self, key_info_t, begin, end):
self.key_info_t = key_info_t
self.cur = begin
self.end = end
self.advancePastEmptyBuckets()
self.first = True
def __iter__(self):
return self
def advancePastEmptyBuckets(self):
# disabled until the comments below can be addressed
# keeping as notes/posterity/hints for future contributors
return
n = self.key_info_t.name
is_equal = gdb.parse_and_eval(n + '::isEqual')
empty = gdb.parse_and_eval(n + '::getEmptyKey()')
tombstone = gdb.parse_and_eval(n + '::getTombstoneKey()')
# the following is invalid, GDB fails with:
# Python Exception <class 'gdb.error'> Attempt to take address of value
# not located in memory.
# because isEqual took parameter (for the unsigned long key I was testing)
# by const ref, and GDB
# It's also not entirely general - we should be accessing the "getFirst()"
# member function, not the 'first' member variable, but I've yet to figure
# out how to find/call member functions (especially (const) overloaded
# ones) on a gdb.Value.
while self.cur != self.end and (is_equal(self.cur.dereference()['first'], empty) or is_equal(self.cur.dereference()['first'], tombstone)):
self.cur = self.cur + 1
def next(self):
if self.cur == self.end:
raise StopIteration
cur = self.cur
v = cur.dereference()['first' if self.first else 'second']
if not self.first:
self.cur = self.cur + 1
self.advancePastEmptyBuckets()
self.first = True
else:
self.first = False
return 'x', v
def __init__(self, val):
self.val = val
def children(self):
t = self.val.type.template_argument(3).pointer()
begin = self.val['Buckets'].cast(t)
end = (begin + self.val['NumBuckets']).cast(t)
return self._iterator(self.val.type.template_argument(2), begin, end)
def to_string(self):
return 'llvm::DenseMap with %d elements' % (self.val['NumEntries'])
def display_hint(self):
return 'map'
pp = gdb.printing.RegexpCollectionPrettyPrinter("LLVMSupport")
pp.add_printer('llvm::SmallString', '^llvm::SmallString<.*>$', SmallStringPrinter)
pp.add_printer('llvm::StringRef', '^llvm::StringRef$', StringRefPrinter)
pp.add_printer('llvm::SmallVectorImpl', '^llvm::SmallVector(Impl)?<.*>$', SmallVectorPrinter)
pp.add_printer('llvm::ArrayRef', '^llvm::(Const)?ArrayRef<.*>$', ArrayRefPrinter)
pp.add_printer('llvm::Optional', '^llvm::Optional<.*>$', OptionalPrinter)
pp.add_printer('llvm::DenseMap', '^llvm::DenseMap<.*>$', DenseMapPrinter)
gdb.printing.register_pretty_printer(gdb.current_objfile(), pp)
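# A typical way to use these printers (an assumption about common gdb setup,
# not something this script arranges itself) is to source this file from gdb,
# e.g. with a line like the following in ~/.gdbinit:
#   source /path/to/llvm/utils/gdb-scripts/prettyprinters.py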
| 5,967 | 28.112195 | 144 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/llvm-build/llvmbuild/main.py | from __future__ import absolute_import
import filecmp
import os
import sys
import llvmbuild.componentinfo as componentinfo
import llvmbuild.configutil as configutil
from llvmbuild.util import fatal, note
###
def cmake_quote_string(value):
"""
cmake_quote_string(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# Currently, we only handle escaping backslashes.
value = value.replace("\\", "\\\\")
return value
def cmake_quote_path(value):
"""
cmake_quote_path(value) -> str
Return a quoted form of the given value that is suitable for use in CMake
language files.
"""
# CMake has a bug in its Makefile generator that doesn't properly quote
# strings it generates. So instead of using proper quoting, we just use "/"
# style paths. Currently, we only handle escaping backslashes.
value = value.replace("\\", "/")
return value
def mk_quote_string_for_target(value):
"""
mk_quote_string_for_target(target_name) -> str
Return a quoted form of the given target_name suitable for including in a
Makefile as a target name.
"""
# The only quoting we currently perform is for ':', to support msys users.
return value.replace(":", "\\:")
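# Doctest-style sketch of the three helpers above (values shown as Python
# string literals; the inputs are made up):
#   >>> cmake_quote_string('a\\b')
#   'a\\\\b'
#   >>> cmake_quote_path('a\\b')
#   'a/b'
#   >>> mk_quote_string_for_target('a:b')
#   'a\\:b'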
def make_install_dir(path):
"""
make_install_dir(path) -> None
Create the given directory path for installation, including any parents.
"""
# os.makedirs considers it an error to be called with an existent path.
if not os.path.exists(path):
os.makedirs(path)
###
class LLVMProjectInfo(object):
@staticmethod
def load_infos_from_path(llvmbuild_source_root):
def recurse(subpath):
# Load the LLVMBuild file.
llvmbuild_path = os.path.join(llvmbuild_source_root + subpath,
'LLVMBuild.txt')
if not os.path.exists(llvmbuild_path):
fatal("missing LLVMBuild.txt file at: %r" % (llvmbuild_path,))
# Parse the components from it.
common,info_iter = componentinfo.load_from_path(llvmbuild_path,
subpath)
for info in info_iter:
yield info
# Recurse into the specified subdirectories.
for subdir in common.get_list("subdirectories"):
for item in recurse(os.path.join(subpath, subdir)):
yield item
return recurse("/")
@staticmethod
def load_from_path(source_root, llvmbuild_source_root):
infos = list(
LLVMProjectInfo.load_infos_from_path(llvmbuild_source_root))
return LLVMProjectInfo(source_root, infos)
def __init__(self, source_root, component_infos):
# Store our simple ivars.
self.source_root = source_root
self.component_infos = list(component_infos)
self.component_info_map = None
self.ordered_component_infos = None
def validate_components(self):
"""validate_components() -> None
Validate that the project components are well-defined. Among other
things, this checks that:
- Components have valid references.
- Components references do not form cycles.
We also construct the map from component names to info, and the
topological ordering of components.
"""
# Create the component info map and validate that component names are
# unique.
self.component_info_map = {}
for ci in self.component_infos:
existing = self.component_info_map.get(ci.name)
if existing is not None:
# We found a duplicate component name, report it and error out.
fatal("found duplicate component %r (at %r and %r)" % (
ci.name, ci.subpath, existing.subpath))
self.component_info_map[ci.name] = ci
# Disallow 'all' as a component name, which is a special case.
if 'all' in self.component_info_map:
fatal("project is not allowed to define 'all' component")
# Add the root component.
if '$ROOT' in self.component_info_map:
fatal("project is not allowed to define $ROOT component")
self.component_info_map['$ROOT'] = componentinfo.GroupComponentInfo(
'/', '$ROOT', None)
self.component_infos.append(self.component_info_map['$ROOT'])
# Topologically order the component information according to their
# component references.
def visit_component_info(ci, current_stack, current_set):
# Check for a cycles.
if ci in current_set:
# We found a cycle, report it and error out.
cycle_description = ' -> '.join(
'%r (%s)' % (ci.name, relation)
for relation,ci in current_stack)
fatal("found cycle to %r after following: %s -> %s" % (
ci.name, cycle_description, ci.name))
# If we have already visited this item, we are done.
if ci not in components_to_visit:
return
# Otherwise, mark the component info as visited and traverse.
components_to_visit.remove(ci)
# Validate the parent reference, which we treat specially.
if ci.parent is not None:
parent = self.component_info_map.get(ci.parent)
if parent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, ci.parent, 'parent'))
ci.set_parent_instance(parent)
for relation,referent_name in ci.get_component_references():
# Validate that the reference is ok.
referent = self.component_info_map.get(referent_name)
if referent is None:
fatal("component %r has invalid reference %r (via %r)" % (
ci.name, referent_name, relation))
# Visit the reference.
current_stack.append((relation,ci))
current_set.add(ci)
visit_component_info(referent, current_stack, current_set)
current_set.remove(ci)
current_stack.pop()
# Finally, add the component info to the ordered list.
self.ordered_component_infos.append(ci)
# FIXME: We aren't actually correctly checking for cycles along the
# parent edges. Haven't decided how I want to handle this -- I thought
# about only checking cycles by relation type. If we do that, it falls
# out easily. If we don't, we should special case the check.
self.ordered_component_infos = []
components_to_visit = sorted(
set(self.component_infos),
key = lambda c: c.name)
while components_to_visit:
visit_component_info(components_to_visit[0], [], set())
# Canonicalize children lists.
for c in self.ordered_component_infos:
c.children.sort(key = lambda c: c.name)
def print_tree(self):
def visit(node, depth = 0):
print('%s%-40s (%s)' % (' '*depth, node.name, node.type_name))
for c in node.children:
visit(c, depth + 1)
visit(self.component_info_map['$ROOT'])
def write_components(self, output_path):
# Organize all the components by the directory their LLVMBuild file
# should go in.
info_basedir = {}
for ci in self.component_infos:
# Ignore the $ROOT component.
if ci.parent is None:
continue
info_basedir[ci.subpath] = info_basedir.get(ci.subpath, []) + [ci]
# Compute the list of subdirectories to scan.
subpath_subdirs = {}
for ci in self.component_infos:
# Ignore root components.
if ci.subpath == '/':
continue
# Otherwise, append this subpath to the parent list.
parent_path = os.path.dirname(ci.subpath)
subpath_subdirs[parent_path] = parent_list = subpath_subdirs.get(
parent_path, set())
parent_list.add(os.path.basename(ci.subpath))
# Generate the build files.
for subpath, infos in info_basedir.items():
# Order the components by name to have a canonical ordering.
infos.sort(key = lambda ci: ci.name)
# Format the components into llvmbuild fragments.
fragments = []
# Add the common fragments.
subdirectories = subpath_subdirs.get(subpath)
if subdirectories:
fragment = """\
subdirectories = %s
""" % (" ".join(sorted(subdirectories)),)
fragments.append(("common", fragment))
# Add the component fragments.
num_common_fragments = len(fragments)
for ci in infos:
fragment = ci.get_llvmbuild_fragment()
if fragment is None:
continue
name = "component_%d" % (len(fragments) - num_common_fragments)
fragments.append((name, fragment))
if not fragments:
continue
assert subpath.startswith('/')
directory_path = os.path.join(output_path, subpath[1:])
# Create the directory if it does not already exist.
if not os.path.exists(directory_path):
os.makedirs(directory_path)
# In an effort to preserve comments (which aren't parsed), read in
# the original file and extract the comments. We only know how to
# associate comments that prefix a section name.
f = open(infos[0]._source_path)
comments_map = {}
comment_block = ""
for ln in f:
if ln.startswith(';'):
comment_block += ln
elif ln.startswith('[') and ln.endswith(']\n'):
comments_map[ln[1:-2]] = comment_block
else:
comment_block = ""
f.close()
# Create the LLVMBuild file.
file_path = os.path.join(directory_path, 'LLVMBuild.txt')
f = open(file_path, "w")
# Write the header.
header_fmt = ';===- %s %s-*- Conf -*--===;'
header_name = '.' + os.path.join(subpath, 'LLVMBuild.txt')
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
;
; The LLVM Compiler Infrastructure
;
; This file is distributed under the University of Illinois Open Source
; License. See LICENSE.TXT for details.
;
;===------------------------------------------------------------------------===;
;
; This is an LLVMBuild description file for the components in this subdirectory.
;
; For more information on the LLVMBuild system, please see:
;
; http://llvm.org/docs/LLVMBuild.html
;
;===------------------------------------------------------------------------===;
""" % header_string)
# Write out each component fragment.
for name,fragment in fragments:
comment = comments_map.get(name)
if comment is not None:
f.write(comment)
f.write("[%s]\n" % name)
f.write(fragment)
if fragment is not fragments[-1][1]:
f.write('\n')
f.close()
def write_library_table(self, output_path, enabled_optional_components):
# Write out the mapping from component names to required libraries.
#
# We do this in topological order so that we know we can append the
# dependencies for added library groups.
entries = {}
for c in self.ordered_component_infos:
# Skip optional components which are not enabled.
if c.type_name == 'OptionalLibrary' \
and c.name not in enabled_optional_components:
continue
# Skip target groups which are not enabled.
tg = c.get_parent_target_group()
if tg and not tg.enabled:
continue
# Only certain components are in the table.
if c.type_name not in ('Library', 'OptionalLibrary', \
'LibraryGroup', 'TargetGroup'):
continue
# Compute the llvm-config "component name". For historical reasons,
# this is lowercased based on the library name.
llvmconfig_component_name = c.get_llvmconfig_component_name()
# Get the library name, or None for LibraryGroups.
if c.type_name == 'Library' or c.type_name == 'OptionalLibrary':
library_name = c.get_prefixed_library_name()
is_installed = c.installed
else:
library_name = None
is_installed = True
# Get the component names of all the required libraries.
required_llvmconfig_component_names = [
self.component_info_map[dep].get_llvmconfig_component_name()
for dep in c.required_libraries]
# Insert the entries for library groups we should add to.
for dep in c.add_to_library_groups:
entries[dep][2].append(llvmconfig_component_name)
# Add the entry.
entries[c.name] = (llvmconfig_component_name, library_name,
required_llvmconfig_component_names,
is_installed)
# Convert to a list of entries and sort by name.
entries = list(entries.values())
# Create an 'all' pseudo component. We keep the dependency list small by
# only listing entries that have no other dependents.
root_entries = set(e[0] for e in entries)
for _,_,deps,_ in entries:
root_entries -= set(deps)
entries.append(('all', None, root_entries, True))
entries.sort()
# Compute the maximum number of required libraries, plus one so there is
# always a sentinel.
max_required_libraries = max(len(deps)
for _,_,deps,_ in entries) + 1
# Write out the library table.
make_install_dir(os.path.dirname(output_path))
f = open(output_path+'.new', 'w')
f.write("""\
//===- llvm-build generated file --------------------------------*- C++ -*-===//
//
// Component Library Dependency Table
//
// Automatically generated file, do not edit!
//
//===----------------------------------------------------------------------===//
""")
f.write('struct AvailableComponent {\n')
f.write(' /// The name of the component.\n')
f.write(' const char *Name;\n')
f.write('\n')
f.write(' /// The name of the library for this component (or NULL).\n')
f.write(' const char *Library;\n')
f.write('\n')
f.write(' /// Whether the component is installed.\n')
f.write(' bool IsInstalled;\n')
f.write('\n')
f.write('\
/// The list of libraries required when linking this component.\n')
f.write(' const char *RequiredLibraries[%d];\n' % (
max_required_libraries))
f.write('} AvailableComponents[%d] = {\n' % len(entries))
for name,library_name,required_names,is_installed in entries:
if library_name is None:
library_name_as_cstr = 'nullptr'
else:
library_name_as_cstr = '"%s"' % library_name
if is_installed:
is_installed_as_cstr = 'true'
else:
is_installed_as_cstr = 'false'
f.write(' { "%s", %s, %s, { %s } },\n' % (
name, library_name_as_cstr, is_installed_as_cstr,
', '.join('"%s"' % dep
for dep in required_names)))
f.write('};\n')
f.close()
if not os.path.isfile(output_path):
os.rename(output_path+'.new', output_path)
elif filecmp.cmp(output_path, output_path+'.new'):
os.remove(output_path+'.new')
else:
os.remove(output_path)
os.rename(output_path+'.new', output_path)
def get_required_libraries_for_component(self, ci, traverse_groups = False):
"""
get_required_libraries_for_component(component_info) -> iter
Given a Library component info descriptor, return an iterator over all
of the directly required libraries for linking with this component. If
traverse_groups is True, then library and target groups will be
traversed to include their required libraries.
"""
assert ci.type_name in ('Library', 'OptionalLibrary', 'LibraryGroup', 'TargetGroup')
for name in ci.required_libraries:
# Get the dependency info.
dep = self.component_info_map[name]
# If it is a library, yield it.
if dep.type_name == 'Library' or dep.type_name == 'OptionalLibrary':
yield dep
continue
# Otherwise if it is a group, yield or traverse depending on what
# was requested.
if dep.type_name in ('LibraryGroup', 'TargetGroup'):
if not traverse_groups:
yield dep
continue
for res in self.get_required_libraries_for_component(dep, True):
yield res
def get_fragment_dependencies(self):
"""
get_fragment_dependencies() -> iter
Compute the list of files (as absolute paths) on which the output
fragments depend (i.e., files for which a modification should trigger a
rebuild of the fragment).
"""
# Construct a list of all the dependencies of the Makefile fragment
# itself. These include all the LLVMBuild files themselves, as well as
# all of our own sources.
#
# Many components may come from the same file, so we make sure to unique
# these.
build_paths = set()
for ci in self.component_infos:
p = os.path.join(self.source_root, ci.subpath[1:], 'LLVMBuild.txt')
if p not in build_paths:
yield p
build_paths.add(p)
# Gather the list of necessary sources by just finding all loaded
# modules that are inside the LLVM source tree.
for module in sys.modules.values():
# Find the module path.
if not hasattr(module, '__file__'):
continue
path = getattr(module, '__file__')
if not path:
continue
# Strip off any compiled suffix.
if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']:
path = path[:-1]
# If the path exists and is in the source tree, consider it a
# dependency.
if (path.startswith(self.source_root) and os.path.exists(path)):
yield path
def foreach_cmake_library(self, f,
enabled_optional_components,
skip_disabled,
skip_not_installed):
for ci in self.ordered_component_infos:
# Skip optional components which are not enabled.
if ci.type_name == 'OptionalLibrary' \
and ci.name not in enabled_optional_components:
continue
# We only write the information for libraries currently.
if ci.type_name not in ('Library', 'OptionalLibrary'):
continue
# Skip disabled targets.
if skip_disabled:
tg = ci.get_parent_target_group()
if tg and not tg.enabled:
continue
# Skip targets that will not be installed
if skip_not_installed and not ci.installed:
continue
f(ci)
def write_cmake_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_fragment(output_path, enabled_optional_components) -> None
Generate a CMake fragment which includes all of the collated LLVMBuild
information in a format that is easily digestible by CMake. The exact
contents of this are closely tied to how the CMake configuration
integrates LLVMBuild, see CMakeLists.txt in the top-level.
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
# Write the header.
header_fmt = '\
#===-- %s - LLVMBuild Configuration for LLVM %s-*- CMake -*--===#'
header_name = os.path.basename(output_path)
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# This file contains the LLVMBuild project information in a format easily
# consumed by the CMake based build system.
#
# This file is autogenerated by llvm-build, do not edit!
#
#===------------------------------------------------------------------------===#
""" % header_string)
# Write the dependency information in the best way we can.
f.write("""
# LLVMBuild CMake fragment dependencies.
#
# CMake has no builtin way to declare that the configuration depends on
# a particular file. However, a side effect of configure_file is to add
# said input file to CMake's internal dependency list. So, we use that
# and a dummy output file to communicate the dependency information to
# CMake.
#
# FIXME: File a CMake RFE to get a properly supported version of this
# feature.
""")
for dep in dependencies:
f.write("""\
configure_file(\"%s\"
${CMAKE_CURRENT_BINARY_DIR}/DummyConfigureOutput)\n""" % (
cmake_quote_path(dep),))
# Write the properties we use to encode the required library dependency
# information in a form CMake can easily use directly.
f.write("""
# Explicit library dependency information.
#
# The following property assignments effectively create a map from component
# names to required libraries, in a way that is easily accessed from CMake.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(GLOBAL PROPERTY LLVMBUILD_LIB_DEPS_%s %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = False,
skip_not_installed = False # Dependency info must be emitted for internal libs too
)
f.close()
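# Editor's sketch of the emitted shape (names invented): for a hypothetical
# component "Demo" that requires "Support", the fragment written above contains
# a line of the form
#
#   set_property(GLOBAL PROPERTY LLVMBUILD_LIB_DEPS_LLVMDemo LLVMSupport)
#
# which the CMake side can read back (see the top-level CMakeLists.txt noted
# in the docstring).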
def write_cmake_exports_fragment(self, output_path, enabled_optional_components):
"""
write_cmake_exports_fragment(output_path, enabled_optional_components) -> None
Generate a CMake fragment which includes LLVMBuild library
dependencies expressed similarly to how CMake would write
them via install(EXPORT).
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the CMake exports fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
f.write("""\
# Explicit library dependency information.
#
# The following property assignments tell CMake about link
# dependencies of libraries imported from LLVM.
""")
self.foreach_cmake_library(
lambda ci:
f.write("""\
set_property(TARGET %s PROPERTY IMPORTED_LINK_INTERFACE_LIBRARIES %s)\n""" % (
ci.get_prefixed_library_name(), " ".join(sorted(
dep.get_prefixed_library_name()
for dep in self.get_required_libraries_for_component(ci)))))
,
enabled_optional_components,
skip_disabled = True,
skip_not_installed = True # Do not export internal libraries like gtest
)
f.close()
def write_make_fragment(self, output_path, enabled_optional_components):
"""
write_make_fragment(output_path, enabled_optional_components) -> None
Generate a Makefile fragment which includes all of the collated
LLVMBuild information in a format that is easily digestible by a
Makefile. The exact contents of this are closely tied to how the LLVM
Makefiles integrate LLVMBuild, see Makefile.rules in the top-level.
"""
dependencies = list(self.get_fragment_dependencies())
# Write out the Makefile fragment.
make_install_dir(os.path.dirname(output_path))
f = open(output_path, 'w')
# Write the header.
header_fmt = '\
#===-- %s - LLVMBuild Configuration for LLVM %s-*- Makefile -*--===#'
header_name = os.path.basename(output_path)
header_pad = '-' * (80 - len(header_fmt % (header_name, '')))
header_string = header_fmt % (header_name, header_pad)
f.write("""\
%s
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
#
# This file contains the LLVMBuild project information in a format easily
# consumed by the Makefile based build system.
#
# This file is autogenerated by llvm-build, do not edit!
#
#===------------------------------------------------------------------------===#
""" % header_string)
# Write the dependencies for the fragment.
#
# FIXME: Technically, we need to properly quote for Make here.
f.write("""\
# Clients must explicitly enable LLVMBUILD_INCLUDE_DEPENDENCIES to get
# these dependencies. This is a compromise to help improve the
# performance of recursive Make systems.
""")
f.write('ifeq ($(LLVMBUILD_INCLUDE_DEPENDENCIES),1)\n')
f.write("# The dependencies for this Makefile fragment itself.\n")
f.write("%s: \\\n" % (mk_quote_string_for_target(output_path),))
for dep in dependencies:
f.write("\t%s \\\n" % (dep,))
f.write('\n')
# Generate dummy rules for each of the dependencies, so that things
# continue to work correctly if any of those files are moved or removed.
f.write("""\
# The dummy targets to allow proper regeneration even when files are moved or
# removed.
""")
for dep in dependencies:
f.write("%s:\n" % (mk_quote_string_for_target(dep),))
f.write('endif\n')
f.write("""
# List of libraries to be exported for use by applications.
# See 'cmake/modules/Makefile'.
LLVM_LIBS_TO_EXPORT :=""")
self.foreach_cmake_library(
lambda ci:
f.write(' \\\n %s' % ci.get_prefixed_library_name())
,
enabled_optional_components,
skip_disabled = True,
skip_not_installed = True # Do not export internal libraries like gtest
)
f.write('\n')
f.close()
def add_magic_target_components(parser, project, opts):
"""add_magic_target_components(project, opts) -> None
Add the "magic" target based components to the project, which can only be
determined based on the target configuration options.
This is currently responsible for populating the required_libraries list of
the "all-targets", "Native", "NativeCodeGen", and "Engine" components.
"""
# Determine the available targets.
available_targets = dict((ci.name,ci)
for ci in project.component_infos
if ci.type_name == 'TargetGroup')
# Find the configured native target.
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
native_target_name = { 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
if native_target_name is None:
native_target = None
else:
native_target = available_targets.get(native_target_name)
if native_target is None:
parser.error("invalid native target: %r (not in project)" % (
opts.native_target,))
if native_target.type_name != 'TargetGroup':
parser.error("invalid native target: %r (not a target)" % (
opts.native_target,))
# Find the list of targets to enable.
if opts.enable_targets is None:
enable_targets = available_targets.values()
else:
# We support both space separated and semi-colon separated lists.
if opts.enable_targets == '':
enable_target_names = []
elif ' ' in opts.enable_targets:
enable_target_names = opts.enable_targets.split()
else:
enable_target_names = opts.enable_targets.split(';')
enable_targets = []
for name in enable_target_names:
target = available_targets.get(name)
if target is None:
parser.error("invalid target to enable: %r (not in project)" % (
name,))
if target.type_name != 'TargetGroup':
parser.error("invalid target to enable: %r (not a target)" % (
name,))
enable_targets.append(target)
# Find the special library groups we are going to populate. We enforce that
# these appear in the project (instead of just adding them) so that they at
# least have an explicit representation in the project LLVMBuild files (and
# comments explaining how they are populated).
def find_special_group(name):
info = info_map.get(name)
if info is None:
fatal("expected project to contain special %r component" % (
name,))
if info.type_name != 'LibraryGroup':
fatal("special component %r should be a LibraryGroup" % (
name,))
if info.required_libraries:
fatal("special component %r must have empty %r list" % (
name, 'required_libraries'))
if info.add_to_library_groups:
fatal("special component %r must have empty %r list" % (
name, 'add_to_library_groups'))
info._is_special_group = True
return info
info_map = dict((ci.name, ci) for ci in project.component_infos)
all_targets = find_special_group('all-targets')
native_group = find_special_group('Native')
native_codegen_group = find_special_group('NativeCodeGen')
engine_group = find_special_group('Engine')
# Set the enabled bit in all the target groups, and append to the
# all-targets list.
for ci in enable_targets:
all_targets.required_libraries.append(ci.name)
ci.enabled = True
# If we have a native target, then that defines the native and
# native_codegen libraries.
if native_target and native_target.enabled:
native_group.required_libraries.append(native_target.name)
native_codegen_group.required_libraries.append(
'%sCodeGen' % native_target.name)
# If we have a native target with a JIT, use that for the engine. Otherwise,
# use the interpreter.
if native_target and native_target.enabled and native_target.has_jit:
engine_group.required_libraries.append('MCJIT')
engine_group.required_libraries.append(native_group.name)
else:
engine_group.required_libraries.append('Interpreter')
def main():
from optparse import OptionParser, OptionGroup
parser = OptionParser("usage: %prog [options]")
group = OptionGroup(parser, "Input Options")
group.add_option("", "--source-root", dest="source_root", metavar="PATH",
help="Path to the LLVM source (inferred if not given)",
action="store", default=None)
group.add_option("", "--llvmbuild-source-root",
dest="llvmbuild_source_root",
help=(
"If given, an alternate path to search for LLVMBuild.txt files"),
action="store", default=None, metavar="PATH")
group.add_option("", "--build-root", dest="build_root", metavar="PATH",
help="Path to the build directory (if needed) [%default]",
action="store", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Output Options")
group.add_option("", "--print-tree", dest="print_tree",
help="Print out the project component tree [%default]",
action="store_true", default=False)
group.add_option("", "--write-llvmbuild", dest="write_llvmbuild",
help="Write out the LLVMBuild.txt files to PATH",
action="store", default=None, metavar="PATH")
group.add_option("", "--write-library-table",
dest="write_library_table", metavar="PATH",
help="Write the C++ library dependency table to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-fragment",
dest="write_cmake_fragment", metavar="PATH",
help="Write the CMake project information to PATH",
action="store", default=None)
group.add_option("", "--write-cmake-exports-fragment",
dest="write_cmake_exports_fragment", metavar="PATH",
help="Write the CMake exports information to PATH",
action="store", default=None)
group.add_option("", "--write-make-fragment",
dest="write_make_fragment", metavar="PATH",
help="Write the Makefile project information to PATH",
action="store", default=None)
group.add_option("", "--configure-target-def-file",
dest="configure_target_def_files",
help="""Configure the given file at SUBPATH (relative to
the inferred or given source root, and with a '.in' suffix) by replacing certain
substitution variables with lists of targets that support certain features (for
example, targets with AsmPrinters) and write the result to the build root (as
given by --build-root) at the same SUBPATH""",
metavar="SUBPATH", action="append", default=None)
parser.add_option_group(group)
group = OptionGroup(parser, "Configuration Options")
group.add_option("", "--native-target",
dest="native_target", metavar="NAME",
help=("Treat the named target as the 'native' one, if "
"given [%default]"),
action="store", default=None)
group.add_option("", "--enable-targets",
dest="enable_targets", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of targets, or all targets if not present"),
action="store", default=None)
group.add_option("", "--enable-optional-components",
dest="optional_components", metavar="NAMES",
help=("Enable the given space or semi-colon separated "
"list of optional components"),
action="store", default="")
parser.add_option_group(group)
(opts, args) = parser.parse_args()
# Determine the LLVM source path, if not given.
source_root = opts.source_root
if source_root:
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('invalid LLVM source root: %r' % source_root)
else:
llvmbuild_path = os.path.dirname(__file__)
llvm_build_path = os.path.dirname(llvmbuild_path)
utils_path = os.path.dirname(llvm_build_path)
source_root = os.path.dirname(utils_path)
if not os.path.exists(os.path.join(source_root, 'lib', 'IR',
'Function.cpp')):
parser.error('unable to infer LLVM source root, please specify')
# Construct the LLVM project information.
llvmbuild_source_root = opts.llvmbuild_source_root or source_root
project_info = LLVMProjectInfo.load_from_path(
source_root, llvmbuild_source_root)
# Add the magic target based components.
add_magic_target_components(parser, project_info, opts)
# Validate the project component info.
project_info.validate_components()
# Print the component tree, if requested.
if opts.print_tree:
project_info.print_tree()
# Write out the components, if requested. This is useful for auto-upgrading
# the schema.
if opts.write_llvmbuild:
project_info.write_components(opts.write_llvmbuild)
# Write out the required library table, if requested.
if opts.write_library_table:
project_info.write_library_table(opts.write_library_table,
opts.optional_components)
# Write out the make fragment, if requested.
if opts.write_make_fragment:
project_info.write_make_fragment(opts.write_make_fragment,
opts.optional_components)
# Write out the cmake fragment, if requested.
if opts.write_cmake_fragment:
project_info.write_cmake_fragment(opts.write_cmake_fragment,
opts.optional_components)
if opts.write_cmake_exports_fragment:
project_info.write_cmake_exports_fragment(opts.write_cmake_exports_fragment,
opts.optional_components)
# Configure target definition files, if requested.
if opts.configure_target_def_files:
# Verify we were given a build root.
if not opts.build_root:
parser.error("must specify --build-root when using "
"--configure-target-def-file")
# Create the substitution list.
available_targets = [ci for ci in project_info.component_infos
if ci.type_name == 'TargetGroup']
substitutions = [
("@LLVM_ENUM_TARGETS@",
' '.join('LLVM_TARGET(%s)' % ci.name
for ci in available_targets)),
("@LLVM_ENUM_ASM_PRINTERS@",
' '.join('LLVM_ASM_PRINTER(%s)' % ci.name
for ci in available_targets
if ci.has_asmprinter)),
("@LLVM_ENUM_ASM_PARSERS@",
' '.join('LLVM_ASM_PARSER(%s)' % ci.name
for ci in available_targets
if ci.has_asmparser)),
("@LLVM_ENUM_DISASSEMBLERS@",
' '.join('LLVM_DISASSEMBLER(%s)' % ci.name
for ci in available_targets
if ci.has_disassembler))]
# Configure the given files.
for subpath in opts.configure_target_def_files:
inpath = os.path.join(source_root, subpath + '.in')
outpath = os.path.join(opts.build_root, subpath)
result = configutil.configure_file(inpath, outpath, substitutions)
if not result:
note("configured file %r hasn't changed" % outpath)
if __name__=='__main__':
main()
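# Editor's note -- a typical invocation, for orientation only (the output file
# names here are hypothetical; the flags match the OptionParser setup in main()
# above):
#
#   llvm-build --native-target=X86 --enable-targets="X86;ARM" \
#       --write-library-table=LibraryDependencies.inc \
#       --write-cmake-fragment=LLVMBuild.cmake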
| 40,402 | 39.242032 | 95 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/llvm-build/llvmbuild/configutil.py | """
Defines utilities useful for performing standard "configuration" style tasks.
"""
import re
import os
def configure_file(input_path, output_path, substitutions):
"""configure_file(input_path, output_path, substitutions) -> bool
Given an input and output path, "configure" the file at the given input path
by replacing variables in the file with those given in the substitutions
list. Returns true if the output file was written.
The substitutions list should be given as a list of tuples (regex string,
replacement), where the regex and replacement will be used as in 're.sub' to
execute the variable replacement.
The output path's parent directory need not exist (it will be created).
If the output path already exists and the configured data is no different from
its current contents, the output file will not be modified. This is
designed to limit the impact of configured files on build dependencies.
"""
# Read in the input data.
f = open(input_path, "rb")
try:
data = f.read()
finally:
f.close()
# Perform the substitutions.
for regex_string,replacement in substitutions:
regex = re.compile(regex_string)
data = regex.sub(replacement, data)
# Ensure the output parent directory exists.
output_parent_path = os.path.dirname(os.path.abspath(output_path))
if not os.path.exists(output_parent_path):
os.makedirs(output_parent_path)
# If the output path exists, load it and compare to the configured contents.
if os.path.exists(output_path):
current_data = None
try:
f = open(output_path, "rb")
try:
current_data = f.read()
except:
current_data = None
f.close()
except:
current_data = None
if current_data is not None and current_data == data:
return False
# Write the output contents.
f = open(output_path, "wb")
try:
f.write(data)
finally:
f.close()
return True
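# A minimal, self-contained usage sketch (editor's addition; not used by the
# build, and written for Python 2 like the rest of this module).  The file
# names and the @DEMO_VERSION@ variable are invented.
if __name__ == '__main__':
    import tempfile
    demo_dir = tempfile.mkdtemp()
    template = os.path.join(demo_dir, 'demo.h.in')
    output = os.path.join(demo_dir, 'demo.h')
    with open(template, 'w') as f:
        f.write('#define DEMO_VERSION "@DEMO_VERSION@"\n')
    subs = [(r'@DEMO_VERSION@', '1.2.3')]
    print(configure_file(template, output, subs))  # True: output was written
    print(configure_file(template, output, subs))  # False: already up to date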
| 2,084 | 30.119403 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/llvm-build/llvmbuild/util.py | import os
import sys
def _write_message(kind, message):
program = os.path.basename(sys.argv[0])
sys.stderr.write('%s: %s: %s\n' % (program, kind, message))
note = lambda message: _write_message('note', message)
warning = lambda message: _write_message('warning', message)
error = lambda message: _write_message('error', message)
fatal = lambda message: (_write_message('fatal error', message), sys.exit(1))
__all__ = ['note', 'warning', 'error', 'fatal']
| 466 | 32.357143 | 77 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/llvm-build/llvmbuild/__init__.py | from llvmbuild.main import main
| 32 | 15.5 | 31 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/llvm-build/llvmbuild/componentinfo.py | """
Descriptor objects for entities that are part of the LLVM project.
"""
from __future__ import absolute_import
try:
import configparser
except:
import ConfigParser as configparser
import sys
from llvmbuild.util import fatal, warning
class ParseError(Exception):
pass
class ComponentInfo(object):
"""
Base class for component descriptions.
"""
type_name = None
@staticmethod
def parse_items(items, has_dependencies = True):
kwargs = {}
kwargs['name'] = items.get_string('name')
kwargs['parent'] = items.get_optional_string('parent')
if has_dependencies:
kwargs['dependencies'] = items.get_list('dependencies')
return kwargs
def __init__(self, subpath, name, dependencies, parent):
if not subpath.startswith('/'):
raise ValueError("invalid subpath: %r" % subpath)
self.subpath = subpath
self.name = name
self.dependencies = list(dependencies)
# The name of the parent component to logically group this component
# under.
self.parent = parent
# The parent instance, once loaded.
self.parent_instance = None
self.children = []
# The original source path.
self._source_path = None
# A flag to mark "special" components which have some amount of magic
# handling (generally based on command line options).
self._is_special_group = False
def set_parent_instance(self, parent):
assert parent.name == self.parent, "Unexpected parent!"
self.parent_instance = parent
self.parent_instance.children.append(self)
def get_component_references(self):
"""get_component_references() -> iter
Return an iterator over the named references to other components from
this object. Items are of the form (reference-type, component-name).
"""
# Parent references are handled specially.
for r in self.dependencies:
yield ('dependency', r)
def get_llvmbuild_fragment(self):
abstract
def get_parent_target_group(self):
"""get_parent_target_group() -> ComponentInfo or None
Return the nearest parent target group (if any), or None if the
component is not part of any target group.
"""
# If this is a target group, return it.
if self.type_name == 'TargetGroup':
return self
# Otherwise recurse on the parent, if any.
if self.parent_instance:
return self.parent_instance.get_parent_target_group()
class GroupComponentInfo(ComponentInfo):
"""
Group components have no semantics as far as the build system is concerned,
but exist to help organize other components into a logical tree structure.
"""
type_name = 'Group'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
return GroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent):
ComponentInfo.__init__(self, subpath, name, [], parent)
def get_llvmbuild_fragment(self):
return """\
type = %s
name = %s
parent = %s
""" % (self.type_name, self.name, self.parent)
class LibraryComponentInfo(ComponentInfo):
type_name = 'Library'
@staticmethod
def parse_items(items):
kwargs = ComponentInfo.parse_items(items)
kwargs['library_name'] = items.get_optional_string('library_name')
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
kwargs['installed'] = items.get_optional_bool('installed', True)
return kwargs
@staticmethod
def parse(subpath, items):
kwargs = LibraryComponentInfo.parse_items(items)
return LibraryComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent, library_name,
required_libraries, add_to_library_groups, installed):
ComponentInfo.__init__(self, subpath, name, dependencies, parent)
# If given, the name to use for the library instead of deriving it from
# the component name.
self.library_name = library_name
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
# Whether or not this library is installed.
self.installed = installed
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = """\
type = %s
name = %s
parent = %s
""" % (self.type_name, self.name, self.parent)
if self.library_name is not None:
result += 'library_name = %s\n' % self.library_name
if self.required_libraries:
result += 'required_libraries = %s\n' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
result += 'add_to_library_groups = %s\n' % ' '.join(
self.add_to_library_groups)
if not self.installed:
result += 'installed = 0\n'
return result
def get_library_name(self):
return self.library_name or self.name
def get_prefixed_library_name(self):
"""
get_prefixed_library_name() -> str
Return the library name prefixed by the project name. This is generally
what the library name will be on disk.
"""
basename = self.get_library_name()
# FIXME: We need to get the prefix information from an explicit project
# object, or something.
if basename in ('gtest', 'gtest_main'):
return basename
return 'LLVM%s' % basename
def get_llvmconfig_component_name(self):
return self.get_library_name().lower()
class OptionalLibraryComponentInfo(LibraryComponentInfo):
type_name = "OptionalLibrary"
@staticmethod
def parse(subpath, items):
kwargs = LibraryComponentInfo.parse_items(items)
return OptionalLibraryComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent, library_name,
required_libraries, add_to_library_groups, installed):
LibraryComponentInfo.__init__(self, subpath, name, dependencies, parent,
library_name, required_libraries,
add_to_library_groups, installed)
class LibraryGroupComponentInfo(ComponentInfo):
type_name = 'LibraryGroup'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
return LibraryGroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent, required_libraries = [],
add_to_library_groups = []):
ComponentInfo.__init__(self, subpath, name, [], parent)
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = """\
type = %s
name = %s
parent = %s
""" % (self.type_name, self.name, self.parent)
if self.required_libraries and not self._is_special_group:
result += 'required_libraries = %s\n' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
result += 'add_to_library_groups = %s\n' % ' '.join(
self.add_to_library_groups)
return result
def get_llvmconfig_component_name(self):
return self.name.lower()
class TargetGroupComponentInfo(ComponentInfo):
type_name = 'TargetGroup'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items, has_dependencies = False)
kwargs['required_libraries'] = items.get_list('required_libraries')
kwargs['add_to_library_groups'] = items.get_list(
'add_to_library_groups')
kwargs['has_jit'] = items.get_optional_bool('has_jit', False)
kwargs['has_asmprinter'] = items.get_optional_bool('has_asmprinter',
False)
kwargs['has_asmparser'] = items.get_optional_bool('has_asmparser',
False)
kwargs['has_disassembler'] = items.get_optional_bool('has_disassembler',
False)
return TargetGroupComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, parent, required_libraries = [],
add_to_library_groups = [], has_jit = False,
has_asmprinter = False, has_asmparser = False,
has_disassembler = False):
ComponentInfo.__init__(self, subpath, name, [], parent)
# The names of the library components which are required when linking
# with this component.
self.required_libraries = list(required_libraries)
# The names of the library group components this component should be
# considered part of.
self.add_to_library_groups = list(add_to_library_groups)
# Whether or not this target supports the JIT.
self.has_jit = bool(has_jit)
# Whether or not this target defines an assembly printer.
self.has_asmprinter = bool(has_asmprinter)
# Whether or not this target defines an assembly parser.
self.has_asmparser = bool(has_asmparser)
# Whether or not this target defines a disassembler.
self.has_disassembler = bool(has_disassembler)
# Whether or not this target is enabled. This is set in response to
# configuration parameters.
self.enabled = False
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
for r in self.add_to_library_groups:
yield ('library group', r)
def get_llvmbuild_fragment(self):
result = """\
type = %s
name = %s
parent = %s
""" % (self.type_name, self.name, self.parent)
if self.required_libraries:
result += 'required_libraries = %s\n' % ' '.join(
self.required_libraries)
if self.add_to_library_groups:
result += 'add_to_library_groups = %s\n' % ' '.join(
self.add_to_library_groups)
for bool_key in ('has_asmparser', 'has_asmprinter', 'has_disassembler',
'has_jit'):
if getattr(self, bool_key):
result += '%s = 1\n' % (bool_key,)
return result
def get_llvmconfig_component_name(self):
return self.name.lower()
class ToolComponentInfo(ComponentInfo):
type_name = 'Tool'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items)
kwargs['required_libraries'] = items.get_list('required_libraries')
return ToolComponentInfo(subpath, **kwargs)
def __init__(self, subpath, name, dependencies, parent,
required_libraries):
ComponentInfo.__init__(self, subpath, name, dependencies, parent)
# The names of the library components which are required to link this
# tool.
self.required_libraries = list(required_libraries)
def get_component_references(self):
for r in ComponentInfo.get_component_references(self):
yield r
for r in self.required_libraries:
yield ('required library', r)
def get_llvmbuild_fragment(self):
return """\
type = %s
name = %s
parent = %s
required_libraries = %s
""" % (self.type_name, self.name, self.parent,
' '.join(self.required_libraries))
class BuildToolComponentInfo(ToolComponentInfo):
type_name = 'BuildTool'
@staticmethod
def parse(subpath, items):
kwargs = ComponentInfo.parse_items(items)
kwargs['required_libraries'] = items.get_list('required_libraries')
return BuildToolComponentInfo(subpath, **kwargs)
###
class IniFormatParser(dict):
def get_list(self, key):
# Check if the value is defined.
value = self.get(key)
if value is None:
return []
# Lists are just whitespace separated strings.
return value.split()
def get_optional_string(self, key):
value = self.get_list(key)
if not value:
return None
if len(value) > 1:
raise ParseError("multiple values for scalar key: %r" % key)
return value[0]
def get_string(self, key):
value = self.get_optional_string(key)
if not value:
raise ParseError("missing value for required string: %r" % key)
return value
def get_optional_bool(self, key, default = None):
value = self.get_optional_string(key)
if not value:
return default
if value not in ('0', '1'):
raise ParseError("invalid value(%r) for boolean property: %r" % (
value, key))
return bool(int(value))
def get_bool(self, key):
value = self.get_optional_bool(key)
if value is None:
raise ParseError("missing value for required boolean: %r" % key)
return value
_component_type_map = dict(
(t.type_name, t)
for t in (GroupComponentInfo,
LibraryComponentInfo, LibraryGroupComponentInfo,
ToolComponentInfo, BuildToolComponentInfo,
TargetGroupComponentInfo, OptionalLibraryComponentInfo))
def load_from_path(path, subpath):
# Load the LLVMBuild.txt file as an .ini format file.
parser = configparser.RawConfigParser()
parser.read(path)
# Extract the common section.
if parser.has_section("common"):
common = IniFormatParser(parser.items("common"))
parser.remove_section("common")
else:
common = IniFormatParser({})
return common, _read_components_from_parser(parser, path, subpath)
def _read_components_from_parser(parser, path, subpath):
# We load each section which starts with 'component' as a distinct component
# description (so multiple components can be described in one file).
for section in parser.sections():
if not section.startswith('component'):
# We don't expect arbitrary sections currently, so warn the user.
warning("ignoring unknown section %r in %r" % (section, path))
continue
# Determine the type of the component to instantiate.
if not parser.has_option(section, 'type'):
fatal("invalid component %r in %r: %s" % (
section, path, "no component type"))
type_name = parser.get(section, 'type')
type_class = _component_type_map.get(type_name)
if type_class is None:
fatal("invalid component %r in %r: %s" % (
section, path, "invalid component type: %r" % type_name))
# Instantiate the component based on the remaining values.
try:
info = type_class.parse(subpath,
IniFormatParser(parser.items(section)))
except TypeError:
sys.stderr.write("error: invalid component %r in %r: %s\n" % (
section, path, "unable to instantiate: %r" % type_name))
import traceback
traceback.print_exc()
raise SystemExit(1)
except ParseError:
e = sys.exc_info()[1]
fatal("unable to load component %r in %r: %s" % (
section, path, e))
info._source_path = path
yield info
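# A hedged, self-contained sketch (editor's addition; not part of llvm-build
# proper): build a one-component LLVMBuild.txt in a temporary directory and
# load it back to show what this module produces.  The "DemoSupport" component
# and its dependencies are invented.
if __name__ == '__main__':
    import os
    import tempfile
    demo_path = os.path.join(tempfile.mkdtemp(), 'LLVMBuild.txt')
    f = open(demo_path, 'w')
    f.write("[component_0]\n"
            "type = Library\n"
            "name = DemoSupport\n"
            "parent = Libraries\n"
            "required_libraries = Core Support\n")
    f.close()
    common, infos = load_from_path(demo_path, '/Demo')
    for ci in infos:
        print("%s %s (requires: %s)" % (ci.type_name, ci.name,
                                        ' '.join(ci.required_libraries)))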
| 16,943 | 34.596639 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/testgen/mc-bundling-x86-gen.py |
#!/usr/bin/python
# Auto-generates an exhaustive and repetitive test for correct bundle-locked
# alignment on x86.
# For every possible offset in an aligned bundle, a bundle-locked group of every
# size in the inclusive range [1, bundle_size] is inserted. An appropriate CHECK
# is added to verify that NOP padding occurred (or did not occur) as expected.
# Run with --align-to-end to generate a similar test with align_to_end for each
# .bundle_lock directive.
# This script runs with Python 2.7 and 3.2+
from __future__ import print_function
import argparse
BUNDLE_SIZE_POW2 = 4
BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2
PREAMBLE = '''
# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\
# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!!
# It tests that bundle-aligned grouping works correctly in MC. Read the
# source of the script for more details.
.text
.bundle_align_mode {0}
'''.format(BUNDLE_SIZE_POW2).lstrip()
ALIGNTO = ' .align {0}, 0x90'
NOPFILL = ' .fill {0}, 1, 0x90'
def print_bundle_locked_sequence(len, align_to_end=False):
print(' .bundle_lock{0}'.format(' align_to_end' if align_to_end else ''))
print(' .rept {0}'.format(len))
print(' inc %eax')
print(' .endr')
print(' .bundle_unlock')
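# For reference (editor's note): print_bundle_locked_sequence(3) emits exactly
# these five directives, which the generated CHECK lines below then verify
# after assembly and disassembly:
#
#   .bundle_lock
#   .rept 3
#   inc %eax
#   .endr
#   .bundle_unlock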
def generate(align_to_end=False):
print(PREAMBLE)
ntest = 0
for instlen in range(1, BUNDLE_SIZE + 1):
for offset in range(0, BUNDLE_SIZE):
# Spread out all the instructions to not worry about cross-bundle
# interference.
print(ALIGNTO.format(2 * BUNDLE_SIZE))
print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset))
if offset > 0:
print(NOPFILL.format(offset))
print_bundle_locked_sequence(instlen, align_to_end)
# Now generate an appropriate CHECK line
base_offset = ntest * 2 * BUNDLE_SIZE
inst_orig_offset = base_offset + offset # had it not been padded...
def print_check(adjusted_offset=None, nop_split_offset=None):
if adjusted_offset is not None:
print('# CHECK: {0:x}: nop'.format(inst_orig_offset))
if nop_split_offset is not None:
print('# CHECK: {0:x}: nop'.format(nop_split_offset))
print('# CHECK: {0:x}: incl'.format(adjusted_offset))
else:
print('# CHECK: {0:x}: incl'.format(inst_orig_offset))
if align_to_end:
if offset + instlen == BUNDLE_SIZE:
# No padding needed
print_check()
elif offset + instlen < BUNDLE_SIZE:
# Pad to end at nearest bundle boundary
offset_to_end = base_offset + (BUNDLE_SIZE - instlen)
print_check(offset_to_end)
else: # offset + instlen > BUNDLE_SIZE
# Pad to end at next bundle boundary, splitting the nop sequence
# at the nearest bundle boundary
offset_to_nearest_bundle = base_offset + BUNDLE_SIZE
offset_to_end = base_offset + (BUNDLE_SIZE * 2 - instlen)
if offset_to_nearest_bundle == offset_to_end:
offset_to_nearest_bundle = None
print_check(offset_to_end, offset_to_nearest_bundle)
else:
if offset + instlen > BUNDLE_SIZE:
# Padding needed
aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
print_check(aligned_offset)
else:
# No padding needed
print_check()
print()
ntest += 1
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--align-to-end',
action='store_true',
help='generate .bundle_lock with align_to_end option')
args = argparser.parse_args()
generate(align_to_end=args.align_to_end)
| 3,818 | 35.721154 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/opt-viewer/opt-viewer.py | #!/usr/bin/env python2.7
from __future__ import print_function
desc = '''Generate HTML output to visualize optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tool requires the PyYAML and Pygments Python packages.
For faster parsing, you may want to use libYAML with PyYAML.'''
import yaml
# Try to use the C parser.
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import argparse
import os.path
import re
import subprocess
import shutil
from pygments import highlight
from pygments.lexers.c_cpp import CppLexer
from pygments.formatters import HtmlFormatter
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('yaml_files', nargs='+')
parser.add_argument('output_dir')
parser.add_argument('-source-dir', '-s', default='', help='set source directory')
args = parser.parse_args()
p = subprocess.Popen(['c++filt', '-n'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def demangle(name):
p.stdin.write(name + '\n')
return p.stdout.readline().rstrip()
class Remark(yaml.YAMLObject):
max_hotness = 0
# Work-around for http://pyyaml.org/ticket/154.
yaml_loader = Loader
@classmethod
def should_display_hotness(cls):
# If max_hotness is 0 at the end, we assume hotness information is
# missing and no relative hotness information is displayed
return cls.max_hotness != 0
# Map function names to their source location for functions where inlining happened
caller_loc = dict()
def __getattr__(self, name):
# If hotness is missing, assume 0
if name == 'Hotness':
return 0
raise AttributeError
@property
def File(self):
return self.DebugLoc['File']
@property
def Line(self):
return int(self.DebugLoc['Line'])
@property
def Column(self):
return self.DebugLoc['Column']
@property
def DebugLocString(self):
return "{}:{}:{}".format(self.File, self.Line, self.Column)
@property
def DemangledFunctionName(self):
return demangle(self.Function)
@classmethod
def make_link(cls, File, Line):
return "{}#L{}".format(SourceFileRenderer.html_file_name(File), Line)
@property
def Link(self):
return Remark.make_link(self.File, self.Line)
def getArgString(self, mapping):
mapping = mapping.copy()
dl = mapping.get('DebugLoc')
if dl:
del mapping['DebugLoc']
assert(len(mapping) == 1)
(key, value) = mapping.items()[0]
if key == 'Caller' or key == 'Callee':
value = demangle(value)
if dl and key != 'Caller':
return "<a href={}>{}</a>".format(
Remark.make_link(dl['File'], dl['Line']), value)
else:
return value
@property
def message(self):
# Args is a list of mappings (dictionaries)
values = [self.getArgString(mapping) for mapping in self.Args]
return "".join(values)
@property
def RelativeHotness(self):
if Remark.should_display_hotness():
return "{}%".format(int(round(self.Hotness * 100 / Remark.max_hotness)))
else:
return ''
@property
def key(self):
return (self.__class__, self.Pass, self.Name, self.File, self.Line, self.Column, self.message)
class Analysis(Remark):
yaml_tag = '!Analysis'
@property
def color(self):
return "white"
class AnalysisFPCommute(Analysis):
yaml_tag = '!AnalysisFPCommute'
class AnalysisAliasing(Analysis):
yaml_tag = '!AnalysisAliasing'
class Passed(Remark):
yaml_tag = '!Passed'
@property
def color(self):
return "green"
class Missed(Remark):
yaml_tag = '!Missed'
@property
def color(self):
return "red"
class SourceFileRenderer:
def __init__(self, filename):
existing_filename = None
if os.path.exists(filename):
existing_filename = filename
else:
fn = os.path.join(args.source_dir, filename)
if os.path.exists(fn):
existing_filename = fn
self.stream = open(os.path.join(args.output_dir, SourceFileRenderer.html_file_name(filename)), 'w')
if existing_filename:
self.source_stream = open(existing_filename)
else:
self.source_stream = None
print('''
<html>
<h1>Unable to locate file {}</h1>
</html>
'''.format(filename), file=self.stream)
self.html_formatter = HtmlFormatter()
self.cpp_lexer = CppLexer()
def render_source_line(self, linenum, line):
html_line = highlight(line, self.cpp_lexer, self.html_formatter)
print('''
<tr>
<td><a name=\"L{linenum}\">{linenum}</a></td>
<td></td>
<td></td>
<td>{html_line}</td>
</tr>'''.format(**locals()), file=self.stream)
def render_inline_remarks(self, r, line):
inlining_context = r.DemangledFunctionName
dl = Remark.caller_loc.get(r.Function)
if dl:
link = Remark.make_link(dl['File'], dl['Line'] - 2)
inlining_context = "<a href={link}>{r.DemangledFunctionName}</a>".format(**locals())
# Column is the number of characters *including* tabs, keep those and
# replace everything else with spaces.
indent = line[:r.Column - 1]
indent = re.sub('\S', ' ', indent)
print('''
<tr>
<td></td>
<td>{r.RelativeHotness}</td>
<td class=\"column-entry-{r.color}\">{r.Pass}</td>
<td><pre style="display:inline">{indent}</pre><span class=\"column-entry-yellow\"> {r.message} </span></td>
<td class=\"column-entry-yellow\">{inlining_context}</td>
</tr>'''.format(**locals()), file=self.stream)
def render(self, line_remarks):
if not self.source_stream:
return
print('''
<html>
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table>
<tr>
<td>Line</td>
<td>Hotness</td>
<td>Optimization</td>
<td>Source</td>
<td>Inline Context</td>
</tr>''', file=self.stream)
for (linenum, line) in enumerate(self.source_stream.readlines(), start=1):
self.render_source_line(linenum, line)
for remark in line_remarks.get(linenum, []):
self.render_inline_remarks(remark, line)
print('''
</table>
</body>
</html>''', file=self.stream)
@classmethod
def html_file_name(cls, filename):
return filename.replace('/', '_') + ".html"
class IndexRenderer:
def __init__(self):
self.stream = open(os.path.join(args.output_dir, 'index.html'), 'w')
def render_entry(self, r):
print('''
<tr>
<td><a href={r.Link}>{r.DebugLocString}</a></td>
<td>{r.RelativeHotness}</td>
<td>{r.DemangledFunctionName}</td>
<td class=\"column-entry-{r.color}\">{r.Pass}</td>
</tr>'''.format(**locals()), file=self.stream)
def render(self, all_remarks):
print('''
<html>
<head>
<link rel='stylesheet' type='text/css' href='style.css'>
</head>
<body>
<div class="centered">
<table>
<tr>
<td>Source Location</td>
<td>Hotness</td>
<td>Function</td>
<td>Pass</td>
</tr>''', file=self.stream)
for remark in all_remarks:
self.render_entry(remark)
print('''
</table>
</body>
</html>''', file=self.stream)
all_remarks = dict()
file_remarks = dict()
for input_file in args.yaml_files:
f = open(input_file)
docs = yaml.load_all(f, Loader=Loader)
for remark in docs:
# Skip remarks without a debug location, and skip duplicates
if not hasattr(remark, 'DebugLoc') or remark.key in all_remarks:
continue
all_remarks[remark.key] = remark
file_remarks.setdefault(remark.File, dict()).setdefault(remark.Line, []).append(remark)
Remark.max_hotness = max(Remark.max_hotness, remark.Hotness)
# Set up a map between function names and their source location for functions where inlining happened
for remark in all_remarks.itervalues():
if type(remark) == Passed and remark.Pass == "inline" and remark.Name == "Inlined":
for arg in remark.Args:
caller = arg.get('Caller')
if caller:
Remark.caller_loc[caller] = arg['DebugLoc']
if Remark.should_display_hotness():
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: r.Hotness, reverse=True)
else:
sorted_remarks = sorted(all_remarks.itervalues(), key=lambda r: (r.File, r.Line, r.Column))
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
for (filename, remarks) in file_remarks.iteritems():
SourceFileRenderer(filename).render(remarks)
IndexRenderer().render(sorted_remarks)
shutil.copy(os.path.join(os.path.dirname(os.path.realpath(__file__)), "style.css"), args.output_dir)
| 8,866 | 26.796238 | 112 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/Target/ARM/analyze-match-table.py | #!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
| 2,129 | 33.354839 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit.py | #!/usr/bin/env python
from lit.main import main
if __name__=='__main__':
main()
| 86 | 11.428571 | 25 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/setup.py | import lit
import os
from setuptools import setup, find_packages
# setuptools expects to be invoked from within the directory of setup.py, but it
# is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "lit",
version = lit.__version__,
author = lit.__author__,
author_email = lit.__email__,
url = 'http://llvm.org',
license = 'BSD',
description = "A Software Testing Tool",
keywords = 'test C++ automatic discovery',
long_description = """\
*lit*
+++++
About
=====
*lit* is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. *lit* is
designed to be a lightweight testing tool with as simple a user interface as
possible.
Features
========
* Portable!
* Flexible test discovery.
* Parallel test execution.
* Support for multiple test formats and test suite designs.
Documentation
=============
The official *lit* documentation is in the man page, available online at the LLVM
Command Guide: http://llvm.org/cmds/lit.html.
Source
======
The *lit* source is available as part of LLVM, in the LLVM SVN repository:
http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit.
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
zip_safe = False,
packages = find_packages(),
entry_points = {
'console_scripts': [
'lit = lit:main',
],
}
)
| 1,878 | 23.089744 | 85 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shtest-shell.py | # Check the internal shell handling component of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-shell :: error-0.txt
# CHECK: *** TEST 'shtest-shell :: error-0.txt' FAILED ***
# CHECK: $ "not-a-real-command"
# CHECK: # command stderr:
# CHECK: 'not-a-real-command': command not found
# CHECK: error: command failed with exit status: 127
# CHECK: ***
# FIXME: The output here sucks.
#
# CHECK: FAIL: shtest-shell :: error-1.txt
# CHECK: *** TEST 'shtest-shell :: error-1.txt' FAILED ***
# CHECK: shell parser error on: 'echo "missing quote'
# CHECK: ***
# CHECK: FAIL: shtest-shell :: error-2.txt
# CHECK: *** TEST 'shtest-shell :: error-2.txt' FAILED ***
# CHECK: Unsupported redirect:
# CHECK: ***
# CHECK: PASS: shtest-shell :: redirects.txt
# CHECK: PASS: shtest-shell :: sequencing-0.txt
# CHECK: XFAIL: shtest-shell :: sequencing-1.txt
# CHECK: Failing Tests (3)
| 997 | 28.352941 | 67 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shtest-encoding.py | # RUN: true
# Here is a string that cannot be decoded in line mode: .
| 71 | 17 | 57 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shtest-timeout.py | # REQUIRES: python-psutil
# Test per test timeout using external shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/quick_then_slow.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: %{inputs}/shtest-timeout/slow.py \
# RUN: -j 1 -v --debug --timeout 1 --param external=1 > %t.extsh.out 2> %t.extsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.extsh.out %s
# RUN: FileCheck --check-prefix=CHECK-EXTSH-ERR < %t.extsh.err %s
#
# CHECK-EXTSH-ERR: Using external shell
# Test per test timeout using internal shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/quick_then_slow.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: %{inputs}/shtest-timeout/slow.py \
# RUN: -j 1 -v --debug --timeout 1 --param external=0 > %t.intsh.out 2> %t.intsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-OUT < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-ERR < %t.intsh.err %s
# CHECK-INTSH-OUT: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-INTSH-OUT: command output:
# CHECK-INTSH-OUT-NEXT: Running infinite loop
# CHECK-INTSH-OUT: command reached timeout: True
# CHECK-INTSH-OUT: TIMEOUT: per_test_timeout :: quick_then_slow.py
# CHECK-INTSH-OUT: Timeout: Reached timeout of 1 seconds
# CHECK-INTSH-OUT: Command Output
# CHECK-INTSH-OUT: command output:
# CHECK-INTSH-OUT-NEXT: Running in quick mode
# CHECK-INTSH-OUT: command reached timeout: False
# CHECK-INTSH-OUT: command output:
# CHECK-INTSH-OUT-NEXT: Running in slow mode
# CHECK-INTSH-OUT: command reached timeout: True
# CHECK-INTSH-OUT: TIMEOUT: per_test_timeout :: slow.py
# CHECK-INTSH-OUT: command output:
# CHECK-INTSH-OUT-NEXT: Running slow program
# CHECK-INTSH-OUT: command reached timeout: True
# CHECK-INTSH-ERR: Using internal shell
# Test per test timeout set via a config file rather than on the command line
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/quick_then_slow.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: %{inputs}/shtest-timeout/slow.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=1 > %t.cfgset.out 2> %t.cfgset.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.cfgset.out %s
# RUN: FileCheck --check-prefix=CHECK-CFGSET-ERR < %t.cfgset.err %s
#
# CHECK-CFGSET-ERR: Using internal shell
# CHECK-OUT-COMMON: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-OUT-COMMON: Timeout: Reached timeout of 1 seconds
# CHECK-OUT-COMMON: Command {{([0-9]+ )?}}Output
# CHECK-OUT-COMMON: Running infinite loop
# CHECK-OUT-COMMON: TIMEOUT: per_test_timeout :: quick_then_slow.py
# CHECK-OUT-COMMON: Timeout: Reached timeout of 1 seconds
# CHECK-OUT-COMMON: Command {{([0-9]+ )?}}Output
# CHECK-OUT-COMMON: Running in quick mode
# CHECK-OUT-COMMON: Running in slow mode
# CHECK-OUT-COMMON: PASS: per_test_timeout :: short.py
# CHECK-OUT-COMMON: TIMEOUT: per_test_timeout :: slow.py
# CHECK-OUT-COMMON: Timeout: Reached timeout of 1 seconds
# CHECK-OUT-COMMON: Command {{([0-9]+ )?}}Output
# CHECK-OUT-COMMON: Running slow program
# CHECK-OUT-COMMON: Expected Passes{{ *}}: 1
# CHECK-OUT-COMMON: Individual Timeouts{{ *}}: 3
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/quick_then_slow.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: %{inputs}/shtest-timeout/slow.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=1 --timeout=2 > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-OUT < %t.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-CMDLINE-OVERRIDE-OUT: Timeout: Reached timeout of 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: Command {{([0-9]+ )?}}Output
# CHECK-CMDLINE-OVERRIDE-OUT: Running infinite loop
# CHECK-CMDLINE-OVERRIDE-OUT: TIMEOUT: per_test_timeout :: quick_then_slow.py
# CHECK-CMDLINE-OVERRIDE-OUT: Timeout: Reached timeout of 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: Command {{([0-9]+ )?}}Output
# CHECK-CMDLINE-OVERRIDE-OUT: Running in quick mode
# CHECK-CMDLINE-OVERRIDE-OUT: Running in slow mode
# CHECK-CMDLINE-OVERRIDE-OUT: PASS: per_test_timeout :: short.py
# CHECK-CMDLINE-OVERRIDE-OUT: TIMEOUT: per_test_timeout :: slow.py
# CHECK-CMDLINE-OVERRIDE-OUT: Timeout: Reached timeout of 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: Command {{([0-9]+ )?}}Output
# CHECK-CMDLINE-OVERRIDE-OUT: Running slow program
# CHECK-CMDLINE-OVERRIDE-OUT: Expected Passes{{ *}}: 1
# CHECK-CMDLINE-OVERRIDE-OUT: Individual Timeouts{{ *}}: 3
| 5,009 | 42.189655 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shell-parsing.py | # Just run the ShUtil unit tests.
#
# RUN: %{python} -m lit.ShUtil
| 67 | 16 | 33 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/xunit-output.py | # Check xunit output
# RUN: %{lit} --xunit-xml-output %t.xunit.xml %{inputs}/test-data
# RUN: FileCheck < %t.xunit.xml %s
# CHECK: <?xml version="1.0" encoding="UTF-8" ?>
# CHECK: <testsuites>
# CHECK: <testsuite name='test-data' tests='1' failures='0'>
# CHECK: <testcase classname='test-data.test-data' name='metrics.ini' time='0.{{[0-9]+}}'/>
# CHECK: </testsuite>
# CHECK: </testsuites>
| 392 | 34.727273 | 91 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/discovery.py | # Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
#
# CHECK-BASIC-ERR: loading suite config '{{.*}}/discovery/lit.cfg'
# CHECK-BASIC-ERR-DAG: loading suite config '{{.*}}/discovery/subsuite/lit.cfg'
# CHECK-BASIC-ERR-DAG: loading local config '{{.*}}/discovery/subdir/lit.local.cfg'
#
# CHECK-BASIC-OUT: -- Test Suites --
# CHECK-BASIC-OUT: sub-suite - 2 tests
# CHECK-BASIC-OUT: Source Root: {{.*/discovery/subsuite$}}
# CHECK-BASIC-OUT: Exec Root : {{.*/discovery/subsuite$}}
# CHECK-BASIC-OUT: top-level-suite - 3 tests
# CHECK-BASIC-OUT: Source Root: {{.*/discovery$}}
# CHECK-BASIC-OUT: Exec Root : {{.*/discovery$}}
#
# CHECK-BASIC-OUT: -- Available Tests --
# CHECK-BASIC-OUT: sub-suite :: test-one
# CHECK-BASIC-OUT: sub-suite :: test-two
# CHECK-BASIC-OUT: top-level-suite :: subdir/test-three
# CHECK-BASIC-OUT: top-level-suite :: test-one
# CHECK-BASIC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-EXACT-TEST < %t.out %s
#
# CHECK-EXACT-TEST: -- Available Tests --
# CHECK-EXACT-TEST: sub-suite :: test-one
# CHECK-EXACT-TEST: top-level-suite :: subdir/test-three
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
# RUN: -j 1 --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
# CHECK-ASEXEC-ERR: loading suite config '{{.*}}/exec-discovery/lit.site.cfg'
# CHECK-ASEXEC-ERR: load_config from '{{.*}}/discovery/lit.cfg'
# CHECK-ASEXEC-ERR: loaded config '{{.*}}/discovery/lit.cfg'
# CHECK-ASEXEC-ERR: loaded config '{{.*}}/exec-discovery/lit.site.cfg'
# CHECK-ASEXEC-ERR-DAG: loading suite config '{{.*}}/discovery/subsuite/lit.cfg'
# CHECK-ASEXEC-ERR-DAG: loading local config '{{.*}}/discovery/subdir/lit.local.cfg'
#
# CHECK-ASEXEC-OUT: -- Test Suites --
# CHECK-ASEXEC-OUT: sub-suite - 2 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*/discovery/subsuite$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*/discovery/subsuite$}}
# CHECK-ASEXEC-OUT: top-level-suite - 3 tests
# CHECK-ASEXEC-OUT: Source Root: {{.*/discovery$}}
# CHECK-ASEXEC-OUT: Exec Root : {{.*/exec-discovery$}}
#
# CHECK-ASEXEC-OUT: -- Available Tests --
# CHECK-ASEXEC-OUT: sub-suite :: test-one
# CHECK-ASEXEC-OUT: sub-suite :: test-two
# CHECK-ASEXEC-OUT: top-level-suite :: subdir/test-three
# CHECK-ASEXEC-OUT: top-level-suite :: test-one
# CHECK-ASEXEC-OUT: top-level-suite :: test-two
# Check discovery when exact test names are given.
#
# FIXME: Note that using a path into a subsuite doesn't work correctly here.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-EXACT-TEST < %t.out %s
#
# CHECK-ASEXEC-EXACT-TEST: -- Available Tests --
# CHECK-ASEXEC-EXACT-TEST: top-level-suite :: subdir/test-three
# Check that we don't recurse infinitely when loading a site-specific test
# suite located inside the test source root.
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
# RUN: -j 1 --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# CHECK-ASEXEC-INTREE-NEXT: Source Root: {{.*/exec-discovery-in-tree$}}
# CHECK-ASEXEC-INTREE-NEXT: Exec Root : {{.*/exec-discovery-in-tree/obj$}}
# CHECK-ASEXEC-INTREE-NEXT: -- Available Tests --
# CHECK-ASEXEC-INTREE-NEXT: exec-discovery-in-tree-suite :: test-one
| 4,093 | 40.77551 | 84 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shtest-format.py | # Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: shtest-format :: argv0.txt
# CHECK: FAIL: shtest-format :: external_shell/fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: Command Output (stderr):
# CHECK-NEXT: --
# CHECK-NEXT: cat: does-not-exist: No such file or directory
# CHECK: --
# CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED ***
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: a line with bad encoding:
# CHECK: --
# CHECK: PASS: shtest-format :: external_shell/pass.txt
# CHECK: FAIL: shtest-format :: fail.txt
# CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: printf "line 1
# CHECK-NEXT: false
# CHECK-NEXT: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output (stdout):
# CHECK-NEXT: --
# CHECK-NEXT: $ "printf"
# CHECK-NEXT: # command output:
# CHECK-NEXT: line 1: failed test output on stdout
# CHECK-NEXT: line 2: failed test output on stdout
# CHECK: UNRESOLVED: shtest-format :: no-test-line.txt
# CHECK: PASS: shtest-format :: pass.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-any-missing.txt
# CHECK: PASS: shtest-format :: requires-any-present.txt
# CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt
# CHECK: PASS: shtest-format :: requires-present.txt
# CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt
# CHECK: XFAIL: shtest-format :: xfail-feature.txt
# CHECK: XFAIL: shtest-format :: xfail-target.txt
# CHECK: XFAIL: shtest-format :: xfail.txt
# CHECK: XPASS: shtest-format :: xpass.txt
# CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED ***
# CHECK-NEXT: Script
# CHECK-NEXT: --
# CHECK-NEXT: true
# CHECK-NEXT: --
# CHECK: Testing Time
# CHECK: Unexpected Passing Tests (1)
# CHECK: shtest-format :: xpass.txt
# CHECK: Failing Tests (3)
# CHECK: shtest-format :: external_shell/fail.txt
# CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
# CHECK: shtest-format :: fail.txt
# CHECK: Expected Passes : 5
# CHECK: Expected Failures : 3
# CHECK: Unsupported Tests : 3
# CHECK: Unresolved Tests : 1
# CHECK: Unexpected Passes : 1
# CHECK: Unexpected Failures: 3
| 2,598 | 31.898734 | 94 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/max-failures.py | # Check the behavior of --max-failures option.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
# RUN: not %{lit} --max-failures=1 -j 1 -v %{inputs}/shtest-shell >> %t.out
# RUN: not %{lit} --max-failures=2 -j 1 -v %{inputs}/shtest-shell >> %t.out
# RUN: not %{lit} --max-failures=0 -j 1 -v %{inputs}/shtest-shell 2>> %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: Failing Tests (3)
# CHECK: Failing Tests (1)
# CHECK: Failing Tests (2)
# CHECK: error: Setting --max-failures to 0 does not have any effect.
| 526 | 34.133333 | 76 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/googletest-format.py | # Check the various features of the GoogleTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/FirstTest.subTestA
# CHECK: FAIL: googletest-format :: DummySubDir/OneTest/FirstTest.subTestB
# CHECK-NEXT: *** TEST 'googletest-format :: DummySubDir/OneTest/FirstTest.subTestB' FAILED ***
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: ***
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-format :: DummySubDir/OneTest/ParameterizedTest/1.subTest
# CHECK: Failing Tests (1)
# CHECK: Expected Passes : 3
# CHECK: Unexpected Failures: 1
| 781 | 36.238095 | 95 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/googletest-upstream-format.py | # Check the various features of the GoogleTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-upstream-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-upstream-format :: DummySubDir/OneTest/FirstTest.subTestA
# CHECK: FAIL: googletest-upstream-format :: DummySubDir/OneTest/FirstTest.subTestB
# CHECK-NEXT: *** TEST 'googletest-upstream-format :: DummySubDir/OneTest/FirstTest.subTestB' FAILED ***
# CHECK-NEXT: Running main() from gtest_main.cc
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: ***
# CHECK: PASS: googletest-upstream-format :: DummySubDir/OneTest/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-upstream-format :: DummySubDir/OneTest/ParameterizedTest/1.subTest
# CHECK: Failing Tests (1)
# CHECK: Expected Passes : 3
# CHECK: Unexpected Failures: 1
| 882 | 41.047619 | 104 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/unittest-adaptor.py | # Check the lit adaption to run under unittest.
#
# RUN: %{python} %s %{inputs}/unittest-adaptor 2> %t.err
# RUN: FileCheck < %t.err %s
#
# CHECK-DAG: unittest-adaptor :: test-two.txt ... FAIL
# CHECK-DAG: unittest-adaptor :: test-one.txt ... ok
import unittest
import sys
import lit
import lit.discovery
input_path = sys.argv[1]
unittest_suite = lit.discovery.load_test_suite([input_path])
runner = unittest.TextTestRunner(verbosity=2)
runner.run(unittest_suite)
| 467 | 23.631579 | 60 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/usage.py | # Basic sanity check that usage works.
#
# RUN: %{lit} --help > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: usage: lit.py [-h]
| 130 | 17.714286 | 38 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/googletest-timeout.py | # REQUIRES: python-psutil
# Check that the per test timeout is enforced when running GTest tests.
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout --timeout=1 > %t.cmd.out
# RUN: FileCheck < %t.cmd.out %s
# Check that the per test timeout is enforced when running GTest tests via
# the configuration file
#
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --param set_timeout=1 > %t.cfgset.out 2> %t.cfgset.err
# RUN: FileCheck < %t.cfgset.out %s
# CHECK: -- Testing:
# CHECK: PASS: googletest-timeout :: DummySubDir/OneTest/FirstTest.subTestA
# CHECK: TIMEOUT: googletest-timeout :: DummySubDir/OneTest/FirstTest.subTestB
# CHECK: TIMEOUT: googletest-timeout :: DummySubDir/OneTest/FirstTest.subTestC
# CHECK: Expected Passes : 1
# CHECK: Individual Timeouts: 2
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
# RUN: --param set_timeout=1 --timeout=2 > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck < %t.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 2 seconds
| 1,241 | 40.4 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/test-data.py | # Test features related to formats which support reporting additional test data.
# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s
# CHECK: -- Testing:
# CHECK: PASS: test-data :: metrics.ini
# CHECK-NEXT: *** TEST 'test-data :: metrics.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
| 358 | 26.615385 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/progress-bar.py | # Check the simple progress bar.
#
# RUN: not %{lit} -j 1 -s %{inputs}/progress-bar > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: Testing: 0 .. 10.. 20
# CHECK: FAIL: shtest-shell :: test-1.txt (1 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40..
# CHECK: FAIL: shtest-shell :: test-2.txt (2 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70
# CHECK: FAIL: shtest-shell :: test-3.txt (3 of 4)
# CHECK: Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
# CHECK: FAIL: shtest-shell :: test-4.txt (4 of 4)
| 528 | 36.785714 | 68 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/shtest-output-printing.py | # Check the various features of the ShTest format.
#
# RUN: not %{lit} -j 1 -v %{inputs}/shtest-output-printing > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: FAIL: shtest-output-printing :: basic.txt
# CHECK-NEXT: *** TEST 'shtest-output-printing :: basic.txt' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK: --
# CHECK-NEXT: Exit Code: 1
#
# CHECK: Command Output
# CHECK-NEXT: --
# CHECK-NEXT: $ "true"
# CHECK-NEXT: $ "echo" "hi"
# CHECK-NEXT: # command output:
# CHECK-NEXT: hi
#
# CHECK: $ "wc" "missing-file"
# CHECK-NEXT: # redirected output from '{{.*}}/basic.txt.tmp.out':
# CHECK-NEXT: missing-file{{.*}} No such file or directory
# CHECK: note: command had no output on stdout or stderr
# CHECK-NEXT: error: command failed with exit status: 1
| 829 | 27.62069 | 71 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/test-output.py | # RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: }
# CHECK-NEXT: "name": "test-data :: metrics.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
| 553 | 26.7 | 74 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/unit/TestRunner.py | # RUN: %{python} %s
#
# END.
import unittest
import platform
import os.path
import tempfile
import lit
from lit.TestRunner import ParserKind, IntegratedTestKeywordParser, \
parseIntegratedTestScript
class TestIntegratedTestKeywordParser(unittest.TestCase):
inputTestCase = None
@staticmethod
def load_keyword_parser_lit_tests():
"""
Create and load the LIT test suite and test objects used by
TestIntegratedTestKeywordParser
"""
# Create the global config object.
lit_config = lit.LitConfig.LitConfig(progname='lit',
path=[],
quiet=False,
useValgrind=False,
valgrindLeakCheck=False,
valgrindArgs=[],
noExecute=False,
debug=False,
isWindows=(
platform.system() == 'Windows'),
params={})
TestIntegratedTestKeywordParser.litConfig = lit_config
# Perform test discovery.
test_path = os.path.dirname(os.path.dirname(__file__))
inputs = [os.path.join(test_path, 'Inputs/testrunner-custom-parsers/')]
assert os.path.isdir(inputs[0])
run = lit.run.Run(lit_config,
lit.discovery.find_tests_for_inputs(lit_config, inputs))
        assert len(run.tests) == 1, "there should only be one test"
TestIntegratedTestKeywordParser.inputTestCase = run.tests[0]
@staticmethod
def make_parsers():
def custom_parse(line_number, line, output):
if output is None:
output = []
output += [part for part in line.split(' ') if part.strip()]
return output
return [
IntegratedTestKeywordParser("MY_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_DNE_TAG.", ParserKind.TAG),
IntegratedTestKeywordParser("MY_LIST:", ParserKind.LIST),
IntegratedTestKeywordParser("MY_RUN:", ParserKind.COMMAND),
IntegratedTestKeywordParser("MY_CUSTOM:", ParserKind.CUSTOM,
custom_parse)
]
@staticmethod
def get_parser(parser_list, keyword):
for p in parser_list:
if p.keyword == keyword:
return p
        assert False, "parser not found"
@staticmethod
def parse_test(parser_list):
script = parseIntegratedTestScript(
TestIntegratedTestKeywordParser.inputTestCase,
additional_parsers=parser_list, require_script=False)
assert not isinstance(script, lit.Test.Result)
assert isinstance(script, list)
assert len(script) == 0
def test_tags(self):
parsers = self.make_parsers()
self.parse_test(parsers)
tag_parser = self.get_parser(parsers, 'MY_TAG.')
dne_tag_parser = self.get_parser(parsers, 'MY_DNE_TAG.')
self.assertTrue(tag_parser.getValue())
self.assertFalse(dne_tag_parser.getValue())
def test_lists(self):
parsers = self.make_parsers()
self.parse_test(parsers)
list_parser = self.get_parser(parsers, 'MY_LIST:')
self.assertItemsEqual(list_parser.getValue(),
['one', 'two', 'three', 'four'])
def test_commands(self):
parsers = self.make_parsers()
self.parse_test(parsers)
cmd_parser = self.get_parser(parsers, 'MY_RUN:')
value = cmd_parser.getValue()
self.assertEqual(len(value), 2) # there are only two run lines
self.assertEqual(value[0].strip(), 'baz')
self.assertEqual(value[1].strip(), 'foo bar')
def test_custom(self):
parsers = self.make_parsers()
self.parse_test(parsers)
custom_parser = self.get_parser(parsers, 'MY_CUSTOM:')
value = custom_parser.getValue()
self.assertItemsEqual(value, ['a', 'b', 'c'])
if __name__ == '__main__':
TestIntegratedTestKeywordParser.load_keyword_parser_lit_tests()
unittest.main(verbosity=2)
| 4,358 | 36.904348 | 82 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/unit/ShUtil.py | # RUN: %{python} %s
import unittest
from lit.ShUtil import Command, Pipeline, Seq, ShLexer, ShParser
class TestShLexer(unittest.TestCase):
def lex(self, str, *args, **kwargs):
return list(ShLexer(str, *args, **kwargs).lex())
def test_basic(self):
self.assertEqual(self.lex('a|b>c&d<e;f'),
['a', ('|',), 'b', ('>',), 'c', ('&',), 'd',
('<',), 'e', (';',), 'f'])
def test_redirection_tokens(self):
self.assertEqual(self.lex('a2>c'),
['a2', ('>',), 'c'])
self.assertEqual(self.lex('a 2>c'),
['a', ('>',2), 'c'])
def test_quoting(self):
self.assertEqual(self.lex(""" 'a' """),
['a'])
self.assertEqual(self.lex(""" "hello\\"world" """),
['hello"world'])
self.assertEqual(self.lex(""" "hello\\'world" """),
["hello\\'world"])
self.assertEqual(self.lex(""" "hello\\\\world" """),
["hello\\world"])
self.assertEqual(self.lex(""" he"llo wo"rld """),
["hello world"])
self.assertEqual(self.lex(""" a\\ b a\\\\b """),
["a b", "a\\b"])
self.assertEqual(self.lex(""" "" "" """),
["", ""])
self.assertEqual(self.lex(""" a\\ b """, win32Escapes = True),
['a\\', 'b'])
class TestShParse(unittest.TestCase):
def parse(self, str):
return ShParser(str).parse()
def test_basic(self):
self.assertEqual(self.parse('echo hello'),
Pipeline([Command(['echo', 'hello'], [])], False))
self.assertEqual(self.parse('echo ""'),
Pipeline([Command(['echo', ''], [])], False))
self.assertEqual(self.parse("""echo -DFOO='a'"""),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
self.assertEqual(self.parse('echo -DFOO="a"'),
Pipeline([Command(['echo', '-DFOO=a'], [])], False))
def test_redirection(self):
self.assertEqual(self.parse('echo hello > c'),
Pipeline([Command(['echo', 'hello'],
[((('>'),), 'c')])], False))
self.assertEqual(self.parse('echo hello > c >> d'),
Pipeline([Command(['echo', 'hello'], [(('>',), 'c'),
(('>>',), 'd')])], False))
self.assertEqual(self.parse('a 2>&1'),
Pipeline([Command(['a'], [(('>&',2), '1')])], False))
def test_pipeline(self):
self.assertEqual(self.parse('a | b'),
Pipeline([Command(['a'], []),
Command(['b'], [])],
False))
self.assertEqual(self.parse('a | b | c'),
Pipeline([Command(['a'], []),
Command(['b'], []),
Command(['c'], [])],
False))
def test_list(self):
self.assertEqual(self.parse('a ; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a & b'),
Seq(Pipeline([Command(['a'], [])], False),
'&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b'),
Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a || b'),
Seq(Pipeline([Command(['a'], [])], False),
'||',
Pipeline([Command(['b'], [])], False)))
self.assertEqual(self.parse('a && b || c'),
Seq(Seq(Pipeline([Command(['a'], [])], False),
'&&',
Pipeline([Command(['b'], [])], False)),
'||',
Pipeline([Command(['c'], [])], False)))
self.assertEqual(self.parse('a; b'),
Seq(Pipeline([Command(['a'], [])], False),
';',
Pipeline([Command(['b'], [])], False)))
if __name__ == '__main__':
unittest.main()
| 4,682 | 40.442478 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/Inputs/shtest-timeout/short.py | # RUN: %{python} %s
from __future__ import print_function
print("short program")
| 82 | 15.6 | 37 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/Inputs/shtest-timeout/infinite_loop.py | # RUN: %{python} %s
from __future__ import print_function
import sys
print("Running infinite loop")
sys.stdout.flush() # Make sure the print gets flushed so it appears in lit output.
while True:
pass
| 206 | 19.7 | 82 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/Inputs/shtest-timeout/slow.py | # RUN: %{python} %s
from __future__ import print_function
import time
import sys
print("Running slow program")
sys.stdout.flush() # Make sure the print gets flushed so it appears in lit output.
time.sleep(6)
| 210 | 20.1 | 82 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/Inputs/shtest-timeout/quick_then_slow.py | # RUN: %{python} %s quick
# RUN: %{python} %s slow
from __future__ import print_function
import time
import sys
if len(sys.argv) != 2:
print("Wrong number of args")
sys.exit(1)
mode = sys.argv[1]
if mode == 'slow':
print("Running in slow mode")
sys.stdout.flush() # Make sure the print gets flushed so it appears in lit output.
time.sleep(6)
sys.exit(0)
elif mode == 'quick':
print("Running in quick mode")
sys.exit(0)
else:
print("Unrecognised mode {}".format(mode))
sys.exit(1)
| 525 | 20.04 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/tests/Inputs/discovery/subdir/test-three.py | # RUN: true
| 12 | 5.5 | 11 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/LitTestCase.py | from __future__ import absolute_import
import unittest
import lit.Test
"""
TestCase adaptor for providing a 'unittest' compatible interface to 'lit' tests.
"""
class UnresolvedError(RuntimeError):
pass
class LitTestCase(unittest.TestCase):
def __init__(self, test, run):
unittest.TestCase.__init__(self)
self._test = test
self._run = run
def id(self):
return self._test.getFullName()
def shortDescription(self):
return self._test.getFullName()
def runTest(self):
# Run the test.
self._run.execute_test(self._test)
# Adapt the result to unittest.
result = self._test.result
if result.code is lit.Test.UNRESOLVED:
raise UnresolvedError(result.output)
elif result.code.isFailure:
self.fail(result.output)
| 850 | 23.314286 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/main.py | #!/usr/bin/env python
"""
lit - LLVM Integrated Tester.
See lit.pod for more information.
"""
from __future__ import absolute_import
import os
import platform
import random
import re
import sys
import time
import argparse
import tempfile
import shutil
import lit.ProgressBar
import lit.LitConfig
import lit.Test
import lit.run
import lit.util
import lit.discovery
class TestingProgressDisplay(object):
def __init__(self, opts, numTests, progressBar=None):
self.opts = opts
self.numTests = numTests
self.progressBar = progressBar
self.completed = 0
def finish(self):
if self.progressBar:
self.progressBar.clear()
elif self.opts.quiet:
pass
elif self.opts.succinct:
sys.stdout.write('\n')
def update(self, test):
self.completed += 1
if self.opts.incremental:
update_incremental_cache(test)
if self.progressBar:
self.progressBar.update(float(self.completed)/self.numTests,
test.getFullName())
shouldShow = test.result.code.isFailure or \
self.opts.showAllOutput or \
(not self.opts.quiet and not self.opts.succinct)
if not shouldShow:
return
if self.progressBar:
self.progressBar.clear()
# Show the test result line.
test_name = test.getFullName()
print('%s: %s (%d of %d)' % (test.result.code.name, test_name,
self.completed, self.numTests))
# Show the test failure output, if requested.
if (test.result.code.isFailure and self.opts.showOutput) or \
self.opts.showAllOutput:
if test.result.code.isFailure:
print("%s TEST '%s' FAILED %s" % ('*'*20, test.getFullName(),
'*'*20))
print(test.result.output)
print("*" * 20)
# Report test metrics, if present.
if test.result.metrics:
print("%s TEST '%s' RESULTS %s" % ('*'*10, test.getFullName(),
'*'*10))
items = sorted(test.result.metrics.items())
for metric_name, value in items:
print('%s: %s ' % (metric_name, value.format()))
print("*" * 10)
# Ensure the output is flushed.
sys.stdout.flush()
def write_test_results(run, lit_config, testing_time, output_path):
try:
import json
except ImportError:
lit_config.fatal('test output unsupported with Python 2.5')
# Construct the data we will write.
data = {}
# Encode the current lit version as a schema version.
data['__version__'] = lit.__versioninfo__
data['elapsed'] = testing_time
# FIXME: Record some information on the lit configuration used?
# FIXME: Record information from the individual test suites?
# Encode the tests.
data['tests'] = tests_data = []
for test in run.tests:
test_data = {
'name' : test.getFullName(),
'code' : test.result.code.name,
'output' : test.result.output,
'elapsed' : test.result.elapsed }
# Add test metrics, if present.
if test.result.metrics:
test_data['metrics'] = metrics_data = {}
for key, value in test.result.metrics.items():
metrics_data[key] = value.todata()
tests_data.append(test_data)
# Write the output.
f = open(output_path, 'w')
try:
json.dump(data, f, indent=2, sort_keys=True)
f.write('\n')
finally:
f.close()
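# For orientation, the JSON emitted above has roughly the following shape
# (an illustrative sketch of the 'data' dict built in this function; see also
# tests/test-output.py in this suite for the checked-in expectations):
#
#   {
#     "__version__": [...],        # lit.__versioninfo__
#     "elapsed": 12.34,
#     "tests": [
#       { "name": "suite :: test.ini", "code": "PASS", "output": "...",
#         "elapsed": 0.01, "metrics": { "value0": 1 } }
#     ]
#   }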
def update_incremental_cache(test):
if not test.result.code.isFailure:
return
fname = test.getFilePath()
os.utime(fname, None)
def sort_by_incremental_cache(run):
def sortIndex(test):
fname = test.getFilePath()
try:
return -os.path.getmtime(fname)
except:
return 0
run.tests.sort(key = lambda t: sortIndex(t))
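# Illustrative note (an inference from the two helpers above, not a quote from
# the docs): update_incremental_cache() touches the source file of each
# failing test, so ordering by negative mtime runs recently failed or recently
# edited tests first on the next `lit --incremental` invocation:
#
#   sort_by_incremental_cache(run)  # run.tests now starts with newest mtimes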
def main(builtinParameters = {}):
# Create a temp directory inside the normal temp directory so that we can
# try to avoid temporary test file leaks. The user can avoid this behavior
# by setting LIT_PRESERVES_TMP in the environment, so they can easily use
# their own temp directory to monitor temporary file leaks or handle them at
# the buildbot level.
lit_tmp = None
if 'LIT_PRESERVES_TMP' not in os.environ:
lit_tmp = tempfile.mkdtemp(prefix="lit_tmp_")
os.environ.update({
'TMPDIR': lit_tmp,
'TMP': lit_tmp,
'TEMP': lit_tmp,
'TEMPDIR': lit_tmp,
})
# FIXME: If Python does not exit cleanly, this directory will not be cleaned
# up. We should consider writing the lit pid into the temp directory,
# scanning for stale temp directories, and deleting temp directories whose
# lit process has died.
try:
main_with_tmp(builtinParameters)
finally:
if lit_tmp:
shutil.rmtree(lit_tmp)
def main_with_tmp(builtinParameters):
parser = argparse.ArgumentParser()
parser.add_argument('test_paths',
nargs='*',
help='Files or paths to include in the test suite')
parser.add_argument("--version", dest="show_version",
help="Show version and exit",
action="store_true", default=False)
parser.add_argument("-j", "--threads", dest="numThreads", metavar="N",
help="Number of testing threads",
type=int, default=None)
parser.add_argument("--config-prefix", dest="configPrefix",
metavar="NAME", help="Prefix for 'lit' config files",
action="store", default=None)
parser.add_argument("-D", "--param", dest="userParameters",
metavar="NAME=VAL",
help="Add 'NAME' = 'VAL' to the user defined parameters",
type=str, action="append", default=[])
format_group = parser.add_argument_group("Output Format")
# FIXME: I find these names very confusing, although I like the
# functionality.
format_group.add_argument("-q", "--quiet",
help="Suppress no error output",
action="store_true", default=False)
format_group.add_argument("-s", "--succinct",
help="Reduce amount of output",
action="store_true", default=False)
format_group.add_argument("-v", "--verbose", dest="showOutput",
help="Show test output for failures",
action="store_true", default=False)
format_group.add_argument("-a", "--show-all", dest="showAllOutput",
help="Display all commandlines and output",
action="store_true", default=False)
format_group.add_argument("-o", "--output", dest="output_path",
help="Write test results to the provided path",
action="store", metavar="PATH")
format_group.add_argument("--no-progress-bar", dest="useProgressBar",
help="Do not use curses based progress bar",
action="store_false", default=True)
format_group.add_argument("--show-unsupported",
help="Show unsupported tests",
action="store_true", default=False)
format_group.add_argument("--show-xfail",
help="Show tests that were expected to fail",
action="store_true", default=False)
execution_group = parser.add_argument_group("Test Execution")
execution_group.add_argument("--path",
help="Additional paths to add to testing environment",
action="append", type=str, default=[])
execution_group.add_argument("--vg", dest="useValgrind",
help="Run tests under valgrind",
action="store_true", default=False)
execution_group.add_argument("--vg-leak", dest="valgrindLeakCheck",
help="Check for memory leaks under valgrind",
action="store_true", default=False)
execution_group.add_argument("--vg-arg", dest="valgrindArgs", metavar="ARG",
help="Specify an extra argument for valgrind",
type=str, action="append", default=[])
execution_group.add_argument("--time-tests", dest="timeTests",
help="Track elapsed wall time for each test",
action="store_true", default=False)
execution_group.add_argument("--no-execute", dest="noExecute",
help="Don't execute any tests (assume PASS)",
action="store_true", default=False)
execution_group.add_argument("--xunit-xml-output", dest="xunit_output_file",
help=("Write XUnit-compatible XML test reports to the"
" specified file"), default=None)
execution_group.add_argument("--timeout", dest="maxIndividualTestTime",
help="Maximum time to spend running a single test (in seconds)."
"0 means no time limit. [Default: 0]",
type=int, default=None)
execution_group.add_argument("--max-failures", dest="maxFailures",
help="Stop execution after the given number of failures.",
action="store", type=int, default=None)
selection_group = parser.add_argument_group("Test Selection")
selection_group.add_argument("--max-tests", dest="maxTests", metavar="N",
help="Maximum number of tests to run",
action="store", type=int, default=None)
selection_group.add_argument("--max-time", dest="maxTime", metavar="N",
help="Maximum time to spend testing (in seconds)",
action="store", type=float, default=None)
selection_group.add_argument("--shuffle",
help="Run tests in random order",
action="store_true", default=False)
selection_group.add_argument("-i", "--incremental",
help="Run modified and failing tests first (updates "
"mtimes)",
action="store_true", default=False)
selection_group.add_argument("--filter", metavar="REGEX",
help=("Only run tests with paths matching the given "
"regular expression"),
action="store", default=None)
debug_group = parser.add_argument_group("Debug and Experimental Options")
debug_group.add_argument("--debug",
help="Enable debugging (for 'lit' development)",
action="store_true", default=False)
debug_group.add_argument("--show-suites", dest="showSuites",
help="Show discovered test suites",
action="store_true", default=False)
debug_group.add_argument("--show-tests", dest="showTests",
help="Show all discovered tests",
action="store_true", default=False)
debug_group.add_argument("--use-processes", dest="useProcesses",
help="Run tests in parallel with processes (not threads)",
action="store_true", default=True)
debug_group.add_argument("--use-threads", dest="useProcesses",
help="Run tests in parallel with threads (not processes)",
action="store_false", default=True)
opts = parser.parse_args()
args = opts.test_paths
if opts.show_version:
print("lit %s" % (lit.__version__,))
return
if not args:
parser.error('No inputs specified')
if opts.numThreads is None:
opts.numThreads = lit.util.detectCPUs()
if opts.maxFailures == 0:
parser.error("Setting --max-failures to 0 does not have any effect.")
inputs = args
# Create the user defined parameters.
userParams = dict(builtinParameters)
for entry in opts.userParameters:
if '=' not in entry:
name,val = entry,''
else:
name,val = entry.split('=', 1)
userParams[name] = val
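    # Illustrative sketch of the loop above, using hypothetical parameter
    # names: `-D build_mode=Release -D verbose` would produce
    # userParams == {..., 'build_mode': 'Release', 'verbose': ''}.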
    # Decide what the requested maximum individual test time should be
if opts.maxIndividualTestTime is not None:
maxIndividualTestTime = opts.maxIndividualTestTime
else:
# Default is zero
maxIndividualTestTime = 0
isWindows = platform.system() == 'Windows'
# Create the global config object.
litConfig = lit.LitConfig.LitConfig(
progname = os.path.basename(sys.argv[0]),
path = opts.path,
quiet = opts.quiet,
useValgrind = opts.useValgrind,
valgrindLeakCheck = opts.valgrindLeakCheck,
valgrindArgs = opts.valgrindArgs,
noExecute = opts.noExecute,
debug = opts.debug,
isWindows = isWindows,
params = userParams,
config_prefix = opts.configPrefix,
maxIndividualTestTime = maxIndividualTestTime,
maxFailures = opts.maxFailures)
# Perform test discovery.
run = lit.run.Run(litConfig,
lit.discovery.find_tests_for_inputs(litConfig, inputs))
# After test discovery the configuration might have changed
# the maxIndividualTestTime. If we explicitly set this on the
# command line then override what was set in the test configuration
if opts.maxIndividualTestTime is not None:
if opts.maxIndividualTestTime != litConfig.maxIndividualTestTime:
litConfig.note(('The test suite configuration requested an individual'
' test timeout of {0} seconds but a timeout of {1} seconds was'
' requested on the command line. Forcing timeout to be {1}'
' seconds')
.format(litConfig.maxIndividualTestTime,
opts.maxIndividualTestTime))
litConfig.maxIndividualTestTime = opts.maxIndividualTestTime
if opts.showSuites or opts.showTests:
# Aggregate the tests by suite.
suitesAndTests = {}
for result_test in run.tests:
if result_test.suite not in suitesAndTests:
suitesAndTests[result_test.suite] = []
suitesAndTests[result_test.suite].append(result_test)
suitesAndTests = list(suitesAndTests.items())
suitesAndTests.sort(key = lambda item: item[0].name)
# Show the suites, if requested.
if opts.showSuites:
print('-- Test Suites --')
for ts,ts_tests in suitesAndTests:
print(' %s - %d tests' %(ts.name, len(ts_tests)))
print(' Source Root: %s' % ts.source_root)
print(' Exec Root : %s' % ts.exec_root)
if ts.config.available_features:
print(' Available Features : %s' % ' '.join(
sorted(ts.config.available_features)))
# Show the tests, if requested.
if opts.showTests:
print('-- Available Tests --')
for ts,ts_tests in suitesAndTests:
ts_tests.sort(key = lambda test: test.path_in_suite)
for test in ts_tests:
print(' %s' % (test.getFullName(),))
# Exit.
sys.exit(0)
# Select and order the tests.
numTotalTests = len(run.tests)
# First, select based on the filter expression if given.
if opts.filter:
try:
rex = re.compile(opts.filter)
except:
parser.error("invalid regular expression for --filter: %r" % (
opts.filter))
run.tests = [result_test for result_test in run.tests
if rex.search(result_test.getFullName())]
# Then select the order.
if opts.shuffle:
random.shuffle(run.tests)
elif opts.incremental:
sort_by_incremental_cache(run)
else:
run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))
# Finally limit the number of tests, if desired.
if opts.maxTests is not None:
run.tests = run.tests[:opts.maxTests]
# Don't create more threads than tests.
opts.numThreads = min(len(run.tests), opts.numThreads)
# Because some tests use threads internally, and at least on Linux each
# of these threads counts toward the current process limit, try to
# raise the (soft) process limit so that tests don't fail due to
# resource exhaustion.
try:
cpus = lit.util.detectCPUs()
desired_limit = opts.numThreads * cpus * 2 # the 2 is a safety factor
# Import the resource module here inside this try block because it
# will likely fail on Windows.
import resource
max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
desired_limit = min(desired_limit, max_procs_hard)
if max_procs_soft < desired_limit:
resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
litConfig.note('raised the process limit from %d to %d' % \
(max_procs_soft, desired_limit))
except:
pass
extra = ''
if len(run.tests) != numTotalTests:
extra = ' of %d' % numTotalTests
header = '-- Testing: %d%s tests, %d threads --'%(len(run.tests), extra,
opts.numThreads)
progressBar = None
if not opts.quiet:
if opts.succinct and opts.useProgressBar:
try:
tc = lit.ProgressBar.TerminalController()
progressBar = lit.ProgressBar.ProgressBar(tc, header)
except ValueError:
print(header)
progressBar = lit.ProgressBar.SimpleProgressBar('Testing: ')
else:
print(header)
startTime = time.time()
display = TestingProgressDisplay(opts, len(run.tests), progressBar)
try:
run.execute_tests(display, opts.numThreads, opts.maxTime,
opts.useProcesses)
except KeyboardInterrupt:
sys.exit(2)
display.finish()
testing_time = time.time() - startTime
if not opts.quiet:
print('Testing Time: %.2fs' % (testing_time,))
# Write out the test data, if requested.
if opts.output_path is not None:
write_test_results(run, litConfig, testing_time, opts.output_path)
# List test results organized by kind.
hasFailures = False
byCode = {}
for test in run.tests:
if test.result.code not in byCode:
byCode[test.result.code] = []
byCode[test.result.code].append(test)
if test.result.code.isFailure:
hasFailures = True
# Print each test in any of the failing groups.
for title,code in (('Unexpected Passing Tests', lit.Test.XPASS),
('Failing Tests', lit.Test.FAIL),
('Unresolved Tests', lit.Test.UNRESOLVED),
('Unsupported Tests', lit.Test.UNSUPPORTED),
('Expected Failing Tests', lit.Test.XFAIL),
('Timed Out Tests', lit.Test.TIMEOUT)):
if (lit.Test.XFAIL == code and not opts.show_xfail) or \
(lit.Test.UNSUPPORTED == code and not opts.show_unsupported) or \
(lit.Test.UNRESOLVED == code and (opts.maxFailures is not None)):
continue
elts = byCode.get(code)
if not elts:
continue
print('*'*20)
print('%s (%d):' % (title, len(elts)))
for test in elts:
print(' %s' % test.getFullName())
sys.stdout.write('\n')
if opts.timeTests and run.tests:
# Order by time.
test_times = [(test.getFullName(), test.result.elapsed)
for test in run.tests]
lit.util.printHistogram(test_times, title='Tests')
for name,code in (('Expected Passes ', lit.Test.PASS),
('Passes With Retry ', lit.Test.FLAKYPASS),
('Expected Failures ', lit.Test.XFAIL),
('Unsupported Tests ', lit.Test.UNSUPPORTED),
('Unresolved Tests ', lit.Test.UNRESOLVED),
('Unexpected Passes ', lit.Test.XPASS),
('Unexpected Failures', lit.Test.FAIL),
('Individual Timeouts', lit.Test.TIMEOUT)):
if opts.quiet and not code.isFailure:
continue
N = len(byCode.get(code,[]))
if N:
print(' %s: %d' % (name,N))
if opts.xunit_output_file:
# Collect the tests, indexed by test suite
by_suite = {}
for result_test in run.tests:
suite = result_test.suite.config.name
if suite not in by_suite:
by_suite[suite] = {
'passes' : 0,
'failures' : 0,
'tests' : [] }
by_suite[suite]['tests'].append(result_test)
if result_test.result.code.isFailure:
by_suite[suite]['failures'] += 1
else:
by_suite[suite]['passes'] += 1
xunit_output_file = open(opts.xunit_output_file, "w")
xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
xunit_output_file.write("<testsuites>\n")
for suite_name, suite in by_suite.items():
safe_suite_name = suite_name.replace(".", "-")
xunit_output_file.write("<testsuite name='" + safe_suite_name + "'")
xunit_output_file.write(" tests='" + str(suite['passes'] +
suite['failures']) + "'")
xunit_output_file.write(" failures='" + str(suite['failures']) +
"'>\n")
for result_test in suite['tests']:
xunit_output_file.write(result_test.getJUnitXML() + "\n")
xunit_output_file.write("</testsuite>\n")
xunit_output_file.write("</testsuites>")
xunit_output_file.close()
# If we encountered any additional errors, exit abnormally.
if litConfig.numErrors:
sys.stderr.write('\n%d error(s), exiting.\n' % litConfig.numErrors)
sys.exit(2)
# Warn about warnings.
if litConfig.numWarnings:
sys.stderr.write('\n%d warning(s) in tests.\n' % litConfig.numWarnings)
if hasFailures:
sys.exit(1)
sys.exit(0)
if __name__=='__main__':
main()
| 22,727 | 39.513369 | 86 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/ProgressBar.py | #!/usr/bin/env python
# Source: http://code.activestate.com/recipes/475116/, with
# modifications by Daniel Dunbar.
import sys, re, time
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
class TerminalController:
"""
A class that can be used to portably generate formatted output to
a terminal.
`TerminalController` defines a set of instance variables whose
values are initialized to the control sequence necessary to
perform a given action. These can be simply included in normal
output to the terminal:
>>> term = TerminalController()
>>> print('This is '+term.GREEN+'green'+term.NORMAL)
Alternatively, the `render()` method can used, which replaces
'${action}' with the string required to perform 'action':
>>> term = TerminalController()
>>> print(term.render('This is ${GREEN}green${NORMAL}'))
If the terminal doesn't support a given action, then the value of
the corresponding instance variable will be set to ''. As a
result, the above code will still work on terminals that do not
support color, except that their output will not be colored.
Also, this means that you can test whether the terminal supports a
given action by simply testing the truth value of the
corresponding instance variable:
>>> term = TerminalController()
>>> if term.CLEAR_SCREEN:
        ...     print('This terminal supports clearing the screen.')
Finally, if the width and height of the terminal are known, then
they will be stored in the `COLS` and `LINES` attributes.
"""
# Cursor movement:
BOL = '' #: Move the cursor to the beginning of the line
UP = '' #: Move the cursor up one line
DOWN = '' #: Move the cursor down one line
LEFT = '' #: Move the cursor left one char
RIGHT = '' #: Move the cursor right one char
# Deletion:
CLEAR_SCREEN = '' #: Clear the screen and move to home position
CLEAR_EOL = '' #: Clear to the end of the line.
CLEAR_BOL = '' #: Clear to the beginning of the line.
CLEAR_EOS = '' #: Clear to the end of the screen
# Output modes:
BOLD = '' #: Turn on bold mode
BLINK = '' #: Turn on blink mode
DIM = '' #: Turn on half-bright mode
REVERSE = '' #: Turn on reverse-video mode
NORMAL = '' #: Turn off all modes
# Cursor display:
HIDE_CURSOR = '' #: Make the cursor invisible
SHOW_CURSOR = '' #: Make the cursor visible
# Terminal size:
COLS = None #: Width of the terminal (None for unknown)
LINES = None #: Height of the terminal (None for unknown)
# Foreground colors:
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
# Background colors:
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
_STRING_CAPABILITIES = """
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
def __init__(self, term_stream=sys.stdout):
"""
Create a `TerminalController` and initialize its attributes
with appropriate values for the current terminal.
`term_stream` is the stream that will be used for terminal
output; if this stream is not a tty, then the terminal is
assumed to be a dumb terminal (i.e., have no capabilities).
"""
# Curses isn't available on all platforms
try: import curses
except: return
# If the stream isn't a tty, then assume it has no capabilities.
if not term_stream.isatty(): return
# Check the terminal type. If we fail, then assume that the
# terminal has no capabilities.
try: curses.setupterm()
except: return
# Look up numeric capabilities.
self.COLS = curses.tigetnum('cols')
self.LINES = curses.tigetnum('lines')
self.XN = curses.tigetflag('xenl')
# Look up string capabilities.
for capability in self._STRING_CAPABILITIES:
(attrib, cap_name) = capability.split('=')
setattr(self, attrib, self._tigetstr(cap_name) or '')
# Colors
set_fg = self._tigetstr('setf')
if set_fg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, color, self._tparm(set_fg, i))
set_fg_ansi = self._tigetstr('setaf')
if set_fg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, color, self._tparm(set_fg_ansi, i))
set_bg = self._tigetstr('setb')
if set_bg:
for i,color in zip(range(len(self._COLORS)), self._COLORS):
setattr(self, 'BG_'+color, self._tparm(set_bg, i))
set_bg_ansi = self._tigetstr('setab')
if set_bg_ansi:
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
def _tparm(self, arg, index):
import curses
return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
def _tigetstr(self, cap_name):
# String capabilities can include "delays" of the form "$<2>".
# For any modern terminal, we should be able to just ignore
# these, so strip them out.
import curses
cap = curses.tigetstr(cap_name)
if cap is None:
cap = ''
else:
cap = cap.decode('utf-8')
return re.sub(r'\$<\d+>[/*]?', '', cap)
def render(self, template):
"""
Replace each $-substitutions in the given template string with
the corresponding terminal control string (if it's defined) or
'' (if it's not).
"""
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
def _render_sub(self, match):
s = match.group()
if s == '$$': return s
else: return getattr(self, s[2:-1])
#######################################################################
# Example use case: progress bar
#######################################################################
class SimpleProgressBar:
"""
A simple progress bar which doesn't need any terminal support.
This prints out a progress bar like:
'Header: 0 .. 10.. 20.. ...'
"""
def __init__(self, header):
self.header = header
self.atIndex = None
def update(self, percent, message):
if self.atIndex is None:
sys.stdout.write(self.header)
self.atIndex = 0
next = int(percent*50)
if next == self.atIndex:
return
for i in range(self.atIndex, next):
idx = i % 5
if idx == 0:
sys.stdout.write('%-2d' % (i*2))
elif idx == 1:
pass # Skip second char
elif idx < 4:
sys.stdout.write('.')
else:
sys.stdout.write(' ')
sys.stdout.flush()
self.atIndex = next
def clear(self):
if self.atIndex is not None:
sys.stdout.write('\n')
sys.stdout.flush()
self.atIndex = None
class ProgressBar:
"""
A 3-line progress bar, which looks like::
Header
20% [===========----------------------------------]
progress message
The progress bar is colored, if the terminal supports color
output; and adjusts to the width of the terminal.
"""
BAR = '%s${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}%s'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
def __init__(self, term, header, useETA=True):
self.term = term
if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
raise ValueError("Terminal isn't capable enough -- you "
"should use a simpler progress dispaly.")
self.BOL = self.term.BOL # BoL from col#79
self.XNL = "\n" # Newline from col#79
if self.term.COLS:
self.width = self.term.COLS
if not self.term.XN:
self.BOL = self.term.UP + self.term.BOL
self.XNL = "" # Cursor must be fed to the next line
else:
self.width = 75
self.bar = term.render(self.BAR)
self.header = self.term.render(self.HEADER % header.center(self.width))
self.cleared = 1 #: true if we haven't drawn the bar yet.
self.useETA = useETA
if self.useETA:
self.startTime = time.time()
self.update(0, '')
def update(self, percent, message):
if self.cleared:
sys.stdout.write(self.header)
self.cleared = 0
prefix = '%3d%% ' % (percent*100,)
suffix = ''
if self.useETA:
elapsed = time.time() - self.startTime
if percent > .0001 and elapsed > 1:
total = elapsed / percent
eta = int(total - elapsed)
h = eta//3600.
m = (eta//60) % 60
s = eta % 60
suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
barWidth = self.width - len(prefix) - len(suffix) - 2
n = int(barWidth*percent)
if len(message) < self.width:
message = message + ' '*(self.width - len(message))
else:
message = '... ' + message[-(self.width-4):]
sys.stdout.write(
self.BOL + self.term.UP + self.term.CLEAR_EOL +
(self.bar % (prefix, '='*n, '-'*(barWidth-n), suffix)) +
self.XNL +
self.term.CLEAR_EOL + message)
if not self.term.XN:
sys.stdout.flush()
def clear(self):
if not self.cleared:
sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
sys.stdout.flush()
self.cleared = 1
def test():
tc = TerminalController()
p = ProgressBar(tc, 'Tests')
for i in range(101):
p.update(i/100., str(i))
time.sleep(.3)
if __name__=='__main__':
test()
| 10,717 | 35.705479 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/TestingConfig.py | import os
import sys
class TestingConfig:
""""
TestingConfig - Information on the tests inside a suite.
"""
@staticmethod
def fromdefaults(litConfig):
"""
fromdefaults(litConfig) -> TestingConfig
Create a TestingConfig object with default values.
"""
# Set the environment based on the command line arguments.
environment = {
'PATH' : os.pathsep.join(litConfig.path +
[os.environ.get('PATH','')]),
'LLVM_DISABLE_CRASH_REPORT' : '1',
}
pass_vars = ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'SYSTEMROOT', 'TERM',
'LD_PRELOAD', 'ASAN_OPTIONS', 'UBSAN_OPTIONS',
'LSAN_OPTIONS', 'ADB', 'ANDROID_SERIAL',
'SANITIZER_IGNORE_CVE_2016_2143', 'TMPDIR', 'TMP', 'TEMP',
'TEMPDIR', 'AVRLIT_BOARD', 'AVRLIT_PORT']
for var in pass_vars:
val = os.environ.get(var, '')
# Check for empty string as some variables such as LD_PRELOAD cannot be empty
# ('') for OS's such as OpenBSD.
if val:
environment[var] = val
if sys.platform == 'win32':
environment.update({
'INCLUDE' : os.environ.get('INCLUDE',''),
'PATHEXT' : os.environ.get('PATHEXT',''),
'PYTHONUNBUFFERED' : '1',
'TEMP' : os.environ.get('TEMP',''),
'TMP' : os.environ.get('TMP',''),
})
# Set the default available features based on the LitConfig.
available_features = []
if litConfig.useValgrind:
available_features.append('valgrind')
if litConfig.valgrindLeakCheck:
available_features.append('vg_leak')
return TestingConfig(None,
name = '<unnamed>',
suffixes = set(),
test_format = None,
environment = environment,
substitutions = [],
unsupported = False,
test_exec_root = None,
test_source_root = None,
excludes = [],
available_features = available_features,
pipefail = True)
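    # Illustrative sketch (an assumption, not taken from a real config in this
    # tree): a suite's lit.cfg typically assigns to these fields on the
    # `config` object it receives, for example:
    #
    #   config.name = 'MySuite'
    #   config.suffixes = ['.txt']
    #   config.test_format = lit.formats.ShTest(execute_external=False)
    #   config.test_source_root = os.path.dirname(__file__)
    #   config.test_exec_root = config.test_source_root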
def load_from_path(self, path, litConfig):
"""
load_from_path(path, litConfig)
Load the configuration module at the provided path into the given config
object.
"""
# Load the config script data.
data = None
f = open(path)
try:
data = f.read()
except:
litConfig.fatal('unable to load config file: %r' % (path,))
f.close()
# Execute the config script to initialize the object.
cfg_globals = dict(globals())
cfg_globals['config'] = self
cfg_globals['lit_config'] = litConfig
cfg_globals['__file__'] = path
try:
exec(compile(data, path, 'exec'), cfg_globals, None)
if litConfig.debug:
litConfig.note('... loaded config %r' % path)
except SystemExit:
e = sys.exc_info()[1]
# We allow normal system exit inside a config file to just
# return control without error.
if e.args:
raise
except:
import traceback
litConfig.fatal(
'unable to parse config file %r, traceback: %s' % (
path, traceback.format_exc()))
self.finish(litConfig)
def __init__(self, parent, name, suffixes, test_format,
environment, substitutions, unsupported,
test_exec_root, test_source_root, excludes,
available_features, pipefail, limit_to_features = [],
is_early = False):
self.parent = parent
self.name = str(name)
self.suffixes = set(suffixes)
self.test_format = test_format
self.environment = dict(environment)
self.substitutions = list(substitutions)
self.unsupported = unsupported
self.test_exec_root = test_exec_root
self.test_source_root = test_source_root
self.excludes = set(excludes)
self.available_features = set(available_features)
self.pipefail = pipefail
# This list is used by TestRunner.py to restrict running only tests that
# require one of the features in this list if this list is non-empty.
# Configurations can set this list to restrict the set of tests to run.
self.limit_to_features = set(limit_to_features)
# Whether the suite should be tested early in a given run.
self.is_early = bool(is_early)
def finish(self, litConfig):
"""finish() - Finish this config object, after loading is complete."""
self.name = str(self.name)
self.suffixes = set(self.suffixes)
self.environment = dict(self.environment)
self.substitutions = list(self.substitutions)
if self.test_exec_root is not None:
            # FIXME: This should really only be set in test suite config
# files. Should we distinguish them?
self.test_exec_root = str(self.test_exec_root)
if self.test_source_root is not None:
            # FIXME: This should really only be set in test suite config
# files. Should we distinguish them?
self.test_source_root = str(self.test_source_root)
self.excludes = set(self.excludes)
@property
def root(self):
"""root attribute - The root configuration for the test suite."""
if self.parent is None:
return self
else:
return self.parent.root
| 5,932 | 37.525974 | 89 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/LitConfig.py | from __future__ import absolute_import
import inspect
import os
import sys
import lit.Test
import lit.formats
import lit.TestingConfig
import lit.util
# LitConfig must be a new style class for properties to work
class LitConfig(object):
"""LitConfig - Configuration data for a 'lit' test runner instance, shared
across all tests.
The LitConfig object is also used to communicate with client configuration
    files; it is passed to them as the global variable 'lit_config' so that
configuration files can access common functionality and internal components
easily.
"""
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
noExecute, debug, isWindows,
params, config_prefix = None,
maxIndividualTestTime = 0,
maxFailures = None):
# The name of the test runner.
self.progname = progname
# The items to add to the PATH environment variable.
self.path = [str(p) for p in path]
self.quiet = bool(quiet)
self.useValgrind = bool(useValgrind)
self.valgrindLeakCheck = bool(valgrindLeakCheck)
self.valgrindUserArgs = list(valgrindArgs)
self.noExecute = noExecute
self.debug = debug
self.isWindows = bool(isWindows)
self.params = dict(params)
self.bashPath = None
# Configuration files to look for when discovering test suites.
self.config_prefix = config_prefix or 'lit'
self.config_name = '%s.cfg' % (self.config_prefix,)
self.site_config_name = '%s.site.cfg' % (self.config_prefix,)
self.local_config_name = '%s.local.cfg' % (self.config_prefix,)
self.numErrors = 0
self.numWarnings = 0
self.valgrindArgs = []
if self.useValgrind:
self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
'--tool=memcheck', '--trace-children=yes',
'--error-exitcode=123']
if self.valgrindLeakCheck:
self.valgrindArgs.append('--leak-check=full')
else:
# The default is 'summary'.
self.valgrindArgs.append('--leak-check=no')
self.valgrindArgs.extend(self.valgrindUserArgs)
self.maxIndividualTestTime = maxIndividualTestTime
self.maxFailures = maxFailures
@property
def maxIndividualTestTime(self):
"""
Interface for getting maximum time to spend executing
a single test
"""
return self._maxIndividualTestTime
@maxIndividualTestTime.setter
def maxIndividualTestTime(self, value):
"""
Interface for setting maximum time to spend executing
a single test
"""
self._maxIndividualTestTime = value
if self.maxIndividualTestTime > 0:
# The current implementation needs psutil to set
# a timeout per test. Check it's available.
# See lit.util.killProcessAndChildren()
try:
import psutil # noqa: F401
except ImportError:
self.fatal("Setting a timeout per test requires the"
" Python psutil module but it could not be"
" found. Try installing it via pip or via"
" your operating system's package manager.")
elif self.maxIndividualTestTime < 0:
self.fatal('The timeout per test must be >= 0 seconds')
def load_config(self, config, path):
"""load_config(config, path) - Load a config object from an alternate
path."""
if self.debug:
self.note('load_config from %r' % path)
config.load_from_path(path, self)
return config
def getBashPath(self):
"""getBashPath - Get the path to 'bash'"""
if self.bashPath is not None:
return self.bashPath
self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
if self.bashPath is None:
self.bashPath = lit.util.which('bash')
if self.bashPath is None:
self.bashPath = ''
return self.bashPath
def getToolsPath(self, dir, paths, tools):
if dir is not None and os.path.isabs(dir) and os.path.isdir(dir):
if not lit.util.checkToolsPath(dir, tools):
return None
else:
dir = lit.util.whichTools(tools, paths)
# bash
self.bashPath = lit.util.which('bash', dir)
if self.bashPath is None:
self.bashPath = ''
return dir
def _write_message(self, kind, message):
# Get the file/line where this message was generated.
f = inspect.currentframe()
# Step out of _write_message, and then out of wrapper.
f = f.f_back.f_back
file,line,_,_,_ = inspect.getframeinfo(f)
location = '%s:%d' % (file, line)
sys.stderr.write('%s: %s: %s: %s\n' % (self.progname, location,
kind, message))
def note(self, message):
self._write_message('note', message)
def warning(self, message):
self._write_message('warning', message)
self.numWarnings += 1
def error(self, message):
self._write_message('error', message)
self.numErrors += 1
def fatal(self, message):
self._write_message('fatal', message)
sys.exit(2)
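# Illustrative sketch appended to this dump (not part of the upstream module):
# constructing the configuration object defined above.  It assumes the
# enclosing class is lit.LitConfig.LitConfig; every value below is a
# placeholder chosen purely for demonstration.
def _example_build_lit_config():
    import platform
    return LitConfig(progname='lit',
                     path=[],
                     quiet=False,
                     useValgrind=False,
                     valgrindLeakCheck=False,
                     valgrindArgs=[],
                     noExecute=False,
                     debug=False,
                     isWindows=(platform.system() == 'Windows'),
                     params={},
                     maxIndividualTestTime=0,
                     maxFailures=None)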
| 5,549 | 34.576923 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/TestRunner.py | from __future__ import absolute_import
import os, signal, subprocess, sys
import re
import platform
import tempfile
import threading
import lit.ShUtil as ShUtil
import lit.Test as Test
import lit.util
from lit.util import to_bytes, to_string
class InternalShellError(Exception):
def __init__(self, command, message):
self.command = command
self.message = message
kIsWindows = platform.system() == 'Windows'
# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows
# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
class ShellEnvironment(object):
"""Mutable shell environment containing things like CWD and env vars.
    Tracks the current working directory and a mutable copy of the environment.
"""
def __init__(self, cwd, env):
self.cwd = cwd
self.env = dict(env)
class TimeoutHelper(object):
"""
    Object used to help manage enforcing a timeout in
_executeShCmd(). It is passed through recursive calls
to collect processes that have been executed so that when
the timeout happens they can be killed.
"""
def __init__(self, timeout):
self.timeout = timeout
self._procs = []
self._timeoutReached = False
self._doneKillPass = False
# This lock will be used to protect concurrent access
# to _procs and _doneKillPass
self._lock = None
self._timer = None
def cancel(self):
if not self.active():
return
self._timer.cancel()
def active(self):
return self.timeout > 0
def addProcess(self, proc):
if not self.active():
return
needToRunKill = False
with self._lock:
self._procs.append(proc)
# Avoid re-entering the lock by finding out if kill needs to be run
# again here but call it if necessary once we have left the lock.
# We could use a reentrant lock here instead but this code seems
# clearer to me.
needToRunKill = self._doneKillPass
# The initial call to _kill() from the timer thread already happened so
# we need to call it again from this thread, otherwise this process
# will be left to run even though the timeout was already hit
if needToRunKill:
assert self.timeoutReached()
self._kill()
def startTimer(self):
if not self.active():
return
# Do some late initialisation that's only needed
# if there is a timeout set
self._lock = threading.Lock()
self._timer = threading.Timer(self.timeout, self._handleTimeoutReached)
self._timer.start()
def _handleTimeoutReached(self):
self._timeoutReached = True
self._kill()
def timeoutReached(self):
return self._timeoutReached
def _kill(self):
"""
This method may be called multiple times as we might get unlucky
and be in the middle of creating a new process in _executeShCmd()
which won't yet be in ``self._procs``. By locking here and in
addProcess() we should be able to kill processes launched after
the initial call to _kill()
"""
with self._lock:
for p in self._procs:
lit.util.killProcessAndChildren(p.pid)
# Empty the list and note that we've done a pass over the list
self._procs = [] # Python2 doesn't have list.clear()
self._doneKillPass = True
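# Hedged illustration added to this dump (not in the upstream file): driving
# TimeoutHelper by hand.  It assumes a POSIX 'sleep' binary and the psutil
# module (needed by lit.util.killProcessAndChildren); real callers go through
# executeShCmd() below instead.
def _demo_timeout_helper():
    import subprocess
    helper = TimeoutHelper(timeout=2)
    helper.startTimer()
    proc = subprocess.Popen(['sleep', '10'])
    helper.addProcess(proc)      # killed once the two-second timer fires
    proc.wait()
    helper.cancel()
    return helper.timeoutReached()   # True if the timeout hit first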
class ShellCommandResult(object):
"""Captures the result of an individual command."""
def __init__(self, command, stdout, stderr, exitCode, timeoutReached,
outputFiles = []):
self.command = command
self.stdout = stdout
self.stderr = stderr
self.exitCode = exitCode
self.timeoutReached = timeoutReached
self.outputFiles = list(outputFiles)
def executeShCmd(cmd, shenv, results, timeout=0):
"""
Wrapper around _executeShCmd that handles
timeout
"""
# Use the helper even when no timeout is required to make
    # other code simpler (i.e. avoid a bunch of ``!= None`` checks)
timeoutHelper = TimeoutHelper(timeout)
if timeout > 0:
timeoutHelper.startTimer()
finalExitCode = _executeShCmd(cmd, shenv, results, timeoutHelper)
timeoutHelper.cancel()
timeoutInfo = None
if timeoutHelper.timeoutReached():
timeoutInfo = 'Reached timeout of {} seconds'.format(timeout)
return (finalExitCode, timeoutInfo)
def quote_windows_command(seq):
"""
Reimplement Python's private subprocess.list2cmdline for MSys compatibility
Based on CPython implementation here:
https://hg.python.org/cpython/file/849826a900d2/Lib/subprocess.py#l422
Some core util distributions (MSys) don't tokenize command line arguments
the same way that MSVC CRT does. Lit rolls its own quoting logic similar to
the stock CPython logic to paper over these quoting and tokenization rule
differences.
We use the same algorithm from MSDN as CPython
(http://msdn.microsoft.com/en-us/library/17w5ykft.aspx), but we treat more
characters as needing quoting, such as double quotes themselves.
"""
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
# This logic differs from upstream list2cmdline.
needquote = (" " in arg) or ("\t" in arg) or ("\"" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
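# Quick sanity check added for illustration (not upstream code): arguments
# containing spaces are double-quoted by the routine above.  The command line
# itself is made up.
def _demo_quote_windows_command():
    quoted = quote_windows_command(['clang', '-DMSG=a b'])
    assert quoted == 'clang "-DMSG=a b"'
    return quoted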
def _executeShCmd(cmd, shenv, results, timeoutHelper):
if timeoutHelper.timeoutReached():
# Prevent further recursion if the timeout has been hit
        # as we should try to avoid launching more processes.
return None
if isinstance(cmd, ShUtil.Seq):
if cmd.op == ';':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
if cmd.op == '&':
raise InternalShellError(cmd,"unsupported shell operator: '&'")
if cmd.op == '||':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res != 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
if cmd.op == '&&':
res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
if res is None:
return res
if res == 0:
res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
return res
raise ValueError('Unknown shell command: %r' % cmd.op)
assert isinstance(cmd, ShUtil.Pipeline)
# Handle shell builtins first.
if cmd.commands[0].args[0] == 'cd':
if len(cmd.commands) != 1:
raise ValueError("'cd' cannot be part of a pipeline")
if len(cmd.commands[0].args) != 2:
raise ValueError("'cd' supports only one argument")
newdir = cmd.commands[0].args[1]
# Update the cwd in the parent environment.
if os.path.isabs(newdir):
shenv.cwd = newdir
else:
shenv.cwd = os.path.join(shenv.cwd, newdir)
# The cd builtin always succeeds. If the directory does not exist, the
# following Popen calls will fail instead.
return 0
procs = []
input = subprocess.PIPE
stderrTempFiles = []
opened_files = []
named_temp_files = []
# To avoid deadlock, we use a single stderr stream for piped
# output. This is null until we have seen some output using
# stderr.
for i,j in enumerate(cmd.commands):
# Reference the global environment by default.
cmd_shenv = shenv
if j.args[0] == 'env':
# Create a copy of the global environment and modify it for this one
# command. There might be multiple envs in a pipeline:
# env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
arg_idx = 1
for arg_idx, arg in enumerate(j.args[1:]):
# Partition the string into KEY=VALUE.
key, eq, val = arg.partition('=')
# Stop if there was no equals.
if eq == '':
break
cmd_shenv.env[key] = val
j.args = j.args[arg_idx+1:]
# Apply the redirections, we use (N,) as a sentinel to indicate stdin,
# stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
# from a file are represented with a list [file, mode, file-object]
# where file-object is initially None.
redirects = [(0,), (1,), (2,)]
for r in j.redirects:
if r[0] == ('>',2):
redirects[2] = [r[1], 'w', None]
elif r[0] == ('>>',2):
redirects[2] = [r[1], 'a', None]
elif r[0] == ('>&',2) and r[1] in '012':
redirects[2] = redirects[int(r[1])]
elif r[0] == ('>&',) or r[0] == ('&>',):
redirects[1] = redirects[2] = [r[1], 'w', None]
elif r[0] == ('>',):
redirects[1] = [r[1], 'w', None]
elif r[0] == ('>>',):
redirects[1] = [r[1], 'a', None]
elif r[0] == ('<',):
redirects[0] = [r[1], 'r', None]
else:
raise InternalShellError(j,"Unsupported redirect: %r" % (r,))
# Map from the final redirections to something subprocess can handle.
final_redirects = []
for index,r in enumerate(redirects):
if r == (0,):
result = input
elif r == (1,):
if index == 0:
raise InternalShellError(j,"Unsupported redirect for stdin")
elif index == 1:
result = subprocess.PIPE
else:
result = subprocess.STDOUT
elif r == (2,):
if index != 2:
raise InternalShellError(j,"Unsupported redirect on stdout")
result = subprocess.PIPE
else:
if r[2] is None:
redir_filename = None
if kAvoidDevNull and r[0] == '/dev/null':
r[2] = tempfile.TemporaryFile(mode=r[1])
elif kIsWindows and r[0] == '/dev/tty':
# Simulate /dev/tty on Windows.
# "CON" is a special filename for the console.
r[2] = open("CON", r[1])
else:
# Make sure relative paths are relative to the cwd.
redir_filename = os.path.join(cmd_shenv.cwd, r[0])
r[2] = open(redir_filename, r[1])
# Workaround a Win32 and/or subprocess bug when appending.
#
# FIXME: Actually, this is probably an instance of PR6753.
if r[1] == 'a':
r[2].seek(0, 2)
opened_files.append(tuple(r) + (redir_filename,))
result = r[2]
final_redirects.append(result)
stdin, stdout, stderr = final_redirects
# If stderr wants to come from stdout, but stdout isn't a pipe, then put
# stderr on a pipe and treat it as stdout.
if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
stderr = subprocess.PIPE
stderrIsStdout = True
else:
stderrIsStdout = False
        # Don't allow stderr on a PIPE except for the last
        # process; otherwise this could deadlock.
#
# FIXME: This is slow, but so is deadlock.
if stderr == subprocess.PIPE and j != cmd.commands[-1]:
stderr = tempfile.TemporaryFile(mode='w+b')
stderrTempFiles.append((i, stderr))
# Resolve the executable path ourselves.
args = list(j.args)
executable = None
# For paths relative to cwd, use the cwd of the shell environment.
if args[0].startswith('.'):
exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
if os.path.isfile(exe_in_cwd):
executable = exe_in_cwd
if not executable:
executable = lit.util.which(args[0], cmd_shenv.env['PATH'])
if not executable:
raise InternalShellError(j, '%r: command not found' % j.args[0])
# Replace uses of /dev/null with temporary files.
if kAvoidDevNull:
for i,arg in enumerate(args):
if arg == "/dev/null":
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
named_temp_files.append(f.name)
args[i] = f.name
# On Windows, do our own command line quoting for better compatibility
# with some core utility distributions.
if kIsWindows:
args = quote_windows_command(args)
try:
procs.append(subprocess.Popen(args, cwd=cmd_shenv.cwd,
executable = executable,
stdin = stdin,
stdout = stdout,
stderr = stderr,
env = cmd_shenv.env,
close_fds = kUseCloseFDs))
# Let the helper know about this process
timeoutHelper.addProcess(procs[-1])
except OSError as e:
raise InternalShellError(j, 'Could not create process ({}) due to {}'.format(executable, e))
# Immediately close stdin for any process taking stdin from us.
if stdin == subprocess.PIPE:
procs[-1].stdin.close()
procs[-1].stdin = None
# Update the current stdin source.
if stdout == subprocess.PIPE:
input = procs[-1].stdout
elif stderrIsStdout:
input = procs[-1].stderr
else:
input = subprocess.PIPE
# Explicitly close any redirected files. We need to do this now because we
# need to release any handles we may have on the temporary files (important
# on Win32, for example). Since we have already spawned the subprocess, our
# handles have already been transferred so we do not need them anymore.
for (name, mode, f, path) in opened_files:
f.close()
# FIXME: There is probably still deadlock potential here. Yawn.
procData = [None] * len(procs)
procData[-1] = procs[-1].communicate()
for i in range(len(procs) - 1):
if procs[i].stdout is not None:
out = procs[i].stdout.read()
else:
out = ''
if procs[i].stderr is not None:
err = procs[i].stderr.read()
else:
err = ''
procData[i] = (out,err)
# Read stderr out of the temp files.
for i,f in stderrTempFiles:
f.seek(0, 0)
procData[i] = (procData[i][0], f.read())
    # Local helper that shadows lit.util.to_string; by the time it is called
    # below the values have already been decoded, so it only ever sees str.
    def to_string(bytes):
        if isinstance(bytes, str):
            return bytes
        return bytes.encode('utf-8')
exitCode = None
for i,(out,err) in enumerate(procData):
res = procs[i].wait()
# Detect Ctrl-C in subprocess.
if res == -signal.SIGINT:
raise KeyboardInterrupt
# Ensure the resulting output is always of string type.
try:
if out is None:
out = ''
else:
out = to_string(out.decode('utf-8', errors='replace'))
except:
out = str(out)
try:
if err is None:
err = ''
else:
err = to_string(err.decode('utf-8', errors='replace'))
except:
err = str(err)
# Gather the redirected output files for failed commands.
output_files = []
if res != 0:
for (name, mode, f, path) in sorted(opened_files):
if path is not None and mode in ('w', 'a'):
try:
with open(path, 'rb') as f:
data = f.read()
except:
data = None
if data is not None:
output_files.append((name, path, data))
results.append(ShellCommandResult(
cmd.commands[i], out, err, res, timeoutHelper.timeoutReached(),
output_files))
if cmd.pipe_err:
# Python treats the exit code as a signed char.
if exitCode is None:
exitCode = res
elif res < 0:
exitCode = min(exitCode, res)
else:
exitCode = max(exitCode, res)
else:
exitCode = res
# Remove any named temporary files we created.
for f in named_temp_files:
try:
os.remove(f)
except OSError:
pass
if cmd.negate:
exitCode = not exitCode
return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
cmds = []
for ln in commands:
try:
cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
test.config.pipefail).parse())
except:
return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)
cmd = cmds[0]
for c in cmds[1:]:
cmd = ShUtil.Seq(cmd, '&&', c)
results = []
timeoutInfo = None
try:
shenv = ShellEnvironment(cwd, test.config.environment)
exitCode, timeoutInfo = executeShCmd(cmd, shenv, results, timeout=litConfig.maxIndividualTestTime)
except InternalShellError:
e = sys.exc_info()[1]
exitCode = 127
results.append(
ShellCommandResult(e.command, '', e.message, exitCode, False))
out = err = ''
for i,result in enumerate(results):
# Write the command line run.
out += '$ %s\n' % (' '.join('"%s"' % s
for s in result.command.args),)
# If nothing interesting happened, move on.
if litConfig.maxIndividualTestTime == 0 and \
result.exitCode == 0 and \
not result.stdout.strip() and not result.stderr.strip():
continue
# Otherwise, something failed or was printed, show it.
# Add the command output, if redirected.
for (name, path, data) in result.outputFiles:
if data.strip():
out += "# redirected output from %r:\n" % (name,)
data = to_string(data.decode('utf-8', errors='replace'))
if len(data) > 1024:
out += data[:1024] + "\n...\n"
out += "note: data was truncated\n"
else:
out += data
out += "\n"
if result.stdout.strip():
out += '# command output:\n%s\n' % (result.stdout,)
if result.stderr.strip():
out += '# command stderr:\n%s\n' % (result.stderr,)
if not result.stdout.strip() and not result.stderr.strip():
out += "note: command had no output on stdout or stderr\n"
# Show the error conditions:
if result.exitCode != 0:
# On Windows, a negative exit code indicates a signal, and those are
# easier to recognize or look up if we print them in hex.
if litConfig.isWindows and result.exitCode < 0:
codeStr = hex(int(result.exitCode & 0xFFFFFFFF)).rstrip("L")
else:
codeStr = str(result.exitCode)
out += "error: command failed with exit status: %s\n" % (
codeStr,)
if litConfig.maxIndividualTestTime > 0:
out += 'error: command reached timeout: %s\n' % (
str(result.timeoutReached),)
return out, err, exitCode, timeoutInfo
def executeScript(test, litConfig, tmpBase, commands, cwd):
bashPath = litConfig.getBashPath()
isWin32CMDEXE = (litConfig.isWindows and not bashPath)
script = tmpBase + '.script'
if isWin32CMDEXE:
script += '.bat'
# Write script file
mode = 'w'
if litConfig.isWindows and not isWin32CMDEXE:
mode += 'b' # Avoid CRLFs when writing bash scripts.
f = open(script, mode)
if isWin32CMDEXE:
f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
else:
if test.config.pipefail:
f.write('set -o pipefail;')
f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
f.write('\n')
f.close()
if isWin32CMDEXE:
command = ['cmd','/c', script]
else:
if bashPath:
command = [bashPath, script]
else:
command = ['/bin/sh', script]
if litConfig.useValgrind:
# FIXME: Running valgrind on sh is overkill. We probably could just
# run on clang with no real loss.
command = litConfig.valgrindArgs + command
try:
out, err, exitCode = lit.util.executeCommand(command, cwd=cwd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
return (out, err, exitCode, None)
except lit.util.ExecuteCommandTimeoutException as e:
return (e.out, e.err, e.exitCode, e.msg)
def parseIntegratedTestScriptCommands(source_path, keywords):
"""
parseIntegratedTestScriptCommands(source_path) -> commands
Parse the commands in an integrated test script file into a list of
(line_number, command_type, line).
"""
# This code is carefully written to be dual compatible with Python 2.5+ and
# Python 3 without requiring input files to always have valid codings. The
# trick we use is to open the file in binary mode and use the regular
# expression library to find the commands, with it scanning strings in
# Python2 and bytes in Python3.
#
# Once we find a match, we do require each script line to be decodable to
# UTF-8, so we convert the outputs to UTF-8 before returning. This way the
# remaining code can work with "strings" agnostic of the executing Python
# version.
keywords_re = re.compile(
to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),)))
f = open(source_path, 'rb')
try:
# Read the entire file contents.
data = f.read()
# Ensure the data ends with a newline.
if not data.endswith(to_bytes('\n')):
data = data + to_bytes('\n')
# Iterate over the matches.
line_number = 1
last_match_position = 0
for match in keywords_re.finditer(data):
# Compute the updated line number by counting the intervening
# newlines.
match_position = match.start()
line_number += data.count(to_bytes('\n'), last_match_position,
match_position)
last_match_position = match_position
# Convert the keyword and line to UTF-8 strings and yield the
# command. Note that we take care to return regular strings in
# Python 2, to avoid other code having to differentiate between the
# str and unicode types.
keyword,ln = match.groups()
yield (line_number, to_string(keyword.decode('utf-8')),
to_string(ln.decode('utf-8')))
finally:
f.close()
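# Illustrative sketch added to this dump (not upstream): feeding a small,
# made-up test file through the keyword scanner above.
def _demo_parse_script_commands():
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix='.ll')
    with os.fdopen(fd, 'w') as f:
        f.write("; RUN: opt < %s\n; END.\n")
    try:
        found = list(parseIntegratedTestScriptCommands(path, ['RUN:', 'END.']))
    finally:
        os.remove(path)
    # Expected: [(1, 'RUN:', ' opt < %s'), (2, 'END.', '')]
    return found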
def getTempPaths(test):
"""Get the temporary location, this is always relative to the test suite
root, not test source root."""
execpath = test.getExecPath()
execdir,execbase = os.path.split(execpath)
tmpDir = os.path.join(execdir, 'Output')
tmpBase = os.path.join(tmpDir, execbase)
return tmpDir, tmpBase
def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
sourcepath = test.getSourcePath()
sourcedir = os.path.dirname(sourcepath)
# Normalize slashes, if requested.
if normalize_slashes:
sourcepath = sourcepath.replace('\\', '/')
sourcedir = sourcedir.replace('\\', '/')
tmpDir = tmpDir.replace('\\', '/')
tmpBase = tmpBase.replace('\\', '/')
# We use #_MARKER_# to hide %% while we do the other substitutions.
substitutions = []
substitutions.extend([('%%', '#_MARKER_#')])
substitutions.extend(test.config.substitutions)
substitutions.extend([('%s', sourcepath),
('%S', sourcedir),
('%p', sourcedir),
('%{pathsep}', os.pathsep),
('%t', tmpBase + '.tmp'),
('%T', tmpDir),
('#_MARKER_#', '%')])
# "%/[STpst]" should be normalized.
substitutions.extend([
('%/s', sourcepath.replace('\\', '/')),
('%/S', sourcedir.replace('\\', '/')),
('%/p', sourcedir.replace('\\', '/')),
('%/t', tmpBase.replace('\\', '/') + '.tmp'),
('%/T', tmpDir.replace('\\', '/')),
])
# "%:[STpst]" are paths without colons.
if kIsWindows:
substitutions.extend([
('%:s', re.sub(r'^(.):', r'\1', sourcepath)),
('%:S', re.sub(r'^(.):', r'\1', sourcedir)),
('%:p', re.sub(r'^(.):', r'\1', sourcedir)),
('%:t', re.sub(r'^(.):', r'\1', tmpBase) + '.tmp'),
('%:T', re.sub(r'^(.):', r'\1', tmpDir)),
])
else:
substitutions.extend([
('%:s', sourcepath),
('%:S', sourcedir),
('%:p', sourcedir),
('%:t', tmpBase + '.tmp'),
('%:T', tmpDir),
])
return substitutions
def applySubstitutions(script, substitutions):
"""Apply substitutions to the script. Allow full regular expression syntax.
Replace each matching occurrence of regular expression pattern a with
substitution b in line ln."""
def processLine(ln):
# Apply substitutions
for a,b in substitutions:
if kIsWindows:
b = b.replace("\\","\\\\")
ln = re.sub(a, b, ln)
# Strip the trailing newline and any extra whitespace.
return ln.strip()
# Note Python 3 map() gives an iterator rather than a list so explicitly
# convert to list before returning.
return list(map(processLine, script))
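# Small worked example added for illustration (not upstream): the substitution
# pairs below are invented; real pairs come from getDefaultSubstitutions().
def _demo_apply_substitutions():
    script = ['clang %s -o %t']
    subs = [('%s', '/work/test.c'), ('%t', '/work/Output/test.c.tmp')]
    # Produces ['clang /work/test.c -o /work/Output/test.c.tmp']
    return applySubstitutions(script, subs)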
class ParserKind(object):
"""
An enumeration representing the style of an integrated test keyword or
command.
TAG: A keyword taking no value. Ex 'END.'
    COMMAND: A keyword taking a list of shell commands. Ex 'RUN:'
    LIST: A keyword taking a comma-separated list of values. Ex 'XFAIL:'
CUSTOM: A keyword with custom parsing semantics.
"""
TAG = 0
COMMAND = 1
LIST = 2
CUSTOM = 3
class IntegratedTestKeywordParser(object):
"""A parser for LLVM/Clang style integrated test scripts.
keyword: The keyword to parse for. It must end in either '.' or ':'.
    kind: A value of ParserKind.
parser: A custom parser. This value may only be specified with
ParserKind.CUSTOM.
"""
def __init__(self, keyword, kind, parser=None, initial_value=None):
if not keyword.endswith('.') and not keyword.endswith(':'):
raise ValueError("keyword '%s' must end with either '.' or ':' "
% keyword)
if keyword.endswith('.') and kind in \
[ParserKind.LIST, ParserKind.COMMAND]:
raise ValueError("Keyword '%s' should end in ':'" % keyword)
elif keyword.endswith(':') and kind in [ParserKind.TAG]:
raise ValueError("Keyword '%s' should end in '.'" % keyword)
if parser is not None and kind != ParserKind.CUSTOM:
raise ValueError("custom parsers can only be specified with "
"ParserKind.CUSTOM")
self.keyword = keyword
self.kind = kind
self.parsed_lines = []
self.value = initial_value
self.parser = parser
if kind == ParserKind.COMMAND:
self.parser = self._handleCommand
elif kind == ParserKind.LIST:
self.parser = self._handleList
elif kind == ParserKind.TAG:
if not keyword.endswith('.'):
raise ValueError("keyword '%s' should end with '.'" % keyword)
self.parser = self._handleTag
elif kind == ParserKind.CUSTOM:
if parser is None:
raise ValueError("ParserKind.CUSTOM requires a custom parser")
self.parser = parser
else:
raise ValueError("Unknown kind '%s'" % kind)
def parseLine(self, line_number, line):
self.parsed_lines += [(line_number, line)]
self.value = self.parser(line_number, line, self.value)
def getValue(self):
return self.value
@staticmethod
def _handleTag(line_number, line, output):
"""A helper for parsing TAG type keywords"""
return (not line.strip() or output)
@staticmethod
def _handleCommand(line_number, line, output):
"""A helper for parsing COMMAND type keywords"""
# Trim trailing whitespace.
line = line.rstrip()
# Substitute line number expressions
        line = re.sub(r'%\(line\)', str(line_number), line)
def replace_line_number(match):
if match.group(1) == '+':
return str(line_number + int(match.group(2)))
if match.group(1) == '-':
return str(line_number - int(match.group(2)))
        line = re.sub(r'%\(line *([\+-]) *(\d+)\)', replace_line_number, line)
# Collapse lines with trailing '\\'.
if output and output[-1][-1] == '\\':
output[-1] = output[-1][:-1] + line
else:
if output is None:
output = []
output.append(line)
return output
@staticmethod
def _handleList(line_number, line, output):
"""A parser for LIST type keywords"""
if output is None:
output = []
output.extend([s.strip() for s in line.split(',')])
return output
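# Hedged example added to this dump (not upstream): a LIST-style parser for a
# hypothetical 'MYDEPS:' keyword, fed two lines by hand.
def _demo_keyword_parser():
    p = IntegratedTestKeywordParser('MYDEPS:', ParserKind.LIST)
    p.parseLine(1, 'foo, bar')
    p.parseLine(2, 'baz')
    assert p.getValue() == ['foo', 'bar', 'baz']
    return p.getValue()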
def parseIntegratedTestScript(test, additional_parsers=[],
require_script=True):
"""parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
    script and extract the lines to 'RUN' as well as 'XFAIL', 'REQUIRES',
    'REQUIRES-ANY' and 'UNSUPPORTED' information.
If additional parsers are specified then the test is also scanned for the
keywords they specify and all matches are passed to the custom parser.
If 'require_script' is False an empty script
may be returned. This can be used for test formats where the actual script
is optional or ignored.
"""
# Collect the test lines from the script.
sourcepath = test.getSourcePath()
script = []
requires = []
requires_any = []
unsupported = []
builtin_parsers = [
IntegratedTestKeywordParser('RUN:', ParserKind.COMMAND,
initial_value=script),
IntegratedTestKeywordParser('XFAIL:', ParserKind.LIST,
initial_value=test.xfails),
IntegratedTestKeywordParser('REQUIRES:', ParserKind.LIST,
initial_value=requires),
IntegratedTestKeywordParser('REQUIRES-ANY:', ParserKind.LIST,
initial_value=requires_any),
IntegratedTestKeywordParser('UNSUPPORTED:', ParserKind.LIST,
initial_value=unsupported),
IntegratedTestKeywordParser('END.', ParserKind.TAG)
]
keyword_parsers = {p.keyword: p for p in builtin_parsers}
for parser in additional_parsers:
if not isinstance(parser, IntegratedTestKeywordParser):
raise ValueError('additional parser must be an instance of '
'IntegratedTestKeywordParser')
if parser.keyword in keyword_parsers:
raise ValueError("Parser for keyword '%s' already exists"
% parser.keyword)
keyword_parsers[parser.keyword] = parser
for line_number, command_type, ln in \
parseIntegratedTestScriptCommands(sourcepath,
keyword_parsers.keys()):
parser = keyword_parsers[command_type]
parser.parseLine(line_number, ln)
if command_type == 'END.' and parser.getValue() is True:
break
# Verify the script contains a run line.
if require_script and not script:
return lit.Test.Result(Test.UNRESOLVED, "Test has no run line!")
# Check for unterminated run lines.
if script and script[-1][-1] == '\\':
return lit.Test.Result(Test.UNRESOLVED,
"Test has unterminated run lines (with '\\')")
# Check that we have the required features:
missing_required_features = [f for f in requires
if f not in test.config.available_features]
if missing_required_features:
msg = ', '.join(missing_required_features)
return lit.Test.Result(Test.UNSUPPORTED,
"Test requires the following features: %s"
% msg)
requires_any_features = [f for f in requires_any
if f in test.config.available_features]
if requires_any and not requires_any_features:
msg = ' ,'.join(requires_any)
return lit.Test.Result(Test.UNSUPPORTED,
"Test requires any of the following features: "
"%s" % msg)
unsupported_features = [f for f in unsupported
if f in test.config.available_features]
if unsupported_features:
msg = ', '.join(unsupported_features)
return lit.Test.Result(
Test.UNSUPPORTED,
"Test is unsupported with the following features: %s" % msg)
unsupported_targets = [f for f in unsupported
if f in test.suite.config.target_triple]
if unsupported_targets:
return lit.Test.Result(
Test.UNSUPPORTED,
"Test is unsupported with the following triple: %s" % (
test.suite.config.target_triple,))
if test.config.limit_to_features:
# Check that we have one of the limit_to_features features in requires.
limit_to_features_tests = [f for f in test.config.limit_to_features
if f in requires]
if not limit_to_features_tests:
msg = ', '.join(test.config.limit_to_features)
return lit.Test.Result(
Test.UNSUPPORTED,
"Test requires one of the limit_to_features features %s" % msg)
return script
def _runShTest(test, litConfig, useExternalSh, script, tmpBase):
# Create the output directory if it does not already exist.
lit.util.mkdir_p(os.path.dirname(tmpBase))
execdir = os.path.dirname(test.getExecPath())
if useExternalSh:
res = executeScript(test, litConfig, tmpBase, script, execdir)
else:
res = executeScriptInternal(test, litConfig, tmpBase, script, execdir)
if isinstance(res, lit.Test.Result):
return res
out,err,exitCode,timeoutInfo = res
if exitCode == 0:
status = Test.PASS
else:
if timeoutInfo is None:
status = Test.FAIL
else:
status = Test.TIMEOUT
# Form the output log.
output = """Script:\n--\n%s\n--\nExit Code: %d\n""" % (
'\n'.join(script), exitCode)
if timeoutInfo is not None:
output += """Timeout: %s\n""" % (timeoutInfo,)
output += "\n"
# Append the outputs, if present.
if out:
output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
if err:
output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)
return lit.Test.Result(status, output)
def executeShTest(test, litConfig, useExternalSh,
extra_substitutions=[]):
if test.config.unsupported:
return (Test.UNSUPPORTED, 'Test is unsupported')
script = parseIntegratedTestScript(test)
if isinstance(script, lit.Test.Result):
return script
if litConfig.noExecute:
return lit.Test.Result(Test.PASS)
tmpDir, tmpBase = getTempPaths(test)
substitutions = list(extra_substitutions)
substitutions += getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
script = applySubstitutions(script, substitutions)
# Re-run failed tests up to test_retry_attempts times.
attempts = 1
if hasattr(test.config, 'test_retry_attempts'):
attempts += test.config.test_retry_attempts
for i in range(attempts):
res = _runShTest(test, litConfig, useExternalSh, script, tmpBase)
if res.code != Test.FAIL:
break
# If we had to run the test more than once, count it as a flaky pass. These
# will be printed separately in the test summary.
if i > 0 and res.code == Test.PASS:
res.code = Test.FLAKYPASS
return res
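# Hedged sketch appended for illustration (not upstream): roughly how a ShTest
# format object would drive the entry point above.  'test' and 'lit_config'
# are assumed to come from lit's discovery machinery, and '%mytool' is an
# invented extra substitution.
def _demo_execute_sh_test(test, lit_config):
    return executeShTest(test, lit_config, useExternalSh=False,
                         extra_substitutions=[('%mytool', '/opt/bin/mytool')])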
| 38,501 | 36.933005 | 106 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/Test.py | import os
from xml.sax.saxutils import escape
from json import JSONEncoder
# Test result codes.
class ResultCode(object):
"""Test result codes."""
# We override __new__ and __getnewargs__ to ensure that pickling still
# provides unique ResultCode objects in any particular instance.
_instances = {}
def __new__(cls, name, isFailure):
res = cls._instances.get(name)
if res is None:
cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
return res
def __getnewargs__(self):
return (self.name, self.isFailure)
def __init__(self, name, isFailure):
self.name = name
self.isFailure = isFailure
def __repr__(self):
return '%s%r' % (self.__class__.__name__,
(self.name, self.isFailure))
PASS = ResultCode('PASS', False)
FLAKYPASS = ResultCode('FLAKYPASS', False)
XFAIL = ResultCode('XFAIL', False)
FAIL = ResultCode('FAIL', True)
XPASS = ResultCode('XPASS', True)
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)
# Test metric values.
class MetricValue(object):
def format(self):
"""
format() -> str
Convert this metric to a string suitable for displaying as part of the
console output.
"""
raise RuntimeError("abstract method")
def todata(self):
"""
todata() -> json-serializable data
Convert this metric to content suitable for serializing in the JSON test
output.
"""
raise RuntimeError("abstract method")
class IntMetricValue(MetricValue):
def __init__(self, value):
self.value = value
def format(self):
return str(self.value)
def todata(self):
return self.value
class RealMetricValue(MetricValue):
def __init__(self, value):
self.value = value
def format(self):
return '%.4f' % self.value
def todata(self):
return self.value
class JSONMetricValue(MetricValue):
"""
JSONMetricValue is used for types that are representable in the output
but that are otherwise uninterpreted.
"""
def __init__(self, value):
# Ensure the value is a serializable by trying to encode it.
# WARNING: The value may change before it is encoded again, and may
# not be encodable after the change.
try:
e = JSONEncoder()
e.encode(value)
except TypeError:
raise
self.value = value
def format(self):
e = JSONEncoder(indent=2, sort_keys=True)
return e.encode(self.value)
def todata(self):
return self.value
def toMetricValue(value):
if isinstance(value, MetricValue):
return value
elif isinstance(value, int):
return IntMetricValue(value)
elif isinstance(value, float):
return RealMetricValue(value)
else:
# 'long' is only present in python2
try:
if isinstance(value, long):
return IntMetricValue(value)
except NameError:
pass
# Try to create a JSONMetricValue and let the constructor throw
# if value is not a valid type.
return JSONMetricValue(value)
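# Added illustration (not upstream): how the helpers above coerce plain Python
# values.  The numbers are arbitrary.
def _demo_metric_values():
    assert toMetricValue(42).format() == '42'
    assert toMetricValue(0.5).format() == '0.5000'
    return toMetricValue({'passes': 3}).todata()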
# Test results.
class Result(object):
"""Wrapper for the results of executing an individual test."""
def __init__(self, code, output='', elapsed=None):
# The result code.
self.code = code
# The test output.
self.output = output
# The wall timing to execute the test, if timing.
self.elapsed = elapsed
# The metrics reported by this test.
self.metrics = {}
def addMetric(self, name, value):
"""
addMetric(name, value)
Attach a test metric to the test result, with the given name and list of
values. It is an error to attempt to attach the metrics with the same
name multiple times.
Each value must be an instance of a MetricValue subclass.
"""
if name in self.metrics:
raise ValueError("result already includes metrics for %r" % (
name,))
if not isinstance(value, MetricValue):
raise TypeError("unexpected metric value: %r" % (value,))
self.metrics[name] = value
# Test classes.
class TestSuite:
"""TestSuite - Information on a group of tests.
A test suite groups together a set of logically related tests.
"""
def __init__(self, name, source_root, exec_root, config):
self.name = name
self.source_root = source_root
self.exec_root = exec_root
# The test suite configuration.
self.config = config
def getSourcePath(self, components):
return os.path.join(self.source_root, *components)
def getExecPath(self, components):
return os.path.join(self.exec_root, *components)
class Test:
"""Test - Information on a single test instance."""
def __init__(self, suite, path_in_suite, config, file_path = None):
self.suite = suite
self.path_in_suite = path_in_suite
self.config = config
self.file_path = file_path
# A list of conditions under which this test is expected to fail. These
# can optionally be provided by test format handlers, and will be
# honored when the test result is supplied.
self.xfails = []
# The test result, once complete.
self.result = None
def setResult(self, result):
if self.result is not None:
raise ValueError("test result already set")
if not isinstance(result, Result):
raise ValueError("unexpected result type")
self.result = result
# Apply the XFAIL handling to resolve the result exit code.
if self.isExpectedToFail():
if self.result.code == PASS:
self.result.code = XPASS
elif self.result.code == FAIL:
self.result.code = XFAIL
def getFullName(self):
return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)
def getFilePath(self):
if self.file_path:
return self.file_path
return self.getSourcePath()
def getSourcePath(self):
return self.suite.getSourcePath(self.path_in_suite)
def getExecPath(self):
return self.suite.getExecPath(self.path_in_suite)
def isExpectedToFail(self):
"""
isExpectedToFail() -> bool
Check whether this test is expected to fail in the current
configuration. This check relies on the test xfails property which by
some test formats may not be computed until the test has first been
executed.
"""
# Check if any of the xfails match an available feature or the target.
for item in self.xfails:
# If this is the wildcard, it always fails.
if item == '*':
return True
# If this is an exact match for one of the features, it fails.
if item in self.config.available_features:
return True
# If this is a part of the target triple, it fails.
if item and item in self.suite.config.target_triple:
return True
return False
def isEarlyTest(self):
"""
isEarlyTest() -> bool
Check whether this test should be executed early in a particular run.
This can be used for test suites with long running tests to maximize
parallelism or where it is desirable to surface their failures early.
"""
return self.suite.config.is_early
def getJUnitXML(self):
test_name = self.path_in_suite[-1]
test_path = self.path_in_suite[:-1]
safe_test_path = [x.replace(".","_") for x in test_path]
safe_name = self.suite.name.replace(".","-")
if safe_test_path:
class_name = safe_name + "." + "/".join(safe_test_path)
else:
class_name = safe_name + "." + safe_name
xml = "<testcase classname='" + class_name + "' name='" + \
test_name + "'"
xml += " time='%.2f'" % (self.result.elapsed,)
if self.result.code.isFailure:
xml += ">\n\t<failure >\n" + escape(self.result.output)
xml += "\n\t</failure>\n</testcase>"
else:
xml += "/>"
return xml
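# Hedged sketch appended for illustration (not upstream): how setResult()
# resolves an expected failure.  Passing None for the suite and config is a
# shortcut that only works because the wildcard xfail short-circuits before
# either object is consulted.
def _demo_xfail_resolution():
    t = Test(suite=None, path_in_suite=('demo.c',), config=None)
    t.xfails = ['*']
    t.setResult(Result(FAIL, output='expected failure'))
    return t.result.code   # XFAIL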
| 8,541 | 30.061818 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/ShCommands.py | class Command:
def __init__(self, args, redirects):
self.args = list(args)
self.redirects = list(redirects)
def __repr__(self):
return 'Command(%r, %r)' % (self.args, self.redirects)
def __eq__(self, other):
if not isinstance(other, Command):
return False
return ((self.args, self.redirects) ==
(other.args, other.redirects))
def toShell(self, file):
for arg in self.args:
if "'" not in arg:
quoted = "'%s'" % arg
elif '"' not in arg and '$' not in arg:
quoted = '"%s"' % arg
else:
raise NotImplementedError('Unable to quote %r' % arg)
file.write(quoted)
            # For debugging / validation.  Import here rather than at module
            # level to avoid a circular import with lit.ShUtil.
            from lit import ShUtil
dequoted = list(ShUtil.ShLexer(quoted).lex())
if dequoted != [arg]:
raise NotImplementedError('Unable to quote %r' % arg)
for r in self.redirects:
if len(r[0]) == 1:
file.write("%s '%s'" % (r[0][0], r[1]))
else:
file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
class Pipeline:
def __init__(self, commands, negate=False, pipe_err=False):
self.commands = commands
self.negate = negate
self.pipe_err = pipe_err
def __repr__(self):
return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
self.pipe_err)
def __eq__(self, other):
if not isinstance(other, Pipeline):
return False
return ((self.commands, self.negate, self.pipe_err) ==
                (other.commands, other.negate, other.pipe_err))
def toShell(self, file, pipefail=False):
if pipefail != self.pipe_err:
raise ValueError('Inconsistent "pipefail" attribute!')
if self.negate:
file.write('! ')
for cmd in self.commands:
cmd.toShell(file)
if cmd is not self.commands[-1]:
file.write('|\n ')
class Seq:
def __init__(self, lhs, op, rhs):
assert op in (';', '&', '||', '&&')
self.op = op
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)
def __eq__(self, other):
if not isinstance(other, Seq):
return False
return ((self.lhs, self.op, self.rhs) ==
(other.lhs, other.op, other.rhs))
def toShell(self, file, pipefail=False):
self.lhs.toShell(file, pipefail)
file.write(' %s\n' % self.op)
self.rhs.toShell(file, pipefail)
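# Illustrative only, appended to this dump (not upstream): building the AST
# nodes above by hand, roughly what lit.ShUtil's parser would produce for
# "! grep foo a.txt | wc -l".
def _demo_shcommands_ast():
    pipe = Pipeline([Command(['grep', 'foo', 'a.txt'], []),
                     Command(['wc', '-l'], [])], negate=True)
    assert pipe == Pipeline([Command(['grep', 'foo', 'a.txt'], []),
                             Command(['wc', '-l'], [])], negate=True)
    return repr(pipe)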
| 2,696 | 30.360465 | 69 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/discovery.py | """
Test discovery functions.
"""
import copy
import os
import sys
import lit.run
from lit.TestingConfig import TestingConfig
from lit import LitConfig, Test
def dirContainsTestSuite(path, lit_config):
cfgpath = os.path.join(path, lit_config.site_config_name)
if os.path.exists(cfgpath):
return cfgpath
cfgpath = os.path.join(path, lit_config.config_name)
if os.path.exists(cfgpath):
return cfgpath
def getTestSuite(item, litConfig, cache):
"""getTestSuite(item, litConfig, cache) -> (suite, relative_path)
Find the test suite containing @arg item.
@retval (None, ...) - Indicates no test suite contains @arg item.
@retval (suite, relative_path) - The suite that @arg item is in, and its
relative path inside that suite.
"""
def search1(path):
# Check for a site config or a lit config.
cfgpath = dirContainsTestSuite(path, litConfig)
# If we didn't find a config file, keep looking.
if not cfgpath:
parent,base = os.path.split(path)
if parent == path:
return (None, ())
ts, relative = search(parent)
return (ts, relative + (base,))
# We found a test suite, create a new config for it and load it.
if litConfig.debug:
litConfig.note('loading suite config %r' % cfgpath)
cfg = TestingConfig.fromdefaults(litConfig)
cfg.load_from_path(cfgpath, litConfig)
source_root = os.path.realpath(cfg.test_source_root or path)
exec_root = os.path.realpath(cfg.test_exec_root or path)
return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()
def search(path):
# Check for an already instantiated test suite.
res = cache.get(path)
if res is None:
cache[path] = res = search1(path)
return res
# Canonicalize the path.
item = os.path.realpath(item)
# Skip files and virtual components.
components = []
while not os.path.isdir(item):
parent,base = os.path.split(item)
if parent == item:
return (None, ())
components.append(base)
item = parent
components.reverse()
ts, relative = search(item)
return ts, tuple(relative + tuple(components))
def getLocalConfig(ts, path_in_suite, litConfig, cache):
def search1(path_in_suite):
# Get the parent config.
if not path_in_suite:
parent = ts.config
else:
parent = search(path_in_suite[:-1])
# Check if there is a local configuration file.
source_path = ts.getSourcePath(path_in_suite)
cfgpath = os.path.join(source_path, litConfig.local_config_name)
# If not, just reuse the parent config.
if not os.path.exists(cfgpath):
return parent
# Otherwise, copy the current config and load the local configuration
# file into it.
config = copy.deepcopy(parent)
if litConfig.debug:
litConfig.note('loading local config %r' % cfgpath)
config.load_from_path(cfgpath, litConfig)
return config
def search(path_in_suite):
key = (ts, path_in_suite)
res = cache.get(key)
if res is None:
cache[key] = res = search1(path_in_suite)
return res
return search(path_in_suite)
def getTests(path, litConfig, testSuiteCache, localConfigCache):
# Find the test suite for this input and its relative path.
ts,path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
if ts is None:
litConfig.warning('unable to find test suite for %r' % path)
return (),()
if litConfig.debug:
litConfig.note('resolved input %r to %r::%r' % (path, ts.name,
path_in_suite))
return ts, getTestsInSuite(ts, path_in_suite, litConfig,
testSuiteCache, localConfigCache)
def getTestsInSuite(ts, path_in_suite, litConfig,
testSuiteCache, localConfigCache):
# Check that the source path exists (errors here are reported by the
# caller).
source_path = ts.getSourcePath(path_in_suite)
if not os.path.exists(source_path):
return
# Check if the user named a test directly.
if not os.path.isdir(source_path):
lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
yield Test.Test(ts, path_in_suite, lc)
return
# Otherwise we have a directory to search for tests, start by getting the
# local configuration.
lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)
# Search for tests.
if lc.test_format is not None:
for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
litConfig, lc):
yield res
# Search subdirectories.
for filename in os.listdir(source_path):
# FIXME: This doesn't belong here?
if filename in ('Output', '.svn', '.git') or filename in lc.excludes:
continue
# Ignore non-directories.
file_sourcepath = os.path.join(source_path, filename)
if not os.path.isdir(file_sourcepath):
continue
# Check for nested test suites, first in the execpath in case there is a
# site configuration and then in the source path.
subpath = path_in_suite + (filename,)
file_execpath = ts.getExecPath(subpath)
if dirContainsTestSuite(file_execpath, litConfig):
sub_ts, subpath_in_suite = getTestSuite(file_execpath, litConfig,
testSuiteCache)
elif dirContainsTestSuite(file_sourcepath, litConfig):
sub_ts, subpath_in_suite = getTestSuite(file_sourcepath, litConfig,
testSuiteCache)
else:
sub_ts = None
        # If this directory recursively maps back to the current test suite,
# disregard it (this can happen if the exec root is located inside the
# current test suite, for example).
if sub_ts is ts:
continue
# Otherwise, load from the nested test suite, if present.
if sub_ts is not None:
subiter = getTestsInSuite(sub_ts, subpath_in_suite, litConfig,
testSuiteCache, localConfigCache)
else:
subiter = getTestsInSuite(ts, subpath, litConfig, testSuiteCache,
localConfigCache)
N = 0
for res in subiter:
N += 1
yield res
if sub_ts and not N:
litConfig.warning('test suite %r contained no tests' % sub_ts.name)
def find_tests_for_inputs(lit_config, inputs):
"""
find_tests_for_inputs(lit_config, inputs) -> [Test]
Given a configuration object and a list of input specifiers, find all the
tests to execute.
"""
# Expand '@...' form in inputs.
actual_inputs = []
for input in inputs:
if input.startswith('@'):
f = open(input[1:])
try:
for ln in f:
ln = ln.strip()
if ln:
actual_inputs.append(ln)
finally:
f.close()
else:
actual_inputs.append(input)
# Load the tests from the inputs.
tests = []
test_suite_cache = {}
local_config_cache = {}
for input in actual_inputs:
prev = len(tests)
tests.extend(getTests(input, lit_config,
test_suite_cache, local_config_cache)[1])
if prev == len(tests):
lit_config.warning('input %r contained no tests' % input)
# If there were any errors during test discovery, exit now.
if lit_config.numErrors:
sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
sys.exit(2)
return tests
def load_test_suite(inputs):
import platform
import unittest
from lit.LitTestCase import LitTestCase
# Create the global config object.
litConfig = LitConfig.LitConfig(progname = 'lit',
path = [],
quiet = False,
useValgrind = False,
valgrindLeakCheck = False,
valgrindArgs = [],
noExecute = False,
debug = False,
isWindows = (platform.system()=='Windows'),
params = {})
# Perform test discovery.
run = lit.run.Run(litConfig, find_tests_for_inputs(litConfig, inputs))
# Return a unittest test suite which just runs the tests in order.
return unittest.TestSuite([LitTestCase(test, run)
for test in run.tests])
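# Hedged usage sketch appended to this dump (not upstream): running a
# discovered suite through the stock unittest runner.  The directory name is a
# placeholder.
def _demo_load_test_suite():
    import unittest
    suite = load_test_suite(['/path/to/my/tests'])
    return unittest.TextTestRunner(verbosity=2).run(suite)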
| 9,059 | 34.390625 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/run.py | import os
import threading
import time
import traceback
try:
import Queue as queue
except ImportError:
import queue
try:
import win32api
except ImportError:
win32api = None
try:
import multiprocessing
except ImportError:
multiprocessing = None
import lit.Test
###
# Test Execution Implementation
class LockedValue(object):
def __init__(self, value):
self.lock = threading.Lock()
self._value = value
def _get_value(self):
self.lock.acquire()
try:
return self._value
finally:
self.lock.release()
def _set_value(self, value):
self.lock.acquire()
try:
self._value = value
finally:
self.lock.release()
value = property(_get_value, _set_value)
class TestProvider(object):
def __init__(self, queue_impl, canceled_flag):
self.canceled_flag = canceled_flag
# Create a shared queue to provide the test indices.
self.queue = queue_impl()
def queue_tests(self, tests, num_jobs):
for i in range(len(tests)):
self.queue.put(i)
for i in range(num_jobs):
self.queue.put(None)
def cancel(self):
self.canceled_flag.value = 1
def get(self):
# Check if we are canceled.
if self.canceled_flag.value:
return None
# Otherwise take the next test.
return self.queue.get()
class Tester(object):
def __init__(self, run_instance, provider, consumer):
self.run_instance = run_instance
self.provider = provider
self.consumer = consumer
def run(self):
while True:
item = self.provider.get()
if item is None:
break
self.run_test(item)
self.consumer.task_finished()
def run_test(self, test_index):
test = self.run_instance.tests[test_index]
try:
self.run_instance.execute_test(test)
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print('\nCtrl-C detected, goodbye.')
os.kill(0,9)
self.consumer.update(test_index, test)
class ThreadResultsConsumer(object):
def __init__(self, display):
self.display = display
self.lock = threading.Lock()
def update(self, test_index, test):
self.lock.acquire()
try:
self.display.update(test)
finally:
self.lock.release()
def task_finished(self):
pass
def handle_results(self):
pass
class MultiprocessResultsConsumer(object):
def __init__(self, run, display, num_jobs):
self.run = run
self.display = display
self.num_jobs = num_jobs
self.queue = multiprocessing.Queue()
def update(self, test_index, test):
# This method is called in the child processes, and communicates the
# results to the actual display implementation via an output queue.
self.queue.put((test_index, test.result))
def task_finished(self):
# This method is called in the child processes, and communicates that
# individual tasks are complete.
self.queue.put(None)
def handle_results(self):
# This method is called in the parent, and consumes the results from the
# output queue and dispatches to the actual display. The method will
# complete after each of num_jobs tasks has signalled completion.
completed = 0
while completed != self.num_jobs:
# Wait for a result item.
item = self.queue.get()
if item is None:
completed += 1
continue
# Update the test result in the parent process.
index,result = item
test = self.run.tests[index]
test.result = result
self.display.update(test)
def run_one_tester(run, provider, display):
tester = Tester(run, provider, display)
tester.run()
###
class _Display(object):
def __init__(self, display, provider, maxFailures):
self.display = display
self.provider = provider
self.maxFailures = maxFailures or object()
self.failedCount = 0
def update(self, test):
self.display.update(test)
self.failedCount += (test.result.code == lit.Test.FAIL)
if self.failedCount == self.maxFailures:
self.provider.cancel()
def handleFailures(provider, consumer, maxFailures):
consumer.display = _Display(consumer.display, provider, maxFailures)
class Run(object):
"""
This class represents a concrete, configured testing run.
"""
def __init__(self, lit_config, tests):
self.lit_config = lit_config
self.tests = tests
def execute_test(self, test):
result = None
start_time = time.time()
try:
result = test.config.test_format.execute(test, self.lit_config)
# Support deprecated result from execute() which returned the result
# code and additional output as a tuple.
if isinstance(result, tuple):
code, output = result
result = lit.Test.Result(code, output)
elif not isinstance(result, lit.Test.Result):
raise ValueError("unexpected result from test execution")
except KeyboardInterrupt:
raise
except:
if self.lit_config.debug:
raise
output = 'Exception during script execution:\n'
output += traceback.format_exc()
output += '\n'
result = lit.Test.Result(lit.Test.UNRESOLVED, output)
result.elapsed = time.time() - start_time
test.setResult(result)
def execute_tests(self, display, jobs, max_time=None,
use_processes=False):
"""
execute_tests(display, jobs, [max_time])
Execute each of the tests in the run, using up to jobs number of
parallel tasks, and inform the display of each individual result. The
provided tests should be a subset of the tests available in this run
object.
If max_time is non-None, it should be a time in seconds after which to
stop executing tests.
The display object will have its update method called with each test as
it is completed. The calls are guaranteed to be locked with respect to
one another, but are *not* guaranteed to be called on the same thread as
this method was invoked on.
Upon completion, each test in the run will have its result
computed. Tests which were not actually executed (for any reason) will
be given an UNRESOLVED result.
"""
# Choose the appropriate parallel execution implementation.
consumer = None
if jobs != 1 and use_processes and multiprocessing:
try:
task_impl = multiprocessing.Process
queue_impl = multiprocessing.Queue
canceled_flag = multiprocessing.Value('i', 0)
consumer = MultiprocessResultsConsumer(self, display, jobs)
except:
# multiprocessing fails to initialize with certain OpenBSD and
# FreeBSD Python versions: http://bugs.python.org/issue3770
# Unfortunately the error raised also varies by platform.
self.lit_config.note('failed to initialize multiprocessing')
consumer = None
if not consumer:
task_impl = threading.Thread
queue_impl = queue.Queue
canceled_flag = LockedValue(0)
consumer = ThreadResultsConsumer(display)
# Create the test provider.
provider = TestProvider(queue_impl, canceled_flag)
handleFailures(provider, consumer, self.lit_config.maxFailures)
# Queue the tests outside the main thread because we can't guarantee
# that we can put() all the tests without blocking:
# https://docs.python.org/2/library/multiprocessing.html
# e.g: On Mac OS X, we will hang if we put 2^15 elements in the queue
# without taking any out.
queuer = task_impl(target=provider.queue_tests, args=(self.tests, jobs))
queuer.start()
# Install a console-control signal handler on Windows.
if win32api is not None:
def console_ctrl_handler(type):
provider.cancel()
return True
win32api.SetConsoleCtrlHandler(console_ctrl_handler, True)
# Install a timeout handler, if requested.
if max_time is not None:
def timeout_handler():
provider.cancel()
timeout_timer = threading.Timer(max_time, timeout_handler)
timeout_timer.start()
# If not using multiple tasks, just run the tests directly.
if jobs == 1:
run_one_tester(self, provider, consumer)
else:
# Otherwise, execute the tests in parallel
self._execute_tests_in_parallel(task_impl, provider, consumer, jobs)
queuer.join()
# Cancel the timeout handler.
if max_time is not None:
timeout_timer.cancel()
# Update results for any tests which weren't run.
for test in self.tests:
if test.result is None:
test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
def _execute_tests_in_parallel(self, task_impl, provider, consumer, jobs):
# Start all of the tasks.
tasks = [task_impl(target=run_one_tester,
args=(self, provider, consumer))
for i in range(jobs)]
for t in tasks:
t.start()
# Allow the consumer to handle results, if necessary.
consumer.handle_results()
# Wait for all the tasks to complete.
for t in tasks:
t.join()
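# Minimal sketch appended for illustration (not upstream) of driving a Run
# directly.  The no-op display below satisfies the only interface that
# execute_tests() needs from a display: an update(test) method.  lit_config
# and tests are assumed to come from lit.discovery as shown elsewhere.
class _NullDisplay(object):
    def update(self, test):
        pass
def _demo_execute_tests(lit_config, tests):
    run = Run(lit_config, tests)
    run.execute_tests(_NullDisplay(), jobs=2)
    return [t.result.code for t in run.tests]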
| 10,076 | 31.931373 | 80 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/util.py | import errno
import itertools
import math
import os
import platform
import signal
import subprocess
import sys
import threading
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except AttributeError: # 'str' object has no attribute 'decode'.
return str(bytes)
except UnicodeError:
return str(bytes)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
# With more than 32 processes, process creation often fails with
# "Too many open files". FIXME: Check if there's a better fix.
return min(ncpus, 32)
return 1 # Default
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output. Raises a CalledProcessError if the command
exits with a non-zero status."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
out = convert_string(out)
err = convert_string(err)
if p.returncode != 0:
raise subprocess.CalledProcessError(cmd=args,
returncode=p.returncode,
output="{}\n{}".format(out, err))
return out
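# Example added for illustration (not upstream): capture() returns the
# command's stdout as text.  The echo command is an assumption about the host.
def _demo_capture():
    return capture(['echo', 'hello'])   # -> 'hello\n' on most Unix systems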
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return p
return None
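# Illustrative usage of which() (results depend on the local PATH):
#   which('ls')                  # e.g. '/bin/ls' on most Unix systems
#   which('no-such-tool')        # None when the command cannot be found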
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title = 'Items'):
items.sort(key = lambda item: item[1])
maxValue = max([v for _,v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name,v in items:
bin = min(int(N * v/maxValue), N-1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print('\nSlowest %s:' % title)
print(hr)
for name,value in items[-20:]:
print('%.2fs: %s' % (value, name))
print('\n%s Times:' % title)
print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits*2 + 1)))
print(hr)
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
'*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
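# Worked example of the bar-height selection above (values are hypothetical):
# for maxValue = 9.5 the loop tries barH = 50, 20, 25, 10, then 5, 2, 2.5, 1,
# drops power to -1 and settles on barH = 0.5, which gives N = 19 bins.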
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""
Execute command ``command`` (list of arguments or string)
with
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use None to pass
      no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out,err = p.communicate(input=input)
exitCode = p.wait()
finally:
if timerObject != None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
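# Illustrative usage of executeCommand() (command and timeout are hypothetical):
#   try:
#       out, err, rc = executeCommand(['sleep', '10'], timeout=2)
#   except ExecuteCommandTimeoutException as e:
#       print('timed out: %s' % e.msg)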
def usePlatformSdkOnDarwin(config, lit_config):
# On Darwin, support relocatable SDKs by providing Clang with a
# default system root path.
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-path', '--sdk', 'macosx'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
sdk_path = out
lit_config.note('using SDKROOT: %r' % sdk_path)
config.environment['SDKROOT'] = sdk_path
def killProcessAndChildren(pid):
"""
This function kills a process with ``pid`` and all its
running children (recursively). It is currently implemented
using the psutil module which provides a simple platform
neutral implementation.
TODO: Reimplement this without using psutil so we can
remove our dependency on it.
"""
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
| 9,754 | 32.180272 | 83 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/ShUtil.py | from __future__ import absolute_import
import itertools
import lit.util
from lit.ShCommands import Command, Pipeline, Seq
class ShLexer:
def __init__(self, data, win32Escapes = False):
self.data = data
self.pos = 0
self.end = len(data)
self.win32Escapes = win32Escapes
def eat(self):
c = self.data[self.pos]
self.pos += 1
return c
def look(self):
return self.data[self.pos]
def maybe_eat(self, c):
"""
maybe_eat(c) - Consume the character c if it is the next character,
returning True if a character was consumed. """
if self.data[self.pos] == c:
self.pos += 1
return True
return False
def lex_arg_fast(self, c):
# Get the leading whitespace free section.
chunk = self.data[self.pos - 1:].split(None, 1)[0]
# If it has special characters, the fast path failed.
if ('|' in chunk or '&' in chunk or
'<' in chunk or '>' in chunk or
"'" in chunk or '"' in chunk or
';' in chunk or '\\' in chunk):
return None
self.pos = self.pos - 1 + len(chunk)
return chunk
def lex_arg_slow(self, c):
if c in "'\"":
str = self.lex_arg_quoted(c)
else:
str = c
while self.pos != self.end:
c = self.look()
if c.isspace() or c in "|&;":
break
elif c in '><':
# This is an annoying case; we treat '2>' as a single token so
# we don't have to track whitespace tokens.
# If the parse string isn't an integer, do the usual thing.
if not str.isdigit():
break
# Otherwise, lex the operator and convert to a redirection
# token.
num = int(str)
tok = self.lex_one_token()
assert isinstance(tok, tuple) and len(tok) == 1
return (tok[0], num)
elif c == '"':
self.eat()
str += self.lex_arg_quoted('"')
elif c == "'":
self.eat()
str += self.lex_arg_quoted("'")
elif not self.win32Escapes and c == '\\':
# Outside of a string, '\\' escapes everything.
self.eat()
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
str += self.eat()
else:
str += self.eat()
return str
def lex_arg_quoted(self, delim):
str = ''
while self.pos != self.end:
c = self.eat()
if c == delim:
return str
elif c == '\\' and delim == '"':
# Inside a '"' quoted string, '\\' only escapes the quote
# character and backslash, otherwise it is preserved.
if self.pos == self.end:
lit.util.warning(
"escape at end of quoted argument in: %r" % self.data)
return str
c = self.eat()
if c == '"': #
str += '"'
elif c == '\\':
str += '\\'
else:
str += '\\' + c
else:
str += c
lit.util.warning("missing quote character in %r" % self.data)
return str
def lex_arg_checked(self, c):
pos = self.pos
res = self.lex_arg_fast(c)
end = self.pos
self.pos = pos
reference = self.lex_arg_slow(c)
if res is not None:
if res != reference:
raise ValueError("Fast path failure: %r != %r" % (
res, reference))
if self.pos != end:
raise ValueError("Fast path failure: %r != %r" % (
self.pos, end))
return reference
def lex_arg(self, c):
return self.lex_arg_fast(c) or self.lex_arg_slow(c)
def lex_one_token(self):
"""
lex_one_token - Lex a single 'sh' token. """
c = self.eat()
if c == ';':
return (c,)
if c == '|':
if self.maybe_eat('|'):
return ('||',)
return (c,)
if c == '&':
if self.maybe_eat('&'):
return ('&&',)
if self.maybe_eat('>'):
return ('&>',)
return (c,)
if c == '>':
if self.maybe_eat('&'):
return ('>&',)
if self.maybe_eat('>'):
return ('>>',)
return (c,)
if c == '<':
if self.maybe_eat('&'):
return ('<&',)
if self.maybe_eat('>'):
return ('<<',)
return (c,)
return self.lex_arg(c)
def lex(self):
while self.pos != self.end:
if self.look().isspace():
self.eat()
else:
yield self.lex_one_token()
###
class ShParser:
def __init__(self, data, win32Escapes = False, pipefail = False):
self.data = data
self.pipefail = pipefail
self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()
def lex(self):
for item in self.tokens:
return item
return None
def look(self):
token = self.lex()
if token is not None:
self.tokens = itertools.chain([token], self.tokens)
return token
def parse_command(self):
tok = self.lex()
if not tok:
raise ValueError("empty command!")
if isinstance(tok, tuple):
raise ValueError("syntax error near unexpected token %r" % tok[0])
args = [tok]
redirects = []
while 1:
tok = self.look()
# EOF?
if tok is None:
break
# If this is an argument, just add it to the current command.
if isinstance(tok, str):
args.append(self.lex())
continue
# Otherwise see if it is a terminator.
assert isinstance(tok, tuple)
if tok[0] in ('|',';','&','||','&&'):
break
# Otherwise it must be a redirection.
op = self.lex()
arg = self.lex()
if not arg:
raise ValueError("syntax error near token %r" % op[0])
redirects.append((op, arg))
return Command(args, redirects)
def parse_pipeline(self):
negate = False
commands = [self.parse_command()]
while self.look() == ('|',):
self.lex()
commands.append(self.parse_command())
return Pipeline(commands, negate, self.pipefail)
def parse(self):
lhs = self.parse_pipeline()
while self.look():
operator = self.lex()
assert isinstance(operator, tuple) and len(operator) == 1
if not self.look():
raise ValueError(
"missing argument to operator %r" % operator[0])
# FIXME: Operator precedence!!
lhs = Seq(lhs, operator[0], self.parse_pipeline())
return lhs
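# Illustrative example of the lexer/parser above: parsing a simple pipeline with
#   ShParser('echo hello | wc -l').parse()
# yields roughly
#   Pipeline([Command(['echo', 'hello'], []), Command(['wc', '-l'], [])],
#            negate=False, pipefail=False)
# (the exact repr depends on lit.ShCommands).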
| 7,565 | 29.508065 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/__init__.py | """'lit' Testing Tool"""
__author__ = 'Daniel Dunbar'
__email__ = '[email protected]'
__versioninfo__ = (0, 6, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []
from .main import main
| 222 | 19.272727 | 63 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/formats/base.py | from __future__ import absolute_import
import os
import lit.Test
import lit.util
class TestFormat(object):
pass
###
class FileBasedTest(TestFormat):
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if (filename.startswith('.') or
filename in localConfig.excludes):
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
base,ext = os.path.splitext(filename)
if ext in localConfig.suffixes:
yield lit.Test.Test(testSuite, path_in_suite + (filename,),
localConfig)
###
import re
import tempfile
class OneCommandPerFileTest(TestFormat):
# FIXME: Refactor into generic test for running some command on a directory
# of inputs.
def __init__(self, command, dir, recursive=False,
pattern=".*", useTempInput=False):
if isinstance(command, str):
self.command = [command]
else:
self.command = list(command)
if dir is not None:
dir = str(dir)
self.dir = dir
self.recursive = bool(recursive)
self.pattern = re.compile(pattern)
self.useTempInput = useTempInput
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
dir = self.dir
if dir is None:
dir = testSuite.getSourcePath(path_in_suite)
for dirname,subdirs,filenames in os.walk(dir):
if not self.recursive:
subdirs[:] = []
subdirs[:] = [d for d in subdirs
if (d != '.svn' and
d not in localConfig.excludes)]
for filename in filenames:
if (filename.startswith('.') or
not self.pattern.match(filename) or
filename in localConfig.excludes):
continue
path = os.path.join(dirname,filename)
suffix = path[len(dir):]
if suffix.startswith(os.sep):
suffix = suffix[1:]
test = lit.Test.Test(
testSuite, path_in_suite + tuple(suffix.split(os.sep)),
localConfig)
# FIXME: Hack?
test.source_path = path
yield test
def createTempInput(self, tmp, test):
raise NotImplementedError('This is an abstract method.')
def execute(self, test, litConfig):
if test.config.unsupported:
return (lit.Test.UNSUPPORTED, 'Test is unsupported')
cmd = list(self.command)
# If using temp input, create a temporary file and hand it to the
# subclass.
if self.useTempInput:
tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
self.createTempInput(tmp, test)
tmp.flush()
cmd.append(tmp.name)
elif hasattr(test, 'source_path'):
cmd.append(test.source_path)
else:
cmd.append(test.getSourcePath())
out, err, exitCode = lit.util.executeCommand(cmd)
diags = out + err
if not exitCode and not diags.strip():
return lit.Test.PASS,''
# Try to include some useful information.
report = """Command: %s\n""" % ' '.join(["'%s'" % a
for a in cmd])
if self.useTempInput:
report += """Temporary File: %s\n""" % tmp.name
report += "--\n%s--\n""" % open(tmp.name).read()
report += """Output:\n--\n%s--""" % diags
return lit.Test.FAIL, report
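# Illustrative subclass sketch (MyCheck, 'my-tool' and the pattern are hypothetical):
#   class MyCheck(OneCommandPerFileTest):
#       def createTempInput(self, tmp, test):
#           tmp.write('// checking %s\n' % test.getSourcePath())
#   fmt = MyCheck(command=['my-tool', '-verify'], dir=None,
#                 pattern=r'.*\.cpp$', useTempInput=True)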
| 3,947 | 32.457627 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/formats/shtest.py | from __future__ import absolute_import
import lit.TestRunner
from .base import FileBasedTest
class ShTest(FileBasedTest):
def __init__(self, execute_external = False):
self.execute_external = execute_external
def execute(self, test, litConfig):
return lit.TestRunner.executeShTest(test, litConfig,
self.execute_external)
| 392 | 29.230769 | 66 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/formats/__init__.py | from lit.formats.base import ( # noqa: F401
TestFormat,
FileBasedTest,
OneCommandPerFileTest
)
from lit.formats.googletest import GoogleTest # noqa: F401
from lit.formats.shtest import ShTest # noqa: F401
| 221 | 23.666667 | 59 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lit/lit/formats/googletest.py | from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleTest(TestFormat):
def __init__(self, test_sub_dir, test_suffix):
self.test_sub_dir = os.path.normcase(str(test_sub_dir)).split(';')
self.test_suffix = str(test_suffix)
# On Windows, assume tests will also end in '.exe'.
if kIsWindows:
self.test_suffix += '.exe'
def getGTestTests(self, path, litConfig, localConfig):
"""getGTestTests(path) - [name]
Return the tests available in gtest executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
try:
lines = lit.util.capture([path, '--gtest_list_tests'],
env=localConfig.environment)
if kIsWindows:
lines = lines.replace('\r', '')
lines = lines.split('\n')
except Exception as exc:
out = exc.output if isinstance(exc, subprocess.CalledProcessError) else ''
litConfig.warning("unable to discover google-tests in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], out))
raise StopIteration
nested_tests = []
for ln in lines:
# The test name list includes trailing comments beginning with
# a '#' on some lines, so skip those. We don't support test names
# that use escaping to embed '#' into their name as the names come
# from C++ class and method names where such things are hard and
# uninteresting to support.
ln = ln.split('#', 1)[0].rstrip()
if not ln.lstrip():
continue
if 'Running main() from gtest_main.cc' in ln:
# Upstream googletest prints this to stdout prior to running
# tests. LLVM removed that print statement in r61540, but we
# handle it here in case upstream googletest is being used.
continue
index = 0
while ln[index*2:index*2+2] == ' ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
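    # Illustrative example of the parsing above: given `--gtest_list_tests`
    # output such as
    #   FooTest.
    #     DoesBar
    #     DISABLED_DoesBaz
    # this generator yields only 'FooTest.DoesBar'; the DISABLED_ test and any
    # trailing '# ...' comments are skipped.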
# Note: path_in_suite should not include the executable name.
def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
litConfig, localConfig):
if not execpath.endswith(self.test_suffix):
return
(dirname, basename) = os.path.split(execpath)
# Discover the tests in this executable.
for testname in self.getGTestTests(execpath, litConfig, localConfig):
testPath = path_in_suite + (basename, testname)
yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
filepath = os.path.join(source_path, filename)
if os.path.isdir(filepath):
# Iterate over executables in a directory.
if not os.path.normcase(filename) in self.test_sub_dir:
continue
dirpath_in_suite = path_in_suite + (filename, )
for subfilename in os.listdir(filepath):
execpath = os.path.join(filepath, subfilename)
for test in self.getTestsInExecutable(
testSuite, dirpath_in_suite, execpath,
litConfig, localConfig):
yield test
elif ('.' in self.test_sub_dir):
for test in self.getTestsInExecutable(
testSuite, path_in_suite, filepath,
litConfig, localConfig):
yield test
def execute(self, test, litConfig):
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
# some '/'s.
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--gtest_filter=' + testName]
if litConfig.useValgrind:
cmd = litConfig.valgrindArgs + cmd
if litConfig.noExecute:
return lit.Test.PASS, ''
try:
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime)
)
if exitCode:
return lit.Test.FAIL, out + err
passing_test_line = '[ PASSED ] 1 test.'
if passing_test_line not in out:
msg = ('Unable to find %r in gtest output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS,''
| 5,878 | 39.267123 | 93 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lint/common_lint.py | #!/usr/bin/python
#
# Common lint functions applicable to multiple types of files.
import re
def VerifyLineLength(filename, lines, max_length):
"""Checks to make sure the file has no lines with lines exceeding the length
limit.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
max_length: maximum acceptable line length as number
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
line_num = 1
for line in lines:
length = len(line.rstrip('\n'))
if length > max_length:
lint.append((filename, line_num,
'Line exceeds %d chars (%d)' % (max_length, length)))
line_num += 1
return lint
def VerifyTabs(filename, lines):
"""Checks to make sure the file has no tab characters.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
    A list of tuples with format [(filename, line number, msg), ...] with any
    violations found.
"""
lint = []
tab_re = re.compile(r'\t')
line_num = 1
for line in lines:
    # Use search() so tabs anywhere on the line are caught.
    if tab_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Tab found instead of whitespace'))
line_num += 1
return lint
def VerifyTrailingWhitespace(filename, lines):
"""Checks to make sure the file has no lines with trailing whitespace.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
trailing_whitespace_re = re.compile(r'\s+$')
line_num = 1
for line in lines:
    # Use search() so trailing whitespace after content is caught too.
    if trailing_whitespace_re.search(line.rstrip('\n')):
lint.append((filename, line_num, 'Trailing whitespace'))
line_num += 1
return lint
class BaseLint:
def RunOnFile(filename, lines):
raise Exception('RunOnFile() unimplemented')
def RunLintOverAllFiles(linter, filenames):
"""Runs linter over the contents of all files.
Args:
lint: subclass of BaseLint, implementing RunOnFile()
filenames: list of all files whose contents will be linted
Returns:
A list of tuples with format [(filename, line number, msg), ...] with any
violations found.
"""
lint = []
for filename in filenames:
file = open(filename, 'r')
if not file:
      print 'Could not open %s' % filename
continue
lines = file.readlines()
lint.extend(linter.RunOnFile(filename, lines))
return lint
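# Illustrative usage (MyLint and the file list are hypothetical):
#   class MyLint(BaseLint):
#     def RunOnFile(self, filename, lines):
#       return VerifyTrailingWhitespace(filename, lines)
#   for filename, line_num, msg in RunLintOverAllFiles(MyLint(), ['foo.cpp']):
#     print '%s:%d:%s' % (filename, line_num, msg)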
| 2,589 | 25.428571 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lint/cpp_lint.py | #!/usr/bin/python
#
# Checks C++ files to make sure they conform to LLVM standards, as specified in
# http://llvm.org/docs/CodingStandards.html .
#
# TODO: add unittests for the verifier functions:
# http://docs.python.org/library/unittest.html .
import common_lint
import re
import sys
def VerifyIncludes(filename, lines):
"""Makes sure the #includes are in proper order and no disallows files are
#included.
Args:
filename: the file under consideration as string
lines: contents of the file as string array
"""
lint = []
include_gtest_re = re.compile(r'^#include "gtest/(.*)"')
include_llvm_re = re.compile(r'^#include "llvm/(.*)"')
include_support_re = re.compile(r'^#include "(Support/.*)"')
include_config_re = re.compile(r'^#include "(Config/.*)"')
include_system_re = re.compile(r'^#include <(.*)>')
DISALLOWED_SYSTEM_HEADERS = ['iostream']
line_num = 1
prev_config_header = None
prev_system_header = None
for line in lines:
# TODO: implement private headers
# TODO: implement gtest headers
# TODO: implement top-level llvm/* headers
# TODO: implement llvm/Support/* headers
# Process Config/* headers
config_header = include_config_re.match(line)
if config_header:
      curr_config_header = config_header.group(1)
      if prev_config_header:
        if prev_config_header > curr_config_header:
          lint.append((filename, line_num,
                       'Config headers not in order: "%s" before "%s"' % (
                         prev_config_header, curr_config_header)))
      # Remember this header so the next Config/* include is compared against it.
      prev_config_header = curr_config_header
# Process system headers
system_header = include_system_re.match(line)
if system_header:
curr_system_header = system_header.group(1)
# Is it blacklisted?
if curr_system_header in DISALLOWED_SYSTEM_HEADERS:
lint.append((filename, line_num,
'Disallowed system header: <%s>' % curr_system_header))
elif prev_system_header:
# Make sure system headers are alphabetized amongst themselves
if prev_system_header > curr_system_header:
lint.append((filename, line_num,
'System headers not in order: <%s> before <%s>' % (
prev_system_header, curr_system_header)))
prev_system_header = curr_system_header
line_num += 1
return lint
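# Illustrative example (hypothetical file contents): a file containing
#   #include <vector>
#   #include <cstdio>
# is flagged with 'System headers not in order: <vector> before <cstdio>', and
# '#include <iostream>' anywhere is reported as a disallowed system header.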
class CppLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
lint = []
lint.extend(VerifyIncludes(filename, lines))
lint.extend(common_lint.VerifyLineLength(filename, lines,
CppLint.MAX_LINE_LENGTH))
lint.extend(common_lint.VerifyTabs(filename, lines))
lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
return lint
def CppLintMain(filenames):
all_lint = common_lint.RunLintOverAllFiles(CppLint(), filenames)
for lint in all_lint:
print '%s:%d:%s' % (lint[0], lint[1], lint[2])
return 0
if __name__ == '__main__':
sys.exit(CppLintMain(sys.argv[1:]))
| 3,031 | 30.915789 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/lint/generic_lint.py | #!/usr/bin/python
#
# Checks files to make sure they conform to LLVM standards which can be applied
# to any programming language: at present, line length and trailing whitespace.
import common_lint
import sys
class GenericCodeLint(common_lint.BaseLint):
MAX_LINE_LENGTH = 80
def RunOnFile(self, filename, lines):
    # Collect and return the findings so RunLintOverAllFiles can aggregate them.
    lint = []
    lint.extend(common_lint.VerifyLineLength(filename, lines,
                                             GenericCodeLint.MAX_LINE_LENGTH))
    lint.extend(common_lint.VerifyTrailingWhitespace(filename, lines))
    return lint
def GenericCodeLintMain(filenames):
common_lint.RunLintOverAllFiles(GenericCodeLint(), filenames)
return 0
if __name__ == '__main__':
sys.exit(GenericCodeLintMain(sys.argv[1:]))
| 683 | 26.36 | 79 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/abtest/mark_armfns.py | #!/usr/bin/env python
#
# Mark functions in an arm assembly file. This is done by surrounding the
# function with "# -- Begin Name" and "# -- End Name"
# (This script is designed for arm ios assembly syntax)
import sys
import re
inp = open(sys.argv[1], "r").readlines()
# First pass
linenum = 0
INVALID=-100
last_align = INVALID
last_code = INVALID
last_globl = INVALID
begin = INVALID
begins = dict()
for line in inp:
linenum += 1
if ".align" in line:
last_align = linenum
if ".code" in line:
last_code = linenum
if ".globl" in line:
last_globl = linenum
m = re.search(r'.thumb_func\s+(\w+)', line)
if m:
funcname = m.group(1)
if last_code == last_align+1 and (linenum - last_code) < 4:
begin = last_align
if last_globl+1 == last_align:
begin = last_globl
if line == "\n" and begin != INVALID:
end = linenum
triple = (funcname, begin, end)
begins[begin] = triple
begin = INVALID
# Second pass: Mark
out = open(sys.argv[1], "w")
in_func = None
linenum = 0
for line in inp:
linenum += 1
if in_func is not None and linenum == end:
out.write("# -- End %s\n" % in_func)
in_func = None
triple = begins.get(linenum)
if triple is not None:
in_func, begin, end = triple
out.write("# -- Begin %s\n" % in_func)
out.write(line)
| 1,415 | 24.745455 | 73 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/abtest/abtest.py | #!/usr/bin/env python
#
# Given a previous good compile, narrow down miscompiles.
# Expects two directories named "before" and "after" each containing a set of
# assembly or object files where the "after" version is assumed to be broken.
# You also have to provide a script called "link_test". It is called with a list
# of files which should be linked together and the result tested. "link_test"
# should return with exit code 0 if the linking and testing succeeded.
#
# abtest.py operates by taking all files from the "before" directory and
# in each step replacing one of them with a file from the "bad" directory.
#
# Additionally you can perform the same steps with a single .s file. In this
# mode functions are identified by "# -- Begin FunctionName" and
# "# -- End FunctionName" markers. The abtest.py then takes all functions from
# the file in the "before" directory and replaces one function with the
# corresponding function from the "bad" file in each step.
#
# Example usage to identify miscompiled files:
# 1. Create a link_test script, make it executable. Simple Example:
# clang "$@" -o /tmp/test && /tmp/test || echo "PROBLEM"
# 2. Run the script to figure out which files are miscompiled:
# > ./abtest.py
# somefile.s: ok
# someotherfile.s: skipped: same content
# anotherfile.s: failed: './link_test' exitcode != 0
# ...
# Example usage to identify miscompiled functions inside a file:
# 3. First you have to mark begin and end of the functions.
# The script comes with some examples called mark_xxx.py.
# Unfortunately this is very specific to your environment and it is likely
# that you have to write a custom version for your environment.
# > for i in before/*.s after/*.s; do mark_xxx.py $i; done
# 4. Run the tests on a single file (assuming before/file.s and
# after/file.s exist)
# > ./abtest.py file.s
# funcname1 [0/XX]: ok
# funcname2 [1/XX]: ok
# funcname3 [2/XX]: skipped: same content
# funcname4 [3/XX]: failed: './link_test' exitcode != 0
# ...
from fnmatch import filter
from sys import stderr
import argparse
import filecmp
import os
import subprocess
import sys
LINKTEST="./link_test"
ESCAPE="\033[%sm"
BOLD=ESCAPE % "1"
RED=ESCAPE % "31"
NORMAL=ESCAPE % "0"
FAILED=RED+"failed"+NORMAL
def find(dir, file_filter=None):
files = [walkdir[0]+"/"+file for walkdir in os.walk(dir) for file in walkdir[2]]
if file_filter != None:
files = filter(files, file_filter)
return files
def error(message):
stderr.write("Error: %s\n" % (message,))
def warn(message):
stderr.write("Warning: %s\n" % (message,))
def extract_functions(file):
functions = []
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
text = line
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
else:
text += line
functions.append( (in_function, text) )
in_function = None
elif in_function != None:
text += line
return functions
def replace_function(file, function, replacement, dest):
out = open(dest, "w")
skip = False
found = False
in_function = None
for line in open(file):
if line.startswith("# -- Begin "):
if in_function != None:
warn("Missing end of function %s" % (in_function,))
funcname = line[12:-1]
in_function = funcname
if in_function == function:
out.write(replacement)
skip = True
elif line.startswith("# -- End "):
function_name = line[10:-1]
if in_function != function_name:
warn("End %s does not match begin %s" % (function_name, in_function))
in_function = None
if skip:
skip = False
continue
if not skip:
out.write(line)
def announce_test(name):
stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
stderr.flush()
def announce_result(result, info):
stderr.write(result)
if info != "":
stderr.write(": %s" % info)
stderr.write("\n")
stderr.flush()
def testrun(files):
linkline="%s %s" % (LINKTEST, " ".join(files),)
res = subprocess.call(linkline, shell=True)
if res != 0:
announce_result(FAILED, "'%s' exitcode != 0" % LINKTEST)
return False
else:
announce_result("ok", "")
return True
def check_files():
"""Check files mode"""
for i in range(0, len(NO_PREFIX)):
f = NO_PREFIX[i]
b=baddir+"/"+f
if b not in BAD_FILES:
warn("There is no corresponding file to '%s' in %s" \
% (gooddir+"/"+f, baddir))
continue
announce_test(f + " [%s/%s]" % (i+1, len(NO_PREFIX)))
# combine files (everything from good except f)
testfiles=[]
skip=False
for c in NO_PREFIX:
badfile = baddir+"/"+c
goodfile = gooddir+"/"+c
if c == f:
testfiles.append(badfile)
if filecmp.cmp(goodfile, badfile):
announce_result("skipped", "same content")
skip = True
break
else:
testfiles.append(goodfile)
if skip:
continue
testrun(testfiles)
def check_functions_in_file(base, goodfile, badfile):
functions = extract_functions(goodfile)
if len(functions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (goodfile,))
return
badfunctions = dict(extract_functions(badfile))
if len(functions) == 0:
warn("Couldn't find any function in %s, missing annotations?" % (badfile,))
return
COMBINED="/tmp/combined.s"
i = 0
for (func,func_text) in functions:
announce_test(func + " [%s/%s]" % (i+1, len(functions)))
i+=1
if func not in badfunctions:
warn("Function '%s' missing from bad file" % func)
continue
if badfunctions[func] == func_text:
announce_result("skipped", "same content")
continue
replace_function(goodfile, func, badfunctions[func], COMBINED)
testfiles=[]
for c in NO_PREFIX:
if c == base:
testfiles.append(COMBINED)
continue
testfiles.append(gooddir + "/" + c)
testrun(testfiles)
parser = argparse.ArgumentParser()
parser.add_argument('--a', dest='dir_a', default='before')
parser.add_argument('--b', dest='dir_b', default='after')
parser.add_argument('--insane', help='Skip sanity check', action='store_true')
parser.add_argument('file', metavar='file', nargs='?')
config = parser.parse_args()
gooddir=config.dir_a
baddir=config.dir_b
BAD_FILES=find(baddir, "*")
GOOD_FILES=find(gooddir, "*")
NO_PREFIX=sorted([x[len(gooddir)+1:] for x in GOOD_FILES])
# "Checking whether build environment is sane ..."
if not config.insane:
announce_test("sanity check")
if not os.access(LINKTEST, os.X_OK):
error("Expect '%s' to be present and executable" % (LINKTEST,))
exit(1)
res = testrun(GOOD_FILES)
if not res:
# "build environment is grinning and holding a spatula. Guess not."
linkline="%s %s" % (LINKTEST, " ".join(GOOD_FILES),)
stderr.write("\n%s\n\n" % linkline)
stderr.write("Returned with exitcode != 0\n")
sys.exit(1)
if config.file is not None:
# File exchange mode
goodfile = gooddir+"/"+config.file
badfile = baddir+"/"+config.file
check_functions_in_file(config.file, goodfile, badfile)
else:
# Function exchange mode
check_files()
| 8,132 | 33.608511 | 85 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/abtest/mark_aarch64fns.py | #!/usr/bin/env python
#
# Mark functions in an arm assembly file. This is done by surrounding the
# function with "# -- Begin Name" and "# -- End Name"
# (This script is designed for aarch64 ios assembly syntax)
import sys
import re
inp = open(sys.argv[1], "r").readlines()
# First pass
linenum = 0
INVALID=-100
last_align = INVALID
last_code = INVALID
last_globl = INVALID
last_globl_name = None
begin = INVALID
in_text_section = False
begins = dict()
for line in inp:
linenum += 1
if re.search(r'.section\s+__TEXT,__text,regular,pure_instructions', line):
in_text_section = True
continue
elif ".section" in line:
in_text_section = False
continue
if not in_text_section:
continue
if ".align" in line:
last_align = linenum
gl = re.search(r'.globl\s+(\w+)', line)
if gl:
last_globl_name = gl.group(1)
last_globl = linenum
m = re.search(r'^(\w+):', line)
if m and begin == INVALID:
labelname = m.group(1)
if last_globl+2 == linenum and last_globl_name == labelname:
begin = last_globl
funcname = labelname
if line == "\n" and begin != INVALID:
end = linenum
triple = (funcname, begin, end)
begins[begin] = triple
begin = INVALID
# Second pass: Mark
out = open(sys.argv[1], "w")
in_func = None
linenum = 0
for line in inp:
linenum += 1
if in_func is not None and linenum == end:
out.write("# -- End %s\n" % in_func)
in_func = None
triple = begins.get(linenum)
if triple is not None:
in_func, begin, end = triple
out.write("# -- Begin %s\n" % in_func)
out.write(line)
| 1,700 | 24.772727 | 78 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/release/findRegressions-nightly.py | #!/usr/bin/env python
import re, string, sys, os, time
DEBUG = 0
testDirName = 'llvm-test'
test = ['compile', 'llc', 'jit', 'cbe']
exectime = ['llc-time', 'jit-time', 'cbe-time',]
comptime = ['llc', 'jit-comptime', 'compile']
(tp, exp) = ('compileTime_', 'executeTime_')
def parse(file):
f=open(file, 'r')
d = f.read()
#Cleanup weird stuff
d = re.sub(r',\d+:\d','', d)
r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)
test = {}
fname = ''
for t in r:
if DEBUG:
print t
if t[0] == 'PASS' or t[0] == 'FAIL' :
tmp = t[2].split(testDirName)
if DEBUG:
print tmp
if len(tmp) == 2:
fname = tmp[1].strip('\r\n')
else:
fname = tmp[0].strip('\r\n')
if not test.has_key(fname) :
test[fname] = {}
for k in test:
test[fname][k] = 'NA'
test[fname][t[1]] = t[0]
if DEBUG:
print test[fname][t[1]]
else :
try:
n = t[0].split('RESULT-')[1]
if DEBUG:
print n;
if n == 'llc' or n == 'jit-comptime' or n == 'compile':
test[fname][tp + n] = float(t[2].split(' ')[2])
if DEBUG:
print test[fname][tp + n]
elif n.endswith('-time') :
test[fname][exp + n] = float(t[2].strip('\r\n'))
if DEBUG:
print test[fname][exp + n]
else :
print "ERROR!"
sys.exit(1)
except:
continue
return test
# Diff results and look for regressions.
def diffResults(d_old, d_new):
for t in sorted(d_old.keys()) :
if DEBUG:
print t
if d_new.has_key(t) :
# Check if the test passed or failed.
for x in test:
if d_old[t].has_key(x):
if d_new[t].has_key(x):
if d_old[t][x] == 'PASS':
if d_new[t][x] != 'PASS':
print t + " *** REGRESSION (" + x + ")\n"
else:
if d_new[t][x] == 'PASS':
print t + " * NEW PASS (" + x + ")\n"
else :
print t + "*** REGRESSION (" + x + ")\n"
# For execution time, if there is no result, its a fail.
for x in exectime:
if d_old[t].has_key(tp + x):
if not d_new[t].has_key(tp + x):
print t + " *** REGRESSION (" + tp + x + ")\n"
else :
if d_new[t].has_key(tp + x):
print t + " * NEW PASS (" + tp + x + ")\n"
for x in comptime:
if d_old[t].has_key(exp + x):
if not d_new[t].has_key(exp + x):
print t + " *** REGRESSION (" + exp + x + ")\n"
else :
if d_new[t].has_key(exp + x):
print t + " * NEW PASS (" + exp + x + ")\n"
else :
print t + ": Removed from test-suite.\n"
#Main
if len(sys.argv) < 3 :
print 'Usage:', sys.argv[0], \
'<old log> <new log>'
sys.exit(-1)
d_old = parse(sys.argv[1])
d_new = parse(sys.argv[2])
diffResults(d_old, d_new)
| 3,193 | 23.381679 | 72 | py |
LowFat | LowFat-master/llvm-4.0.0.src/utils/release/findRegressions-simple.py | #!/usr/bin/env python
import re, string, sys, os, time, math
DEBUG = 0
(tp, exp) = ('compile', 'exec')
def parse(file):
f = open(file, 'r')
d = f.read()
# Cleanup weird stuff
d = re.sub(r',\d+:\d', '', d)
r = re.findall(r'TEST-(PASS|FAIL|RESULT.*?):\s+(.*?)\s+(.*?)\r*\n', d)
test = {}
fname = ''
for t in r:
if DEBUG:
print t
if t[0] == 'PASS' or t[0] == 'FAIL' :
tmp = t[2].split('llvm-test/')
if DEBUG:
print tmp
if len(tmp) == 2:
fname = tmp[1].strip('\r\n')
else:
fname = tmp[0].strip('\r\n')
if not test.has_key(fname):
test[fname] = {}
test[fname][t[1] + ' state'] = t[0]
test[fname][t[1] + ' time'] = float('nan')
else :
try:
n = t[0].split('RESULT-')[1]
if DEBUG:
print "n == ", n;
if n == 'compile-success':
test[fname]['compile time'] = float(t[2].split('program')[1].strip('\r\n'))
elif n == 'exec-success':
test[fname]['exec time'] = float(t[2].split('program')[1].strip('\r\n'))
if DEBUG:
print test[fname][string.replace(n, '-success', '')]
else :
# print "ERROR!"
sys.exit(1)
except:
continue
return test
# Diff results and look for regressions.
def diffResults(d_old, d_new):
regressions = {}
passes = {}
removed = ''
for x in ['compile state', 'compile time', 'exec state', 'exec time']:
regressions[x] = ''
passes[x] = ''
for t in sorted(d_old.keys()) :
if d_new.has_key(t):
# Check if the test passed or failed.
for x in ['compile state', 'compile time', 'exec state', 'exec time']:
if not d_old[t].has_key(x) and not d_new[t].has_key(x):
continue
if d_old[t].has_key(x):
if d_new[t].has_key(x):
if d_old[t][x] == 'PASS':
if d_new[t][x] != 'PASS':
regressions[x] += t + "\n"
else:
if d_new[t][x] == 'PASS':
passes[x] += t + "\n"
else :
regressions[x] += t + "\n"
if x == 'compile state' or x == 'exec state':
continue
# For execution time, if there is no result it's a fail.
if not d_old[t].has_key(x) and not d_new[t].has_key(x):
continue
elif not d_new[t].has_key(x):
regressions[x] += t + "\n"
elif not d_old[t].has_key(x):
passes[x] += t + "\n"
if math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
continue
elif math.isnan(d_old[t][x]) and not math.isnan(d_new[t][x]):
passes[x] += t + "\n"
elif not math.isnan(d_old[t][x]) and math.isnan(d_new[t][x]):
regressions[x] += t + ": NaN%\n"
if d_new[t][x] > d_old[t][x] and d_old[t][x] > 0.0 and \
(d_new[t][x] - d_old[t][x]) / d_old[t][x] > .05:
regressions[x] += t + ": " + "{0:.1f}".format(100 * (d_new[t][x] - d_old[t][x]) / d_old[t][x]) + "%\n"
else :
removed += t + "\n"
if len(regressions['compile state']) != 0:
print 'REGRESSION: Compilation Failed'
print regressions['compile state']
if len(regressions['exec state']) != 0:
print 'REGRESSION: Execution Failed'
print regressions['exec state']
if len(regressions['compile time']) != 0:
print 'REGRESSION: Compilation Time'
print regressions['compile time']
if len(regressions['exec time']) != 0:
print 'REGRESSION: Execution Time'
print regressions['exec time']
if len(passes['compile state']) != 0:
print 'NEW PASSES: Compilation'
print passes['compile state']
if len(passes['exec state']) != 0:
print 'NEW PASSES: Execution'
print passes['exec state']
if len(removed) != 0:
print 'REMOVED TESTS'
print removed
# Main
if len(sys.argv) < 3 :
print 'Usage:', sys.argv[0], '<old log> <new log>'
sys.exit(-1)
d_old = parse(sys.argv[1])
d_new = parse(sys.argv[2])
diffResults(d_old, d_new)
| 4,045 | 24.446541 | 112 | py |
DMASTE | DMASTE-main/Span-ASTE/train.py | import sys
sys.path.append('aste')
from wrapper import SpanModel
import argparse
import os
def main():
parser = argparse.ArgumentParser(description='Bidirectional MRC-based sentiment triplet extraction')
parser.add_argument('--data_dir', type=str, default="../dataset/")
parser.add_argument('--log_dir', type=str, default="./log/")
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--device', type=int)
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"])
parser.add_argument('--model_dir', type=str, default="./model/")
parser.add_argument('--model_name', type=str, default="1")
args = parser.parse_args()
if args.mode == 'train':
os.makedirs(os.path.join(args.model_dir, args.model_name), exist_ok=True)
source = os.path.join(args.data_dir, args.source)
model = SpanModel(save_dir=os.path.join(args.model_dir, args.model_name), random_seed=int(args.model_name))#, path_config_base=f"training_config/config{args.device}.jsonnet")
model.fit(f'{source}/train.txt', f'{source}/dev.txt', random_seed=int(args.model_name))
else:
os.makedirs(f'{args.log_dir}/{args.model_name}', exist_ok=True)
model = SpanModel(save_dir=os.path.join(args.model_dir, args.model_name), random_seed=int(args.model_name))#, path_config_base=f"training_config/config{args.device}.jsonnet")
log_dir = args.log_dir
pred_file = f'{log_dir}/{args.model_name}/pred.txt'
target = os.path.join(args.data_dir, args.target)
model.predict(f'{target}/test.txt', pred_file, device=args.device)
results = model.score(f'{target}/test.txt', pred_file)
with open(f'{log_dir}/{args.model_name}/metric.txt', 'w') as f:
f.write(str(results) + '\n')
main() | 1,857 | 53.647059 | 182 | py |
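# Example invocations (illustrative; the dataset names and ids below are hypothetical):
#   python train.py --mode train --source res14 --model_name 1
#   python train.py --mode test --source res14 --target lap14 --model_name 1 --device 0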
DMASTE | DMASTE-main/Span-ASTE/span_model/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/Span-ASTE/span_model/training/ner_metrics.py | from overrides import overrides
from typing import Optional
import torch
from allennlp.training.metrics.metric import Metric
from span_model.training.f1 import compute_f1
# TODO: Need to use the decoded predictions so that we catch the gold examples longer than
# the span boundary.
class NERMetrics(Metric):
"""
Computes precision, recall, and micro-averaged F1 from a list of predicted and gold labels.
"""
def __init__(self, number_of_classes: int, none_label: int = 0):
self.number_of_classes = number_of_classes
self.none_label = none_label
self.reset()
@overrides
def __call__(
self,
predictions: torch.Tensor,
gold_labels: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
predictions = predictions.cpu()
gold_labels = gold_labels.cpu()
mask = mask.cpu()
for i in range(self.number_of_classes):
if i == self.none_label:
continue
self._true_positives += (
((predictions == i) * (gold_labels == i) * mask.bool()).sum().item()
)
self._false_positives += (
((predictions == i) * (gold_labels != i) * mask.bool()).sum().item()
)
self._true_negatives += (
((predictions != i) * (gold_labels != i) * mask.bool()).sum().item()
)
self._false_negatives += (
((predictions != i) * (gold_labels == i) * mask.bool()).sum().item()
)
@overrides
def get_metric(self, reset=False):
"""
Returns
-------
A tuple of the following metrics based on the accumulated count statistics:
precision : float
recall : float
f1-measure : float
"""
predicted = self._true_positives + self._false_positives
gold = self._true_positives + self._false_negatives
matched = self._true_positives
precision, recall, f1_measure = compute_f1(predicted, gold, matched)
# Reset counts if at end of epoch.
if reset:
self.reset()
return precision, recall, f1_measure
@overrides
def reset(self):
self._true_positives = 0
self._false_positives = 0
self._true_negatives = 0
self._false_negatives = 0
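# Minimal usage sketch (illustrative tensors; index 0 is the null label):
#   metric = NERMetrics(number_of_classes=3)
#   metric(predictions=torch.tensor([[1, 0, 2]]),
#          gold_labels=torch.tensor([[1, 0, 1]]),
#          mask=torch.tensor([[1, 1, 1]]))
#   precision, recall, f1 = metric.get_metric(reset=True)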
| 2,358 | 29.636364 | 95 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/training/f1.py | """
Function to compute F1 scores.
"""
def safe_div(num, denom):
if denom > 0:
return num / denom
else:
return 0
def compute_f1(predicted, gold, matched):
precision = safe_div(matched, predicted)
recall = safe_div(matched, gold)
f1 = safe_div(2 * precision * recall, precision + recall)
return precision, recall, f1
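# Worked example: with predicted=8, gold=10 and matched=6 this gives
# precision = 6/8 = 0.75, recall = 6/10 = 0.6,
# and f1 = 2 * 0.75 * 0.6 / (0.75 + 0.6) = 0.9 / 1.35 ≈ 0.667.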
| 360 | 19.055556 | 61 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/training/relation_metrics.py | from overrides import overrides
from allennlp.training.metrics.metric import Metric
from span_model.training.f1 import compute_f1
class RelationMetrics(Metric):
"""
Computes precision, recall, and micro-averaged F1 from a list of predicted and gold spans.
"""
def __init__(self):
self.reset()
# TODO: This requires decoding because the dataset reader gets rid of gold spans wider
# than the span width. So, I can't just compare the tensor of gold labels to the tensor of
# predicted labels.
@overrides
def __call__(self, predicted_relation_list, metadata_list):
for predicted_relations, metadata in zip(
predicted_relation_list, metadata_list
):
gold_relations = metadata.relation_dict
self._total_gold += len(gold_relations)
self._total_predicted += len(predicted_relations)
for (span_1, span_2), label in predicted_relations.items():
ix = (span_1, span_2)
if ix in gold_relations and gold_relations[ix] == label:
self._total_matched += 1
@overrides
def get_metric(self, reset=False):
precision, recall, f1 = compute_f1(
self._total_predicted, self._total_gold, self._total_matched
)
# Reset counts if at end of epoch.
if reset:
self.reset()
return precision, recall, f1
@overrides
def reset(self):
self._total_gold = 0
self._total_predicted = 0
self._total_matched = 0
class SpanPairMetrics(RelationMetrics):
@overrides
def __call__(self, predicted_relation_list, metadata_list):
for predicted_relations, metadata in zip(
predicted_relation_list, metadata_list
):
gold_relations = metadata.relation_dict
self._total_gold += len(gold_relations)
self._total_predicted += len(predicted_relations)
for (span_1, span_2), label in predicted_relations.items():
ix = (span_1, span_2)
if ix in gold_relations:
self._total_matched += 1
| 2,156 | 32.703125 | 94 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/ner.py | import logging
from typing import Any, Dict, List, Optional, Callable
import torch
from torch.nn import functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import TimeDistributed
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from span_model.models.shared import FocalLoss, BiAffineSingleInput
from span_model.training.ner_metrics import NERMetrics
from span_model.data.dataset_readers import document
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class NERTagger(Model):
"""
Named entity recognition module
Parameters
----------
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
make_feedforward: Callable,
span_emb_dim: int,
regularizer: Optional[RegularizerApplicator] = None,
use_bi_affine: bool = False,
neg_class_weight: float = -1,
use_focal_loss: bool = False,
focal_loss_gamma: int = 2,
use_double_scorer: bool = False,
use_gold_for_train_prune_scores: bool = False,
use_single_pool: bool = False,
name: str = "ner_labels"
) -> None:
super(NERTagger, self).__init__(vocab, regularizer)
self.use_single_pool = use_single_pool
self.use_gold_for_train_prune_scores = use_gold_for_train_prune_scores
self.use_double_scorer = use_double_scorer
self.use_bi_affine = use_bi_affine
self._name = name
self._namespaces = [
entry for entry in vocab.get_namespaces() if self._name in entry
]
# Number of classes determine the output dimension of the final layer
self._n_labels = {name: vocab.get_vocab_size(name) for name in self._namespaces}
if self.use_single_pool:
for n in self._namespaces:
self._n_labels[n] -= 1
# Null label is needed to keep track of when calculating the metrics
for namespace in self._namespaces:
null_label = vocab.get_token_index("", namespace)
assert (
null_label == 0
) # If not, the dummy class won't correspond to the null label.
            # The scorer produces a score for every label, including the null
            # label; masked-out spans are pushed toward the null label in forward().
# Create a separate scorer and metric for each dataset we're dealing with.
self._ner_scorers = torch.nn.ModuleDict()
self._ner_metrics = {}
for namespace in self._namespaces:
self._ner_scorers[namespace] = self.make_scorer(
make_feedforward, span_emb_dim, self._n_labels[namespace])
if self.use_double_scorer:
self._ner_scorers[namespace] = None # noqa
self._ner_scorers["opinion"] = self.make_scorer(make_feedforward, span_emb_dim, 2)
self._ner_scorers["target"] = self.make_scorer(make_feedforward, span_emb_dim, 2)
self._ner_metrics[namespace] = NERMetrics(
self._n_labels[namespace], null_label
)
self.i_opinion = vocab.get_token_index("OPINION", namespace)
self.i_target = vocab.get_token_index("TARGET", namespace)
if self.use_single_pool:
self.i_opinion = self.i_target = 1
self._active_namespace = None
self._loss = torch.nn.CrossEntropyLoss(reduction="sum")
if neg_class_weight != -1:
assert len(self._namespaces) == 1
num_pos_classes = self._n_labels[self._namespaces[0]] - 1
pos_weight = (1 - neg_class_weight) / num_pos_classes
weight = [neg_class_weight] + [pos_weight] * num_pos_classes
print(dict(ner_class_weight=weight))
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", weight=torch.tensor(weight))
if use_focal_loss:
assert neg_class_weight != -1
self._loss = FocalLoss(
reduction="sum", weight=torch.tensor(weight), gamma=focal_loss_gamma)
print(dict(ner_loss_fn=self._loss))
def make_scorer(self, make_feedforward, span_emb_dim, n_labels):
mention_feedforward = make_feedforward(input_dim=span_emb_dim)
scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(
torch.nn.Linear(
mention_feedforward.get_output_dim(),
n_labels
)
),
)
if self.use_bi_affine:
scorer = BiAffineSingleInput(
input_size=span_emb_dim // 2,
project_size=200,
output_size=n_labels,
)
return scorer
@overrides
def forward(
self, # type: ignore
spans: torch.IntTensor,
span_mask: torch.IntTensor,
span_embeddings: torch.IntTensor,
sentence_lengths: torch.Tensor,
ner_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
TODO: Write documentation.
"""
# Shape: (Batch size, Number of Spans, Span Embedding Size)
# span_embeddings
self._active_namespace = f"{metadata.dataset}__{self._name}"
if self.use_double_scorer:
opinion_scores = self._ner_scorers["opinion"](span_embeddings)
target_scores = self._ner_scorers["target"](span_embeddings)
null_scores = torch.stack([opinion_scores[..., 0], target_scores[..., 0]], dim=-1).mean(dim=-1, keepdim=True)
pool = [null_scores, None, None]
pool[self.i_opinion] = opinion_scores[..., [1]]
pool[self.i_target] = target_scores[..., [1]]
ner_scores = torch.cat(pool, dim=-1)
else:
scorer = self._ner_scorers[self._active_namespace]
ner_scores = scorer(span_embeddings)
# Give large positive scores to "null" class in masked-out elements
ner_scores[..., 0] = util.replace_masked_values(ner_scores[..., 0], span_mask.bool(), 1e20)
_, predicted_ner = ner_scores.max(2)
predictions = self.predict(
ner_scores.detach().cpu(),
spans.detach().cpu(),
span_mask.detach().cpu(),
metadata,
)
output_dict = {"predictions": predictions}
# New
output_dict.update(ner_scores=ner_scores)
output_dict.update(opinion_scores=ner_scores.softmax(dim=-1)[..., [self.i_opinion]])
output_dict.update(target_scores=ner_scores.softmax(dim=-1)[..., [self.i_target]])
if ner_labels is not None:
if self.use_single_pool:
ner_labels = torch.ne(ner_labels, 0.0).long()
if self.use_gold_for_train_prune_scores:
for name, i in dict(opinion_scores=self.i_opinion, target_scores=self.i_target).items():
mask = ner_labels.eq(i).unsqueeze(dim=-1)
assert mask.shape == output_dict[name].shape
output_dict[name] = output_dict[name].masked_fill(mask, 1e20)
metrics = self._ner_metrics[self._active_namespace]
metrics(predicted_ner, ner_labels, span_mask)
ner_scores_flat = ner_scores.view(
-1, self._n_labels[self._active_namespace]
)
ner_labels_flat = ner_labels.view(-1)
mask_flat = span_mask.view(-1).bool()
loss = self._loss(ner_scores_flat[mask_flat], ner_labels_flat[mask_flat])
output_dict["loss"] = loss
return output_dict
def predict(self, ner_scores, spans, span_mask, metadata):
# TODO: Make sure the iteration works in documents with a single sentence.
# Zipping up and iterating iterates over the zeroth dimension of each tensor; this
# corresponds to iterating over sentences.
predictions = []
zipped = zip(ner_scores, spans, span_mask, metadata)
for ner_scores_sent, spans_sent, span_mask_sent, sentence in zipped:
predicted_scores_raw, predicted_labels = ner_scores_sent.max(dim=1)
softmax_scores = F.softmax(ner_scores_sent, dim=1)
predicted_scores_softmax, _ = softmax_scores.max(dim=1)
ix = (predicted_labels != 0) & span_mask_sent.bool()
predictions_sent = []
zip_pred = zip(
predicted_labels[ix],
predicted_scores_raw[ix],
predicted_scores_softmax[ix],
spans_sent[ix],
)
for label, label_score_raw, label_score_softmax, label_span in zip_pred:
label_str = self.vocab.get_token_from_index(
label.item(), self._active_namespace
)
span_start, span_end = label_span.tolist()
ner = [
span_start,
span_end,
label_str,
label_score_raw.item(),
label_score_softmax.item(),
]
prediction = document.PredictedNER(ner, sentence, sentence_offsets=True)
predictions_sent.append(prediction)
predictions.append(predictions_sent)
return predictions
# TODO: This code is repeated elsewhere. Refactor.
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"Loop over the metrics for all namespaces, and return as dict."
res = {}
for namespace, metrics in self._ner_metrics.items():
precision, recall, f1 = metrics.get_metric(reset)
prefix = namespace.replace("_labels", "")
to_update = {
f"{prefix}_precision": precision,
f"{prefix}_recall": recall,
f"{prefix}_f1": f1,
}
res.update(to_update)
res_avg = {}
for name in ["precision", "recall", "f1"]:
values = [res[key] for key in res if name in key]
res_avg[f"MEAN__{self._name.replace('_labels', '')}_{name}"] = sum(values) / len(values) if values else 0
res.update(res_avg)
return res
| 10,868 | 39.707865 | 121 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/embedder.py | from typing import Optional, Tuple
from overrides import overrides
import torch
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder, TokenEmbedder
from allennlp.nn import util
from allennlp.modules.scalar_mix import ScalarMix
@TokenEmbedder.register("double_mix_ptm")
class DoubleMixPTMEmbedder(TokenEmbedder):
# Refer: PretrainedTransformerMismatchedEmbedder
"""
Use this embedder to embed wordpieces given by `PretrainedTransformerMismatchedIndexer`
and to pool the resulting vectors to get word-level representations.
    Registered as a `TokenEmbedder` with name "double_mix_ptm".
# Parameters
model_name : `str`
The name of the `transformers` model to use. Should be the same as the corresponding
`PretrainedTransformerMismatchedIndexer`.
max_length : `int`, optional (default = `None`)
If positive, folds input token IDs into multiple segments of this length, pass them
through the transformer model independently, and concatenate the final representations.
Should be set to the same value as the `max_length` option on the
`PretrainedTransformerMismatchedIndexer`.
train_parameters: `bool`, optional (default = `True`)
If this is `True`, the transformer weights get updated during training.
last_layer_only: `bool`, optional (default = `True`)
When `True` (the default), only the final layer of the pretrained transformer is taken
for the embeddings. But if set to `False`, a scalar mix of all of the layers
is used.
gradient_checkpointing: `bool`, optional (default = `None`)
Enable or disable gradient checkpointing.
"""
def __init__(
self,
model_name: str,
max_length: int = None,
train_parameters: bool = True,
last_layer_only: bool = True,
gradient_checkpointing: Optional[bool] = None,
) -> None:
super().__init__()
# The matched version v.s. mismatched
self._matched_embedder = PretrainedTransformerEmbedder(
model_name,
max_length=max_length,
train_parameters=train_parameters,
last_layer_only=last_layer_only,
gradient_checkpointing=gradient_checkpointing,
)
self._matched_embedder.config.output_hidden_states = True
num_layers = self._matched_embedder.config.num_hidden_layers
mix_init = [float(i) for i in range(num_layers)] # Try to give useful prior, after softmax will be [..., 0.08, 0.23, 0.63]
self._mixer_a = ScalarMix(num_layers, initial_scalar_parameters=mix_init)
self._mixer_b = ScalarMix(num_layers, initial_scalar_parameters=mix_init)
self._matched_embedder.transformer_model.forward = self.make_fn_transformer(
self._matched_embedder.transformer_model.forward
)
# The alternative below (stashing embeds_b as an attribute) doesn't work: gradients don't propagate properly.
# self.embeds_b = None # Bonus output because TokenEmbedder should produce single Tensor output
@classmethod
def make_fn_transformer(cls, fn):
def new_fn(*args, **kwargs):
transformer_output: tuple = fn(*args, **kwargs)
# As far as I can tell, the hidden states will always be the last element
# in the output tuple as long as the model is not also configured to return
# attention scores.
# See, for example, the return value description for BERT:
# https://huggingface.co/transformers/model_doc/bert.html#transformers.BertModel.forward
# These hidden states will also include the embedding layer, which we don't
# include in the scalar mix. Hence the `[1:]` slicing.
hidden_states = transformer_output[-1][1:]
# By default, PTM will return transformer_output[0] so we force the one we want in front
return (hidden_states,) + transformer_output
return new_fn
@overrides
def get_output_dim(self):
return self._matched_embedder.get_output_dim()
@staticmethod
def run_match(embeddings, offsets):
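# Pool wordpiece embeddings back to word level: average the wordpieces covered by each
# original token's offset span (tokens with empty spans are written as zeros).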
# span_embeddings: (batch_size, num_orig_tokens, max_span_length, embedding_size)
# span_mask: (batch_size, num_orig_tokens, max_span_length)
span_embeddings, span_mask = util.batched_span_select(embeddings.contiguous(), offsets)
span_mask = span_mask.unsqueeze(-1)
span_embeddings *= span_mask # zero out paddings
span_embeddings_sum = span_embeddings.sum(2)
span_embeddings_len = span_mask.sum(2)
# Shape: (batch_size, num_orig_tokens, embedding_size)
orig_embeddings = span_embeddings_sum / torch.clamp_min(span_embeddings_len, 1)
# All the places where the span length is zero, write in zeros.
orig_embeddings[(span_embeddings_len == 0).expand(orig_embeddings.shape)] = 0
return orig_embeddings
@overrides
def forward(
self,
token_ids: torch.LongTensor,
mask: torch.BoolTensor,
offsets: torch.LongTensor,
wordpiece_mask: torch.BoolTensor,
type_ids: Optional[torch.LongTensor] = None,
segment_concat_mask: Optional[torch.BoolTensor] = None,
) -> torch.Tensor: # type: ignore
"""
# Parameters
token_ids: `torch.LongTensor`
Shape: [batch_size, num_wordpieces] (for exception see `PretrainedTransformerEmbedder`).
mask: `torch.BoolTensor`
Shape: [batch_size, num_orig_tokens].
offsets: `torch.LongTensor`
Shape: [batch_size, num_orig_tokens, 2].
Maps indices for the original tokens, i.e. those given as input to the indexer,
to a span in token_ids. `token_ids[i][offsets[i][j][0]:offsets[i][j][1] + 1]`
corresponds to the original j-th token from the i-th batch.
wordpiece_mask: `torch.BoolTensor`
Shape: [batch_size, num_wordpieces].
type_ids: `Optional[torch.LongTensor]`
Shape: [batch_size, num_wordpieces].
segment_concat_mask: `Optional[torch.BoolTensor]`
See `PretrainedTransformerEmbedder`.
# Returns
`torch.Tensor`
Shape: [batch_size, num_orig_tokens, embedding_size].
"""
hidden_states = self._matched_embedder( # noqa
token_ids, wordpiece_mask, type_ids=type_ids, segment_concat_mask=segment_concat_mask
)
assert type(hidden_states) in {tuple, list}
embeds_a = self.run_match(self._mixer_a(hidden_states), offsets)
embeds_b = self.run_match(self._mixer_b(hidden_states), offsets)
x = torch.cat([embeds_a, embeds_b], dim=-1)
return x
# self.embeds_b = embeds_b
# return embeds_a
def split_outputs(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
# Output has to be single tensor to suit forward signature but we need to split
output_dim = self.get_output_dim()
bs, seq_len, hidden_size = x.shape
assert hidden_size == output_dim * 2
return x[:, :, :output_dim], x[:, :, output_dim:]
| 7,218 | 44.689873 | 131 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/relation_proper.py | import logging
from typing import Any, Dict, List, Optional, Callable
import torch
import torch.nn.functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.nn import util, RegularizerApplicator
from allennlp.modules import TimeDistributed
from span_model.models.shared import BiAffine, SpanLengthCrossEntropy, BagPairScorer, BiAffineV2
from span_model.training.relation_metrics import RelationMetrics
from span_model.models.entity_beam_pruner import Pruner, TwoScorePruner
from span_model.data.dataset_readers import document
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
import json
from pydantic import BaseModel
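# Bundles the tensors produced by pruning one set of spans (opinion or target),
# so the two branches can be passed around and recombined conveniently.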
class PruneOutput(BaseModel):
class Config:
arbitrary_types_allowed = True
span_embeddings: torch.Tensor
span_mention_scores: torch.Tensor
num_spans_to_keep: torch.Tensor
span_mask: torch.Tensor
span_indices: torch.Tensor
spans: torch.Tensor
def analyze_info(info: dict):
for k, v in info.items():
if isinstance(v, torch.Size):
v = tuple(v)
info[k] = str(v)
logging.info(json.dumps(info, indent=2))
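# DistanceEmbedder buckets the token distance between every span pair (the minimum absolute
# gap between the two spans' endpoints) and embeds the bucket id as an extra pair feature.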
class DistanceEmbedder(torch.nn.Module):
def __init__(self, dim=128, vocab_size=10):
super().__init__()
self.vocab_size = vocab_size
self.dim = dim
self.embedder = torch.nn.Embedding(self.vocab_size, self.dim)
def to_distance_buckets(self, spans_a: torch.Tensor, spans_b: torch.Tensor) -> torch.Tensor:
bs, num_a, dim = spans_a.shape
bs, num_b, dim = spans_b.shape
assert dim == 2
spans_a = spans_a.view(bs, num_a, 1, dim)
spans_b = spans_b.view(bs, 1, num_b, dim)
d_ab = torch.abs(spans_b[..., 0] - spans_a[..., 1])
d_ba = torch.abs(spans_a[..., 0] - spans_b[..., 1])
distances = torch.minimum(d_ab, d_ba)
# pos_a = spans_a.float().mean(dim=-1).unsqueeze(dim=-1) # bs, num_spans, 1
# pos_b = spans_b.float().mean(dim=-1).unsqueeze(dim=-2) # bs, 1, num_spans
# distances = torch.abs(pos_a - pos_b)
x = util.bucket_values(distances, num_total_buckets=self.vocab_size)
# [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]
x = x.long()
assert x.shape == (bs, num_a, num_b)
return x
def forward(self, spans_a: torch.Tensor, spans_b: torch.Tensor) -> torch.Tensor:
buckets = self.to_distance_buckets(spans_a, spans_b)
x = self.embedder(buckets) # bs, num_spans, num_spans, dim
return x
def global_max_pool1d(x: torch.Tensor) -> torch.Tensor:
bs, seq_len, features = x.shape
x = x.transpose(-1, -2)
x = torch.nn.functional.adaptive_max_pool1d(x, output_size=1, return_indices=False)
x = x.transpose(-1, -2)
x = x.squeeze(dim=1)
assert tuple(x.shape) == (bs, features)
return x
def test_pool():
x = torch.zeros(3, 100, 32)
y = global_max_pool1d(x)
print(dict(x=x.shape, y=y.shape))
class ProperRelationExtractor(Model):
def __init__(
self,
vocab: Vocabulary,
make_feedforward: Callable,
span_emb_dim: int,
feature_size: int,
spans_per_word: float,
positive_label_weight: float = 1.0,
regularizer: Optional[RegularizerApplicator] = None,
use_distance_embeds: bool = False,
use_pair_feature_maxpool: bool = False,
use_pair_feature_cls: bool = False,
use_bi_affine_classifier: bool = False,
neg_class_weight: float = -1,
span_length_loss_weight_gamma: float = 0.0,
use_bag_pair_scorer: bool = False,
use_bi_affine_v2: bool = False,
use_pruning: bool = True,
use_single_pool: bool = False,
**kwargs, # noqa
) -> None:
super().__init__(vocab, regularizer)
print(dict(unused_keys=kwargs.keys()))
print(dict(locals=locals()))
self.use_single_pool = use_single_pool
self.use_pruning = use_pruning
self.use_bi_affine_v2 = use_bi_affine_v2
self.use_bag_pair_scorer = use_bag_pair_scorer
self.span_length_loss_weight_gamma = span_length_loss_weight_gamma
self.use_bi_affine_classifier = use_bi_affine_classifier
self.use_distance_embeds = use_distance_embeds
self.use_pair_feature_maxpool = use_pair_feature_maxpool
self.use_pair_feature_cls = use_pair_feature_cls
self._text_embeds: Optional[torch.Tensor] = None
self._text_mask: Optional[torch.Tensor] = None
self._spans_a: Optional[torch.Tensor] = None
self._spans_b: Optional[torch.Tensor] = None
token_emb_dim = 768
relation_scorer_dim = 2 * span_emb_dim
if self.use_distance_embeds:
self.d_embedder = DistanceEmbedder()
relation_scorer_dim += self.d_embedder.dim
if self.use_pair_feature_maxpool:
relation_scorer_dim += token_emb_dim
if self.use_pair_feature_cls:
relation_scorer_dim += token_emb_dim
print(dict(token_emb_dim=token_emb_dim, span_emb_dim=span_emb_dim, relation_scorer_dim=relation_scorer_dim))
self._namespaces = [
entry for entry in vocab.get_namespaces() if "relation_labels" in entry
]
self._n_labels = {name: vocab.get_vocab_size(name) for name in self._namespaces}
assert len(self._n_labels) == 1
n_labels = list(self._n_labels.values())[0] + 1
if self.use_bi_affine_classifier:
self._bi_affine_classifier = BiAffine(span_emb_dim, project_size=200, output_size=n_labels)
if self.use_bi_affine_v2:
self._bi_affine_v2 = BiAffineV2(span_emb_dim, project_size=200, output_size=n_labels)
self._mention_pruners = torch.nn.ModuleDict()
self._relation_feedforwards = torch.nn.ModuleDict()
self._relation_scorers = torch.nn.ModuleDict()
self._relation_metrics = {}
self._pruner_o = self._make_pruner(span_emb_dim, make_feedforward)
self._pruner_t = self._make_pruner(span_emb_dim, make_feedforward)
if not self.use_pruning:
self._pruner_o, self._pruner_t = None, None
if self.use_single_pool:
assert self.use_pruning
self._pruner_o = self._pruner_t
for namespace in self._namespaces:
relation_feedforward = make_feedforward(input_dim=relation_scorer_dim)
if self.use_bag_pair_scorer:
relation_feedforward = BagPairScorer(make_feedforward, span_emb_dim)
self._relation_feedforwards[namespace] = relation_feedforward
relation_scorer = torch.nn.Linear(
relation_feedforward.get_output_dim(), self._n_labels[namespace] + 1
)
self._relation_scorers[namespace] = relation_scorer
self._relation_metrics[namespace] = RelationMetrics()
self._spans_per_word = spans_per_word
self._active_namespace = None
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", ignore_index=-1)
if self.span_length_loss_weight_gamma != 0:
assert neg_class_weight == -1
self._loss = SpanLengthCrossEntropy(
gamma=self.span_length_loss_weight_gamma, reduction="sum", ignore_index=-1)
if neg_class_weight != -1:
assert len(self._namespaces) == 1
num_pos_classes = self._n_labels[self._namespaces[0]]
weight = torch.tensor([neg_class_weight] + [1.0] * num_pos_classes)
print(dict(relation_neg_class_weight=weight))
self._loss = torch.nn.CrossEntropyLoss(reduction="sum", ignore_index=-1, weight=weight)
print(dict(relation_loss_fn=self._loss))
def _make_pruner(self, span_emb_dim:int, make_feedforward:Callable):
mention_feedforward = make_feedforward(input_dim=span_emb_dim)
feedforward_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(
torch.nn.Linear(mention_feedforward.get_output_dim(), 1)
),
)
return Pruner(feedforward_scorer, use_external_score=True)
@overrides
def forward(
self, # type: ignore
spans: torch.IntTensor,
span_mask,
span_embeddings, # TODO: add type.
sentence_lengths,
relation_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
self._active_namespace = f"{metadata.dataset}__relation_labels"
pruned_o: PruneOutput = self._prune_spans(spans, span_mask, span_embeddings, sentence_lengths, "opinion")
pruned_t: PruneOutput = self._prune_spans(spans, span_mask, span_embeddings, sentence_lengths, "target")
relation_scores = self._compute_relation_scores(pruned_o, pruned_t)
prediction_dict, predictions = self.predict(
spans_a=pruned_o.spans.detach().cpu(),
spans_b=pruned_t.spans.detach().cpu(),
relation_scores=relation_scores.detach().cpu(),
num_keep_a=pruned_o.num_spans_to_keep.detach().cpu(),
num_keep_b=pruned_t.num_spans_to_keep.detach().cpu(),
metadata=metadata,
)
output_dict = {"predictions": predictions}
# Evaluate loss and F1 if labels were provided.
if relation_labels is not None:
# Compute cross-entropy loss.
gold_relations = self._get_pruned_gold_relations(
relation_labels, pruned_o, pruned_t
)
self._relation_scores, self._gold_relations = relation_scores, gold_relations
cross_entropy = self._get_cross_entropy_loss(
relation_scores, gold_relations
)
# Compute F1.
assert len(prediction_dict) == len(
metadata
) # Make sure length of predictions is right.
relation_metrics = self._relation_metrics[self._active_namespace]
relation_metrics(prediction_dict, metadata)
output_dict["loss"] = cross_entropy
return output_dict
def _prune_spans(self, spans, span_mask, span_embeddings, sentence_lengths, name: str) -> PruneOutput:
if not self.use_pruning:
bs, num_spans, dim = span_embeddings.shape
device = span_embeddings.device
return PruneOutput(
spans=spans,
span_mask=span_mask.unsqueeze(dim=-1),
span_embeddings=span_embeddings,
num_spans_to_keep=torch.full((bs,), fill_value=num_spans, device=device, dtype=torch.long),
span_indices=torch.arange(num_spans, device=device, dtype=torch.long).view(1, num_spans).expand(bs, -1),
span_mention_scores=torch.zeros(bs, num_spans, 1, device=device),
)
pruner = dict(opinion=self._pruner_o, target=self._pruner_t)[name]
if self.use_single_pool:
self._opinion_scores = torch.maximum(self._opinion_scores, self._target_scores)
self._target_scores = self._opinion_scores
mention_scores = dict(opinion=self._opinion_scores, target=self._target_scores)[name]
pruner.set_external_score(mention_scores.detach())
# Prune
num_spans = spans.size(1) # Max number of spans for the minibatch.
# Keep different number of spans for each minibatch entry.
num_spans_to_keep = torch.ceil(
sentence_lengths.float() * self._spans_per_word
).long()
outputs = pruner(span_embeddings, span_mask, num_spans_to_keep)
(
top_span_embeddings,
top_span_mask,
top_span_indices,
top_span_mention_scores,
num_spans_kept,
) = outputs
top_span_mask = top_span_mask.unsqueeze(-1)
flat_top_span_indices = util.flatten_and_batch_shift_indices(
top_span_indices, num_spans
)
top_spans = util.batched_index_select(
spans, top_span_indices, flat_top_span_indices
)
return PruneOutput(
span_embeddings=top_span_embeddings,
span_mention_scores=top_span_mention_scores,
num_spans_to_keep=num_spans_to_keep,
span_mask=top_span_mask,
span_indices=top_span_indices,
spans=top_spans,
)
def predict(self, spans_a, spans_b, relation_scores, num_keep_a, num_keep_b, metadata):
preds_dict = []
predictions = []
for i in range(relation_scores.shape[0]):
# Each entry/sentence in batch
pred_dict_sent, predictions_sent = self._predict_sentence(
spans_a[i], spans_b[i], relation_scores[i],
num_keep_a[i], num_keep_b[i], metadata[i]
)
preds_dict.append(pred_dict_sent)
predictions.append(predictions_sent)
return preds_dict, predictions
def _predict_sentence(
self, top_spans_a, top_spans_b, relation_scores, num_keep_a, num_keep_b, sentence
):
num_a = num_keep_a.item() # noqa
num_b = num_keep_b.item() # noqa
spans_a = [tuple(x) for x in top_spans_a.tolist()]
spans_b = [tuple(x) for x in top_spans_b.tolist()]
# Iterate over all span pairs and labels. Record the span if the label isn't null.
predicted_scores_raw, predicted_labels = relation_scores.max(dim=-1)
softmax_scores = F.softmax(relation_scores, dim=-1)
predicted_scores_softmax, _ = softmax_scores.max(dim=-1)
predicted_labels -= 1 # Subtract 1 so that null labels get -1.
ix = (predicted_labels >= 0) # TODO: Figure out their keep_mask (relation.py:202)
res_dict = {}
predictions = []
for i, j in ix.nonzero(as_tuple=False):
span_1 = spans_a[i]
span_2 = spans_b[j]
label = predicted_labels[i, j].item()
raw_score = predicted_scores_raw[i, j].item()
softmax_score = predicted_scores_softmax[i, j].item()
label_name = self.vocab.get_token_from_index(
label, namespace=self._active_namespace
)
res_dict[(span_1, span_2)] = label_name
list_entry = (
span_1[0],
span_1[1],
span_2[0],
span_2[1],
label_name,
raw_score,
softmax_score,
)
predictions.append(
document.PredictedRelation(list_entry, sentence, sentence_offsets=True)
)
return res_dict, predictions
# TODO: This code is repeated elsewhere. Refactor.
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"Loop over the metrics for all namespaces, and return as dict."
res = {}
for namespace, metrics in self._relation_metrics.items():
precision, recall, f1 = metrics.get_metric(reset)
prefix = namespace.replace("_labels", "")
to_update = {
f"{prefix}_precision": precision,
f"{prefix}_recall": recall,
f"{prefix}_f1": f1,
}
res.update(to_update)
res_avg = {}
for name in ["precision", "recall", "f1"]:
values = [res[key] for key in res if name in key]
res_avg[f"MEAN__relation_{name}"] = (
sum(values) / len(values) if values else 0
)
res.update(res_avg)
return res
def _make_pair_features(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
assert a.shape == b.shape
bs, num_a, num_b, size = a.shape
features = [a, b]
if self.use_pair_feature_maxpool:
x = self._text_embeds
c = global_max_pool1d(x) # [bs, size]
bs, size = c.shape
c = c.view(bs, 1, 1, size).expand(-1, num_a, num_b, -1)
features.append(c)
if self.use_pair_feature_cls:
c = self._text_embeds[:, 0, :]
bs, size = c.shape
c = c.view(bs, 1, 1, size).expand(-1, num_a, num_b, -1)
features.append(c)
if self.use_distance_embeds:
features.append(self.d_embedder(self._spans_a, self._spans_b))
x = torch.cat(features, dim=-1)
return x
def _compute_span_pair_embeddings(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
c = self._make_pair_features(a, b)
if self.use_bi_affine_classifier:
c = self._bi_affine_classifier(a, b)
return c
def _compute_relation_scores(self, pruned_a: PruneOutput, pruned_b: PruneOutput):
if self.span_length_loss_weight_gamma != 0:
bs, num_a, _ = pruned_a.spans.shape
bs, num_b, _ = pruned_b.spans.shape
widths_a = pruned_a.spans[..., [1]] - pruned_a.spans[..., [0]] + 1
widths_b = pruned_b.spans[..., [1]] - pruned_b.spans[..., [0]] + 1
widths_a = widths_a.view(bs, num_a, 1, 1)
widths_b = widths_b.view(bs, 1, num_b, 1)
widths = (widths_a + widths_b) / 2
self._loss.lengths = widths.view(bs * num_a * num_b)
a_orig, b_orig = pruned_a.span_embeddings, pruned_b.span_embeddings
bs, num_a, size = a_orig.shape
bs, num_b, size = b_orig.shape
chunk_size = max(1000 // num_a, 1)
# logging.info(dict(a=num_a, b=num_b, chunk_size=chunk_size))
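# Score span pairs in chunks along the first span axis so the expanded
# (bs, chunk, num_b, size) pair tensors stay small enough to fit in memory.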
pool = []
for i in range(0, num_a, chunk_size):
a = a_orig[:, i:i + chunk_size, :]
num_chunk = a.shape[1]
a = a.view(bs, num_chunk, 1, size).expand(-1, -1, num_b, -1)
b = b_orig.view(bs, 1, num_b, size).expand(-1, num_chunk, -1, -1)
assert a.shape == b.shape
self._spans_a = pruned_a.spans[:, i:i + chunk_size, :]
self._spans_b = pruned_b.spans
embeds = self._compute_span_pair_embeddings(a, b)
self._relation_embeds = embeds
if self.use_bi_affine_classifier:
scores = embeds
else:
relation_feedforward = self._relation_feedforwards[self._active_namespace]
relation_scorer = self._relation_scorers[self._active_namespace]
embeds = torch.flatten(embeds, end_dim=-2)
projected = relation_feedforward(embeds)
scores = relation_scorer(projected)
scores = scores.view(bs, num_chunk, num_b, -1)
if self.use_bi_affine_v2:
scores += self._bi_affine_v2(a, b)
pool.append(scores)
scores = torch.cat(pool, dim=1)
return scores
@staticmethod
def _get_pruned_gold_relations(relation_labels: torch.Tensor, pruned_a: PruneOutput, pruned_b: PruneOutput) -> torch.Tensor:
"""
Loop over each slice and get the labels for the spans from that slice.
All labels are offset by 1 so that the "null" label gets class zero. This is the desired
behavior for the softmax. Labels corresponding to masked relations keep the label -1, which
the softmax loss ignores.
"""
# TODO: Test and possibly optimize.
relations = []
indices_a, masks_a = pruned_a.span_indices, pruned_a.span_mask.bool()
indices_b, masks_b = pruned_b.span_indices, pruned_b.span_mask.bool()
for i in range(relation_labels.shape[0]):
# Each entry in batch
entry = relation_labels[i]
entry = entry[indices_a[i], :][:, indices_b[i]]
mask_entry = masks_a[i] & masks_b[i].transpose(0, 1)
assert entry.shape == mask_entry.shape
entry[mask_entry] += 1
entry[~mask_entry] = -1
relations.append(entry)
# return torch.cat(relations, dim=0)
# The commented-out `cat` above was a mistake: items within a batch should not be concatenated together.
# It likely went unnoticed because the current batch size is 1 and _get_loss flattens everything.
return torch.stack(relations, dim=0)
def _get_cross_entropy_loss(self, relation_scores, relation_labels):
"""
Compute cross-entropy loss on relation labels. Ignore diagonal entries and entries giving
relations between masked out spans.
"""
# Need to add one for the null class.
n_labels = self._n_labels[self._active_namespace] + 1
scores_flat = relation_scores.view(-1, n_labels)
# Need to add 1 so that the null label is 0, to line up with indices into prediction matrix.
labels_flat = relation_labels.view(-1)
# Compute cross-entropy loss.
loss = self._loss(scores_flat, labels_flat)
return loss
| 20,888 | 39.561165 | 128 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/shared.py | """
Short utility functions.
"""
from typing import Optional, Callable
import torch
import torch.nn.functional as F
from allennlp.modules import FeedForward
from allennlp.modules.span_extractors import EndpointSpanExtractor, SpanExtractor
from allennlp.nn.util import batched_span_select
from overrides import overrides
from torch import Tensor
def cumsum_shifted(xs):
"""
Assumes `xs` is a 1-d array.
The usual cumsum has elements [x[1], x[1] + x[2], ...]. This one has elements
[0, x[1], x[1] + x[2], ...]. Useful for calculating sentence offsets.
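Example: cumsum_shifted(torch.tensor([2, 3, 4])) returns tensor([0, 2, 5]).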
"""
cs = xs.cumsum(dim=0)
shift = torch.zeros(1, dtype=torch.long, device=cs.device) # Put on correct device.
return torch.cat([shift, cs[:-1]], dim=0)
def batch_identity(batch_size, matrix_size, *args, **kwargs):
"""
Tile the identity matrix along axis 0, `batch_size` times.
"""
ident = torch.eye(matrix_size, *args, **kwargs).unsqueeze(0)
res = ident.repeat(batch_size, 1, 1)
return res
def fields_to_batches(d, keys_to_ignore=[]):
"""
The input is a dict whose items are batched tensors. The output is a list of dictionaries - one
per entry in the batch - with the slices of the tensors for that entry. Here's an example.
Input:
d = {"a": [[1, 2], [3,4]], "b": [1, 2]}
Output:
res = [{"a": [1, 2], "b": 1}, {"a": [3, 4], "b": 2}].
"""
keys = [key for key in d.keys() if key not in keys_to_ignore]
# Make sure all input dicts have same length. If they don't, there's a problem.
lengths = {k: len(d[k]) for k in keys}
if len(set(lengths.values())) != 1:
msg = f"fields have different lengths: {lengths}."
# If there's a doc key, add it to specify where the error is.
if "doc_key" in d:
msg = f"For document {d['doc_key']}, " + msg
raise ValueError(msg)
length = list(lengths.values())[0]
res = [{k: d[k][i] for k in keys} for i in range(length)]
return res
def batches_to_fields(batches):
"""
The inverse of `fields_to_batches`.
"""
# Make sure all the keys match.
first_keys = batches[0].keys()
for entry in batches[1:]:
if set(entry.keys()) != set(first_keys):
raise ValueError("Keys to not match on all entries.")
res = {k: [] for k in first_keys}
for batch in batches:
for k, v in batch.items():
res[k].append(v)
return res
class FocalLoss(torch.nn.Module):
# Reference: https://github.com/AdeelH/pytorch-multi-class-focal-loss/blob/master/focal_loss.py
def __init__(
self,
weight: Optional[Tensor] = None,
gamma: float = 0.,
reduction: str = 'mean',
ignore_index: int = -100
):
super().__init__()
assert reduction in {"mean", "sum", "none"}
self.gamma = gamma
self.reduction = reduction
self.nll_loss = torch.nn.NLLLoss(
weight=weight, reduction="none", ignore_index=ignore_index)
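# Note: with gamma == 0 the focal term is 1, so this reduces to standard (weighted) cross-entropy.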
def forward(self, x, y):
assert x.ndim == 2
# compute weighted cross entropy term: -alpha * log(pt)
# (alpha is already part of self.nll_loss)
log_p = F.log_softmax(x, dim=-1)
ce = self.nll_loss(log_p, y)
# get true class column from each row
all_rows = torch.arange(len(x))
log_pt = log_p[all_rows, y]
# compute focal term: (1 - pt)^gamma
pt = log_pt.exp()
focal_term = (1 - pt)**self.gamma
# the full loss: -alpha * ((1 - pt)^gamma) * log(pt)
loss = focal_term * ce
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
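# BiAffine scores a pair of inputs with a bilinear term over the two tanh-projected inputs
# plus a linear term over their concatenation.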
class BiAffine(torch.nn.Module):
def __init__(self, input_size: int, project_size: int, output_size: int):
super().__init__()
self.project_a = torch.nn.Linear(input_size, project_size)
self.project_b = torch.nn.Linear(input_size, project_size)
self.bi_affine = torch.nn.Bilinear(project_size, project_size, output_size)
self.linear = torch.nn.Linear(project_size * 2, output_size)
self.act = torch.nn.Tanh()
self.input_size, self.output_size = input_size, output_size
def forward(self, a: Tensor, b: Tensor) -> Tensor:
a = self.act(self.project_a(a))
b = self.act(self.project_b(b))
c = self.bi_affine(a, b)
d = self.linear(torch.cat([a, b], dim=-1))
return c + d
class BiAffineSingleInput(torch.nn.Module):
def __init__(self, **kwargs):
super().__init__()
self.net = BiAffine(**kwargs)
def forward(self, x: Tensor) -> Tensor:
size = x.shape[-1]
a, b = torch.split(x, split_size_or_sections=size // 2, dim=-1)
return self.net(a, b)
class BiAffineV2(BiAffine):
def forward(self, a: Tensor, b: Tensor) -> Tensor:
a = self.act(self.project_a(a))
b = self.act(self.project_b(b))
c = self.bi_affine(a, b)
return c
class BiAffineSpanExtractor(SpanExtractor):
def __init__(self, endpoint_extractor: EndpointSpanExtractor, **kwargs):
super().__init__()
self.endpoint_extractor = endpoint_extractor
self.net = BiAffineSingleInput(**kwargs)
def get_input_dim(self) -> int:
return self.endpoint_extractor.get_input_dim()
def get_output_dim(self) -> int:
return self.net.net.output_size
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
x = self.endpoint_extractor(sequence_tensor, span_indices, span_indices_mask)
x = self.net(x)
return x
class LSTMWithMarkers(SpanExtractor):
def __init__(self, input_size: int, hidden_size: int):
super().__init__()
self.lstm = torch.nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
batch_first=True,
bidirectional=True,
)
self.start = torch.nn.Parameter(torch.randn(input_size))
self.end = torch.nn.Parameter(torch.randn(input_size))
self.input_size = input_size
self.hidden_size = hidden_size
def get_input_dim(self) -> int:
return self.input_size
def get_output_dim(self) -> int:
return self.hidden_size * 2
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
x, mask = batched_span_select(sequence_tensor, span_indices)
assert mask[:, :, 0].float().sum().item() == torch.numel(mask[:, :, 0])
bs, num_spans, max_width, size = x.shape
_mask = mask.view(bs, num_spans, max_width, 1).expand_as(x)
start = self.start.view(1, 1, 1, size).expand(bs, num_spans, 1, size)
end = self.end.view(1, 1, 1, size).expand(bs, num_spans, 1, size)
x = torch.where(_mask, x, end.expand_as(x))
x = torch.cat([start, x, end], dim=-2)
num_special = 2 # Start & end markers
# num_special = 0
x = x.view(bs * num_spans, max_width + num_special, size)
# lengths = mask.view(bs * num_spans, max_width).sum(dim=-1) + num_special
# x = pack_padded_sequence(x, lengths.cpu(), batch_first=True, enforce_sorted=False)
output, (last_hidden, last_cell) = self.lstm(x)
x = last_hidden.view(bs, num_spans, self.get_output_dim())
return x
class LearntWeightCrossEntropy(torch.nn.Module):
def __init__(self, num_classes: int, **kwargs):
super().__init__()
self.w = torch.nn.Parameter(torch.ones(num_classes))
self.kwargs = kwargs
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.cross_entropy(
input, target, weight=self.w, **self.kwargs)
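# SpanLengthCrossEntropy re-weights each instance's cross-entropy by lengths ** gamma; the caller
# is expected to set `lengths` immediately before invoking the loss (the relation head does this
# with the average width of each span pair).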
class SpanLengthCrossEntropy(torch.nn.Module):
def __init__(self, gamma: float, reduction: str, ignore_index: int):
super().__init__()
self.gamma = gamma
self.reduction = reduction
self.loss_fn = torch.nn.CrossEntropyLoss(
reduction="none", ignore_index=ignore_index)
self.lengths: Optional[Tensor] = None
def make_instance_weights(self) -> Tensor:
assert self.lengths is not None
w = self.lengths ** self.gamma
self.lengths = None
return w
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
n, c = input.shape
w = self.make_instance_weights()
assert tuple(w.shape) == (n,)
x = self.loss_fn(input, target)
x *= w
if self.reduction == "sum":
x = x.sum()
elif self.reduction == "mean":
x = x.mean()
else:
assert self.reduction == "none", f"Unknown {dict(reduction=self.reduction)}"
return x
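# BagPairScorer splits each concatenated pair embedding into four quarters (a, b, c, d)
# and averages the feedforward output over the pairings (a, c), (a, d), (b, c), (b, d).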
class BagPairScorer(torch.nn.Module):
def __init__(self, make_feedforward: Callable[[int], FeedForward], span_emb_dim: int):
super().__init__()
self.feed = make_feedforward(span_emb_dim)
self.input_dim = span_emb_dim * 2
def get_output_dim(self) -> int:
return self.feed.get_output_dim()
def forward(self, x: Tensor) -> Tensor:
*_, size = x.shape
a, b, c, d = torch.split(x, split_size_or_sections=size // 4, dim=-1)
bags = []
for pair in [(a, c), (a, d), (b, c), (b, d)]:
bags.append(self.feed(torch.cat(pair, dim=-1)))
x = torch.stack(bags, dim=0).mean(dim=0)
return x
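# DualScorer factorizes scoring into a binary detector and a class classifier: the external
# mention score is added to the detector's first logit, the detector's first column is kept as
# the null score, and the class probabilities are scaled by the detector's second column.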
class DualScorer(torch.nn.Module):
def __init__(self, make_feedforward: Callable[[int], FeedForward], input_size: int, num_classes: int):
super().__init__()
self.make_feedforward = make_feedforward
self.input_size = input_size
self.detector = self.make_scorer(2)
self.classifier = self.make_scorer(num_classes)
def make_scorer(self, num_classes: int):
feedforward = self.make_feedforward(self.input_size)
scorer = torch.nn.Linear(feedforward.get_output_dim(), num_classes)
return torch.nn.Sequential(feedforward, scorer)
def forward(self, x: Tensor, mention_scores: Tensor) -> Tensor:
x_detect = self.detector(x)
x_detect[..., :1] += mention_scores
scores_detect = x_detect.softmax(dim=-1)
scores_class = self.classifier(x).softmax(dim=-1)
scores = torch.cat([scores_detect[..., [0]], scores_class * scores_detect[..., [1]]], dim=-1)
return scores | 10,701 | 33.634304 | 106 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/span_model.py | import logging
from typing import Dict, List, Optional, Union
import copy
import torch
import torch.nn.functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.common.params import Params
from allennlp.models.model import Model
from allennlp.modules import TextFieldEmbedder, FeedForward, TimeDistributed
from allennlp.modules.span_extractors import EndpointSpanExtractor, SelfAttentiveSpanExtractor, SpanExtractor
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from span_model.models.ner import NERTagger
from span_model.models.relation_proper import ProperRelationExtractor
from span_model.models.shared import BiAffineSpanExtractor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# New
from torch import Tensor
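# MaxPoolSpanExtractor represents each span by max-pooling the token embeddings it covers;
# MeanPoolSpanExtractor below averages them instead.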
class MaxPoolSpanExtractor(SpanExtractor):
def __init__(self, input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
@staticmethod
def extract_pooled(x, mask) -> Tensor:
return util.masked_max(x, mask, dim=-2)
@overrides
def forward(
self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
span_indices_mask: torch.BoolTensor = None,
) -> Tensor:
span_embeddings, span_mask = util.batched_span_select(sequence_tensor, span_indices)
bs, num_spans, span_width, size = span_embeddings.shape
span_mask = span_mask.view(bs, num_spans, span_width, 1)
x = self.extract_pooled(span_embeddings, span_mask)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
x *= span_indices_mask.view(bs, num_spans, 1)
assert tuple(x.shape) == (bs, num_spans, size)
return x
class MeanPoolSpanExtractor(MaxPoolSpanExtractor):
@staticmethod
def extract_pooled(x, mask) -> Tensor:
return util.masked_mean(x, mask, dim=-2)
class TextEmbedderWithBiLSTM(TextFieldEmbedder):
def __init__(self, embedder: TextFieldEmbedder, hidden_size: int):
super().__init__()
self.embedder = embedder
self.lstm = torch.nn.LSTM(
input_size=self.embedder.get_output_dim(),
hidden_size=hidden_size,
bidirectional=True,
batch_first=True,
num_layers=1, # Increasing num_layers can help but we want fair comparison
)
self.dropout = torch.nn.Dropout(p=0.5)
self.output_size = hidden_size * 2
def get_output_dim(self) -> int:
return self.output_size
def forward(self, *args, **kwargs) -> torch.Tensor:
x = self.embedder(*args, **kwargs)
x = x.squeeze(dim=0) # For some reason x.shape is (1, 1, seq_len, size)
x = self.dropout(x) # Seems to work best if dropout both before and after lstm
x, state = self.lstm(x)
x = self.dropout(x)
x = x.unsqueeze(dim=0)
return x
@Model.register("span_model")
class SpanModel(Model):
def __init__(
self,
vocab: Vocabulary,
embedder: TextFieldEmbedder,
modules, # TODO: Add type.
feature_size: int,
max_span_width: int,
target_task: str,
feedforward_params: Dict[str, Union[int, float]],
loss_weights: Dict[str, float],
initializer: InitializerApplicator = InitializerApplicator(),
module_initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
display_metrics: List[str] = None,
# New
use_ner_embeds: bool = None,
span_extractor_type: str = None,
use_double_mix_embedder: bool = None,
relation_head_type: str = "base",
use_span_width_embeds: bool = None,
use_bilstm_after_embedder: bool = False,
) -> None:
super(SpanModel, self).__init__(vocab, regularizer)
# New
info = dict(
use_ner_embeds=use_ner_embeds,
span_extractor_type=span_extractor_type,
use_double_mix_embedder=use_double_mix_embedder,
relation_head_type=relation_head_type,
use_span_width_embeds=use_span_width_embeds,
)
for k, v in info.items():
print(dict(locals=(k, v)))
assert v is not None, k
self.use_double_mix_embedder = use_double_mix_embedder
self.relation_head_type = relation_head_type
if use_bilstm_after_embedder:
embedder = TextEmbedderWithBiLSTM(embedder, hidden_size=300)
####################
assert span_extractor_type in {"endpoint", "attn", "max_pool", "mean_pool", "bi_affine"}
# Create span extractor.
if use_span_width_embeds:
self._endpoint_span_extractor = EndpointSpanExtractor(
embedder.get_output_dim(),
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size,
bucket_widths=False,
)
# New
else:
self._endpoint_span_extractor = EndpointSpanExtractor(
embedder.get_output_dim(),
combination="x,y",
)
if span_extractor_type == "attn":
self._endpoint_span_extractor = SelfAttentiveSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "max_pool":
self._endpoint_span_extractor = MaxPoolSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "mean_pool":
self._endpoint_span_extractor = MeanPoolSpanExtractor(
embedder.get_output_dim()
)
if span_extractor_type == "bi_affine":
token_emb_dim = embedder.get_output_dim()
assert self._endpoint_span_extractor.get_output_dim() == token_emb_dim * 2
self._endpoint_span_extractor = BiAffineSpanExtractor(
endpoint_extractor=self._endpoint_span_extractor,
input_size=token_emb_dim,
project_size=200,
output_size=200,
)
self._visualize_outputs = []
####################
# Set parameters.
self._embedder = embedder
self._loss_weights = loss_weights
self._max_span_width = max_span_width
self._display_metrics = self._get_display_metrics(target_task)
token_emb_dim = self._embedder.get_output_dim()
span_emb_dim = self._endpoint_span_extractor.get_output_dim()
# New
self._feature_size = feature_size
####################
# Create submodules.
modules = Params(modules)
# Helper function to create feedforward networks.
def make_feedforward(input_dim):
return FeedForward(
input_dim=input_dim,
num_layers=feedforward_params["num_layers"],
hidden_dims=feedforward_params["hidden_dims"],
activations=torch.nn.ReLU(),
dropout=feedforward_params["dropout"],
)
# Submodules
self._ner = NERTagger.from_params(
vocab=vocab,
make_feedforward=make_feedforward,
span_emb_dim=span_emb_dim,
feature_size=feature_size,
params=modules.pop("ner"),
)
# New
self.use_ner_embeds = use_ner_embeds
if self.use_ner_embeds:
num_ner_labels = sorted(self._ner._n_labels.values())[0]
self._ner_embedder = torch.nn.Linear(num_ner_labels, feature_size)
span_emb_dim += feature_size
params = dict(
vocab=vocab,
make_feedforward=make_feedforward,
span_emb_dim=span_emb_dim,
feature_size=feature_size,
params=modules.pop("relation"),
)
if self.relation_head_type == "proper":
self._relation = ProperRelationExtractor.from_params(**params)
else:
raise ValueError(f"Unknown: {dict(relation_head_type=relation_head_type)}")
####################
# Initialize text embedder and all submodules
for module in [self._ner, self._relation]:
module_initializer(module)
initializer(self)
@staticmethod
def _get_display_metrics(target_task):
"""
The `target` is the name of the task used to make early stopping decisions. Show metrics
related to this task.
"""
lookup = {
"ner": [
f"MEAN__{name}" for name in ["ner_precision", "ner_recall", "ner_f1"]
],
"relation": [
f"MEAN__{name}"
for name in ["relation_precision", "relation_recall", "relation_f1"]
],
}
if target_task not in lookup:
raise ValueError(
f"Invalied value {target_task} has been given as the target task."
)
return lookup[target_task]
@staticmethod
def _debatch(x):
# TODO: Get rid of this when I find a better way to do it.
return x if x is None else x.squeeze(0)
def text_to_span_embeds(self, text_embeddings: torch.Tensor, spans):
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
return span_embeddings
@overrides
def forward(
self,
text,
spans,
metadata,
ner_labels=None,
relation_labels=None,
dep_graph_labels=None, # New
tag_labels=None, # New
grid_labels=None, # New
):
"""
TODO: change this.
"""
# In AllenNLP, AdjacencyFields are passed in as floats. This fixes it.
if relation_labels is not None:
relation_labels = relation_labels.long()
# TODO: Multi-document minibatching isn't supported yet. For now, get rid of the
# extra dimension in the input tensors. Will return to this once the model runs.
if len(metadata) > 1:
raise NotImplementedError("Multi-document minibatching not yet supported.")
metadata = metadata[0]
spans = self._debatch(spans) # (n_sents, max_n_spans, 2)
ner_labels = self._debatch(ner_labels) # (n_sents, max_n_spans)
relation_labels = self._debatch(
relation_labels
) # (n_sents, max_n_spans, max_n_spans)
# Encode using BERT, then debatch.
# Since the data are batched, we use `num_wrapping_dims=1` to unwrap the document dimension.
# (1, n_sents, max_sententence_length, embedding_dim)
# TODO: Deal with the case where the input is longer than 512.
text_embeddings = self._embedder(text, num_wrapping_dims=1)
# (n_sents, max_n_wordpieces, embedding_dim)
text_embeddings = self._debatch(text_embeddings)
# (n_sents, max_sentence_length)
text_mask = self._debatch(
util.get_text_field_mask(text, num_wrapping_dims=1).float()
)
sentence_lengths = text_mask.sum(dim=1).long() # (n_sents)
span_mask = (spans[:, :, 0] >= 0).float() # (n_sents, max_n_spans)
# SpanFields return -1 when they are used as padding. As we do some comparisons based on
# span widths when we attend over the span representations that we generate from these
# indices, we need them to be <= 0. This is only relevant in edge cases where the number of
# spans we consider after the pruning stage is >= the total number of spans, because in this
# case, it is possible we might consider a masked span.
spans = F.relu(spans.float()).long() # (n_sents, max_n_spans, 2)
# New
text_embeds_b = text_embeddings
if self.use_double_mix_embedder:
# DoubleMixPTMEmbedder has to output single concatenated tensor so we need to split
embed_dim = self._embedder.get_output_dim()
assert text_embeddings.shape[-1] == embed_dim * 2
text_embeddings, text_embeds_b = text_embeddings[..., :embed_dim], text_embeddings[..., embed_dim:]
kwargs = dict(spans=spans)
span_embeddings = self.text_to_span_embeds(text_embeddings, **kwargs)
span_embeds_b = self.text_to_span_embeds(text_embeds_b, **kwargs)
# Make calls out to the modules to get results.
output_ner = {"loss": 0}
output_relation = {"loss": 0}
# Make predictions and compute losses for each module
if self._loss_weights["ner"] > 0:
output_ner = self._ner(
spans,
span_mask,
span_embeddings,
sentence_lengths,
ner_labels,
metadata,
)
ner_scores = output_ner.pop("ner_scores")
# New
if self._loss_weights["relation"] > 0:
if getattr(self._relation, "use_ner_scores_for_prune", False):
self._relation._ner_scores = ner_scores
self._relation._opinion_scores = output_ner["opinion_scores"]
self._relation._target_scores = output_ner["target_scores"]
self._relation._text_mask = text_mask
self._relation._text_embeds = text_embeddings
if getattr(self._relation, "use_span_loss_for_pruners", False):
self._relation._ner_labels = ner_labels
output_relation = self._relation(
spans,
span_mask,
# span_embeddings,
span_embeds_b,
sentence_lengths,
relation_labels,
metadata,
)
# Use `get` since there are some cases where the output dict won't have a loss - for
# instance, when doing prediction.
loss = (
+ self._loss_weights["ner"] * output_ner.get("loss", 0)
+ self._loss_weights["relation"] * output_relation.get("loss", 0)
)
# Multiply the loss by the weight multiplier for this document.
weight = metadata.weight if metadata.weight is not None else 1.0
loss *= torch.tensor(weight)
output_dict = dict(
relation=output_relation,
ner=output_ner,
)
output_dict["loss"] = loss
output_dict["metadata"] = metadata
return output_dict
def update_span_embeddings(
self,
span_embeddings,
span_mask,
top_span_embeddings,
top_span_mask,
top_span_indices,
):
# TODO(Ulme) Speed this up by tensorizing
new_span_embeddings = span_embeddings.clone()
for sample_nr in range(len(top_span_mask)):
for top_span_nr, span_nr in enumerate(top_span_indices[sample_nr]):
if (
top_span_mask[sample_nr, top_span_nr] == 0
or span_mask[sample_nr, span_nr] == 0
):
break
new_span_embeddings[sample_nr, span_nr] = top_span_embeddings[
sample_nr, top_span_nr
]
return new_span_embeddings
@overrides
def make_output_human_readable(self, output_dict: Dict[str, torch.Tensor]):
"""
Attach the predicted NER spans and relations to a copy of the document metadata,
so that predictions can be serialized alongside each sentence in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
"""
doc = copy.deepcopy(output_dict["metadata"])
if self._loss_weights["ner"] > 0:
for predictions, sentence in zip(output_dict["ner"]["predictions"], doc):
sentence.predicted_ner = predictions
if self._loss_weights["relation"] > 0:
for predictions, sentence in zip(
output_dict["relation"]["predictions"], doc
):
sentence.predicted_relations = predictions
return doc
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
Get all metrics from all modules. For the ones that shouldn't be displayed, prefix their
keys with an underscore.
"""
metrics_ner = self._ner.get_metrics(reset=reset)
metrics_relation = self._relation.get_metrics(reset=reset)
# Make sure that there aren't any conflicting names.
metric_names = (
list(metrics_ner.keys())
+ list(metrics_relation.keys())
)
assert len(set(metric_names)) == len(metric_names)
all_metrics = dict(
list(metrics_ner.items())
+ list(metrics_relation.items())
)
# If no list of desired metrics given, display them all.
if self._display_metrics is None:
return all_metrics
# Otherwise only display the selected ones.
res = {}
for k, v in all_metrics.items():
if k in self._display_metrics:
res[k] = v
else:
new_k = "_" + k
res[new_k] = v
return res
| 17,657 | 35.941423 | 111 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/entity_beam_pruner.py | """
This is basically a copy of AllenNLP's Pruner module, but with support for entity beams.
"""
from typing import Tuple, Union
from overrides import overrides
import torch
from allennlp.nn import util
from allennlp.modules import TimeDistributed
def make_pruner(scorer, entity_beam=False, gold_beam=False):
"""
Create a pruner that either takes outputs of other scorers (i.e. entity beam), or uses its own
scorer (the `item_scorer` constructed below).
"""
item_scorer = torch.nn.Sequential(
TimeDistributed(scorer),
TimeDistributed(torch.nn.Linear(scorer.get_output_dim(), 1)),
)
min_score_to_keep = 1e-10 if entity_beam else None
return Pruner(item_scorer, entity_beam, gold_beam, min_score_to_keep)
class Pruner(torch.nn.Module):
"""
This module scores and prunes items in a list using a parameterised scoring function and a
threshold.
Parameters
----------
scorer : ``torch.nn.Module``, required.
A module which, given a tensor of shape (batch_size, num_items, embedding_size),
produces a tensor of shape (batch_size, num_items, 1), representing a scalar score
per item in the tensor.
entity_beam: bool, optional.
If True, use class scores output from another module instead of using own scorer.
gold_beam: bool, optional.
If True, use gold arguments.
min_score_to_keep : float, optional.
If given, only keep items that score at least this high.
"""
def __init__(
self,
scorer: torch.nn.Module,
entity_beam: bool = False,
gold_beam: bool = False,
min_score_to_keep: float = None,
use_external_score: bool = False,
) -> None:
super().__init__()
# If gold beam is on, then entity beam must be off and min_score_to_keep must be None.
assert not (gold_beam and ((min_score_to_keep is not None) or entity_beam))
self._scorer = scorer
self._entity_beam = entity_beam
self._gold_beam = gold_beam
self._min_score_to_keep = min_score_to_keep
self._use_external_score = use_external_score
if self._use_external_score:
self._scorer = None
self._scores = None
def set_external_score(self, x: torch.Tensor):
self._scores = x
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
class_scores: torch.FloatTensor = None,
gold_labels: torch.long = None,
extra_scores: torch.FloatTensor = None, # Scores to add to scorer output
) -> Tuple[
torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor
]:
"""
Extracts the top-k scoring items with respect to the scorer. We additionally return
the indices of the top-k in their original order, not ordered by score, so that downstream
components can rely on the original ordering (e.g., for knowing what spans are valid
antecedents in a coreference resolution model). May use the same k for all sentences in
minibatch, or different k for each.
Parameters
----------
embeddings : ``torch.FloatTensor``, required.
A tensor of shape (batch_size, num_items, embedding_size), containing an embedding for
each item in the list that we want to prune.
mask : ``torch.LongTensor``, required.
A tensor of shape (batch_size, num_items), denoting unpadded elements of
``embeddings``.
num_items_to_keep : ``Union[int, torch.LongTensor]``, required.
If a tensor of shape (batch_size), specifies the number of items to keep for each
individual sentence in minibatch.
If an int, keep the same number of items for all sentences.
class_scores:
Class scores to be used with entity beam.
gold_labels: If in debugging mode, use gold labels to get beam.
Returns
-------
top_embeddings : ``torch.FloatTensor``
The representations of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, embedding_size).
top_mask : ``torch.LongTensor``
The corresponding mask for ``top_embeddings``.
Has shape (batch_size, max_num_items_to_keep).
top_indices : ``torch.IntTensor``
The indices of the top-k scoring items into the original ``embeddings``
tensor. This is returned because it can be useful to retain pointers to
the original items, if each item is being scored by multiple distinct
scorers, for instance. Has shape (batch_size, max_num_items_to_keep).
top_item_scores : ``torch.FloatTensor``
The values of the top-k scoring items.
Has shape (batch_size, max_num_items_to_keep, 1).
num_items_kept
"""
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(num_items_to_keep, int):
batch_size = mask.size(0)
# Put the tensor on same device as the mask.
num_items_to_keep = num_items_to_keep * torch.ones(
[batch_size], dtype=torch.long, device=mask.device
)
mask = mask.unsqueeze(-1)
num_items = embeddings.size(1)
num_items_to_keep = torch.clamp(num_items_to_keep, max=num_items)
# Shape: (batch_size, num_items, 1)
# If entity beam is on, use the class scores. Else ignore them and use the scorer.
if self._entity_beam:
scores, _ = class_scores.max(dim=-1)
scores = scores.unsqueeze(-1)
# If gold beam is on, give a score of 0 wherever the gold label is non-zero (indicating a
# non-null label), otherwise give a large negative number.
elif self._gold_beam:
scores = torch.where(
gold_labels > 0,
torch.zeros_like(gold_labels, dtype=torch.float),
-1e20 * torch.ones_like(gold_labels, dtype=torch.float),
)
scores = scores.unsqueeze(-1)
else:
if self._use_external_score:
scores = self._scores
else:
scores = self._scorer(embeddings)
if extra_scores is not None:
# Assumes extra_scores is already in [0, 1] range
scores = scores.sigmoid() + extra_scores
# If we're only keeping items that score above a given threshold, change the number of kept
# items here.
if self._min_score_to_keep is not None:
num_good_items = torch.sum(
scores > self._min_score_to_keep, dim=1
).squeeze()
num_items_to_keep = torch.min(num_items_to_keep, num_good_items)
# If gold beam is on, keep the gold items.
if self._gold_beam:
num_items_to_keep = torch.sum(gold_labels > 0, dim=1)
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
if scores.size(-1) != 1 or scores.dim() != 3:
raise ValueError(
f"The scorer passed to Pruner must produce a tensor of shape"
f"(batch_size, num_items, 1), but found shape {scores.size()}"
)
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
# NOTE(`mask` needs to be a byte tensor now.)
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
class TwoScorePruner(torch.nn.Module):
"""
The scorer output has 2 columns instead of 1, giving a separate invalid/valid score for each span.
This way the "invalid" span score can be added to the "invalid" relation score,
and the "valid" span score to the pos/neg/neu relation scores.
Internally both columns are softmax-normalized and the "valid" column is used for top-k sorting,
but the returned span scores are the un-normalized logits.
"""
def __init__(self, scorer: torch.nn.Module) -> None:
super().__init__()
self._scorer = scorer
self.output_size = 2
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
) -> Tuple[
torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor
]:
# If an int was given for number of items to keep, construct tensor by repeating the value.
if isinstance(num_items_to_keep, int):
batch_size = mask.size(0)
# Put the tensor on same device as the mask.
num_items_to_keep = num_items_to_keep * torch.ones(
[batch_size], dtype=torch.long, device=mask.device
)
mask = mask.unsqueeze(-1)
num_items = embeddings.size(1)
output_scores = self._scorer(embeddings)
assert output_scores.shape[-1] == self.output_size
scores = output_scores.softmax(dim=-1)[..., [1]] # Normalize for sorting
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
if scores.size(-1) != 1 or scores.dim() != 3:
raise ValueError(
f"The scorer passed to Pruner must produce a tensor of shape"
f"(batch_size, num_items, 1), but found shape {scores.size()}"
)
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
# NOTE(`mask` needs to be a byte tensor now.)
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(output_scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
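# ClassifyMaskPruner ignores the requested k and instead keeps every span whose sigmoid score
# clears the threshold (always at least one per sentence), returned in original order.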
class ClassifyMaskPruner(Pruner):
def __init__(self, scorer: torch.nn.Module, threshold=0.5, **kwargs):
super().__init__(scorer, **kwargs)
self._threshold = threshold
@overrides
def forward(
self, # pylint: disable=arguments-differ
embeddings: torch.FloatTensor,
mask: torch.LongTensor,
num_items_to_keep: Union[int, torch.LongTensor],
class_scores: torch.FloatTensor = None,
        gold_labels: torch.LongTensor = None,
extra_scores: torch.FloatTensor = None, # Scores to add to scorer output
) -> Tuple[
        torch.FloatTensor, torch.LongTensor, torch.LongTensor, torch.FloatTensor, torch.LongTensor
]:
mask = mask.unsqueeze(-1)
scores = self._scorer(embeddings)
bs, num_items, size = scores.shape
assert size == 1
if extra_scores is not None:
# Assumes extra_scores is already in [0, 1] range
scores = scores.sigmoid() + extra_scores
# Make sure that we don't select any masked items by setting their scores to be very
# negative. These are logits, typically, so -1e20 should be plenty negative.
        # NOTE: `mask` is cast to a boolean tensor before masking.
scores = util.replace_masked_values(scores, mask.bool(), -1e20)
keep = torch.gt(scores.sigmoid(), self._threshold).long()
num_items_to_keep = keep.sum(dim=1).view(bs)
num_items_to_keep = torch.clamp(num_items_to_keep, min=1)
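        # Worked example (editor's illustration, not from the original code): with a 0.5
        # threshold and per-item sigmoid scores [[0.9, 0.2, 0.7], [0.1, 0.3, 0.4]], `keep`
        # is [[1, 0, 1], [0, 0, 0]], so num_items_to_keep is [2, 0] and is clamped to
        # [2, 1] to guarantee at least one span per sentence.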
# import logging
# logging.info(dict(num_items_to_keep=num_items_to_keep))
# Always keep at least one item to avoid edge case with empty matrix.
max_items_to_keep = max(num_items_to_keep.max().item(), 1)
# Shape: (batch_size, max_num_items_to_keep, 1)
_, top_indices = scores.topk(max_items_to_keep, 1)
# Mask based on number of items to keep for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
top_indices_mask = util.get_mask_from_sequence_lengths(
num_items_to_keep, max_items_to_keep
)
top_indices_mask = top_indices_mask.bool()
# Shape: (batch_size, max_num_items_to_keep)
top_indices = top_indices.squeeze(-1)
# Fill all masked indices with largest "top" index for that sentence, so that all masked
# indices will be sorted to the end.
# Shape: (batch_size, 1)
fill_value, _ = top_indices.max(dim=1)
fill_value = fill_value.unsqueeze(-1)
# Shape: (batch_size, max_num_items_to_keep)
top_indices = torch.where(top_indices_mask, top_indices, fill_value)
# Now we order the selected indices in increasing order with
# respect to their indices (and hence, with respect to the
# order they originally appeared in the ``embeddings`` tensor).
top_indices, _ = torch.sort(top_indices, 1)
# Shape: (batch_size * max_num_items_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select items for each element in the batch.
flat_top_indices = util.flatten_and_batch_shift_indices(top_indices, num_items)
# Shape: (batch_size, max_num_items_to_keep, embedding_size)
top_embeddings = util.batched_index_select(
embeddings, top_indices, flat_top_indices
)
# Combine the masks on spans that are out-of-bounds, and the mask on spans that are outside
# the top k for each sentence.
# Shape: (batch_size, max_num_items_to_keep)
sequence_mask = util.batched_index_select(mask, top_indices, flat_top_indices)
sequence_mask = sequence_mask.squeeze(-1).bool()
top_mask = top_indices_mask & sequence_mask
top_mask = top_mask.long()
# Shape: (batch_size, max_num_items_to_keep, 1)
top_scores = util.batched_index_select(scores, top_indices, flat_top_indices)
return top_embeddings, top_mask, top_indices, top_scores, num_items_to_keep
| 18,631 | 43.46778 | 99 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/models/__init__.py | from span_model.models.span_model import SpanModel
| 51 | 25 | 50 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/predictors/span_model.py | from typing import List
import numpy as np
import warnings
from overrides import overrides
import json
from allennlp.common.util import JsonDict
from allennlp.nn import util
from allennlp.data import Batch
from allennlp.data import DatasetReader
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
@Predictor.register("span_model")
class SpanModelPredictor(Predictor):
"""
Predictor for SpanModel model.
If model was trained on coref, prediction is done on a whole document at
once. This risks overflowing memory on large documents.
If the model was trained without coref, prediction is done by sentence.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader) -> None:
super().__init__(model, dataset_reader)
def predict(self, document):
return self.predict_json({"document": document})
def predict_tokenized(self, tokenized_document: List[str]) -> JsonDict:
instance = self._words_list_to_instance(tokenized_document)
return self.predict_instance(instance)
@overrides
def dump_line(self, outputs):
# Need to override to tell Python how to deal with Numpy ints.
return json.dumps(outputs, default=int) + "\n"
# TODO: Can this be implemented in `forward_on_instance` instead?
@overrides
def predict_instance(self, instance):
"""
An instance is an entire document, represented as a list of sentences.
"""
model = self._model
cuda_device = model._get_prediction_device()
# Try to predict this batch.
try:
dataset = Batch([instance])
dataset.index_instances(model.vocab)
model_input = util.move_to_device(dataset.as_tensor_dict(), cuda_device)
prediction = model.make_output_human_readable(
model(**model_input)
).to_json()
# If we run out of GPU memory, warn user and indicate that this document failed.
# This way, prediction doesn't grind to a halt every time we run out of GPU.
except RuntimeError as err:
# doc_key, dataset, sentences, message
metadata = instance["metadata"].metadata
doc_key = metadata.doc_key
msg = (
f"Encountered a RunTimeError on document {doc_key}. Skipping this example."
f" Error message:\n{err.args[0]}."
)
warnings.warn(msg)
prediction = metadata.to_json()
prediction["_FAILED_PREDICTION"] = True
return prediction
| 2,612 | 34.310811 | 91 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/predictors/__init__.py | from span_model.predictors.span_model import SpanModelPredictor
| 64 | 31.5 | 63 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/data/__init__.py | from span_model.data.dataset_readers.span_model import SpanModelReader
from span_model.data.dataset_readers.document import Document
| 133 | 43.666667 | 70 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/data/dataset_readers/document.py | from span_model.models.shared import fields_to_batches, batches_to_fields
import copy
import numpy as np
import re
import json
def format_float(x):
return round(x, 4)
class SpanCrossesSentencesError(ValueError):
pass
def get_sentence_of_span(span, sentence_starts, doc_tokens):
"""
Return the index of the sentence that the span is part of.
"""
# Inclusive sentence ends
sentence_ends = [x - 1 for x in sentence_starts[1:]] + [doc_tokens - 1]
in_between = [
span[0] >= start and span[1] <= end
for start, end in zip(sentence_starts, sentence_ends)
]
if sum(in_between) != 1:
raise SpanCrossesSentencesError
the_sentence = in_between.index(True)
return the_sentence
class Dataset:
def __init__(self, documents):
self.documents = documents
def __getitem__(self, i):
return self.documents[i]
def __len__(self):
return len(self.documents)
def __repr__(self):
return f"Dataset with {self.__len__()} documents."
@classmethod
def from_jsonl(cls, fname):
documents = []
with open(fname, "r") as f:
for line in f:
doc = Document.from_json(json.loads(line))
documents.append(doc)
return cls(documents)
def to_jsonl(self, fname):
to_write = [doc.to_json() for doc in self]
with open(fname, "w") as f:
for entry in to_write:
print(json.dumps(entry), file=f)
class Document:
def __init__(
self,
doc_key,
dataset,
sentences,
weight=None,
):
self.doc_key = doc_key
self.dataset = dataset
self.sentences = sentences
self.weight = weight
@classmethod
def from_json(cls, js):
"Read in from json-loaded dict."
cls._check_fields(js)
doc_key = js["doc_key"]
dataset = js.get("dataset")
entries = fields_to_batches(
js,
[
"doc_key",
"dataset",
"weight",
],
)
sentence_lengths = [len(entry["sentences"]) for entry in entries]
sentence_starts = np.cumsum(sentence_lengths)
sentence_starts = np.roll(sentence_starts, 1)
sentence_starts[0] = 0
sentence_starts = sentence_starts.tolist()
sentences = [
Sentence(entry, sentence_start, sentence_ix)
for sentence_ix, (entry, sentence_start) in enumerate(
zip(entries, sentence_starts)
)
]
# Get the loss weight for this document.
weight = js.get("weight", None)
return cls(
doc_key,
dataset,
sentences,
weight,
)
@staticmethod
def _check_fields(js):
"Make sure we only have allowed fields."
allowed_field_regex = (
"doc_key|dataset|sentences|weight|.*ner$|"
".*relations$|.*clusters$|.*events$|^_.*"
)
allowed_field_regex = re.compile(allowed_field_regex)
unexpected = []
for field in js.keys():
if not allowed_field_regex.match(field):
unexpected.append(field)
if unexpected:
msg = f"The following unexpected fields should be prefixed with an underscore: {', '.join(unexpected)}."
raise ValueError(msg)
def to_json(self):
"Write to json dict."
res = {"doc_key": self.doc_key, "dataset": self.dataset}
sents_json = [sent.to_json() for sent in self]
fields_json = batches_to_fields(sents_json)
res.update(fields_json)
if self.weight is not None:
res["weight"] = self.weight
return res
# TODO: Write a unit test to make sure this does the correct thing.
def split(self, max_tokens_per_doc):
"""
Greedily split a long document into smaller documents, each shorter than
`max_tokens_per_doc`. Each split document will get the same weight as its parent.
"""
# If the document is already short enough, return it as a list with a single item.
if self.n_tokens <= max_tokens_per_doc:
return [self]
sentences = copy.deepcopy(self.sentences)
sentence_groups = []
current_group = []
group_length = 0
sentence_tok_offset = 0
sentence_ix_offset = 0
for sentence in sentences:
# Can't deal with single sentences longer than the limit.
if len(sentence) > max_tokens_per_doc:
msg = f"Sentence \"{''.join(sentence.text)}\" has more than {max_tokens_per_doc} tokens. Please split this sentence."
raise ValueError(msg)
if group_length + len(sentence) <= max_tokens_per_doc:
# If we're not at the limit, add it to the current sentence group.
sentence.sentence_start -= sentence_tok_offset
sentence.sentence_ix -= sentence_ix_offset
current_group.append(sentence)
group_length += len(sentence)
else:
# Otherwise, start a new sentence group and adjust sentence offsets.
sentence_groups.append(current_group)
sentence_tok_offset = sentence.sentence_start
sentence_ix_offset = sentence.sentence_ix
sentence.sentence_start -= sentence_tok_offset
sentence.sentence_ix -= sentence_ix_offset
current_group = [sentence]
group_length = len(sentence)
# Add the final sentence group.
sentence_groups.append(current_group)
# Create a separate document for each sentence group.
doc_keys = [f"{self.doc_key}_SPLIT_{i}" for i in range(len(sentence_groups))]
res = [
self.__class__(
doc_key,
self.dataset,
sentence_group,
self.weight,
)
for doc_key, sentence_group in zip(doc_keys, sentence_groups)
]
return res
def __repr__(self):
return "\n".join(
[
str(i) + ": " + " ".join(sent.text)
for i, sent in enumerate(self.sentences)
]
)
def __getitem__(self, ix):
return self.sentences[ix]
def __len__(self):
return len(self.sentences)
def print_plaintext(self):
for sent in self:
print(" ".join(sent.text))
@property
def n_tokens(self):
return sum([len(sent) for sent in self.sentences])
class Sentence:
def __init__(self, entry, sentence_start, sentence_ix):
self.sentence_start = sentence_start
self.sentence_ix = sentence_ix
self.text = entry["sentences"]
# Metadata fields are prefixed with a `_`.
self.metadata = {k: v for k, v in entry.items() if re.match("^_", k)}
if "ner" in entry:
self.ner = [NER(this_ner, self) for this_ner in entry["ner"]]
self.ner_dict = {entry.span.span_sent: entry.label for entry in self.ner}
else:
self.ner = None
self.ner_dict = None
# Predicted ner.
if "predicted_ner" in entry:
self.predicted_ner = [
PredictedNER(this_ner, self) for this_ner in entry["predicted_ner"]
]
else:
self.predicted_ner = None
# Store relations.
if "relations" in entry:
self.relations = [
Relation(this_relation, self) for this_relation in entry["relations"]
]
relation_dict = {}
for rel in self.relations:
key = (rel.pair[0].span_sent, rel.pair[1].span_sent)
relation_dict[key] = rel.label
self.relation_dict = relation_dict
else:
self.relations = None
self.relation_dict = None
# Predicted relations.
if "predicted_relations" in entry:
self.predicted_relations = [
PredictedRelation(this_relation, self)
for this_relation in entry["predicted_relations"]
]
else:
self.predicted_relations = None
def to_json(self):
res = {"sentences": self.text}
if self.ner is not None:
res["ner"] = [entry.to_json() for entry in self.ner]
if self.predicted_ner is not None:
res["predicted_ner"] = [entry.to_json() for entry in self.predicted_ner]
if self.relations is not None:
res["relations"] = [entry.to_json() for entry in self.relations]
if self.predicted_relations is not None:
res["predicted_relations"] = [
entry.to_json() for entry in self.predicted_relations
]
for k, v in self.metadata.items():
res[k] = v
return res
def __repr__(self):
the_text = " ".join(self.text)
the_lengths = [len(x) for x in self.text]
tok_ixs = ""
for i, offset in enumerate(the_lengths):
true_offset = offset if i < 10 else offset - 1
tok_ixs += str(i)
tok_ixs += " " * true_offset
return the_text + "\n" + tok_ixs
def __len__(self):
return len(self.text)
class Span:
def __init__(self, start, end, sentence, sentence_offsets=False):
# The `start` and `end` are relative to the document. We convert them to be relative to the
# sentence.
self.sentence = sentence
# Need to store the sentence text to make span objects hashable.
self.sentence_text = " ".join(sentence.text)
self.start_sent = start if sentence_offsets else start - sentence.sentence_start
self.end_sent = end if sentence_offsets else end - sentence.sentence_start
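        # Worked example (editor's illustration, not part of the original code): if
        # sentence.sentence_start == 10 and the document-level span is (12, 14), the
        # sentence-level span becomes (2, 4); with sentence_offsets=True the input is
        # assumed to already be sentence-relative.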
@property
def start_doc(self):
return self.start_sent + self.sentence.sentence_start
@property
def end_doc(self):
return self.end_sent + self.sentence.sentence_start
@property
def span_doc(self):
return (self.start_doc, self.end_doc)
@property
def span_sent(self):
return (self.start_sent, self.end_sent)
@property
def text(self):
return self.sentence.text[self.start_sent : self.end_sent + 1]
def __repr__(self):
return str((self.start_sent, self.end_sent, self.text))
def __eq__(self, other):
return (
self.span_doc == other.span_doc
and self.span_sent == other.span_sent
and self.sentence == other.sentence
)
def __hash__(self):
tup = self.span_sent + (self.sentence_text,)
return hash(tup)
class Token:
def __init__(self, ix, sentence, sentence_offsets=False):
self.sentence = sentence
self.ix_sent = ix if sentence_offsets else ix - sentence.sentence_start
@property
def ix_doc(self):
return self.ix_sent + self.sentence.sentence_start
@property
def text(self):
return self.sentence.text[self.ix_sent]
def __repr__(self):
return str((self.ix_sent, self.text))
class NER:
def __init__(self, ner, sentence, sentence_offsets=False):
self.span = Span(ner[0], ner[1], sentence, sentence_offsets)
self.label = ner[2]
def __repr__(self):
return f"{self.span.__repr__()}: {self.label}"
def __eq__(self, other):
return self.span == other.span and self.label == other.label
def to_json(self):
return list(self.span.span_doc) + [self.label]
class PredictedNER(NER):
def __init__(self, ner, sentence, sentence_offsets=False):
"The input should be a list: [span_start, span_end, label, raw_score, softmax_score]."
super().__init__(ner, sentence, sentence_offsets)
self.raw_score = ner[3]
self.softmax_score = ner[4]
def __repr__(self):
return super().__repr__() + f" with confidence {self.softmax_score:0.4f}"
def to_json(self):
return super().to_json() + [
format_float(self.raw_score),
format_float(self.softmax_score),
]
class Relation:
def __init__(self, relation, sentence, sentence_offsets=False):
start1, end1 = relation[0], relation[1]
start2, end2 = relation[2], relation[3]
label = relation[4]
span1 = Span(start1, end1, sentence, sentence_offsets)
span2 = Span(start2, end2, sentence, sentence_offsets)
self.pair = (span1, span2)
self.label = label
def __repr__(self):
return f"{self.pair[0].__repr__()}, {self.pair[1].__repr__()}: {self.label}"
def __eq__(self, other):
return (self.pair == other.pair) and (self.label == other.label)
def to_json(self):
return list(self.pair[0].span_doc) + list(self.pair[1].span_doc) + [self.label]
class PredictedRelation(Relation):
def __init__(self, relation, sentence, sentence_offsets=False):
"Input format: [start_1, end_1, start_2, end_2, label, raw_score, softmax_score]."
super().__init__(relation, sentence, sentence_offsets)
self.raw_score = relation[5]
self.softmax_score = relation[6]
def __repr__(self):
return super().__repr__() + f" with confidence {self.softmax_score:0.4f}"
def to_json(self):
return super().to_json() + [
format_float(self.raw_score),
format_float(self.softmax_score),
] | 13,593 | 30.836066 | 133 | py |
DMASTE | DMASTE-main/Span-ASTE/span_model/data/dataset_readers/span_model.py | import json
import logging
import pickle as pkl
import warnings
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Union
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import enumerate_spans
from allennlp.data.fields import (AdjacencyField, LabelField, ListField,
MetadataField, SequenceLabelField, SpanField,
TextField)
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from overrides import overrides
from span_model.data.dataset_readers.document import Document, Sentence
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# New
import sys
sys.path.append("aste")
from data_utils import BioesTagMaker
from pydantic import BaseModel
class Stats(BaseModel):
entity_total: int = 0
entity_drop: int = 0
relation_total: int = 0
relation_drop: int = 0
graph_total: int = 0
graph_edges: int = 0
grid_total: int = 0
grid_paired: int = 0
class SpanModelDataException(Exception):
pass
@DatasetReader.register("span_model")
class SpanModelReader(DatasetReader):
"""
    Reads a single JSON-formatted file. This uses the same file format as the
    SciERC data, but preprocessed for this task.
"""
def __init__(
self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
# New
self.stats = Stats()
self.is_train = False
self.tag_maker = BioesTagMaker()
print("#" * 80)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as f:
lines = f.readlines()
self.is_train = "train" in file_path # New
for line in lines:
# Loop over the documents.
doc_text = json.loads(line)
instance = self.text_to_instance(doc_text)
yield instance
# New
print(dict(file_path=file_path, stats=self.stats))
self.stats = Stats()
def _too_long(self, span):
return span[1] - span[0] + 1 > self._max_span_width
def _process_ner(self, span_tuples, sent):
ner_labels = [""] * len(span_tuples)
for span, label in sent.ner_dict.items():
if self._too_long(span):
continue
# New
self.stats.entity_total += 1
if span not in span_tuples:
self.stats.entity_drop += 1
continue
ix = span_tuples.index(span)
ner_labels[ix] = label
return ner_labels
def _process_tags(self, sent) -> List[str]:
if not sent.ner_dict:
return []
spans, labels = zip(*sent.ner_dict.items())
return self.tag_maker.run(spans, labels, num_tokens=len(sent.text))
def _process_relations(self, span_tuples, sent):
relations = []
relation_indices = []
# Loop over the gold spans. Look up their indices in the list of span tuples and store
# values.
for (span1, span2), label in sent.relation_dict.items():
# If either span is beyond the max span width, skip it.
if self._too_long(span1) or self._too_long(span2):
continue
# New
self.stats.relation_total += 1
if (span1 not in span_tuples) or (span2 not in span_tuples):
self.stats.relation_drop += 1
continue
ix1 = span_tuples.index(span1)
ix2 = span_tuples.index(span2)
relation_indices.append((ix1, ix2))
relations.append(label)
return relations, relation_indices
def _process_grid(self, sent):
indices = []
for ((a_start, a_end), (b_start, b_end)), label in sent.relation_dict.items():
for i in [a_start, a_end]:
for j in [b_start, b_end]:
indices.append((i, j))
indices = sorted(set(indices))
assert indices
self.stats.grid_paired += len(indices)
self.stats.grid_total += len(sent.text) ** 2
return indices
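    # Worked example for `_process_grid` above (editor's illustration, not from the
    # original code): a single relation between spans (1, 2) and (4, 5) contributes the
    # grid cells (1, 4), (1, 5), (2, 4), (2, 5), i.e. every pairing of the two span
    # boundaries.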
def _process_sentence(self, sent: Sentence, dataset: str):
# Get the sentence text and define the `text_field`.
sentence_text = [self._normalize_word(word) for word in sent.text]
text_field = TextField(
[Token(word) for word in sentence_text], self._token_indexers
)
# Enumerate spans.
spans = []
for start, end in enumerate_spans(
sentence_text, max_span_width=self._max_span_width
):
spans.append(SpanField(start, end, text_field))
# New
# spans = spans[:len(spans)//2] # bug: deliberately truncate
# labeled:Set[Tuple[int, int]] = set([span for span,label in sent.ner_dict.items()])
# for span_pair, label in sent.relation_dict.items():
# labeled.update(span_pair)
# existing:Set[Tuple[int, int]] = set([(s.span_start, s.span_end) for s in spans])
# for start, end in labeled:
# if (start, end) not in existing:
# spans.append(SpanField(start, end, text_field))
span_field = ListField(spans)
span_tuples = [(span.span_start, span.span_end) for span in spans]
# Convert data to fields.
# NOTE: The `ner_labels` and `coref_labels` would ideally have type
# `ListField[SequenceLabelField]`, where the sequence labels are over the `SpanField` of
# `spans`. But calling `as_tensor_dict()` fails on this specific data type. Matt G
# recognized that this is an AllenNLP API issue and suggested that represent these as
# `ListField[ListField[LabelField]]` instead.
fields = {}
fields["text"] = text_field
fields["spans"] = span_field
if sent.ner is not None:
ner_labels = self._process_ner(span_tuples, sent)
fields["ner_labels"] = ListField(
[
LabelField(entry, label_namespace=f"{dataset}__ner_labels")
for entry in ner_labels
]
)
fields["tag_labels"] = SequenceLabelField(
self._process_tags(sent),
text_field,
label_namespace=f"{dataset}__tag_labels",
)
if sent.relations is not None:
relation_labels, relation_indices = self._process_relations(
span_tuples, sent
)
fields["relation_labels"] = AdjacencyField(
indices=relation_indices,
sequence_field=span_field,
labels=relation_labels,
label_namespace=f"{dataset}__relation_labels",
)
fields["grid_labels"] = AdjacencyField(
indices=self._process_grid(sent),
sequence_field=text_field,
labels=None,
label_namespace=f"{dataset}__grid_labels",
)
return fields
def _process_sentence_fields(self, doc: Document):
# Process each sentence.
sentence_fields = [
self._process_sentence(sent, doc.dataset) for sent in doc.sentences
]
# Make sure that all sentences have the same set of keys.
first_keys = set(sentence_fields[0].keys())
for entry in sentence_fields:
if set(entry.keys()) != first_keys:
raise SpanModelDataException(
f"Keys do not match across sentences for document {doc.doc_key}."
)
# For each field, store the data from all sentences together in a ListField.
fields = {}
keys = sentence_fields[0].keys()
for key in keys:
this_field = ListField([sent[key] for sent in sentence_fields])
fields[key] = this_field
return fields
@overrides
def text_to_instance(self, doc_text: Dict[str, Any]):
"""
Convert a Document object into an instance.
"""
doc = Document.from_json(doc_text)
# Make sure there are no single-token sentences; these break things.
sent_lengths = [len(x) for x in doc.sentences]
if min(sent_lengths) < 2:
msg = (
f"Document {doc.doc_key} has a sentence with a single token or no tokens. "
"This may break the modeling code."
)
warnings.warn(msg)
fields = self._process_sentence_fields(doc)
fields["metadata"] = MetadataField(doc)
return Instance(fields)
@overrides
def _instances_from_cache_file(self, cache_filename):
with open(cache_filename, "rb") as f:
for entry in pkl.load(f):
yield entry
@overrides
def _instances_to_cache_file(self, cache_filename, instances):
with open(cache_filename, "wb") as f:
pkl.dump(instances, f, protocol=pkl.HIGHEST_PROTOCOL)
@staticmethod
def _normalize_word(word):
if word == "/." or word == "/?":
return word[1:]
else:
return word
| 9,644 | 33.819495 | 96 | py |
DMASTE | DMASTE-main/Span-ASTE/scripts/cross-domain/run_eq.py | import os
import sys
import time
import random
import threading
import itertools as it
source_list = ['electronics', 'home', 'beauty', 'fashion']
# source_list = ['_'.join(sorted(x)) for x in it.combinations(t_list, 2)] + ['_'.join(sorted(x)) for x in it.combinations(t_list, 3)]
target_list = ['book', 'grocery', 'pet', 'toy']
class Param:
def __init__(self, model_name, source,):
self.model_name = model_name
self.source = source
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
print(f'bash scripts/cross-domain/sub/{self.threadID}.sh')
os.system(f'bash scripts/cross-domain/sub/{self.threadID}.sh')
def main():
param_list = []
for source in source_list:
for model_name in range(5):
param = Param(model_name=model_name, source=source)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/sub')
os.makedirs('./scripts/cross-domain/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/base_eq.sh {p.source} {p.model_name} {i % num_device}\n')
f.close()
thread_list = []
worker = int(sys.argv[2])
for i in range(num_device):
thread = myThread(i + num_device * worker)
thread.start()
thread_list.append(thread)
time.sleep(2)
for t in thread_list:
t.join()
main() | 1,784 | 30.875 | 133 | py |
DMASTE | DMASTE-main/Span-ASTE/scripts/cross-domain/run_multi.py | import os
import sys
import time
import random
import threading
import itertools as it
t_list = ['electronics', 'home', 'beauty', 'fashion']
# source_list = ['_'.join(sorted(x)) for x in it.combinations(t_list, 2)] + ['_'.join(sorted(x)) for x in it.combinations(t_list, 3)]
source_list = ['_'.join(sorted(x)) for x in it.combinations(t_list, 4)]
target_list = ['book', 'grocery', 'pet', 'toy']
class Param:
def __init__(self, model_name, source,):
self.model_name = model_name
self.source = source
class myThread(threading.Thread):
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
print(f'bash scripts/cross-domain/sub/{self.threadID}.sh')
os.system(f'bash scripts/cross-domain/sub/{self.threadID}.sh')
def main():
param_list = []
for source in source_list:
for model_name in range(5):
param = Param(model_name=model_name, source=source)
param_list.append(param)
num_params = len(param_list)
random.seed(0)
param_list = random.sample(param_list, num_params)
num_batch = int(sys.argv[1])
num_device = 8
batch_size = num_params // num_batch
os.system('rm -r ./scripts/cross-domain/sub')
os.makedirs('./scripts/cross-domain/sub', exist_ok=True)
for i, p in enumerate(param_list):
f = open(f'./scripts/cross-domain/sub/{i % num_batch}.sh', 'a')
f.write(f'bash scripts/cross-domain/multi_base.sh {p.source} {p.model_name} {i % num_device}\n')
f.close()
# thread_list = []
# worker = int(sys.argv[2])
# for i in range(num_device):
# thread = myThread(i + num_device * worker)
# thread.start()
# thread_list.append(thread)
# time.sleep(2)
# for t in thread_list:
# t.join()
main() | 1,873 | 31.310345 | 133 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/main.py | import json
import shutil
import time
from os import remove
from pathlib import Path
from typing import List, Tuple, Optional
import _jsonnet # noqa
import pandas as pd
from fire import Fire
from pydantic import BaseModel
from data_utils import (
LabelEnum,
SplitEnum,
Sentence,
SentimentTriple,
Data,
ResultAnalyzer,
)
from evaluation import nereval, LinearInstance, FScore
from utils import Shell, hash_text, update_nested_dict
class SpanModelDocument(BaseModel):
sentences: List[List[str]]
ner: List[List[Tuple[int, int, str]]]
relations: List[List[Tuple[int, int, int, int, str]]]
doc_key: str
@property
def is_valid(self) -> bool:
return len(set(map(len, [self.sentences, self.ner, self.relations]))) == 1
@classmethod
def from_sentence(cls, x: Sentence):
ner: List[Tuple[int, int, str]] = []
for t in x.triples:
ner.append((t.o_start, t.o_end, LabelEnum.opinion))
ner.append((t.t_start, t.t_end, LabelEnum.target))
ner = sorted(set(ner), key=lambda n: n[0])
relations = [
(t.o_start, t.o_end, t.t_start, t.t_end, t.label) for t in x.triples
]
return cls(
sentences=[x.tokens],
ner=[ner],
relations=[relations],
doc_key=str(x.id),
)
class SpanModelPrediction(SpanModelDocument):
predicted_ner: List[List[Tuple[int, int, LabelEnum, float, float]]] = [
[]
] # If loss_weights["ner"] == 0.0
predicted_relations: List[List[Tuple[int, int, int, int, LabelEnum, float, float]]]
def to_sentence(self) -> Sentence:
for lst in [self.sentences, self.predicted_ner, self.predicted_relations]:
assert len(lst) == 1
triples = [
SentimentTriple(o_start=os, o_end=oe, t_start=ts, t_end=te, label=label)
for os, oe, ts, te, label, value, prob in self.predicted_relations[0]
]
return Sentence(
id=int(self.doc_key),
tokens=self.sentences[0],
pos=[],
weight=1,
is_labeled=False,
triples=triples,
spans=[lst[:3] for lst in self.predicted_ner[0]],
)
def update_instance(self, x: LinearInstance) -> LinearInstance:
x.set_prediction(self.to_sentence().to_instance().output)
return x
class SpanModelData(BaseModel):
root: Path
data_split: SplitEnum
documents: Optional[List[SpanModelDocument]]
@classmethod
def read(cls, path: Path) -> List[SpanModelDocument]:
docs = []
with open(path) as f:
for line in f:
line = line.strip()
raw: dict = json.loads(line)
docs.append(SpanModelDocument(**raw))
return docs
def load(self):
if self.documents is None:
path = self.root / f"{self.data_split}.json"
self.documents = self.read(path)
def dump(self, path: Path, sep="\n"):
for d in self.documents:
assert d.is_valid
with open(path, "w") as f:
f.write(sep.join([d.json() for d in self.documents]))
assert all(
[a.dict() == b.dict() for a, b in zip(self.documents, self.read(path))]
)
@classmethod
def from_data(cls, x: Data):
data = cls(root=x.root, data_split=x.data_split)
data.documents = [SpanModelDocument.from_sentence(s) for s in x.sentences]
return data
class SpanModelConfigMaker(BaseModel):
root: Path = Path("/tmp/config_maker")
def run(self, path_in: Path, **kwargs) -> Path:
self.root.mkdir(exist_ok=True)
path_out = self.root / path_in.name
config = json.loads(_jsonnet.evaluate_file(str(path_in)))
assert isinstance(config, dict)
for key, value in kwargs.items():
config = update_nested_dict(config, key, value)
with open(path_out, "w") as f:
f.write(json.dumps(config, indent=2))
return path_out
class SpanModelTrainer(BaseModel):
root: Path
train_kwargs: dict
path_config: Path = Path("training_config/aste.jsonnet").resolve()
repo_span_model: Path = Path(".").resolve()
output_dir: Optional[Path]
model_path: Optional[Path]
data_name: Optional[str]
task_name: Optional[str]
@property
def name(self) -> str:
hash_id = hash_text(str(self.train_kwargs))
return "_".join([self.task_name, self.data_name, hash_id])
def load(self, overwrite: bool):
if self.data_name is None:
self.data_name = self.root.stem
if self.task_name is None:
self.task_name = self.path_config.stem
if self.model_path is None:
self.model_path = Path(f"models/{self.name}/model.tar.gz")
if self.output_dir is None:
self.output_dir = Path(f"model_outputs/{self.name}")
if self.model_path.parent.exists() and overwrite:
print(dict(rmtree=self.model_path.parent))
shutil.rmtree(self.model_path.parent)
if self.output_dir.exists() and overwrite:
print(dict(rmtree=self.output_dir))
shutil.rmtree(self.output_dir)
self.output_dir.mkdir(exist_ok=True, parents=True)
print(self.json(indent=2))
def get_processed_data_path(self, data_split: SplitEnum) -> Path:
# Should match the path in .jsonnet config file
return self.output_dir / f"{data_split}.json"
def get_predict_path(self, data_split: SplitEnum) -> Path:
return self.output_dir / f"predict_{data_split}.jsonl"
def setup_data(self):
for data_split in [SplitEnum.train, SplitEnum.dev, SplitEnum.test]:
data = Data(root=self.root, data_split=data_split)
data.load()
new = SpanModelData.from_data(data)
new.dump(self.get_processed_data_path(data_split))
def train(self, overwrite=True):
self.load(overwrite=overwrite)
        # Skip training if a model already exists and we are not overwriting it.
        if not overwrite and self.model_path.exists():
return
self.setup_data()
kwargs = dict(self.train_kwargs)
data_map = dict(
train_data_path=SplitEnum.train,
validation_data_path=SplitEnum.dev,
test_data_path=SplitEnum.test,
)
for k, v in data_map.items():
kwargs[k] = str(self.get_processed_data_path(v).resolve())
kwargs.setdefault("seed", 0) # A bit sneaky to put "seed" in **kwargs but this is surgical
seed = kwargs.pop("seed")
for key in ["random_seed", "numpy_seed", "pytorch_seed"]:
kwargs[key] = seed
config_maker = SpanModelConfigMaker(root=self.output_dir)
path_config = config_maker.run(self.path_config, **kwargs).resolve()
shell = Shell()
shell.run(
f"cd {self.repo_span_model} && allennlp train {path_config}",
serialization_dir=self.model_path.parent,
include_package="span_model",
)
assert self.model_path.exists()
def predict(self, data_split: SplitEnum) -> Path:
self.load(overwrite=False)
path = self.get_predict_path(data_split)
if path.exists():
remove(path)
shell = Shell()
shell.run(
f"cd {self.repo_span_model} && allennlp predict {self.model_path}",
self.get_processed_data_path(data_split),
predictor="span_model",
include_package="span_model",
use_dataset_reader="",
output_file=path,
cuda_device=self.train_kwargs["trainer__cuda_device"],
silent="",
)
return path
def eval(self, data_split: SplitEnum) -> FScore:
data = Data(root=self.root, data_split=data_split)
data.load()
instances = [s.to_instance() for s in data.sentences]
path = self.predict(data_split)
with open(path) as f:
preds = [SpanModelPrediction(**json.loads(line.strip())) for line in f]
for i, p in zip(instances, preds):
p.update_instance(i)
pred_sents = [p.to_sentence() for p in preds]
for name, sents in dict(pred=pred_sents, gold=data.sentences).items():
path_out = self.output_dir / f"sentences_{data_split}_{name}.json"
print(dict(path_out=path_out))
with open(path_out, "w") as f:
f.write("\n".join([s.json() for s in sents]))
scorer = nereval()
analyzer = ResultAnalyzer()
analyzer.run(pred=pred_sents, gold=data.sentences)
return scorer.eval(instances) # noqa
def main_single(path: Path, overwrite=False, **kwargs):
trainer = SpanModelTrainer(root=path.resolve(), train_kwargs=kwargs)
trainer.train(overwrite=overwrite)
scores = {}
for data_split in [SplitEnum.dev, SplitEnum.test]:
scores[data_split] = trainer.eval(data_split=data_split)
return scores
def main(
root="aste/data/triplet_data",
names=("14lap",),
seeds=(0,),
sep=",",
name_out="results",
**kwargs,
):
print(json.dumps(locals(), indent=2))
records = {}
names = names if type(names) in {tuple, list} else names.split(sep)
paths = [Path(root) / n for n in names]
assert all([p.exists() for p in paths])
assert len(seeds) == len(paths)
for i, p in enumerate(paths):
start = time.time()
scores = main_single(p, overwrite=True, seed=seeds[i], **kwargs)
duration = time.time() - start
for k, v in scores.items():
row = dict(name=p.stem, k=k, score=str(v), duration=duration)
records.setdefault(k, []).append(row)
df = pd.DataFrame(records[k])
print(df)
path = Path(f"{name_out}_{k}.csv")
path.parent.mkdir(exist_ok=True)
df.to_csv(path, index=False)
print(dict(path_results=path))
if __name__ == "__main__":
Fire(main)
| 9,999 | 33.129693 | 99 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/wrapper.py | import json
import os
from pathlib import Path
from typing import List
import _jsonnet
from fire import Fire
from pydantic import BaseModel
from tqdm import tqdm
from data_utils import Data, SentimentTriple, SplitEnum
from main import SpanModelData, SpanModelPrediction
from utils import Shell, safe_divide
class SpanModel(BaseModel):
save_dir: str
random_seed: int
path_config_base: str = "training_config/config.jsonnet"
def save_temp_data(self, path_in: str, name: str, is_test: bool = False) -> Path:
path_temp = Path(self.save_dir) / "temp_data" / f"{name}.json"
path_temp = path_temp.resolve()
path_temp.parent.mkdir(exist_ok=True, parents=True)
data = Data.load_from_full_path(path_in)
if is_test:
# SpanModel error if s.triples is empty list
assert data.sentences is not None
for s in data.sentences:
s.triples = [SentimentTriple.make_dummy()]
span_data = SpanModelData.from_data(data)
span_data.dump(path_temp)
return path_temp
def fit(self, path_train: str, path_dev: str, random_seed):
self.random_seed = random_seed
weights_dir = Path(self.save_dir) / "weights"
weights_dir.mkdir(exist_ok=True, parents=True)
print(dict(weights_dir=weights_dir))
path_config = Path(self.save_dir) / "config.jsonnet"
config = json.loads(_jsonnet.evaluate_file(self.path_config_base))
for key in ["random_seed", "pytorch_seed", "numpy_seed"]:
assert key in config.keys()
config[key] = self.random_seed
print({key: self.random_seed})
for name, path in dict(
train=path_train, validation=path_dev, test=path_dev
).items():
key = f"{name}_data_path"
assert key in config.keys()
path_temp = self.save_temp_data(path, name)
config[key] = str(path_temp)
print({key: path_temp})
with open(path_config, "w") as f:
f.write(json.dumps(config, indent=2))
print(dict(path_config=path_config))
shell = Shell()
work_dir = Path(".").resolve()
shell.run(
f"cd {work_dir} && allennlp train {path_config}",
serialization_dir=str(weights_dir),
include_package="span_model",
)
def predict(self, path_in: str, path_out: str, device=0):
work_dir = Path(".").resolve()
path_model = Path(self.save_dir) / "weights" / "model.tar.gz"
path_temp_in = self.save_temp_data(path_in, "pred_in", is_test=True)
path_temp_out = Path(self.save_dir) / "temp_data" / "pred_out.json"
if path_temp_out.exists():
os.remove(path_temp_out)
shell = Shell()
shell.run(
f"cd {work_dir} && allennlp predict {path_model}",
str(path_temp_in),
predictor="span_model",
include_package="span_model",
use_dataset_reader="",
output_file=str(path_temp_out),
            cuda_device=device,
silent="",
)
with open(path_temp_out) as f:
preds = [SpanModelPrediction(**json.loads(line.strip())) for line in f]
data = Data(
root=Path(),
data_split=SplitEnum.test,
sentences=[p.to_sentence() for p in preds],
)
data.save_to_path(path_out)
def score(self, path_pred: str, path_gold: str) -> dict:
pred = Data.load_from_full_path(path_pred)
gold = Data.load_from_full_path(path_gold)
assert pred.sentences is not None
assert gold.sentences is not None
assert len(pred.sentences) == len(gold.sentences)
num_pred = 0
num_gold = 0
num_correct = 0
for i in range(len(gold.sentences)):
num_pred += len(pred.sentences[i].triples)
num_gold += len(gold.sentences[i].triples)
for p in pred.sentences[i].triples:
for g in gold.sentences[i].triples:
if p.dict() == g.dict():
num_correct += 1
precision = safe_divide(num_correct, num_pred)
recall = safe_divide(num_correct, num_gold)
info = dict(
path_pred=path_pred,
path_gold=path_gold,
precision=precision,
recall=recall,
score=safe_divide(2 * precision * recall, precision + recall),
)
return info
def run_train(path_train: str, path_dev: str, save_dir: str, random_seed: int):
print(dict(run_train=locals()))
if Path(save_dir).exists():
return
model = SpanModel(save_dir=save_dir, random_seed=random_seed)
    model.fit(path_train, path_dev, random_seed)
def run_train_many(save_dir_template: str, random_seeds: List[int], **kwargs):
for seed in tqdm(random_seeds):
save_dir = save_dir_template.format(seed)
run_train(save_dir=save_dir, random_seed=seed, **kwargs)
def run_eval(path_test: str, save_dir: str):
print(dict(run_eval=locals()))
model = SpanModel(save_dir=save_dir, random_seed=0)
path_pred = str(Path(save_dir) / "pred.txt")
model.predict(path_test, path_pred)
results = model.score(path_pred, path_test)
print(results)
return results
def run_eval_many(save_dir_template: str, random_seeds: List[int], **kwargs):
results = []
for seed in tqdm(random_seeds):
save_dir = save_dir_template.format(seed)
results.append(run_eval(save_dir=save_dir, **kwargs))
precision = sum(r["precision"] for r in results) / len(random_seeds)
recall = sum(r["recall"] for r in results) / len(random_seeds)
score = safe_divide(2 * precision * recall, precision + recall)
print(dict(precision=precision, recall=recall, score=score))
if __name__ == "__main__":
Fire()
| 5,884 | 33.617647 | 85 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/utils.py | import copy
import hashlib
import pickle
import subprocess
import time
from pathlib import Path
from typing import List, Set, Tuple, Union
from fire import Fire
from pydantic import BaseModel
class Shell(BaseModel):
verbose: bool = True
@classmethod
def format_kwargs(cls, **kwargs) -> str:
outputs = []
for k, v in kwargs.items():
k = k.replace("_", "-")
k = f"--{k}"
outputs.extend([k, str(v)])
return " ".join(outputs)
def run_command(self, command: str) -> str:
# Continuously print outputs for long-running commands
# Refer: https://fabianlee.org/2019/09/15/python-getting-live-output-from-subprocess-using-poll/
print(dict(command=command))
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
outputs = []
while True:
if process.poll() is not None:
break
o = process.stdout.readline().decode()
if o:
outputs.append(o)
if self.verbose:
print(o.strip())
return "".join(outputs)
def run(self, command: str, *args, **kwargs) -> str:
args = [str(a) for a in args]
command = " ".join([command] + args + [self.format_kwargs(**kwargs)])
return self.run_command(command)
def hash_text(x: str) -> str:
return hashlib.md5(x.encode()).hexdigest()
class Timer(BaseModel):
name: str = ""
start: float = 0.0
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
duration = round(time.time() - self.start, 3)
print(f"Timer {self.name}: {duration}s")
class PickleSaver(BaseModel):
path: Path
def dump(self, obj):
if not self.path.parent.exists():
self.path.parent.mkdir(exist_ok=True)
with open(self.path, "wb") as f:
pickle.dump(obj, f)
def load(self):
with Timer(name=str(self.path)):
with open(self.path, "rb") as f:
return pickle.load(f)
class FlexiModel(BaseModel):
class Config:
arbitrary_types_allowed = True
def get_simple_stats(numbers: List[Union[int, float]]):
return dict(
min=min(numbers),
max=max(numbers),
avg=sum(numbers) / len(numbers),
)
def count_joins(spans: Set[Tuple[int, int]]) -> int:
count = 0
for a_start, a_end in spans:
for b_start, b_end in spans:
if (a_start, a_end) == (b_start, b_end):
continue
if b_start <= a_start <= b_end + 1 or b_start - 1 <= a_end <= b_end:
count += 1
return count // 2
def update_nested_dict(d: dict, k: str, v, i=0, sep="__"):
d = copy.deepcopy(d)
keys = k.split(sep)
assert keys[i] in d.keys(), str(dict(keys=keys, d=d, i=i))
if i == len(keys) - 1:
orig = d[keys[i]]
if v != orig:
print(dict(updated_key=k, new_value=v, orig=orig))
d[keys[i]] = v
else:
d[keys[i]] = update_nested_dict(d=d[keys[i]], k=k, v=v, i=i + 1)
return d
def test_update_nested_dict():
d = dict(top=dict(middle_a=dict(last=1), middle_b=0))
print(update_nested_dict(d, k="top__middle_b", v=-1))
print(update_nested_dict(d, k="top__middle_a__last", v=-1))
print(update_nested_dict(d, k="top__middle_a__last", v=1))
def clean_up_triplet_data(path: str):
outputs = []
with open(path) as f:
for line in f:
sep = "####"
text, tags_t, tags_o, triplets = line.split(sep)
outputs.append(sep.join([text, " ", " ", triplets]))
with open(path, "w") as f:
f.write("".join(outputs))
def clean_up_many(pattern: str = "data/triplet_data/*/*.txt"):
for path in sorted(Path().glob(pattern)):
print(path)
clean_up_triplet_data(str(path))
def merge_data(
folders_in: List[str] = [
"aste/data/triplet_data/14res/",
"aste/data/triplet_data/15res/",
"aste/data/triplet_data/16res/",
],
folder_out: str = "aste/data/triplet_data/res_all/",
):
for name in ["train.txt", "dev.txt", "test.txt"]:
outputs = []
for folder in folders_in:
path = Path(folder) / name
with open(path) as f:
for line in f:
assert line.endswith("\n")
outputs.append(line)
path_out = Path(folder_out) / name
path_out.parent.mkdir(exist_ok=True, parents=True)
with open(path_out, "w") as f:
f.write("".join(outputs))
def safe_divide(a: float, b: float) -> float:
if a == 0 or b == 0:
return 0
return a / b
if __name__ == "__main__":
Fire()
| 4,805 | 26.306818 | 104 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/data_utils.py | import ast
import copy
import json
import os
from collections import Counter
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pandas as pd
from fire import Fire
from pydantic import BaseModel
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from evaluation import LinearInstance, TagReader, nereval
from utils import count_joins, get_simple_stats
RawTriple = Tuple[List[int], int, int, int, int]
Span = Tuple[int, int]
class SplitEnum(str, Enum):
train = "train"
dev = "dev"
test = "test"
class LabelEnum(str, Enum):
positive = "POS"
negative = "NEG"
neutral = "NEU"
opinion = "OPINION"
target = "TARGET"
@classmethod
def as_list(cls):
return [cls.neutral, cls.positive, cls.negative]
@classmethod
def i_to_label(cls, i: int):
return cls.as_list()[i]
@classmethod
def label_to_i(cls, label) -> int:
return cls.as_list().index(label)
class SentimentTriple(BaseModel):
o_start: int
o_end: int
t_start: int
t_end: int
label: LabelEnum
@classmethod
def make_dummy(cls):
return cls(o_start=0, o_end=0, t_start=0, t_end=0, label=LabelEnum.neutral)
@property
def opinion(self) -> Tuple[int, int]:
return self.o_start, self.o_end
@property
def target(self) -> Tuple[int, int]:
return self.t_start, self.t_end
@classmethod
def from_raw_triple(cls, x: RawTriple):
(o_start, o_end), polarity, direction, gap_a, gap_b = x
# Refer: TagReader
if direction == 0:
t_end = o_start - gap_a
t_start = o_start - gap_b
elif direction == 1:
t_start = gap_a + o_start
t_end = gap_b + o_start
else:
raise ValueError
return cls(
o_start=o_start,
o_end=o_end,
t_start=t_start,
t_end=t_end,
label=LabelEnum.i_to_label(polarity),
)
def to_raw_triple(self) -> RawTriple:
polarity = LabelEnum.label_to_i(self.label)
if self.t_start < self.o_start:
direction = 0
gap_a, gap_b = self.o_start - self.t_end, self.o_start - self.t_start
else:
direction = 1
gap_a, gap_b = self.t_start - self.o_start, self.t_end - self.o_start
return [self.o_start, self.o_end], polarity, direction, gap_a, gap_b
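    # Worked example (editor's illustration, not part of the original code): an opinion
    # span (5, 6) with target span (2, 3) and label POS encodes to ([5, 6], 1, 0, 2, 3):
    # direction 0 because the target precedes the opinion, gap_a = o_start - t_end = 2,
    # gap_b = o_start - t_start = 3, and polarity index 1 for POS; from_raw_triple
    # inverts this exactly.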
def as_text(self, tokens: List[str]) -> str:
opinion = " ".join(tokens[self.o_start : self.o_end + 1])
target = " ".join(tokens[self.t_start : self.t_end + 1])
return f"{opinion}-{target} ({self.label})"
class TripleHeuristic(BaseModel):
@staticmethod
def run(
opinion_to_label: Dict[Span, LabelEnum],
target_to_label: Dict[Span, LabelEnum],
) -> List[SentimentTriple]:
# For each target, pair with the closest opinion (and vice versa)
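        # Worked example (editor's illustration, not from the original code): with one
        # opinion span (1, 1) and target spans (4, 5) and (8, 8), the opinion pairs with
        # its closest target (4, 5), and each target also pairs with its closest opinion
        # (1, 1), giving the pairs (1,1)-(4,5) and (1,1)-(8,8) (assuming the stored
        # labels agree).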
spans_o = list(opinion_to_label.keys())
spans_t = list(target_to_label.keys())
pos_o = np.expand_dims(np.array(spans_o).mean(axis=-1), axis=1)
pos_t = np.expand_dims(np.array(spans_t).mean(axis=-1), axis=0)
dists = np.absolute(pos_o - pos_t)
raw_triples: Set[Tuple[int, int, LabelEnum]] = set()
closest = np.argmin(dists, axis=1)
for i, span in enumerate(spans_o):
raw_triples.add((i, int(closest[i]), opinion_to_label[span]))
closest = np.argmin(dists, axis=0)
for i, span in enumerate(spans_t):
raw_triples.add((int(closest[i]), i, target_to_label[span]))
triples = []
for i, j, label in raw_triples:
os, oe = spans_o[i]
ts, te = spans_t[j]
triples.append(
SentimentTriple(o_start=os, o_end=oe, t_start=ts, t_end=te, label=label)
)
return triples
class TagMaker(BaseModel):
@staticmethod
def run(spans: List[Span], labels: List[LabelEnum], num_tokens: int) -> List[str]:
raise NotImplementedError
class BioesTagMaker(TagMaker):
@staticmethod
def run(spans: List[Span], labels: List[LabelEnum], num_tokens: int) -> List[str]:
tags = ["O"] * num_tokens
for (start, end), lab in zip(spans, labels):
assert end >= start
length = end - start + 1
if length == 1:
tags[start] = f"S-{lab}"
else:
tags[start] = f"B-{lab}"
tags[end] = f"E-{lab}"
for i in range(start + 1, end):
tags[i] = f"I-{lab}"
return tags
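# Worked example for BioesTagMaker.run above (editor's illustration, assuming the
# str-valued enum formats to its value):
#   BioesTagMaker.run(spans=[(0, 0), (2, 4)],
#                     labels=[LabelEnum.target, LabelEnum.opinion], num_tokens=6)
#   yields ["S-TARGET", "O", "B-OPINION", "I-OPINION", "E-OPINION", "O"].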
class Sentence(BaseModel):
tokens: List[str]
pos: List[str]
weight: int
id: int
is_labeled: bool
triples: List[SentimentTriple]
spans: List[Tuple[int, int, LabelEnum]] = []
def extract_spans(self) -> List[Tuple[int, int, LabelEnum]]:
spans = []
for t in self.triples:
spans.append((t.o_start, t.o_end, LabelEnum.opinion))
spans.append((t.t_start, t.t_end, LabelEnum.target))
spans = sorted(set(spans))
return spans
@classmethod
def from_instance(cls, x: LinearInstance):
sentence = cls(
tokens=x.input,
weight=x.weight,
pos=x.output[0],
id=x.instance_id,
triples=[SentimentTriple.from_raw_triple(o) for o in x.output[1]],
is_labeled=x.is_labeled,
)
assert vars(x) == vars(sentence.to_instance())
return sentence
def to_instance(self) -> LinearInstance:
output = (self.pos, [t.to_raw_triple() for t in self.triples])
instance = LinearInstance(self.id, self.weight, self.tokens, output)
instance.is_labeled = self.is_labeled
return instance
def as_text(self) -> str:
tokens = list(self.tokens)
for t in self.triples:
tokens[t.o_start] = "(" + tokens[t.o_start]
tokens[t.o_end] = tokens[t.o_end] + ")"
tokens[t.t_start] = "[" + tokens[t.t_start]
tokens[t.t_end] = tokens[t.t_end] + "]"
return " ".join(tokens)
@classmethod
def from_line_format(cls, text: str):
front, back = text.split("####")
tokens = front.split(" ")
triples = []
for a, b, label in ast.literal_eval(back):
t = SentimentTriple(
t_start=a[0],
t_end=a[0] if len(a) == 1 else a[-1],
o_start=b[0],
o_end=b[0] if len(b) == 1 else b[-1],
label=label,
)
triples.append(t)
return cls(
tokens=tokens, triples=triples, id=0, pos=[], weight=1, is_labeled=True
)
def to_line_format(self) -> str:
# ([1], [4], 'POS')
# ([1,2], [4], 'POS')
triplets = []
for t in self.triples:
parts = []
for start, end in [(t.t_start, t.t_end), (t.o_start, t.o_end)]:
if start == end:
parts.append([start])
else:
parts.append([start, end])
parts.append(f"{t.label}")
triplets.append(tuple(parts))
line = " ".join(self.tokens) + "####" + str(triplets) + "\n"
assert self.from_line_format(line).tokens == self.tokens
assert self.from_line_format(line).triples == self.triples
return line
class Data(BaseModel):
root: Path
data_split: SplitEnum
sentences: Optional[List[Sentence]]
full_path: str = ""
num_instances: int = -1
opinion_offset: int = 3 # Refer: jet_o.py
is_labeled: bool = False
def load(self):
if self.sentences is None:
path = self.root / f"{self.data_split}.txt"
if self.full_path:
path = self.full_path
with open(path) as f:
self.sentences = [Sentence.from_line_format(line) for line in f]
@classmethod
def load_from_full_path(cls, path: str):
data = cls(full_path=path, root=Path(path).parent, data_split=SplitEnum.train)
data.load()
return data
def save_to_path(self, path: str):
assert self.sentences is not None
Path(path).parent.mkdir(exist_ok=True, parents=True)
with open(path, "w") as f:
for s in self.sentences:
f.write(s.to_line_format())
data = Data.load_from_full_path(path)
assert data.sentences is not None
for i, s in enumerate(data.sentences):
assert s.tokens == self.sentences[i].tokens
assert s.triples == self.sentences[i].triples
def analyze_spans(self):
print("\nHow often is target closer to opinion than any invalid target?")
records = []
for s in self.sentences:
valid_pairs = set([(a.opinion, a.target) for a in s.triples])
for a in s.triples:
closest = None
for b in s.triples:
dist_a = abs(np.mean(a.opinion) - np.mean(a.target))
dist_b = abs(np.mean(a.opinion) - np.mean(b.target))
if dist_b <= dist_a and (a.opinion, b.target) not in valid_pairs:
closest = b.target
spans = [a.opinion, a.target]
if closest is not None:
spans.append(closest)
tokens = list(s.tokens)
for start, end in spans:
tokens[start] = "[" + tokens[start]
tokens[end] = tokens[end] + "]"
start = min([s[0] for s in spans])
end = max([s[1] for s in spans])
tokens = tokens[start : end + 1]
records.append(dict(is_closest=closest is None, text=" ".join(tokens)))
df = pd.DataFrame(records)
print(df["is_closest"].mean())
print(df[~df["is_closest"]].head())
def analyze_joined_spans(self):
print("\nHow often are target/opinion spans joined?")
join_targets = 0
join_opinions = 0
total_targets = 0
total_opinions = 0
for s in self.sentences:
targets = set([t.target for t in s.triples])
opinions = set([t.opinion for t in s.triples])
total_targets += len(targets)
total_opinions += len(opinions)
join_targets += count_joins(targets)
join_opinions += count_joins(opinions)
print(
dict(
targets=join_targets / total_targets,
opinions=join_opinions / total_opinions,
)
)
def analyze_tag_counts(self):
print("\nHow many tokens are target/opinion/none?")
record = []
for s in self.sentences:
tags = [str(None) for _ in s.tokens]
for t in s.triples:
for i in range(t.o_start, t.o_end + 1):
tags[i] = "Opinion"
for i in range(t.t_start, t.t_end + 1):
tags[i] = "Target"
record.extend(tags)
print({k: v / len(record) for k, v in Counter(record).items()})
def analyze_span_distance(self):
print("\nHow far is the target/opinion from each other on average?")
distances = []
for s in self.sentences:
for t in s.triples:
x_opinion = (t.o_start + t.o_end) / 2
x_target = (t.t_start + t.t_end) / 2
distances.append(abs(x_opinion - x_target))
print(get_simple_stats(distances))
def analyze_opinion_labels(self):
print("\nFor opinion/target how often is it associated with only 1 polarity?")
for key in ["opinion", "target"]:
records = []
for s in self.sentences:
term_to_labels: Dict[Tuple[int, int], List[LabelEnum]] = {}
for t in s.triples:
term_to_labels.setdefault(getattr(t, key), []).append(t.label)
records.extend([len(set(labels)) for labels in term_to_labels.values()])
is_single_label = [n == 1 for n in records]
print(
dict(
key=key,
is_single_label=sum(is_single_label) / len(is_single_label),
stats=get_simple_stats(records),
)
)
def analyze_tag_score(self):
print("\nIf have all target and opinion terms (unpaired), what is max f_score?")
pred = copy.deepcopy(self.sentences)
for s in pred:
target_to_label = {t.target: t.label for t in s.triples}
opinion_to_label = {t.opinion: t.label for t in s.triples}
s.triples = TripleHeuristic().run(opinion_to_label, target_to_label)
analyzer = ResultAnalyzer()
analyzer.run(pred, gold=self.sentences, print_limit=0)
def analyze_ner(self):
print("\n How many opinion/target per sentence?")
num_o, num_t = [], []
for s in self.sentences:
opinions, targets = set(), set()
for t in s.triples:
opinions.add((t.o_start, t.o_end))
targets.add((t.t_start, t.t_end))
num_o.append(len(opinions))
num_t.append(len(targets))
print(
dict(
num_o=get_simple_stats(num_o),
num_t=get_simple_stats(num_t),
sentences=len(self.sentences),
)
)
def analyze_direction(self):
print("\n For targets, is opinion offset always positive/negative/both?")
records = []
for s in self.sentences:
span_to_offsets = {}
for t in s.triples:
off = np.mean(t.target) - np.mean(t.opinion)
span_to_offsets.setdefault(t.opinion, []).append(off)
for span, offsets in span_to_offsets.items():
labels = [
LabelEnum.positive if off > 0 else LabelEnum.negative
for off in offsets
]
lab = labels[0] if len(set(labels)) == 1 else LabelEnum.neutral
records.append(
dict(
span=" ".join(s.tokens[span[0] : span[1] + 1]),
text=s.as_text(),
offsets=lab,
)
)
df = pd.DataFrame(records)
print(df["offsets"].value_counts(normalize=True))
df = df[df["offsets"] == LabelEnum.neutral].drop(columns=["offsets"])
with pd.option_context("display.max_colwidth", 999):
print(df.head())
def analyze(self):
triples = [t for s in self.sentences for t in s.triples]
info = dict(
root=self.root,
sentences=len(self.sentences),
sentiments=Counter([t.label for t in triples]),
target_lengths=get_simple_stats(
[abs(t.t_start - t.t_end) + 1 for t in triples]
),
opinion_lengths=get_simple_stats(
[abs(t.o_start - t.o_end) + 1 for t in triples]
),
sentence_lengths=get_simple_stats([len(s.tokens) for s in self.sentences]),
)
for k, v in info.items():
print(k, v)
self.analyze_direction()
self.analyze_ner()
self.analyze_spans()
self.analyze_joined_spans()
self.analyze_tag_counts()
self.analyze_span_distance()
self.analyze_opinion_labels()
self.analyze_tag_score()
print("#" * 80)
def test_from_line_format(path: str = "aste/data/triplet_data/14lap/train.txt"):
print("\nCompare old TagReader with new Sentence.from_line_format")
instances = TagReader.read_inst(
file=path,
is_labeled=False,
number=-1,
opinion_offset=3,
)
a = Data(
root=Path(),
data_split=SplitEnum.test,
sentences=[Sentence.from_instance(x) for x in instances],
)
assert a.sentences is not None
with open(path) as f:
for i, line in enumerate(f):
s = Sentence.from_line_format(line)
assert s.tokens == a.sentences[i].tokens
set_a = set(t.json() for t in a.sentences[i].triples)
set_b = set(t.json() for t in s.triples)
assert set_a == set_b
def test_save_to_path(path: str = "aste/data/triplet_data/14lap/train.txt"):
print("\nEnsure that Data.save_to_path works properly")
path_temp = "temp.txt"
data = Data.load_from_full_path(path)
data.save_to_path(path_temp)
print("\nSamples")
with open(path_temp) as f:
for line in f.readlines()[:5]:
print(line)
os.remove(path_temp)
def merge_data(items: List[Data]) -> Data:
merged = Data(root=Path(), data_split=items[0].data_split, sentences=[])
for data in items:
data.load()
merged.sentences.extend(data.sentences)
return merged
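# Usage sketch for merge_data (hypothetical dataset roots, mirroring test_merge below):
#   unmerged = [Data(root=p, data_split=SplitEnum.train) for p in Path("aste/data/triplet_data").iterdir()]
#   merged = merge_data(unmerged)  # merge_data calls data.load() on each item, so unloaded Data objects are fine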
class Result(BaseModel):
num_sentences: int
num_pred: int = 0
num_gold: int = 0
num_correct: int = 0
num_start_correct: int = 0
num_start_end_correct: int = 0
num_opinion_correct: int = 0
num_target_correct: int = 0
num_span_overlap: int = 0
precision: float = 0.0
recall: float = 0.0
f_score: float = 0.0
class ResultAnalyzer(BaseModel):
@staticmethod
def check_overlap(a_start: int, a_end: int, b_start: int, b_end: int) -> bool:
return (b_start <= a_start <= b_end) or (b_start <= a_end <= b_end)
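    # Illustrative behaviour of check_overlap (made-up indices):
    #   check_overlap(2, 5, 4, 8) -> True   (a_end=5 falls inside [4, 8])
    #   check_overlap(2, 3, 5, 8) -> False  (neither endpoint of a falls inside [5, 8])
    # Note the check is one-directional: it returns False when span b lies strictly inside span a.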
@staticmethod
def run_sentence(pred: Sentence, gold: Sentence):
assert pred.tokens == gold.tokens
triples_gold = set([t.as_text(gold.tokens) for t in gold.triples])
triples_pred = set([t.as_text(pred.tokens) for t in pred.triples])
tp = triples_pred.intersection(triples_gold)
fp = triples_pred.difference(triples_gold)
fn = triples_gold.difference(triples_pred)
if fp or fn:
print(dict(gold=gold.as_text()))
print(dict(pred=pred.as_text()))
print(dict(tp=tp))
print(dict(fp=fp))
print(dict(fn=fn))
print("#" * 80)
@staticmethod
def analyze_labels(pred: List[Sentence], gold: List[Sentence]):
y_pred = []
y_gold = []
for i in range(len(pred)):
for p in pred[i].triples:
for g in gold[i].triples:
if (p.opinion, p.target) == (g.opinion, g.target):
y_pred.append(str(p.label))
y_gold.append(str(g.label))
print(dict(num_span_correct=len(y_pred)))
if y_pred:
print(classification_report(y_gold, y_pred))
@staticmethod
def analyze_spans(pred: List[Sentence], gold: List[Sentence]):
num_triples_gold, triples_found_o, triples_found_t = 0, set(), set()
for label in [LabelEnum.opinion, LabelEnum.target]:
num_correct, num_pred, num_gold = 0, 0, 0
is_target = {LabelEnum.opinion: False, LabelEnum.target: True}[label]
for i, (p, g) in enumerate(zip(pred, gold)):
spans_gold = set(g.spans if g.spans else g.extract_spans())
spans_pred = set(p.spans if p.spans else p.extract_spans())
spans_gold = set([s for s in spans_gold if s[-1] == label])
spans_pred = set([s for s in spans_pred if s[-1] == label])
num_gold += len(spans_gold)
num_pred += len(spans_pred)
num_correct += len(spans_gold.intersection(spans_pred))
for t in g.triples:
num_triples_gold += 1
span = (t.target if is_target else t.opinion) + (label,)
if span in spans_pred:
t_unique = (i,) + tuple(t.dict().items())
if is_target:
triples_found_t.add(t_unique)
else:
triples_found_o.add(t_unique)
if num_correct and num_pred and num_gold:
p = round(num_correct / num_pred, ndigits=4)
r = round(num_correct / num_gold, ndigits=4)
f = round(2 * p * r / (p + r), ndigits=4)
info = dict(label=label, p=p, r=r, f=f)
print(json.dumps(info, indent=2))
assert num_triples_gold % 2 == 0 # Was double-counted above
num_triples_gold = num_triples_gold // 2
num_triples_pred_ceiling = len(triples_found_o.intersection(triples_found_t))
triples_pred_recall_ceiling = num_triples_pred_ceiling / num_triples_gold
print("\n What is the upper bound for RE from predicted O & T?")
print(dict(recall=round(triples_pred_recall_ceiling, ndigits=4)))
@classmethod
def run(cls, pred: List[Sentence], gold: List[Sentence], print_limit=16):
assert len(pred) == len(gold)
cls.analyze_labels(pred, gold)
r = Result(num_sentences=len(pred))
for i in range(len(pred)):
if i < print_limit:
cls.run_sentence(pred[i], gold[i])
r.num_pred += len(pred[i].triples)
r.num_gold += len(gold[i].triples)
for p in pred[i].triples:
for g in gold[i].triples:
if p.dict() == g.dict():
r.num_correct += 1
if (p.o_start, p.t_start) == (g.o_start, g.t_start):
r.num_start_correct += 1
if (p.opinion, p.target) == (g.opinion, g.target):
r.num_start_end_correct += 1
if p.opinion == g.opinion:
r.num_opinion_correct += 1
if p.target == g.target:
r.num_target_correct += 1
if cls.check_overlap(*p.opinion, *g.opinion) and cls.check_overlap(
*p.target, *g.target
):
r.num_span_overlap += 1
e = 1e-9
r.precision = round(r.num_correct / (r.num_pred + e), 4)
r.recall = round(r.num_correct / (r.num_gold + e), 4)
r.f_score = round(2 * r.precision * r.recall / (r.precision + r.recall + e), 3)
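        # Worked example with illustrative counts: num_correct=50, num_pred=80, num_gold=100
        # gives precision = 0.625, recall = 0.5 and f_score = 2*0.625*0.5/(0.625+0.5) ≈ 0.556;
        # the epsilon only guards against division by zero when there are no predicted/gold triples.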
print(r.json(indent=2))
cls.analyze_spans(pred, gold)
def test_aste(root="aste/data/triplet_data"):
for folder in Path(root).iterdir():
scorer = nereval()
data = Data(root=folder, data_split=SplitEnum.train)
data.load()
data.analyze()
instances = [s.to_instance() for s in data.sentences]
for i in instances:
i.set_prediction(i.output)
print(dict(score=str(scorer.eval(instances))))
print(SentimentTriple.from_raw_triple(instances[0].output[1][0]))
def test_merge(root="aste/data/triplet_data"):
unmerged = [Data(root=p, data_split=SplitEnum.train) for p in Path(root).iterdir()]
data = merge_data(unmerged)
data.analyze()
if __name__ == "__main__":
Fire()
| 23,239 | 34.589587 | 88 | py |
DMASTE | DMASTE-main/Span-ASTE/aste/__init__.py | 0 | 0 | 0 | py |
|
DMASTE | DMASTE-main/Span-ASTE/aste/evaluation.py | from abc import abstractmethod
class Instance:
def __init__(self, instance_id, weight, inputs=None, output=None):
self.instance_id = instance_id
self.weight = weight
self.input = inputs
self.output = output
self.labeled_instance = None
self.unlabeled_instance = None
self.prediction = None
self.is_labeled = True
def set_instance_id(self, inst_id):
self.instance_id = inst_id
def get_instance_id(self):
return self.instance_id
def get_weight(self):
return self.weight
def set_weight(self, weight):
self.weight = weight
def set_labeled(self):
self.is_labeled = True
def set_unlabeled(self):
self.is_labeled = False
def remove_output(self):
self.output = None
# def is_labeled(self):
# return self.is_labeled
@abstractmethod
def size(self):
pass
@abstractmethod
def duplicate(self):
pass
@abstractmethod
def removeOutput(self):
pass
@abstractmethod
def removePrediction(self):
pass
@abstractmethod
def get_input(self):
pass
@abstractmethod
def get_output(self):
pass
@abstractmethod
def get_prediction(self):
pass
@abstractmethod
def set_prediction(self, *args):
pass
@abstractmethod
def has_output(self):
pass
@abstractmethod
def has_prediction(self):
pass
def get_islabeled(self):
return self.is_labeled
def get_labeled_instance(self):
if self.is_labeled:
return self
def set_label_instance(self, inst):
self.labeled_instance = inst
def get_unlabeled_instance(self):
pass
def set_unlabel_instance(self, inst):
self.unlabeled_instance = inst
class LinearInstance(Instance):
def __init__(self, instance_id, weight, inputs, output):
super().__init__(instance_id, weight, inputs, output)
self.word_seq = None
def size(self):
# print('input:', self.input)
return len(self.input)
def duplicate(self):
dup = LinearInstance(self.instance_id, self.weight, self.input, self.output)
dup.word_seq = self.word_seq
# print('dup input:', dup.get_input())
return dup
def removeOutput(self):
self.output = None
def removePrediction(self):
self.prediction = None
def get_input(self):
return self.input
def get_output(self):
return self.output
def get_prediction(self):
return self.prediction
def set_prediction(self, prediction):
self.prediction = prediction
def has_output(self):
return self.output is not None
def has_prediction(self):
return self.prediction is not None
def __str__(self):
return (
"input:"
+ str(self.input)
+ "\toutput:"
+ str(self.output)
+ " is_labeled:"
+ str(self.is_labeled)
)
class TagReader:
# 0 neu, 1 pos, 2 neg
label2id_map = {"<START>": 0}
@classmethod
def read_inst(cls, file, is_labeled, number, opinion_offset):
insts = []
# inputs = []
# outputs = []
total_p = 0
original_p = 0
f = open(file, "r", encoding="utf-8")
# read AAAI2020 data
for line in f:
line = line.strip()
line = line.split("####")
inputs = line[0].split() # sentence
# t_output = line[1].split() # target
o_output = line[2].split() # opinion
raw_pairs = eval(line[3]) # triplets
# prepare tagging sequence
output = ["O" for _ in range(len(inputs))]
for i, t in enumerate(o_output):
t = t.split("=")[1]
if t != "O":
output[i] = t
output_o = cls.ot2bieos_o(output)
output = ["O" for _ in range(len(inputs))]
for i in range(len(inputs)):
if output_o[i] != "O":
output[i] = output_o[i].split("-")[0]
# re-format original triplets to jet_o tagging format
new_raw_pairs = []
for new_pair in raw_pairs:
opinion_s = new_pair[1][0]
opinion_e = new_pair[1][-1]
target_s = new_pair[0][0]
target_e = new_pair[0][-1]
# change sentiment to value --> 0 neu, 1 pos, 2 neg
if new_pair[2] == "NEG":
polarity = 2
elif new_pair[2] == "POS":
polarity = 1
else:
polarity = 0
# check direction and append
if target_s < opinion_s:
dire = 0
new_raw_pairs.append(
(
[opinion_s, opinion_e],
polarity,
dire,
opinion_s - target_e,
opinion_s - target_s,
)
)
else:
dire = 1
new_raw_pairs.append(
(
[opinion_s, opinion_e],
polarity,
dire,
target_s - opinion_s,
target_e - opinion_s,
)
)
new_raw_pairs.sort(key=lambda x: x[0][0])
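            # Each re-formatted pair above is, as far as the code implies:
            #   ([opinion_start, opinion_end], polarity, direction, near_offset, far_offset)
            # where direction=0 means the target precedes the opinion, direction=1 the reverse,
            # and the two offsets measure the distance from the opinion start to the target boundaries.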
original_p += len(raw_pairs)
# remove train data that offset (M) larger than setting and nosiy data during training
if is_labeled:
new_pairs = []
opinion_idxs = []
remove_idxs = []
for pair in new_raw_pairs:
if opinion_offset > pair[-1] >= pair[-2] > 0:
new_pairs.append(pair)
opinion_idxs.extend(list(range(pair[0][0], pair[0][1] + 1)))
else:
remove_idxs.extend(list(range(pair[0][0], pair[0][-1] + 1)))
for idx in remove_idxs:
if idx not in opinion_idxs:
output[idx] = "O"
else:
# keep all original triplets during eval and test for calculating F1 score
new_pairs = new_raw_pairs
output = output
total_p += len(new_pairs)
output = (output, new_pairs)
if len(new_pairs) > 0:
inst = LinearInstance(len(insts) + 1, 1, inputs, output)
for label in output[0]:
if label not in TagReader.label2id_map and is_labeled:
output_id = len(TagReader.label2id_map)
TagReader.label2id_map[label] = output_id
if is_labeled:
inst.set_labeled()
else:
inst.set_unlabeled()
insts.append(inst)
if len(insts) >= number > 0:
break
print("# of original triplets: ", original_p)
print("# of triplets for current setup: ", total_p)
return insts
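    # Usage sketch (path and arguments mirroring the defaults used elsewhere in this repo):
    #   insts = TagReader.read_inst(file="aste/data/triplet_data/14lap/train.txt",
    #                               is_labeled=True, number=-1, opinion_offset=3)
    # number=-1 keeps every sentence; is_labeled=True additionally filters out triplets whose
    # target-opinion offsets fall outside the (0, opinion_offset) window, as done above for training data.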
@staticmethod
def ot2bieos_o(ts_tag_sequence):
"""
        Convert an opinion tag sequence from OT format to BIEOS format
        (b-o / i-o / e-o / s-o = begin / inside / end / single-token opinion span).
"""
n_tags = len(ts_tag_sequence)
new_ts_sequence = []
prev_pos = "$$$"
for i in range(n_tags):
cur_ts_tag = ts_tag_sequence[i]
if cur_ts_tag == "O":
new_ts_sequence.append("O")
cur_pos = "O"
else:
cur_pos = cur_ts_tag
# cur_pos is T
if cur_pos != prev_pos:
# prev_pos is O and new_cur_pos can only be B or S
if i == n_tags - 1:
new_ts_sequence.append("s-o")
else:
next_ts_tag = ts_tag_sequence[i + 1]
if next_ts_tag == "O":
new_ts_sequence.append("s-o")
else:
new_ts_sequence.append("b-o")
else:
# prev_pos is T and new_cur_pos can only be I or E
if i == n_tags - 1:
new_ts_sequence.append("e-o")
else:
next_ts_tag = ts_tag_sequence[i + 1]
if next_ts_tag == "O":
new_ts_sequence.append("e-o")
else:
new_ts_sequence.append("i-o")
prev_pos = cur_pos
return new_ts_sequence
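    # Illustrative conversions (simple made-up tag sequences):
    #   ot2bieos_o(["O", "T", "T", "T", "O"]) -> ["O", "b-o", "i-o", "e-o", "O"]
    #   ot2bieos_o(["O", "T", "O"])           -> ["O", "s-o", "O"]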
class Span:
def __init__(self, left, right, type):
self.left = left
self.right = right
self.type = type
def __eq__(self, other):
return (
self.left == other.left
and self.right == other.right
and self.type == other.type
)
def __hash__(self):
return hash((self.left, self.right, self.type))
class Score:
@abstractmethod
def larger_than(self, obj):
pass
@abstractmethod
def update_score(self, obj):
pass
class FScore(object):
def __init__(self, precision, recall, fscore):
self.recall = recall
self.precision = precision
self.fscore = fscore
def __str__(self):
return "(Precision={:.2f}%, Recall={:.2f}%, FScore={:.2f}%)".format(
self.precision * 100, self.recall * 100, self.fscore * 100
)
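    # Example (illustrative numbers): str(FScore(0.5, 0.25, 1 / 3))
    # renders as "(Precision=50.00%, Recall=25.00%, FScore=33.33%)".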
class Eval:
@abstractmethod
def eval(self, insts) -> Score:
pass
## The input to the evaluation should already
## have the predictions, i.e. the predicted labels.
## IOBES tagging scheme
class nereval(Eval):
def eval(self, insts):
pp = 0
total_entity = 0
total_predict = 0
opinion_eval = False
target_eval = False
baseline_eval = False
pair_eval = True
test_pairs = []
idx = 0
if baseline_eval:
with open("baseline_result.txt", "w") as f:
for inst in insts:
prediction = inst.prediction
# print('--------', prediction)
gold_pair = inst.output[1]
# print(gold_pair)
predict_span_ts = []
p_start = -1
for i in range(len(prediction)):
if prediction[i].startswith("B"):
p_start = i
if prediction[i].startswith("E"):
p_end = i
predict_span_ts.append(
[[p_start, p_end], prediction[i][2:]]
)
if prediction[i].startswith("S"):
predict_span_ts.append([[i], prediction[i][2:]])
predict_span_os = []
p_start = -1
for i in range(len(prediction)):
if prediction[i].startswith("b"):
p_start = i
if prediction[i].startswith("e"):
p_end = i
predict_span_os.append(
[[p_start, p_end], prediction[i][2:]]
)
if prediction[i].startswith("s"):
predict_span_os.append([[i], prediction[i][2:]])
pairs = []
if len(predict_span_ts) > 0:
for target in predict_span_ts:
t_pos = target[0][0]
min_distance = len(prediction)
if len(predict_span_os) > 0:
for opinion in predict_span_os:
o_pos = opinion[0][0]
if min_distance > abs(t_pos - o_pos):
min_distance = abs(t_pos - o_pos)
pair = (target[0], opinion[0], target[1])
pairs.append(pair)
new_pairs = []
for p in pairs:
opinion_idx = list(range(p[1][0], p[1][-1] + 1))
if len(opinion_idx) == 1:
opinion_idx.append(opinion_idx[0])
if p[-1] == "POS":
polarity = 1
elif p[-1] == "NEG":
polarity = 2
elif p[-1] == "NEU":
polarity = 0
direction = 1
if p[1][0] > p[0][0]:
direction = 0
target_idx = (abs(p[1][0] - p[0][-1]), abs(p[1][0] - p[0][0]))
if direction == 1:
target_idx = (
abs(p[1][0] - p[0][0]),
abs(p[1][0] - p[0][-1]),
)
new_pairs.append(
(
opinion_idx,
polarity,
direction,
target_idx[0],
target_idx[1],
)
)
# print('new pairs', new_pairs)
total_entity += len(gold_pair)
total_predict += len(new_pairs)
for pred in new_pairs:
for gold in gold_pair:
if pred == gold:
pp += 1
test_pairs.append(new_pairs)
idx += 1
f.write(str(inst.get_input()) + "\n")
f.write(str(inst.get_output()) + "\n")
f.write(str(inst.get_prediction()) + str(new_pairs) + "\n")
f.write("\n")
f.close()
# print(test_pairs)
if not baseline_eval:
for inst in insts:
output = inst.output[0]
prediction = inst.prediction
# print(inst)
# print('----',output)
# print('-------', prediction)
if pair_eval:
output = inst.output[1]
prediction = inst.prediction[1]
total_entity += len(output)
total_predict += len(prediction)
for pred in prediction:
for gold in output:
if pred == gold:
pp += 1
# convert to span
output_spans = set()
if target_eval:
start = -1
for i in range(len(output)):
if output[i].startswith("B"):
start = i
if output[i].startswith("E"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("S"):
output_spans.add(Span(i, i, output[i][2:]))
if opinion_eval:
start = -1
for i in range(len(output)):
if output[i].startswith("b"):
start = i
if output[i].startswith("e"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("s"):
output_spans.add(Span(i, i, output[i][2:]))
predict_spans = set()
if target_eval:
p_start = -1
for i in range(len(prediction)):
if prediction[i].startswith("B"):
p_start = i
if prediction[i].startswith("E"):
p_end = i
predict_spans.add(Span(p_start, p_end, prediction[i][2:]))
if prediction[i].startswith("S"):
predict_spans.add(Span(i, i, prediction[i][2:]))
if opinion_eval:
p_start = -1
for i in range(len(prediction)):
if prediction[i].startswith("b"):
p_start = i
if prediction[i].startswith("e"):
p_end = i
predict_spans.add(Span(p_start, p_end, prediction[i][2:]))
if prediction[i].startswith("s"):
predict_spans.add(Span(i, i, prediction[i][2:]))
# print(output_spans)
# print(predict_spans)
if not pair_eval:
total_entity += len(output_spans)
total_predict += len(predict_spans)
pp += len(predict_spans.intersection(output_spans))
print("toal num of entity: ", total_entity)
print("total num of prediction: ", total_predict)
precision = pp * 1.0 / total_predict if total_predict != 0 else 0
recall = pp * 1.0 / total_entity if total_entity != 0 else 0
fscore = (
2.0 * precision * recall / (precision + recall)
if precision != 0 or recall != 0
else 0
)
# ret = [precision, recall, fscore]
fscore = FScore(precision, recall, fscore)
return fscore
| 18,132 | 33.020638 | 98 | py |
DMASTE | DMASTE-main/BMRC/main.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import argparse
import Data
import Model
import utils
import torch
from torch.nn import functional as F
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
import os
from torch.utils.data import Dataset
import random
import numpy as np
# os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(2)
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def test(model, t, batch_generator, standard, beta, logger):
model.eval()
all_target = []
all_pred = []
triplet_target_num = 0
asp_target_num = 0
opi_target_num = 0
asp_opi_target_num = 0
asp_pol_target_num = 0
triplet_predict_num = 0
asp_predict_num = 0
opi_predict_num = 0
asp_opi_predict_num = 0
asp_pol_predict_num = 0
triplet_match_num = 0
asp_match_num = 0
opi_match_num = 0
asp_opi_match_num = 0
asp_pol_match_num = 0
for batch_index, batch_dict in enumerate(batch_generator):
triplets_target = standard[batch_index]['triplet']
asp_target = standard[batch_index]['asp_target']
opi_target = standard[batch_index]['opi_target']
asp_opi_target = standard[batch_index]['asp_opi_target']
asp_pol_target = standard[batch_index]['asp_pol_target']
        # predicted triplets for this sentence
triplets_predict = []
asp_predict = []
opi_predict = []
asp_opi_predict = []
asp_pol_predict = []
forward_pair_list = []
forward_pair_prob = []
forward_pair_ind_list = []
backward_pair_list = []
backward_pair_prob = []
backward_pair_ind_list = []
final_asp_list = []
final_opi_list = []
final_asp_ind_list = []
final_opi_ind_list = []
# forward q_1
passenge_index = batch_dict['forward_asp_answer_start'][0].gt(-1).float().nonzero()
passenge = batch_dict['forward_asp_query'][0][passenge_index].squeeze(1)
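        # 'passenge' (presumably "passage") gathers the review-sentence token ids: they are the
        # positions whose gold start label is not -1, and are re-used below when building the
        # opinion/aspect/sentiment queries for the later turns.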
f_asp_start_scores, f_asp_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_start_scores = F.softmax(f_asp_start_scores[0], dim=1)
f_asp_end_scores = F.softmax(f_asp_end_scores[0], dim=1)
f_asp_start_prob, f_asp_start_ind = torch.max(f_asp_start_scores, dim=1)
f_asp_end_prob, f_asp_end_ind = torch.max(f_asp_end_scores, dim=1)
f_asp_start_prob_temp = []
f_asp_end_prob_temp = []
f_asp_start_index_temp = []
f_asp_end_index_temp = []
for i in range(f_asp_start_ind.size(0)):
if batch_dict['forward_asp_answer_start'][0, i] != -1:
if f_asp_start_ind[i].item() == 1:
f_asp_start_index_temp.append(i)
f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
if f_asp_end_ind[i].item() == 1:
f_asp_end_index_temp.append(i)
f_asp_end_prob_temp.append(f_asp_end_prob[i].item())
f_asp_start_index, f_asp_end_index, f_asp_prob = utils.filter_unpaired(
f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp)
for i in range(len(f_asp_start_index)):
opinion_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What opinion given the aspect'.split(' ')])
for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1):
opinion_query.append(batch_dict['forward_asp_query'][0][j].item())
opinion_query.append(t.convert_tokens_to_ids('?'))
opinion_query.append(t.convert_tokens_to_ids('[SEP]'))
opinion_query_seg = [0] * len(opinion_query)
f_opi_length = len(opinion_query)
opinion_query = torch.tensor(opinion_query).long().cuda()
opinion_query = torch.cat([opinion_query, passenge], -1).unsqueeze(0)
opinion_query_seg += [1] * passenge.size(0)
opinion_query_mask = torch.ones(opinion_query.size(1)).float().cuda().unsqueeze(0)
opinion_query_seg = torch.tensor(opinion_query_seg).long().cuda().unsqueeze(0)
f_opi_start_scores, f_opi_end_scores = model(opinion_query, opinion_query_mask, opinion_query_seg, 0)
f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
f_opi_start_prob, f_opi_start_ind = torch.max(f_opi_start_scores, dim=1)
f_opi_end_prob, f_opi_end_ind = torch.max(f_opi_end_scores, dim=1)
f_opi_start_prob_temp = []
f_opi_end_prob_temp = []
f_opi_start_index_temp = []
f_opi_end_index_temp = []
for k in range(f_opi_start_ind.size(0)):
if opinion_query_seg[0, k] == 1:
if f_opi_start_ind[k].item() == 1:
f_opi_start_index_temp.append(k)
f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
if f_opi_end_ind[k].item() == 1:
f_opi_end_index_temp.append(k)
f_opi_end_prob_temp.append(f_opi_end_prob[k].item())
f_opi_start_index, f_opi_end_index, f_opi_prob = utils.filter_unpaired(
f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_index_temp, f_opi_end_index_temp)
for idx in range(len(f_opi_start_index)):
asp = [batch_dict['forward_asp_query'][0][j].item() for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1)]
opi = [opinion_query[0][j].item() for j in range(f_opi_start_index[idx], f_opi_end_index[idx] + 1)]
asp_ind = [f_asp_start_index[i]-5, f_asp_end_index[i]-5]
opi_ind = [f_opi_start_index[idx]-f_opi_length, f_opi_end_index[idx]-f_opi_length]
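                # The constant 5 appears to be the token length of the fixed first-turn query
                # ("[CLS] What aspects ? [SEP]"), and f_opi_length that of the opinion query built
                # above, so the subtractions re-base the indices onto the original sentence.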
temp_prob = f_asp_prob[i] * f_opi_prob[idx]
if asp_ind + opi_ind not in forward_pair_ind_list:
forward_pair_list.append([asp] + [opi])
forward_pair_prob.append(temp_prob)
forward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# backward q_1
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
b_opi_start_prob, b_opi_start_ind = torch.max(b_opi_start_scores, dim=1)
b_opi_end_prob, b_opi_end_ind = torch.max(b_opi_end_scores, dim=1)
b_opi_start_prob_temp = []
b_opi_end_prob_temp = []
b_opi_start_index_temp = []
b_opi_end_index_temp = []
for i in range(b_opi_start_ind.size(0)):
if batch_dict['backward_opi_answer_start'][0, i] != -1:
if b_opi_start_ind[i].item() == 1:
b_opi_start_index_temp.append(i)
b_opi_start_prob_temp.append(b_opi_start_prob[i].item())
if b_opi_end_ind[i].item() == 1:
b_opi_end_index_temp.append(i)
b_opi_end_prob_temp.append(b_opi_end_prob[i].item())
b_opi_start_index, b_opi_end_index, b_opi_prob = utils.filter_unpaired(
b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_index_temp, b_opi_end_index_temp)
# backward q_2
for i in range(len(b_opi_start_index)):
aspect_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What aspect does the opinion'.split(' ')])
for j in range(b_opi_start_index[i], b_opi_end_index[i] + 1):
aspect_query.append(batch_dict['backward_opi_query'][0][j].item())
aspect_query.append(t.convert_tokens_to_ids('describe'))
aspect_query.append(t.convert_tokens_to_ids('?'))
aspect_query.append(t.convert_tokens_to_ids('[SEP]'))
aspect_query_seg = [0] * len(aspect_query)
b_asp_length = len(aspect_query)
aspect_query = torch.tensor(aspect_query).long().cuda()
aspect_query = torch.cat([aspect_query, passenge], -1).unsqueeze(0)
aspect_query_seg += [1] * passenge.size(0)
aspect_query_mask = torch.ones(aspect_query.size(1)).float().cuda().unsqueeze(0)
aspect_query_seg = torch.tensor(aspect_query_seg).long().cuda().unsqueeze(0)
b_asp_start_scores, b_asp_end_scores = model(aspect_query, aspect_query_mask, aspect_query_seg, 0)
b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
b_asp_start_prob, b_asp_start_ind = torch.max(b_asp_start_scores, dim=1)
b_asp_end_prob, b_asp_end_ind = torch.max(b_asp_end_scores, dim=1)
b_asp_start_prob_temp = []
b_asp_end_prob_temp = []
b_asp_start_index_temp = []
b_asp_end_index_temp = []
for k in range(b_asp_start_ind.size(0)):
if aspect_query_seg[0, k] == 1:
if b_asp_start_ind[k].item() == 1:
b_asp_start_index_temp.append(k)
b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
if b_asp_end_ind[k].item() == 1:
b_asp_end_index_temp.append(k)
b_asp_end_prob_temp.append(b_asp_end_prob[k].item())
b_asp_start_index, b_asp_end_index, b_asp_prob = utils.filter_unpaired(
b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_index_temp, b_asp_end_index_temp)
for idx in range(len(b_asp_start_index)):
opi = [batch_dict['backward_opi_query'][0][j].item() for j in
range(b_opi_start_index[i], b_opi_end_index[i] + 1)]
asp = [aspect_query[0][j].item() for j in range(b_asp_start_index[idx], b_asp_end_index[idx] + 1)]
asp_ind = [b_asp_start_index[idx]-b_asp_length, b_asp_end_index[idx]-b_asp_length]
opi_ind = [b_opi_start_index[i]-5, b_opi_end_index[i]-5]
temp_prob = b_asp_prob[idx] * b_opi_prob[i]
if asp_ind + opi_ind not in backward_pair_ind_list:
backward_pair_list.append([asp] + [opi])
backward_pair_prob.append(temp_prob)
backward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# filter triplet
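        # Pairs predicted by both directions (aspect->opinion and opinion->aspect) are kept directly;
        # pairs found in only one direction are kept only if their joint probability is >= beta.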
# forward
for idx in range(len(forward_pair_list)):
if forward_pair_list[idx] in backward_pair_list:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
else:
if forward_pair_prob[idx] >= beta:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
# backward
for idx in range(len(backward_pair_list)):
if backward_pair_list[idx] not in forward_pair_list:
if backward_pair_prob[idx] >= beta:
if backward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(backward_pair_list[idx][0])
final_opi_list.append([backward_pair_list[idx][1]])
final_asp_ind_list.append(backward_pair_ind_list[idx][:2])
final_opi_ind_list.append([backward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(backward_pair_list[idx][0])
if backward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(backward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(backward_pair_ind_list[idx][2:])
# sentiment
for idx in range(len(final_asp_list)):
predict_opinion_num = len(final_opi_list[idx])
sentiment_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What sentiment given the aspect'.split(' ')])
sentiment_query+=final_asp_list[idx]
sentiment_query += t.convert_tokens_to_ids([word.lower() for word in 'and the opinion'.split(' ')])
            # concatenate all predicted opinion terms into the sentiment query
for idy in range(predict_opinion_num):
sentiment_query+=final_opi_list[idx][idy]
if idy < predict_opinion_num - 1:
sentiment_query.append(t.convert_tokens_to_ids('/'))
sentiment_query.append(t.convert_tokens_to_ids('?'))
sentiment_query.append(t.convert_tokens_to_ids('[SEP]'))
sentiment_query_seg = [0] * len(sentiment_query)
sentiment_query = torch.tensor(sentiment_query).long().cuda()
sentiment_query = torch.cat([sentiment_query, passenge], -1).unsqueeze(0)
sentiment_query_seg += [1] * passenge.size(0)
sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().cuda().unsqueeze(0)
sentiment_query_seg = torch.tensor(sentiment_query_seg).long().cuda().unsqueeze(0)
sentiment_scores = model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 1)
sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()
            # each opinion forms one triplet with the current aspect
for idy in range(predict_opinion_num):
asp_f = []
opi_f = []
asp_f.append(final_asp_ind_list[idx][0])
asp_f.append(final_asp_ind_list[idx][1])
opi_f.append(final_opi_ind_list[idx][idy][0])
opi_f.append(final_opi_ind_list[idx][idy][1])
triplet_predict = asp_f + opi_f + [sentiment_predicted]
triplets_predict.append(triplet_predict)
if opi_f not in opi_predict:
opi_predict.append(opi_f)
if asp_f + opi_f not in asp_opi_predict:
asp_opi_predict.append(asp_f + opi_f)
if asp_f + [sentiment_predicted] not in asp_pol_predict:
asp_pol_predict.append(asp_f + [sentiment_predicted])
if asp_f not in asp_predict:
asp_predict.append(asp_f)
all_target.append(triplets_target)
all_pred.append(triplets_predict)
triplet_target_num += len(triplets_target)
asp_target_num += len(asp_target)
opi_target_num += len(opi_target)
asp_opi_target_num += len(asp_opi_target)
asp_pol_target_num += len(asp_pol_target)
triplet_predict_num += len(triplets_predict)
asp_predict_num += len(asp_predict)
opi_predict_num += len(opi_predict)
asp_opi_predict_num += len(asp_opi_predict)
asp_pol_predict_num += len(asp_pol_predict)
for trip in triplets_target:
for trip_ in triplets_predict:
if trip_ == trip:
triplet_match_num += 1
for trip in asp_target:
for trip_ in asp_predict:
if trip_ == trip:
asp_match_num += 1
for trip in opi_target:
for trip_ in opi_predict:
if trip_ == trip:
opi_match_num += 1
for trip in asp_opi_target:
for trip_ in asp_opi_predict:
if trip_ == trip:
asp_opi_match_num += 1
for trip in asp_pol_target:
for trip_ in asp_pol_predict:
if trip_ == trip:
asp_pol_match_num += 1
precision = float(triplet_match_num) / float(triplet_predict_num+1e-6)
recall = float(triplet_match_num) / float(triplet_target_num+1e-6)
f1 = 2 * precision * recall / (precision + recall+1e-6)
logger.info('Triplet - Precision: {}\tRecall: {}\tF1: {}'.format(precision, recall, f1))
precision_aspect = float(asp_match_num) / float(asp_predict_num+1e-6)
recall_aspect = float(asp_match_num) / float(asp_target_num+1e-6)
f1_aspect = 2 * precision_aspect * recall_aspect / (precision_aspect + recall_aspect+1e-6)
logger.info('Aspect - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect, recall_aspect, f1_aspect))
precision_opinion = float(opi_match_num) / float(opi_predict_num+1e-6)
recall_opinion = float(opi_match_num) / float(opi_target_num+1e-6)
f1_opinion = 2 * precision_opinion * recall_opinion / (precision_opinion + recall_opinion+1e-6)
logger.info('Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_opinion, recall_opinion, f1_opinion))
precision_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_predict_num+1e-6)
recall_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_target_num+1e-6)
f1_aspect_sentiment = 2 * precision_aspect_sentiment * recall_aspect_sentiment / (
precision_aspect_sentiment + recall_aspect_sentiment+1e-6)
logger.info('Aspect-Sentiment - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_sentiment,
recall_aspect_sentiment,
f1_aspect_sentiment))
precision_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_predict_num+1e-6)
recall_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_target_num+1e-6)
f1_aspect_opinion = 2 * precision_aspect_opinion * recall_aspect_opinion / (
precision_aspect_opinion + recall_aspect_opinion+1e-6)
logger.info(
'Aspect-Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_opinion, recall_aspect_opinion,
f1_aspect_opinion))
metric = {'triplet': {'p': precision, 'r': recall, 'f1': f1},
'aspect': {'p': precision_aspect, 'r': recall_aspect, 'f1': f1_aspect},
'opinion': {'p': precision_opinion, 'r': recall_opinion, 'f1': f1_opinion},
'aspect-sentiment': {'p': precision_aspect_sentiment, 'r': recall_aspect_sentiment, 'f1': f1_aspect_sentiment},
'aspect-opinion': {'p': precision_aspect_opinion, 'r': recall_aspect_opinion, 'f1': f1_aspect_opinion}}
triplets = {'pred': all_pred, 'target': all_target}
return metric, triplets
def main(args, tokenize):
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_log), exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
# init logger
logger = utils.get_logger(os.path.join(args.tmp_log, args.model_name + '.log'))
# load data
logger.info('loading data......')
# init model
logger.info('initial model......')
model = Model.BERTModel(args)
if args.ifgpu:
model = model.cuda()
# print args
logger.info(args)
if args.mode == 'test':
logger.info('start testing......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
test_standard = target_standard_data['test']
test_data = target_total_data['test']
test_dataset = Data.ReviewDataset(None, None, test_data, 'test')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
elif args.mode == 'train':
source_data_path = os.path.join(args.data_dir, args.source + '.pt')
source_standard_data_path = os.path.join(args.data_dir, args.source + '_standard.pt')
source_total_data = torch.load(source_data_path)
source_standard_data = torch.load(source_standard_data_path)
train_data = source_total_data['train']
dev_data = source_total_data['dev']
dev_standard = source_standard_data['dev']
train_dataset = Data.ReviewDataset(train_data, dev_data, None, 'train')
dev_dataset = Data.ReviewDataset(train_data, dev_data, None, 'dev')
batch_num_train = train_dataset.get_batch_num(args.batch_size)
# optimizer
logger.info('initial optimizer......')
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if "_bert" in n], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if "_bert" not in n],
'lr': args.learning_rate, 'weight_decay': 0.01}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.tuning_bert_rate, correct_bias=False)
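        # Two parameter groups: BERT parameters (names containing "_bert") use the optimizer's
        # default lr (args.tuning_bert_rate), while the task-specific heads use args.learning_rate.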
# load saved model, optimizer and epoch num
if args.reload and os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
logger.info('Reload model and optimizer after training epoch {}'.format(checkpoint['epoch']))
else:
start_epoch = 1
logger.info('New model and optimizer from epoch 0')
# scheduler
training_steps = args.epoch_num * batch_num_train
warmup_steps = int(training_steps * args.warm_up)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=training_steps)
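        # Illustrative numbers (hypothetical batch count): with epoch_num=40, batch_num_train=250
        # and warm_up=0.1, training_steps = 10000 and warmup_steps = 1000, i.e. the learning rate
        # ramps up linearly over the first 1000 steps and decays linearly afterwards.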
# training
logger.info('begin training......')
best_dev_f1 = 0.
for epoch in range(start_epoch, args.epoch_num+1):
model.train()
model.zero_grad()
batch_generator = Data.generate_fi_batches(dataset=train_dataset, batch_size=args.batch_size,
ifgpu=args.ifgpu)
for batch_index, batch_dict in enumerate(batch_generator):
optimizer.zero_grad()
# q1_a
f_aspect_start_scores, f_aspect_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_loss = utils.calculate_entity_loss(f_aspect_start_scores, f_aspect_end_scores,
batch_dict['forward_asp_answer_start'],
batch_dict['forward_asp_answer_end'])
# q1_b
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_loss = utils.calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
batch_dict['backward_opi_answer_start'],
batch_dict['backward_opi_answer_end'])
# q2_a
f_opi_start_scores, f_opi_end_scores = model(
batch_dict['forward_opi_query'].view(-1, batch_dict['forward_opi_query'].size(-1)),
batch_dict['forward_opi_query_mask'].view(-1, batch_dict['forward_opi_query_mask'].size(-1)),
batch_dict['forward_opi_query_seg'].view(-1, batch_dict['forward_opi_query_seg'].size(-1)),
0)
f_opi_loss = utils.calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)),
batch_dict['forward_opi_answer_end'].view(-1, batch_dict['forward_opi_answer_end'].size(-1)))
# q2_b
b_asp_start_scores, b_asp_end_scores = model(
batch_dict['backward_asp_query'].view(-1, batch_dict['backward_asp_query'].size(-1)),
batch_dict['backward_asp_query_mask'].view(-1, batch_dict['backward_asp_query_mask'].size(-1)),
batch_dict['backward_asp_query_seg'].view(-1, batch_dict['backward_asp_query_seg'].size(-1)),
0)
b_asp_loss = utils.calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)),
batch_dict['backward_asp_answer_end'].view(-1, batch_dict['backward_asp_answer_end'].size(-1)))
# q_3
sentiment_scores = model(batch_dict['sentiment_query'].view(-1, batch_dict['sentiment_query'].size(-1)),
batch_dict['sentiment_query_mask'].view(-1, batch_dict['sentiment_query_mask'].size(-1)),
batch_dict['sentiment_query_seg'].view(-1, batch_dict['sentiment_query_seg'].size(-1)),
1)
sentiment_loss = utils.calculate_sentiment_loss(sentiment_scores, batch_dict['sentiment_answer'].view(-1))
# loss
loss_sum = f_asp_loss + f_opi_loss + b_opi_loss + b_asp_loss + args.beta*sentiment_loss
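                # Multi-task objective: the four span-extraction losses (forward/backward aspect and
                # opinion queries) are summed, and the sentiment classification loss is weighted by args.beta.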
loss_sum.backward()
optimizer.step()
scheduler.step()
# train logger
if batch_index % 10 == 0:
logger.info('Epoch:[{}/{}]\t Batch:[{}/{}]\t Loss Sum:{}\t '
'forward Loss:{};{}\t backward Loss:{};{}\t Sentiment Loss:{}'.
format(epoch, args.epoch_num, batch_index, batch_num_train,
round(loss_sum.item(), 4),
round(f_asp_loss.item(), 4), round(f_opi_loss.item(), 4),
round(b_asp_loss.item(), 4), round(b_opi_loss.item(), 4),
round(sentiment_loss.item(), 4)))
# validation
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
metric, _ = test(model, tokenize, batch_generator_dev, dev_standard, args.inference_beta, logger)
f1 = metric['triplet']['f1']
# save model and optimizer
if f1 > best_dev_f1:
best_dev_f1 = f1
logger.info('best dev f1: {}\t epoch: {}'.format(best_dev_f1, epoch))
logger.info('Model saved after epoch {}'.format(epoch))
state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
torch.save(state, os.path.join(args.model_dir, args.model_name + '.pt'))
# test
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.inference_beta, logger)
# logger.info('start testing......')
# test_dataset = Data.ReviewDataset(train_data, dev_data, test_data, 'test')
# # load checkpoint
# logger.info('loading checkpoint......')
# checkpoint = torch.load(args.save_model_path)
# model.load_state_dict(checkpoint['net'])
# model.eval()
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# # eval
# logger.info('evaluating......')
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
else:
logger.info('Error mode!')
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bidirectional MRC-based sentiment triplet extraction')
parser.add_argument('--data_dir', type=str, default="./data/preprocess/")
parser.add_argument('--log_dir', type=str, default="./log/")
parser.add_argument('--tmp_log', type=str)
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"])
parser.add_argument('--reload', type=bool, default=False)
parser.add_argument('--checkpoint_path', type=str, default="./model/14lap/modelFinal.model")
parser.add_argument('--model_dir', type=str, default="./model/")
parser.add_argument('--model_name', type=str, default="1")
# model hyper-parameter
parser.add_argument('--bert_model_type', type=str, default="bert-base-uncased")
parser.add_argument('--hidden_size', type=int, default=768)
parser.add_argument('--inference_beta', type=float, default=0.8)
# training hyper-parameter
parser.add_argument('--ifgpu', type=bool, default=True)
parser.add_argument('--epoch_num', type=int, default=40)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--tuning_bert_rate', type=float, default=1e-5)
parser.add_argument('--warm_up', type=float, default=0.1)
parser.add_argument('--beta', type=float, default=1)
args = parser.parse_args()
t = BertTokenizer.from_pretrained(args.bert_model_type)
main(args, t)
| 34,866 | 52.3951 | 156 | py |
DMASTE | DMASTE-main/BMRC/DANN_main.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import argparse
import Data
import DANN_Model as Model
import utils
import torch
from torch.nn import functional as F
from transformers import AdamW, get_linear_schedule_with_warmup, BertTokenizer
import os
from torch.utils.data import Dataset, DataLoader
import random
import numpy as np
from data_utils import Unlabeled_Dataset, Domain
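# DANN variant of the BMRC pipeline: judging from the imports (DANN_Model, Unlabeled_Dataset, Domain),
# it additionally consumes unlabeled target-domain data and a domain label, presumably for
# domain-adversarial training; the extraction/testing logic below mirrors main.py.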
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data[
'_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data[
'_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
def test(model, t, batch_generator, standard, beta, logger):
model.eval()
all_target = []
all_pred = []
triplet_target_num = 0
asp_target_num = 0
opi_target_num = 0
asp_opi_target_num = 0
asp_pol_target_num = 0
triplet_predict_num = 0
asp_predict_num = 0
opi_predict_num = 0
asp_opi_predict_num = 0
asp_pol_predict_num = 0
triplet_match_num = 0
asp_match_num = 0
opi_match_num = 0
asp_opi_match_num = 0
asp_pol_match_num = 0
for batch_index, batch_dict in enumerate(batch_generator):
triplets_target = standard[batch_index]['triplet']
asp_target = standard[batch_index]['asp_target']
opi_target = standard[batch_index]['opi_target']
asp_opi_target = standard[batch_index]['asp_opi_target']
asp_pol_target = standard[batch_index]['asp_pol_target']
        # predicted triplets for this sentence
triplets_predict = []
asp_predict = []
opi_predict = []
asp_opi_predict = []
asp_pol_predict = []
forward_pair_list = []
forward_pair_prob = []
forward_pair_ind_list = []
backward_pair_list = []
backward_pair_prob = []
backward_pair_ind_list = []
final_asp_list = []
final_opi_list = []
final_asp_ind_list = []
final_opi_ind_list = []
# forward q_1
passenge_index = batch_dict['forward_asp_answer_start'][0].gt(-1).float().nonzero()
passenge = batch_dict['forward_asp_query'][0][passenge_index].squeeze(1)
f_asp_start_scores, f_asp_end_scores = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0)
f_asp_start_scores = F.softmax(f_asp_start_scores[0], dim=1)
f_asp_end_scores = F.softmax(f_asp_end_scores[0], dim=1)
f_asp_start_prob, f_asp_start_ind = torch.max(f_asp_start_scores, dim=1)
f_asp_end_prob, f_asp_end_ind = torch.max(f_asp_end_scores, dim=1)
f_asp_start_prob_temp = []
f_asp_end_prob_temp = []
f_asp_start_index_temp = []
f_asp_end_index_temp = []
for i in range(f_asp_start_ind.size(0)):
if batch_dict['forward_asp_answer_start'][0, i] != -1:
if f_asp_start_ind[i].item() == 1:
f_asp_start_index_temp.append(i)
f_asp_start_prob_temp.append(f_asp_start_prob[i].item())
if f_asp_end_ind[i].item() == 1:
f_asp_end_index_temp.append(i)
f_asp_end_prob_temp.append(f_asp_end_prob[i].item())
f_asp_start_index, f_asp_end_index, f_asp_prob = utils.filter_unpaired(
f_asp_start_prob_temp, f_asp_end_prob_temp, f_asp_start_index_temp, f_asp_end_index_temp)
for i in range(len(f_asp_start_index)):
opinion_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What opinion given the aspect'.split(' ')])
for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1):
opinion_query.append(batch_dict['forward_asp_query'][0][j].item())
opinion_query.append(t.convert_tokens_to_ids('?'))
opinion_query.append(t.convert_tokens_to_ids('[SEP]'))
opinion_query_seg = [0] * len(opinion_query)
f_opi_length = len(opinion_query)
opinion_query = torch.tensor(opinion_query).long().cuda()
opinion_query = torch.cat([opinion_query, passenge], -1).unsqueeze(0)
opinion_query_seg += [1] * passenge.size(0)
opinion_query_mask = torch.ones(opinion_query.size(1)).float().cuda().unsqueeze(0)
opinion_query_seg = torch.tensor(opinion_query_seg).long().cuda().unsqueeze(0)
f_opi_start_scores, f_opi_end_scores = model(opinion_query, opinion_query_mask, opinion_query_seg, 0)
f_opi_start_scores = F.softmax(f_opi_start_scores[0], dim=1)
f_opi_end_scores = F.softmax(f_opi_end_scores[0], dim=1)
f_opi_start_prob, f_opi_start_ind = torch.max(f_opi_start_scores, dim=1)
f_opi_end_prob, f_opi_end_ind = torch.max(f_opi_end_scores, dim=1)
f_opi_start_prob_temp = []
f_opi_end_prob_temp = []
f_opi_start_index_temp = []
f_opi_end_index_temp = []
for k in range(f_opi_start_ind.size(0)):
if opinion_query_seg[0, k] == 1:
if f_opi_start_ind[k].item() == 1:
f_opi_start_index_temp.append(k)
f_opi_start_prob_temp.append(f_opi_start_prob[k].item())
if f_opi_end_ind[k].item() == 1:
f_opi_end_index_temp.append(k)
f_opi_end_prob_temp.append(f_opi_end_prob[k].item())
f_opi_start_index, f_opi_end_index, f_opi_prob = utils.filter_unpaired(
f_opi_start_prob_temp, f_opi_end_prob_temp, f_opi_start_index_temp, f_opi_end_index_temp)
for idx in range(len(f_opi_start_index)):
asp = [batch_dict['forward_asp_query'][0][j].item() for j in range(f_asp_start_index[i], f_asp_end_index[i] + 1)]
opi = [opinion_query[0][j].item() for j in range(f_opi_start_index[idx], f_opi_end_index[idx] + 1)]
asp_ind = [f_asp_start_index[i]-5, f_asp_end_index[i]-5]
opi_ind = [f_opi_start_index[idx]-f_opi_length, f_opi_end_index[idx]-f_opi_length]
temp_prob = f_asp_prob[i] * f_opi_prob[idx]
if asp_ind + opi_ind not in forward_pair_ind_list:
forward_pair_list.append([asp] + [opi])
forward_pair_prob.append(temp_prob)
forward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# backward q_1
b_opi_start_scores, b_opi_end_scores = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0)
b_opi_start_scores = F.softmax(b_opi_start_scores[0], dim=1)
b_opi_end_scores = F.softmax(b_opi_end_scores[0], dim=1)
b_opi_start_prob, b_opi_start_ind = torch.max(b_opi_start_scores, dim=1)
b_opi_end_prob, b_opi_end_ind = torch.max(b_opi_end_scores, dim=1)
b_opi_start_prob_temp = []
b_opi_end_prob_temp = []
b_opi_start_index_temp = []
b_opi_end_index_temp = []
for i in range(b_opi_start_ind.size(0)):
if batch_dict['backward_opi_answer_start'][0, i] != -1:
if b_opi_start_ind[i].item() == 1:
b_opi_start_index_temp.append(i)
b_opi_start_prob_temp.append(b_opi_start_prob[i].item())
if b_opi_end_ind[i].item() == 1:
b_opi_end_index_temp.append(i)
b_opi_end_prob_temp.append(b_opi_end_prob[i].item())
b_opi_start_index, b_opi_end_index, b_opi_prob = utils.filter_unpaired(
b_opi_start_prob_temp, b_opi_end_prob_temp, b_opi_start_index_temp, b_opi_end_index_temp)
# backward q_2
for i in range(len(b_opi_start_index)):
aspect_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What aspect does the opinion'.split(' ')])
for j in range(b_opi_start_index[i], b_opi_end_index[i] + 1):
aspect_query.append(batch_dict['backward_opi_query'][0][j].item())
aspect_query.append(t.convert_tokens_to_ids('describe'))
aspect_query.append(t.convert_tokens_to_ids('?'))
aspect_query.append(t.convert_tokens_to_ids('[SEP]'))
aspect_query_seg = [0] * len(aspect_query)
b_asp_length = len(aspect_query)
aspect_query = torch.tensor(aspect_query).long().cuda()
aspect_query = torch.cat([aspect_query, passenge], -1).unsqueeze(0)
aspect_query_seg += [1] * passenge.size(0)
aspect_query_mask = torch.ones(aspect_query.size(1)).float().cuda().unsqueeze(0)
aspect_query_seg = torch.tensor(aspect_query_seg).long().cuda().unsqueeze(0)
b_asp_start_scores, b_asp_end_scores = model(aspect_query, aspect_query_mask, aspect_query_seg, 0)
b_asp_start_scores = F.softmax(b_asp_start_scores[0], dim=1)
b_asp_end_scores = F.softmax(b_asp_end_scores[0], dim=1)
b_asp_start_prob, b_asp_start_ind = torch.max(b_asp_start_scores, dim=1)
b_asp_end_prob, b_asp_end_ind = torch.max(b_asp_end_scores, dim=1)
b_asp_start_prob_temp = []
b_asp_end_prob_temp = []
b_asp_start_index_temp = []
b_asp_end_index_temp = []
for k in range(b_asp_start_ind.size(0)):
if aspect_query_seg[0, k] == 1:
if b_asp_start_ind[k].item() == 1:
b_asp_start_index_temp.append(k)
b_asp_start_prob_temp.append(b_asp_start_prob[k].item())
if b_asp_end_ind[k].item() == 1:
b_asp_end_index_temp.append(k)
b_asp_end_prob_temp.append(b_asp_end_prob[k].item())
b_asp_start_index, b_asp_end_index, b_asp_prob = utils.filter_unpaired(
b_asp_start_prob_temp, b_asp_end_prob_temp, b_asp_start_index_temp, b_asp_end_index_temp)
for idx in range(len(b_asp_start_index)):
opi = [batch_dict['backward_opi_query'][0][j].item() for j in
range(b_opi_start_index[i], b_opi_end_index[i] + 1)]
asp = [aspect_query[0][j].item() for j in range(b_asp_start_index[idx], b_asp_end_index[idx] + 1)]
asp_ind = [b_asp_start_index[idx]-b_asp_length, b_asp_end_index[idx]-b_asp_length]
opi_ind = [b_opi_start_index[i]-5, b_opi_end_index[i]-5]
temp_prob = b_asp_prob[idx] * b_opi_prob[i]
if asp_ind + opi_ind not in backward_pair_ind_list:
backward_pair_list.append([asp] + [opi])
backward_pair_prob.append(temp_prob)
backward_pair_ind_list.append(asp_ind + opi_ind)
else:
                    print('error')
exit(1)
# filter triplet
# forward
for idx in range(len(forward_pair_list)):
if forward_pair_list[idx] in backward_pair_list:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
else:
if forward_pair_prob[idx] >= beta:
if forward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(forward_pair_list[idx][0])
final_opi_list.append([forward_pair_list[idx][1]])
final_asp_ind_list.append(forward_pair_ind_list[idx][:2])
final_opi_ind_list.append([forward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(forward_pair_list[idx][0])
if forward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(forward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(forward_pair_ind_list[idx][2:])
# backward
for idx in range(len(backward_pair_list)):
if backward_pair_list[idx] not in forward_pair_list:
if backward_pair_prob[idx] >= beta:
if backward_pair_list[idx][0] not in final_asp_list:
final_asp_list.append(backward_pair_list[idx][0])
final_opi_list.append([backward_pair_list[idx][1]])
final_asp_ind_list.append(backward_pair_ind_list[idx][:2])
final_opi_ind_list.append([backward_pair_ind_list[idx][2:]])
else:
asp_index = final_asp_list.index(backward_pair_list[idx][0])
if backward_pair_list[idx][1] not in final_opi_list[asp_index]:
final_opi_list[asp_index].append(backward_pair_list[idx][1])
final_opi_ind_list[asp_index].append(backward_pair_ind_list[idx][2:])
# sentiment
for idx in range(len(final_asp_list)):
predict_opinion_num = len(final_opi_list[idx])
sentiment_query = t.convert_tokens_to_ids(
[word.lower() if word not in ['[CLS]', '[SEP]'] else word for word in
'[CLS] What sentiment given the aspect'.split(' ')])
sentiment_query+=final_asp_list[idx]
sentiment_query += t.convert_tokens_to_ids([word.lower() for word in 'and the opinion'.split(' ')])
            # concatenate all predicted opinion terms into the sentiment query
for idy in range(predict_opinion_num):
sentiment_query+=final_opi_list[idx][idy]
if idy < predict_opinion_num - 1:
sentiment_query.append(t.convert_tokens_to_ids('/'))
sentiment_query.append(t.convert_tokens_to_ids('?'))
sentiment_query.append(t.convert_tokens_to_ids('[SEP]'))
sentiment_query_seg = [0] * len(sentiment_query)
sentiment_query = torch.tensor(sentiment_query).long().cuda()
sentiment_query = torch.cat([sentiment_query, passenge], -1).unsqueeze(0)
sentiment_query_seg += [1] * passenge.size(0)
sentiment_query_mask = torch.ones(sentiment_query.size(1)).float().cuda().unsqueeze(0)
sentiment_query_seg = torch.tensor(sentiment_query_seg).long().cuda().unsqueeze(0)
sentiment_scores = model(sentiment_query, sentiment_query_mask, sentiment_query_seg, 1)
sentiment_predicted = torch.argmax(sentiment_scores[0], dim=0).item()
            # each predicted opinion yields one (aspect, opinion, sentiment) triplet
for idy in range(predict_opinion_num):
asp_f = []
opi_f = []
asp_f.append(final_asp_ind_list[idx][0])
asp_f.append(final_asp_ind_list[idx][1])
opi_f.append(final_opi_ind_list[idx][idy][0])
opi_f.append(final_opi_ind_list[idx][idy][1])
triplet_predict = asp_f + opi_f + [sentiment_predicted]
triplets_predict.append(triplet_predict)
if opi_f not in opi_predict:
opi_predict.append(opi_f)
if asp_f + opi_f not in asp_opi_predict:
asp_opi_predict.append(asp_f + opi_f)
if asp_f + [sentiment_predicted] not in asp_pol_predict:
asp_pol_predict.append(asp_f + [sentiment_predicted])
if asp_f not in asp_predict:
asp_predict.append(asp_f)
all_target.append(triplets_target)
all_pred.append(triplets_predict)
triplet_target_num += len(triplets_target)
asp_target_num += len(asp_target)
opi_target_num += len(opi_target)
asp_opi_target_num += len(asp_opi_target)
asp_pol_target_num += len(asp_pol_target)
triplet_predict_num += len(triplets_predict)
asp_predict_num += len(asp_predict)
opi_predict_num += len(opi_predict)
asp_opi_predict_num += len(asp_opi_predict)
asp_pol_predict_num += len(asp_pol_predict)
for trip in triplets_target:
for trip_ in triplets_predict:
if trip_ == trip:
triplet_match_num += 1
for trip in asp_target:
for trip_ in asp_predict:
if trip_ == trip:
asp_match_num += 1
for trip in opi_target:
for trip_ in opi_predict:
if trip_ == trip:
opi_match_num += 1
for trip in asp_opi_target:
for trip_ in asp_opi_predict:
if trip_ == trip:
asp_opi_match_num += 1
for trip in asp_pol_target:
for trip_ in asp_pol_predict:
if trip_ == trip:
asp_pol_match_num += 1
precision = float(triplet_match_num) / float(triplet_predict_num+1e-6)
recall = float(triplet_match_num) / float(triplet_target_num+1e-6)
f1 = 2 * precision * recall / (precision + recall+1e-6)
logger.info('Triplet - Precision: {}\tRecall: {}\tF1: {}'.format(precision, recall, f1))
precision_aspect = float(asp_match_num) / float(asp_predict_num+1e-6)
recall_aspect = float(asp_match_num) / float(asp_target_num+1e-6)
f1_aspect = 2 * precision_aspect * recall_aspect / (precision_aspect + recall_aspect+1e-6)
logger.info('Aspect - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect, recall_aspect, f1_aspect))
precision_opinion = float(opi_match_num) / float(opi_predict_num+1e-6)
recall_opinion = float(opi_match_num) / float(opi_target_num+1e-6)
f1_opinion = 2 * precision_opinion * recall_opinion / (precision_opinion + recall_opinion+1e-6)
logger.info('Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_opinion, recall_opinion, f1_opinion))
precision_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_predict_num+1e-6)
recall_aspect_sentiment = float(asp_pol_match_num) / float(asp_pol_target_num+1e-6)
f1_aspect_sentiment = 2 * precision_aspect_sentiment * recall_aspect_sentiment / (
precision_aspect_sentiment + recall_aspect_sentiment+1e-6)
logger.info('Aspect-Sentiment - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_sentiment,
recall_aspect_sentiment,
f1_aspect_sentiment))
precision_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_predict_num+1e-6)
recall_aspect_opinion = float(asp_opi_match_num) / float(asp_opi_target_num+1e-6)
f1_aspect_opinion = 2 * precision_aspect_opinion * recall_aspect_opinion / (
precision_aspect_opinion + recall_aspect_opinion+1e-6)
logger.info(
'Aspect-Opinion - Precision: {}\tRecall: {}\tF1: {}'.format(precision_aspect_opinion, recall_aspect_opinion,
f1_aspect_opinion))
metric = {'triplet': {'p': precision, 'r': recall, 'f1': f1},
'aspect': {'p': precision_aspect, 'r': recall_aspect, 'f1': f1_aspect},
'opinion': {'p': precision_opinion, 'r': recall_opinion, 'f1': f1_opinion},
'aspect-sentiment': {'p': precision_aspect_sentiment, 'r': recall_aspect_sentiment, 'f1': f1_aspect_sentiment},
'aspect-opinion': {'p': precision_aspect_opinion, 'r': recall_aspect_opinion, 'f1': f1_aspect_opinion}}
triplets = {'pred': all_pred, 'target': all_target}
return metric, triplets
def main(args, tokenize):
os.makedirs(os.path.join(args.log_dir, args.model_name), exist_ok=True)
os.makedirs(os.path.join(args.tmp_log), exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
# init logger
logger = utils.get_logger(os.path.join(args.tmp_log, args.model_name + '.log'))
# load data
logger.info('loading data......')
# init model
logger.info('initial model......')
model = Model.BERTModel(args)
if args.ifgpu:
model = model.cuda()
# print args
logger.info(args)
if args.mode == 'test':
logger.info('start dev......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
dev_standard = target_standard_data['dev']
dev_data = target_total_data['dev']
dev_dataset = Data.ReviewDataset(None, dev_data, None, 'dev')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_dev, dev_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'dev_metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'dev_pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
logger.info('start testing......')
target_data_path = os.path.join(args.data_dir, args.target + '.pt')
target_standard_data_path = os.path.join(args.data_dir, args.target + '_standard.pt')
target_total_data = torch.load(target_data_path)
target_standard_data = torch.load(target_standard_data_path)
test_standard = target_standard_data['test']
test_data = target_total_data['test']
test_dataset = Data.ReviewDataset(None, None, test_data, 'test')
# load checkpoint
logger.info('loading checkpoint......')
checkpoint = torch.load(os.path.join(args.model_dir, args.model_name + '.pt'))
model.load_state_dict(checkpoint['net'])
model.eval()
batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
# eval
logger.info('evaluating......')
metric, triplets = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
with open(os.path.join(args.log_dir, args.model_name, 'test_metric.txt'), 'w') as f:
f.write(str(metric) + '\n')
with open(os.path.join(args.log_dir, args.model_name, 'test_pred.txt'), 'w') as f:
for p, t in zip(triplets['pred'], triplets['target']):
f.write(str({'pred': p, 'target': t}) + '\n')
elif args.mode == 'train':
source_data_path = os.path.join(args.data_dir, args.source + '.pt')
source_standard_data_path = os.path.join(args.data_dir, args.source + '_standard.pt')
source_total_data = torch.load(source_data_path)
source_standard_data = torch.load(source_standard_data_path)
train_data = source_total_data['train']
dev_data = source_total_data['dev']
dev_standard = source_standard_data['dev']
train_dataset = Data.ReviewDataset(train_data, dev_data, None, 'train')
dev_dataset = Data.ReviewDataset(train_data, dev_data, None, 'dev')
batch_num_train = train_dataset.get_batch_num(args.batch_size)
unlabeled_dataset = Unlabeled_Dataset(os.path.join(args.unlabeled_data, args.target + '.txt'), tokenize)
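        # raw target-domain reviews without triplet labels; in this script they only feed the domain classifier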
# optimizer
logger.info('initial optimizer......')
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if "_bert" in n], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if "_bert" not in n],
'lr': args.learning_rate, 'weight_decay': 0.01}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.tuning_bert_rate, correct_bias=False)
# load saved model, optimizer and epoch num
if args.reload and os.path.exists(args.checkpoint_path):
checkpoint = torch.load(args.checkpoint_path)
model.load_state_dict(checkpoint['net'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch'] + 1
logger.info('Reload model and optimizer after training epoch {}'.format(checkpoint['epoch']))
else:
start_epoch = 1
logger.info('New model and optimizer from epoch 0')
# scheduler
training_steps = args.epoch_num * batch_num_train
warmup_steps = int(training_steps * args.warm_up)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=training_steps)
# training
logger.info('begin training......')
best_dev_f1 = 0.
# unlabeled_generator = Data.generate_fi_batches(dataset=unlabeled_dataset, batch_size=args.batch_size,
# ifgpu=args.ifgpu)
        unlabeled_dataloader = DataLoader(dataset=unlabeled_dataset, batch_size=args.batch_size,
                                          shuffle=True, drop_last=True)
        it = iter(unlabeled_dataloader)
step = 0
for epoch in range(start_epoch, args.epoch_num+1):
model.train()
model.zero_grad()
batch_generator = Data.generate_fi_batches(dataset=train_dataset, batch_size=args.batch_size,
ifgpu=args.ifgpu)
for batch_index, batch_dict in enumerate(batch_generator):
step += 1
p = step / training_steps
alpha = 2. / (1. + np.exp(-10 * p)) - 1
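                # gradient-reversal coefficient: ramps smoothly from 0 towards 1 over training (DANN schedule)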
optimizer.zero_grad()
# q1_a
zero = torch.zeros_like(batch_dict['forward_asp_answer_start'])
domain_label = torch.where(batch_dict['forward_asp_answer_start'] != -1, zero + Domain.Source, zero - 1)
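                # positions carrying a valid answer label are tagged as source domain; the rest are set to -1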
ret = model(batch_dict['forward_asp_query'],
batch_dict['forward_asp_query_mask'],
batch_dict['forward_asp_query_seg'], 0, alpha=alpha, domain=domain_label)
(f_aspect_start_scores, f_aspect_end_scores), f_aspect_domain_scores = ret['cls'], ret['domain_scores']
f_asp_loss = utils.calculate_entity_loss(f_aspect_start_scores, f_aspect_end_scores,
batch_dict['forward_asp_answer_start'],
batch_dict['forward_asp_answer_end'])
f_asp_domain_loss = utils.calculate_domain_loss(f_aspect_domain_scores, domain_label)
# q1_b
zero = torch.zeros_like(batch_dict['backward_opi_answer_start'])
domain_label = torch.where(batch_dict['backward_opi_answer_start'] != -1, zero + Domain.Source, zero - 1)
ret = model(batch_dict['backward_opi_query'],
batch_dict['backward_opi_query_mask'],
batch_dict['backward_opi_query_seg'], 0, alpha=alpha, domain=domain_label)
(b_opi_start_scores, b_opi_end_scores), b_opi_domain_scores = ret['cls'], ret['domain_scores']
b_opi_loss = utils.calculate_entity_loss(b_opi_start_scores, b_opi_end_scores,
batch_dict['backward_opi_answer_start'],
batch_dict['backward_opi_answer_end'])
b_opi_domain_loss = utils.calculate_domain_loss(b_opi_domain_scores, domain_label)
# q2_a
zero = torch.zeros_like(batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)))
domain_label = torch.where(batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)) != -1, zero + Domain.Source, zero - 1)
ret = model(
batch_dict['forward_opi_query'].view(-1, batch_dict['forward_opi_query'].size(-1)),
batch_dict['forward_opi_query_mask'].view(-1, batch_dict['forward_opi_query_mask'].size(-1)),
batch_dict['forward_opi_query_seg'].view(-1, batch_dict['forward_opi_query_seg'].size(-1)),
0, alpha=alpha, domain=domain_label)
(f_opi_start_scores, f_opi_end_scores), f_opi_domain_scores = ret['cls'], ret['domain_scores']
f_opi_loss = utils.calculate_entity_loss(f_opi_start_scores, f_opi_end_scores,
batch_dict['forward_opi_answer_start'].view(-1, batch_dict['forward_opi_answer_start'].size(-1)),
batch_dict['forward_opi_answer_end'].view(-1, batch_dict['forward_opi_answer_end'].size(-1)))
f_opi_domain_loss = utils.calculate_domain_loss(f_opi_domain_scores, domain_label)
# q2_b
zero = torch.zeros_like(batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)))
domain_label = torch.where(batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)) != -1, zero + Domain.Source, zero - 1)
ret = model(
batch_dict['backward_asp_query'].view(-1, batch_dict['backward_asp_query'].size(-1)),
batch_dict['backward_asp_query_mask'].view(-1, batch_dict['backward_asp_query_mask'].size(-1)),
batch_dict['backward_asp_query_seg'].view(-1, batch_dict['backward_asp_query_seg'].size(-1)),
0, alpha=alpha, domain=domain_label)
(b_asp_start_scores, b_asp_end_scores), b_asp_domain_scores = ret['cls'], ret['domain_scores']
b_asp_loss = utils.calculate_entity_loss(b_asp_start_scores, b_asp_end_scores,
batch_dict['backward_asp_answer_start'].view(-1, batch_dict['backward_asp_answer_start'].size(-1)),
batch_dict['backward_asp_answer_end'].view(-1, batch_dict['backward_asp_answer_end'].size(-1)))
b_asp_domain_loss = utils.calculate_domain_loss(b_asp_domain_scores, domain_label)
# q_3
zero = torch.zeros_like(batch_dict['sentiment_answer'].view(-1))
domain_label = torch.where(batch_dict['sentiment_answer'].view(-1) != -1, zero + Domain.Source, zero - 1)
ret = model(batch_dict['sentiment_query'].view(-1, batch_dict['sentiment_query'].size(-1)),
batch_dict['sentiment_query_mask'].view(-1, batch_dict['sentiment_query_mask'].size(-1)),
batch_dict['sentiment_query_seg'].view(-1, batch_dict['sentiment_query_seg'].size(-1)),
1, alpha=alpha, domain=domain_label)
sentiment_scores, sentiment_domain_scores = ret['cls'], ret['domain_scores']
sentiment_loss = utils.calculate_sentiment_loss(sentiment_scores, batch_dict['sentiment_answer'].view(-1))
sentiment_domain_loss = utils.calculate_sentiment_domain_loss(sentiment_domain_scores, domain_label)
# loss
loss_sum = f_asp_loss + f_opi_loss + b_opi_loss + b_asp_loss + args.beta*sentiment_loss
if step % args.ad_steps == 0:
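                    # every ad_steps batches, add domain-adversarial losses from the labeled
                    # source batch and from one batch of unlabeled target-domain data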
domain_loss = 0.2 * (f_asp_domain_loss + f_opi_domain_loss + b_opi_domain_loss + b_asp_domain_loss + sentiment_domain_loss)
                    try:
                        unlabeled = next(it)
                    except StopIteration:
                        it = iter(unlabeled_dataloader)
                        unlabeled = next(it)
# for i in range(len(unlabeled)):
# unlabeled[i] = unlabeled[i].cuda()
for k in unlabeled:
unlabeled[k] = unlabeled[k].cuda()
# domain_scores = model(unlabeled[0], unlabeled[2], unlabeled[1], step=-1, alpha=alpha, domain=unlabeled[3])['domain_scores']
domain_scores = model(query_tensor=unlabeled['input_ids'], query_mask=unlabeled['attention_mask'], query_seg=unlabeled['token_type_ids'], step=0, alpha=alpha, domain=unlabeled['domain_label'])['domain_scores']
target_domain_loss = utils.calculate_domain_loss(domain_scores, unlabeled['domain_label'])
domain_loss += target_domain_loss
loss_sum += domain_loss
loss_sum.backward()
optimizer.step()
scheduler.step()
# train logger
if batch_index % 10 == 0:
logger.info('Epoch:[{}/{}]\t Batch:[{}/{}]\t Loss Sum:{}\t '
'forward Loss:{};{}\t backward Loss:{};{}\t Sentiment Loss:{}'.
format(epoch, args.epoch_num, batch_index, batch_num_train,
round(loss_sum.item(), 4),
round(f_asp_loss.item(), 4), round(f_opi_loss.item(), 4),
round(b_asp_loss.item(), 4), round(b_opi_loss.item(), 4),
round(sentiment_loss.item(), 4)))
# validation
batch_generator_dev = Data.generate_fi_batches(dataset=dev_dataset, batch_size=1, shuffle=False,
ifgpu=args.ifgpu)
metric, _ = test(model, tokenize, batch_generator_dev, dev_standard, args.inference_beta, logger)
f1 = metric['triplet']['f1']
# save model and optimizer
if f1 > best_dev_f1:
best_dev_f1 = f1
logger.info('best dev f1: {}\t epoch: {}'.format(best_dev_f1, epoch))
logger.info('Model saved after epoch {}'.format(epoch))
state = {'net': model.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch}
torch.save(state, os.path.join(args.model_dir, args.model_name + '.pt'))
# test
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.inference_beta, logger)
# logger.info('start testing......')
# test_dataset = Data.ReviewDataset(train_data, dev_data, test_data, 'test')
# # load checkpoint
# logger.info('loading checkpoint......')
# checkpoint = torch.load(args.save_model_path)
# model.load_state_dict(checkpoint['net'])
# model.eval()
# batch_generator_test = Data.generate_fi_batches(dataset=test_dataset, batch_size=1, shuffle=False,
# ifgpu=args.ifgpu)
# # eval
# logger.info('evaluating......')
# f1 = test(model, tokenize, batch_generator_test, test_standard, args.beta, logger)
else:
logger.info('Error mode!')
exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bidirectional MRC-based sentiment triplet extraction')
parser.add_argument('--data_dir', type=str, default="./data/preprocess/")
parser.add_argument('--log_dir', type=str, default="./log/")
parser.add_argument('--tmp_log', type=str)
parser.add_argument('--source', type=str)
parser.add_argument('--target', type=str)
parser.add_argument('--mode', type=str, default="train", choices=["train", "test"])
    parser.add_argument('--reload', type=bool, default=False)  # note: argparse's type=bool treats any non-empty string as True
parser.add_argument('--checkpoint_path', type=str, default="./model/14lap/modelFinal.model")
parser.add_argument('--model_dir', type=str, default="./model/")
parser.add_argument('--model_name', type=str, default="1")
# model hyper-parameter
parser.add_argument('--bert_model_type', type=str, default="bert-base-uncased")
parser.add_argument('--hidden_size', type=int, default=768)
parser.add_argument('--inference_beta', type=float, default=0.8)
# training hyper-parameter
parser.add_argument('--ifgpu', type=bool, default=True)
parser.add_argument('--epoch_num', type=int, default=40)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--tuning_bert_rate', type=float, default=1e-5)
parser.add_argument('--warm_up', type=float, default=0.1)
parser.add_argument('--beta', type=float, default=1)
parser.add_argument('--unlabeled_data', type=str, default='../amazon')
parser.add_argument('--ad_steps', type=int)
args = parser.parse_args()
if os.path.exists(os.path.join(args.log_dir, args.model_name, 'test_metric.txt')):
print('-' * 20, 'file exists', '-' * 20)
else:
t = BertTokenizer.from_pretrained(args.bert_model_type)
main(args, t)
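# Illustrative invocation (argument values are examples, not taken from the repo):
#   --mode train --source 14res --target 14lap --tmp_log ./tmp_log --model_name dann_14res_14lap --ad_steps 1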
| 40,827 | 54.928767 | 229 | py |
DMASTE | DMASTE-main/BMRC/DANN_Model.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from transformers import BertTokenizer, BertModel, BertConfig
import torch.nn as nn
from functions import ReverseLayerF
class BERTModel(nn.Module):
def __init__(self, args):
hidden_size = args.hidden_size
super(BERTModel, self).__init__()
        # BERT encoder
# if args.bert_model_type == 'bert-base-uncased':
self._bert = BertModel.from_pretrained(args.bert_model_type)
self._tokenizer = BertTokenizer.from_pretrained(args.bert_model_type)
        print('BERT model loaded: {}'.format(args.bert_model_type))
# else:
# raise KeyError('Config.args.bert_model_type should be bert-based-uncased. ')
self.classifier_start = nn.Linear(hidden_size, 2)
self.classifier_end = nn.Linear(hidden_size, 2)
self._classifier_sentiment = nn.Linear(hidden_size, 3)
self.domain_classifier = nn.Linear(hidden_size, 2)
def forward(self, query_tensor, query_mask, query_seg, step, alpha=None, domain=None):
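        # step == 0: token-level start/end scores for span extraction;
        # otherwise: sentence-level sentiment scores from the [CLS] representation.
        # When a domain label is supplied, features also go through the gradient-reversal
        # layer and the domain classifier so the encoder is trained adversarially.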
hidden_states = self._bert(query_tensor, attention_mask=query_mask, token_type_ids=query_seg)[0]
ret = dict()
if step == 0: # predict entity
out_scores_start = self.classifier_start(hidden_states)
out_scores_end = self.classifier_end(hidden_states)
ret['cls'] = [out_scores_start, out_scores_end]
# return out_scores_start, out_scores_end
else: # predict sentiment
cls_hidden_states = hidden_states[:, 0, :]
cls_hidden_scores = self._classifier_sentiment(cls_hidden_states)
ret['cls'] = cls_hidden_scores
# return cls_hidden_scores
if domain is not None:
reverse_feature = ReverseLayerF.apply(hidden_states if step == 0 else hidden_states[:, 0, :], alpha)
domain_scores = self.domain_classifier(reverse_feature)
ret['domain_scores'] = domain_scores
return ret
else:
return ret['cls']
| 2,062 | 38.673077 | 112 | py |
DMASTE | DMASTE-main/BMRC/Data.py | # coding: UTF-8
# @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
from torch.utils.data import Dataset, DataLoader
import numpy as np
class OriginalDataset(Dataset):
def __init__(self, pre_data):
self._forward_asp_query = pre_data['_forward_asp_query']
self._forward_opi_query = pre_data['_forward_opi_query']
self._forward_asp_answer_start = pre_data['_forward_asp_answer_start']
self._forward_asp_answer_end = pre_data['_forward_asp_answer_end']
self._forward_opi_answer_start = pre_data['_forward_opi_answer_start']
self._forward_opi_answer_end = pre_data['_forward_opi_answer_end']
self._forward_asp_query_mask = pre_data['_forward_asp_query_mask']
self._forward_opi_query_mask = pre_data['_forward_opi_query_mask']
self._forward_asp_query_seg = pre_data['_forward_asp_query_seg']
self._forward_opi_query_seg = pre_data['_forward_opi_query_seg']
self._backward_asp_query = pre_data['_backward_asp_query']
self._backward_opi_query = pre_data['_backward_opi_query']
self._backward_asp_answer_start = pre_data['_backward_asp_answer_start']
self._backward_asp_answer_end = pre_data['_backward_asp_answer_end']
self._backward_opi_answer_start = pre_data['_backward_opi_answer_start']
self._backward_opi_answer_end = pre_data['_backward_opi_answer_end']
self._backward_asp_query_mask = pre_data['_backward_asp_query_mask']
self._backward_opi_query_mask = pre_data['_backward_opi_query_mask']
self._backward_asp_query_seg = pre_data['_backward_asp_query_seg']
self._backward_opi_query_seg = pre_data['_backward_opi_query_seg']
self._sentiment_query = pre_data['_sentiment_query']
self._sentiment_answer = pre_data['_sentiment_answer']
self._sentiment_query_mask = pre_data['_sentiment_query_mask']
self._sentiment_query_seg = pre_data['_sentiment_query_seg']
self._aspect_num = pre_data['_aspect_num']
self._opinion_num = pre_data['_opinion_num']
class ReviewDataset(Dataset):
def __init__(self, train, dev, test, set):
'''
        Review dataset.
:param train: list, training set of 14 lap, 14 res, 15 res, 16 res
:param dev: list, the same
:param test: list, the same
'''
self._train_set = train
self._dev_set = dev
self._test_set = test
if set == 'train':
self._dataset = self._train_set
elif set == 'dev':
self._dataset = self._dev_set
elif set == 'test':
self._dataset = self._test_set
self._forward_asp_query = self._dataset._forward_asp_query
self._forward_opi_query = self._dataset._forward_opi_query
self._forward_asp_answer_start = self._dataset._forward_asp_answer_start
self._forward_asp_answer_end = self._dataset._forward_asp_answer_end
self._forward_opi_answer_start = self._dataset._forward_opi_answer_start
self._forward_opi_answer_end = self._dataset._forward_opi_answer_end
self._forward_asp_query_mask = self._dataset._forward_asp_query_mask
self._forward_opi_query_mask = self._dataset._forward_opi_query_mask
self._forward_asp_query_seg = self._dataset._forward_asp_query_seg
self._forward_opi_query_seg = self._dataset._forward_opi_query_seg
self._backward_asp_query = self._dataset._backward_asp_query
self._backward_opi_query = self._dataset._backward_opi_query
self._backward_asp_answer_start = self._dataset._backward_asp_answer_start
self._backward_asp_answer_end = self._dataset._backward_asp_answer_end
self._backward_opi_answer_start = self._dataset._backward_opi_answer_start
self._backward_opi_answer_end = self._dataset._backward_opi_answer_end
self._backward_asp_query_mask = self._dataset._backward_asp_query_mask
self._backward_opi_query_mask = self._dataset._backward_opi_query_mask
self._backward_asp_query_seg = self._dataset._backward_asp_query_seg
self._backward_opi_query_seg = self._dataset._backward_opi_query_seg
self._sentiment_query = self._dataset._sentiment_query
self._sentiment_answer = self._dataset._sentiment_answer
self._sentiment_query_mask = self._dataset._sentiment_query_mask
self._sentiment_query_seg = self._dataset._sentiment_query_seg
self._aspect_num = self._dataset._aspect_num
self._opinion_num = self._dataset._opinion_num
def get_batch_num(self, batch_size):
return len(self._forward_asp_query) // batch_size
def __len__(self):
return len(self._forward_asp_query)
def __getitem__(self, item):
forward_asp_query = self._forward_asp_query[item]
forward_opi_query = self._forward_opi_query[item]
forward_asp_answer_start = self._forward_asp_answer_start[item]
forward_asp_answer_end = self._forward_asp_answer_end[item]
forward_opi_answer_start = self._forward_opi_answer_start[item]
forward_opi_answer_end = self._forward_opi_answer_end[item]
forward_asp_query_mask = self._forward_asp_query_mask[item]
forward_opi_query_mask = self._forward_opi_query_mask[item]
forward_asp_query_seg = self._forward_asp_query_seg[item]
forward_opi_query_seg = self._forward_opi_query_seg[item]
backward_asp_query = self._backward_asp_query[item]
backward_opi_query = self._backward_opi_query[item]
backward_asp_answer_start = self._backward_asp_answer_start[item]
backward_asp_answer_end = self._backward_asp_answer_end[item]
backward_opi_answer_start = self._backward_opi_answer_start[item]
backward_opi_answer_end = self._backward_opi_answer_end[item]
backward_asp_query_mask = self._backward_asp_query_mask[item]
backward_opi_query_mask = self._backward_opi_query_mask[item]
backward_asp_query_seg = self._backward_asp_query_seg[item]
backward_opi_query_seg = self._backward_opi_query_seg[item]
sentiment_query = self._sentiment_query[item]
sentiment_answer = self._sentiment_answer[item]
sentiment_query_mask = self._sentiment_query_mask[item]
sentiment_query_seg = self._sentiment_query_seg[item]
aspect_num = self._aspect_num[item]
opinion_num = self._opinion_num[item]
return {"forward_asp_query": np.array(forward_asp_query),
"forward_opi_query": np.array(forward_opi_query),
"forward_asp_answer_start": np.array(forward_asp_answer_start),
"forward_asp_answer_end": np.array(forward_asp_answer_end),
"forward_opi_answer_start": np.array(forward_opi_answer_start),
"forward_opi_answer_end": np.array(forward_opi_answer_end),
"forward_asp_query_mask": np.array(forward_asp_query_mask),
"forward_opi_query_mask": np.array(forward_opi_query_mask),
"forward_asp_query_seg": np.array(forward_asp_query_seg),
"forward_opi_query_seg": np.array(forward_opi_query_seg),
"backward_asp_query": np.array(backward_asp_query),
"backward_opi_query": np.array(backward_opi_query),
"backward_asp_answer_start": np.array(backward_asp_answer_start),
"backward_asp_answer_end": np.array(backward_asp_answer_end),
"backward_opi_answer_start": np.array(backward_opi_answer_start),
"backward_opi_answer_end": np.array(backward_opi_answer_end),
"backward_asp_query_mask": np.array(backward_asp_query_mask),
"backward_opi_query_mask": np.array(backward_opi_query_mask),
"backward_asp_query_seg": np.array(backward_asp_query_seg),
"backward_opi_query_seg": np.array(backward_opi_query_seg),
"sentiment_query": np.array(sentiment_query),
"sentiment_answer": np.array(sentiment_answer),
"sentiment_query_mask": np.array(sentiment_query_mask),
"sentiment_query_seg": np.array(sentiment_query_seg),
"aspect_num": np.array(aspect_num),
"opinion_num": np.array(opinion_num)
}
def generate_fi_batches(dataset, batch_size, shuffle=True, drop_last=True, ifgpu=True):
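    # wrap the dataset in a DataLoader and yield batches, moving every tensor to the GPU when ifgpu is True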
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_dict = {}
for name, tensor in data_dict.items():
if ifgpu:
out_dict[name] = data_dict[name].cuda()
else:
out_dict[name] = data_dict[name]
yield out_dict
| 8,838 | 53.561728 | 87 | py |
DMASTE | DMASTE-main/BMRC/functions.py | from torch.autograd import Function
class ReverseLayerF(Function):
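    # gradient reversal layer: identity in the forward pass, multiplies the incoming
    # gradient by -alpha in the backward pass (used for adversarial domain training)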
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None | 305 | 18.125 | 46 | py |
DMASTE | DMASTE-main/BMRC/dataProcess.py | # @Author: Shaowei Chen, Contact: [email protected]
# @Date: 2021-5-4
import pickle
import torch
import os
class dual_sample(object):
def __init__(self,
original_sample,
text,
forward_querys,
forward_answers,
backward_querys,
backward_answers,
sentiment_querys,
sentiment_answers):
self.original_sample = original_sample #
self.text = text #
self.forward_querys=forward_querys
self.forward_answers=forward_answers
self.backward_querys=backward_querys
self.backward_answers=backward_answers
self.sentiment_querys=sentiment_querys
self.sentiment_answers=sentiment_answers
def get_text(lines):
    # Each line: "<sentence>####<triplet list>", e.g.
    # It is always reliable , never bugged and responds well .####[([8], [9], 'POS')]
text_list = []
aspect_list = []
opinion_list = []
triplet_data = []
sentiment_map = {'POS': 0, 'NEG': 1, 'NEU': 2}
for f in lines:
temp = f.split("####")
assert len(temp) == 2
word_list = temp[0].split()
ts = eval(temp[1])
ts = [(t[0], t[1], sentiment_map[t[2]])for t in ts]
triplet_data.append(ts)
# aspect_label_list = [t.split("=")[-1] for t in temp[1].split()]
# opinion_label_list = [t.split("=")[-1] for t in temp[2].split()]
# aspect_label_list = ['O']
# assert len(word_list) == len(aspect_label_list) == len(opinion_label_list)
text_list.append(word_list)
# aspect_list.append(aspect_label_list)
# opinion_list.append(opinion_label_list)
return text_list, aspect_list, opinion_list, triplet_data
def valid_data(triplet, aspect, opinion):
for t in triplet[0][0]:
assert aspect[t] != ["O"]
for t in triplet[0][1]:
assert opinion[t] != ["O"]
def fusion_dual_triplet(triplet):
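    # group gold triplets by aspect (aspect -> opinions + sentiment) and, dually, by opinion (opinion -> aspects)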
triplet_aspect = []
triplet_opinion = []
triplet_sentiment = []
dual_opinion = []
dual_aspect = []
for t in triplet:
if t[0] not in triplet_aspect:
triplet_aspect.append(t[0])
triplet_opinion.append([t[1]])
triplet_sentiment.append(t[2])
else:
idx = triplet_aspect.index(t[0])
triplet_opinion[idx].append(t[1])
# assert triplet_sentiment[idx] == sentiment_map[t[2]], f'{triplet_sentiment[idx]} {sentiment_map[t[2]]}'
if t[1] not in dual_opinion:
dual_opinion.append(t[1])
dual_aspect.append([t[0]])
else:
idx = dual_opinion.index(t[1])
dual_aspect[idx].append(t[0])
return triplet_aspect, triplet_opinion, triplet_sentiment, dual_opinion, dual_aspect
if __name__ == '__main__':
home_path = "../ia-dataset/"
dataset_name_list = os.listdir(home_path)
dataset_name_list = [x for x in dataset_name_list if '.' not in x]
print(dataset_name_list)
# dataset_type_list = ["train", "test", "dev"]
for dataset_name in dataset_name_list:
dataset_dir = os.path.join(home_path, dataset_name)
dataset_type_list = os.listdir(dataset_dir)
dataset_type_list = [x.split('.')[0] for x in dataset_type_list]
for dataset_type in dataset_type_list:
output_path = "./data/preprocess/" + dataset_name + "_" + dataset_type + "_dual.pt"
# read triple
# f = open(home_path + dataset_name + "/" + dataset_name + "_pair/" + dataset_type + "_pair.pkl", "rb")
# triple_data = pickle.load(f)
# f.close()
# read text
f = open(home_path + dataset_name + "/" + dataset_type + ".txt", "r", encoding="utf-8")
text_lines = f.readlines()
f.close()
# get text
text_list, _, _, triple_data = get_text(text_lines)
sample_list = []
for k in range(len(text_list)):
triplet = triple_data[k]
text = text_list[k]
# valid_data(triplet, aspect_list[k], opinion_list[k])
triplet_aspect, triplet_opinion, triplet_sentiment, dual_opinion, dual_aspect = fusion_dual_triplet(triplet)
forward_query_list = []
backward_query_list = []
sentiment_query_list = []
forward_answer_list = []
backward_answer_list = []
sentiment_answer_list = []
forward_query_list.append(["What", "aspects", "?"])
start = [0] * len(text)
end = [0] * len(text)
for ta in triplet_aspect:
start[ta[0]] = 1
end[ta[-1]] = 1
forward_answer_list.append([start, end])
backward_query_list.append(["What", "opinions", "?"])
start = [0] * len(text)
end = [0] * len(text)
for to in dual_opinion:
start[to[0]] = 1
end[to[-1]] = 1
backward_answer_list.append([start, end])
for idx in range(len(triplet_aspect)):
ta = triplet_aspect[idx]
# opinion query
query = ["What", "opinion", "given", "the", "aspect"] + text[ta[0]:ta[-1] + 1] + ["?"]
forward_query_list.append(query)
start = [0] * len(text)
end = [0] * len(text)
for to in triplet_opinion[idx]:
start[to[0]] = 1
end[to[-1]] = 1
forward_answer_list.append([start, end])
# sentiment query
query = ["What", "sentiment", "given", "the", "aspect"] + text[ta[0]:ta[-1] + 1] + ["and", "the",
"opinion"]
for idy in range(len(triplet_opinion[idx]) - 1):
to = triplet_opinion[idx][idy]
query += text[to[0]:to[-1] + 1] + ["/"]
to = triplet_opinion[idx][-1]
query += text[to[0]:to[-1] + 1] + ["?"]
sentiment_query_list.append(query)
sentiment_answer_list.append(triplet_sentiment[idx])
for idx in range(len(dual_opinion)):
ta = dual_opinion[idx]
# opinion query
query = ["What", "aspect", "does", "the", "opinion"] + text[ta[0]:ta[-1] + 1] + ["describe", "?"]
backward_query_list.append(query)
start = [0] * len(text)
end = [0] * len(text)
for to in dual_aspect[idx]:
start[to[0]] = 1
end[to[-1]] = 1
backward_answer_list.append([start, end])
temp_sample = dual_sample(text_lines[k], text, forward_query_list, forward_answer_list, backward_query_list, backward_answer_list, sentiment_query_list, sentiment_answer_list)
sample_list.append(temp_sample)
torch.save(sample_list, output_path)
| 7,431 | 42.209302 | 230 | py |