repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class: py) |
---|---|---|---|---|---|---|
marmot | marmot-master/marmot/features/phrase/verbs_bigram_feature_extractor.py | from collections import defaultdict
from marmot.features.feature_extractor import FeatureExtractor
def get_verbs(language):
    verbs = defaultdict(list)
verbs['english'] = ['VB']
verbs['spanish'] = ['VL']
return verbs[language]
class VerbsBigramFeatureExtractor(FeatureExtractor):
'''
    Number of verbs in source and target:
<source_number>_<target_number>
'''
def __init__(self, lang='english'):
self.verbs = get_verbs(lang)
def is_verb(self, word):
for n in self.verbs:
if word.startswith(n):
return True
return False
def get_feature(self, context_obj):
source_idx = context_obj['source_index']
target_idx = context_obj['index']
source_verbs, target_verbs = 0, 0
for w in context_obj['target_pos'][target_idx[0]:target_idx[1]]:
if self.is_verb(w):
target_verbs += 1
for w in context_obj['source_pos'][source_idx[0]:source_idx[1]]:
if self.is_verb(w):
source_verbs += 1
feature_val = str(source_verbs) + "_" + str(target_verbs)
return feature_val
def get_feature_name(self):
return "source_target_verbs_numbers"
def get_features(self, context_obj):
return [self.get_feature(context_obj)]
def get_feature_names(self):
return [self.get_feature_name()]
| 1,412 | 27.836735 | 72 | py |
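A minimal usage sketch for the extractor above (the context dict shape follows the tests later in this dump; the POS values are illustrative). Note that a single language's verb-tag prefixes are applied to both the source and the target tags:

extractor = VerbsBigramFeatureExtractor('english')
context_obj = {
    'index': (0, 2),           # target phrase span
    'source_index': (0, 2),    # source phrase span
    'target_pos': ['NN', 'VBZ'],
    'source_pos': ['DT', 'VBD'],
}
print(extractor.get_feature(context_obj))  # '1_1': one verb in each span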
marmot | marmot-master/marmot/features/phrase/punctuation_bigram_feature_extractor.py | import string
from marmot.features.feature_extractor import FeatureExtractor
class PunctuationBigramFeatureExtractor(FeatureExtractor):
'''
Number of punctuation marks in source and target:
<source_number>_<target_number>
'''
def __init__(self):
self.punctuation = string.punctuation
def get_feature(self, context_obj):
source_punct, target_punct = 0, 0
for w in context_obj['target']:
if w in self.punctuation:
target_punct += 1
for w in context_obj['source']:
if w in self.punctuation:
source_punct += 1
feature_val = str(source_punct) + "_" + str(target_punct)
return feature_val
def get_feature_name(self):
return "source_target_punctuation_numbers"
def get_features(self, context_obj):
return [self.get_feature(context_obj)]
def get_feature_names(self):
return [self.get_feature_name()]
| 964 | 27.382353 | 65 | py |
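One subtlety worth knowing: the membership test "w in self.punctuation" is a substring test against the string string.punctuation, so only single-character tokens are counted and multi-character punctuation tokens are missed. A quick illustration:

import string
print('!' in string.punctuation)    # True
print('...' in string.punctuation)  # False: '...' is not a substring, so such tokens are not counted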
marmot | marmot-master/marmot/features/phrase/prev_word_feature_extractor.py | from __future__ import print_function
import sys
from marmot.features.feature_extractor import FeatureExtractor
class PrevWordFeatureExtractor(FeatureExtractor):
'''
Extract previous word
'''
def get_feature(self, context_obj):
if type(context_obj['index']) is int:
first_word_idx = context_obj['index']
elif type(context_obj['index']) is tuple:
first_word_idx = context_obj['index'][0]
else:
print("Unknown type of context object's 'index' field: {}".format(type(context_obj['index'])))
sys.exit()
prev_word = context_obj['target'][first_word_idx - 1] if first_word_idx > 0 else "<s>"
return prev_word
def get_feature_name(self):
return "previoust_word"
def get_features(self, context_obj):
return [self.get_feature(context_obj)]
def get_feature_names(self):
return [self.get_feature_name()]
| 940 | 29.354839 | 106 | py |
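A minimal illustration of the boundary handling above (illustrative data; the context dict follows the format used in the tests later in this dump):

extractor = PrevWordFeatureExtractor()
sent = {'target': ['a', 'boy', 'hits', 'a', 'dog']}
print(extractor.get_feature(dict(sent, index=0)))        # '<s>': sentence start
print(extractor.get_feature(dict(sent, index=(2, 4))))   # 'boy': the word before the phrase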
marmot | marmot-master/marmot/features/phrase/alphanumeric_feature_extractor.py | from __future__ import division
import sys
from marmot.features.feature_extractor import FeatureExtractor
class AlphaNumericFeatureExtractor(FeatureExtractor):
'''
    - percentage of numbers in the source
    - percentage of numbers in the target
    - absolute difference between the number of numbers in the source and target, normalised by target phrase length
    - percentage of source words that mix letters and digits
    - percentage of target words that mix letters and digits
    - absolute difference between the number of such alphanumeric words in the source and target, normalised by target phrase length
'''
def get_features(self, context_obj):
#sys.stderr.write("Start AlphaNumericFeatureExtractor\n")
tg_numbers = 0
tg_alphanumeric = 0
for word in context_obj['token']:
try:
float(word)
tg_numbers += 1
            except ValueError:
if word.isalnum() and not word.isalpha():
tg_alphanumeric += 1
src_numbers = 0
src_alphanumeric = 0
if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
for word in context_obj['source_token']:
try:
float(word)
src_numbers += 1
                except ValueError:
if word.isalnum() and not word.isalpha():
src_alphanumeric += 1
src_len = len(context_obj['source_token'])
tg_len = len(context_obj['token'])
src_tg_num_diff = abs(src_numbers - tg_numbers)/tg_len
src_tg_alnum_diff = abs(src_alphanumeric - tg_alphanumeric)/tg_len
src_num_percent = 0
src_alnum_percent = 0
if src_len > 0:
src_num_percent = src_numbers/src_len
src_alnum_percent = src_alphanumeric/src_len
all_out = [str(src_num_percent),
str(tg_numbers/tg_len),
str(src_tg_num_diff),
str(src_alnum_percent),
str(tg_alphanumeric/tg_len),
str(src_tg_alnum_diff)]
#sys.stderr.write("Finish AlphaNumericFeatureExtractor\n")
return all_out
def get_feature_names(self):
return ['percentage_src_numbers',
'percentage_tg_numbers',
'src_tg_numbers_normalized_diff',
'percentage_src_alphanumeric',
'percentage_tg_alphanumeric',
'src_tg_alphanumeric_normalized_diff']
| 2,488 | 37.890625 | 122 | py |
marmot | marmot-master/marmot/features/phrase/meta_extractor.py | import sys
class MetaExtractor():
'''
class which applies all feature extractors to an object
'''
def __init__(self, extractors):
sys.stderr.write('This is MetaExtractor init\n')
self.extractors = extractors
def get_features(self, context_obj):
features = []
for ext in self.extractors:
features.extend(ext.get_features(context_obj))
return features
def get_feature_names(self):
feature_names = []
for ext in self.extractors:
feature_names.extend(ext.get_feature_names())
return feature_names
| 610 | 24.458333 | 59 | py |
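A sketch of the intended composition pattern, assuming the extractors defined above are importable (the context dict is illustrative):

meta = MetaExtractor([PunctuationBigramFeatureExtractor(),
                      VerbsBigramFeatureExtractor('english')])
context_obj = {
    'target': ['he', 'runs', '.'], 'source': ['il', 'court', '.'],
    'index': (0, 3), 'source_index': (0, 3),
    'target_pos': ['PRP', 'VBZ', '.'], 'source_pos': ['PRP', 'VBZ', '.'],
}
names = meta.get_feature_names()           # names from every extractor, concatenated in order
features = meta.get_features(context_obj)  # values line up positionally with the names
assert len(names) == len(features)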
marmot | marmot-master/marmot/features/phrase/token_count_feature_extractor.py | from __future__ import division
from marmot.features.feature_extractor import FeatureExtractor
import sys
import logging
import numpy as np
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
class TokenCountFeatureExtractor(FeatureExtractor):
def get_features(self, context_obj):
#sys.stderr.write("Start TokenCountFeatureExtractor\n")
target_len = len(context_obj['token'])
target_tok_len = np.average([len(word) for word in context_obj['token']])
source_len, source_tok_len = 0, 0
if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
source_len = len(context_obj['source_token'])
source_tok_len = np.average([len(word) for word in context_obj['source_token']])
target_occur = []
for word in context_obj['token']:
target_occur.append(context_obj['target'].count(word))
avg_occur = np.average(target_occur)
tg_src_ratio = target_len/source_len if source_len > 0 else 0
#sys.stderr.write("Finish TokenCountFeatureExtractor\n")
return [str(target_len),
str(source_len),
str(source_len/target_len),
str(tg_src_ratio),
str(target_tok_len),
str(source_tok_len),
str(avg_occur)]
def get_feature_names(self):
return ['target_phrase_len',
'source_phrase_len',
'source_target_len_ratio',
'target_source_len_ratio',
'avg_target_token_len',
'avg_source_token_len',
'avg_occurrence_of_target_word_within_sentence']
| 1,747 | 37 | 92 | py |
marmot | marmot-master/marmot/features/phrase/lm_feature_extractor.py | from __future__ import division
import sys
import kenlm
from marmot.features.feature_extractor import FeatureExtractor
class LMFeatureExtractor(FeatureExtractor):
def __init__(self, lm_file):
self.model = kenlm.LanguageModel(lm_file)
def get_features(self, context_obj):
#sys.stderr.write("Start LMFeatureExtractor\n")
log_prob = self.model.score(' '.join(context_obj['token']), bos=False, eos=False)
tg_len = len(context_obj['token'])
        perplexity = 2**((-1/tg_len)*log_prob)  # NB: kenlm returns log10 probabilities, so this is a monotonic transform of true perplexity (see the note after this file)
#sys.stderr.write("Finish LMFeatureExtractor\n")
return [str(log_prob), str(perplexity)]
def get_feature_names(self):
return ['target_log_prob', 'target_perplexity']
| 723 | 30.478261 | 89 | py |
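For reference: kenlm's score() returns a log10 probability, so the textbook per-word perplexity of a token sequence of length N with log-probability L is 10**(-L/N). The base-2 variant used above equals that perplexity raised to the power log10(2), a monotonic transform, so it ranks hypotheses identically. A sketch of the standard form, assuming the same kenlm API:

def true_perplexity(model, tokens):
    # kenlm's score() returns a log10 probability
    log10_prob = model.score(' '.join(tokens), bos=False, eos=False)
    return 10 ** (-log10_prob / len(tokens))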
marmot | marmot-master/marmot/features/phrase/__init__.py |  | 0 | 0 | 0 | py |
marmot | marmot-master/marmot/features/phrase/context_feature_extractor.py | import sys
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.ngram_window_extractor import left_context, right_context
class ContextFeatureExtractor(FeatureExtractor):
def get_features(self, context_obj):
if 'source_token' in context_obj and len(context_obj['source_token']) > 0 and len(context_obj['source_index']) > 1:
try:
left_src = left_context(context_obj['source'], context_obj['source_token'][0], context_size=1, idx=context_obj['source_index'][0])
except IndexError:
print(context_obj['source'])
print(context_obj['source_token'])
print(context_obj['source_index'])
sys.exit()
right_src = right_context(context_obj['source'], context_obj['source_token'][-1], context_size=1, idx=context_obj['source_index'][1]-1)
        else:
            # use list placeholders so that left_src[0]/right_src[0] below do not fail
            left_src = [""]
            right_src = [""]
left_tg = left_context(context_obj['target'], context_obj['token'][0], context_size=1, idx=context_obj['index'][0])
right_tg = right_context(context_obj['target'], context_obj['token'][-1], context_size=1, idx=context_obj['index'][1]-1)
return [left_src[0], right_src[0], left_tg[0], right_tg[0]]
def get_feature_names(self):
return ['left_source_context', 'right_source_context', 'left_target_context', 'right_target_context']
| 1,416 | 49.607143 | 147 | py |
marmot | marmot-master/marmot/features/phrase/phrase_alignment_feature_extractor.py | from __future__ import division, print_function
import sys
import numpy as np
import os
import errno
from marmot.features.feature_extractor import FeatureExtractor
from marmot.util.alignments import train_alignments, align_sentence
from marmot.exceptions.no_data_error import NoDataError
class PhraseAlignmentFeatureExtractor(FeatureExtractor):
'''
Extract phrase-level alignment features:
- percentage of unaligned words
- percentage of words with more than 1 aligned words
- average number of aligned words per word
- ...?
'''
def __init__(self, align_model='', src_file='', tg_file='', tmp_dir=None):
if tmp_dir is None:
tmp_dir = os.getcwd()
try:
os.makedirs(tmp_dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(tmp_dir):
pass
else:
raise
self.tmp_dir = tmp_dir
self.model = ''
# no alignment model
if align_model == '':
# if src_file and tg_file are not empty, it means that an alignment model needs to be trained
# (self.model doesn't have to be defined, if context objects have alignments)
if os.path.isfile(src_file) and os.path.isfile(tg_file):
self.model = train_alignments(src_file, tg_file, self.tmp_dir)
else:
self.model = align_model
def get_features(self, context_obj):
#sys.stderr.write("Start PhraseAlignmentFeatureExtractor\n")
if 'source' not in context_obj or context_obj['source'] is None:
#sys.stderr.write('No source')
raise NoDataError('source', context_obj, 'AlignmentFeatureExtractor')
        if 'target' not in context_obj or context_obj['target'] is None:
#sys.stderr.write('No target')
raise NoDataError('target', context_obj, 'AlignmentFeatureExtractor')
        if 'alignments_all' not in context_obj:
            # fall back to the word-level 'alignments' field, whose entries may be
            # an int, None (unaligned), or already a list of aligned source indices
            context_obj['alignments_all'] = [a if isinstance(a, list) else ([] if a is None else [a]) for a in context_obj['alignments']]
# if self.model == '':
# raise NoDataError('alignments', context_obj, 'AlignmentFeatureExtractor')
# we have to extract new alignments because we need the number of aligned words per target word
# local_alignments = align_sentence(context_obj['source'], context_obj['target'], self.model)
n_unaligned, n_multiple = 0, 0
n_alignments = []
#sys.stderr.write('All fine\n')
#sys.stderr.write('%s\n' % (', '.join([s for s in context_obj])))
#sys.stderr.write('%s, %i\n' % (type(context_obj['index']), len(context_obj['index'])))
#sys.stderr.write('Context obj index: %i to %i\n' % (context_obj['index'][0], context_obj['index'][1]))
        phrase_span = range(context_obj['index'][0], context_obj['index'][1])
        # sanity check, hoisted out of the loop since it does not depend on i
        assert all(w == ww for (w, ww) in zip(context_obj['token'], [context_obj['target'][j] for j in phrase_span])), "'token' does not match the indexed span of 'target'"
        for i in phrase_span:
#sys.stderr.write('Assertion was fine\n')
#print(context_obj['alignments_all'])
cur_alignments = len(context_obj['alignments_all'][i])
#sys.stderr.write('Alignments_all\n')
if cur_alignments == 0:
#sys.stderr.write('Cur_alignments = 0\n')
n_unaligned += 1
elif cur_alignments > 1:
#sys.stderr.write('Cur_alignments > 1\n')
n_multiple += 1
#sys.stderr.write('Op!\n')
n_alignments.append(cur_alignments)
#sys.stderr.write('Still fine')
tg_len = len(context_obj['token'])
#sys.stderr.write("Finish PhraseAlignmentFeatureExtractor\n")
return [str(n_unaligned/tg_len), str(n_multiple/tg_len), str(np.average(n_alignments))]
def get_feature_names(self):
return ['num_unaligned', 'num_multi_alignment', 'avg_alignments_num']
| 4,109 | 45.179775 | 188 | py |
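For reference, the alignment fields this extractor consumes, with the data shape used in its test further down (illustrative values):

context_obj_fragment = {
    # one list of aligned source indices per target word
    'alignments': [[], [0, 1], [2, 3], [3], [4]],
    # 'alignments_all' has the same shape; when absent it is derived from 'alignments'
}
# here target word 0 is unaligned and target words 1 and 2 each align to two source words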
marmot | marmot-master/marmot/features/phrase/punctuation_feature_extractor.py | from __future__ import division
import sys
from marmot.features.feature_extractor import FeatureExtractor
class PunctuationFeatureExtractor(FeatureExtractor):
def __init__(self):
self.punctuation = ['.', ',', ':', ';', '?', '!']
def get_features(self, context_obj):
#sys.stderr.write("Start PunctuationFeatureExtractor\n")
punct_source, punct_target = [], []
for punct in self.punctuation:
tmp_source, tmp_target = 0, 0
for word in context_obj['token']:
if word == punct:
tmp_target += 1
for word in context_obj['source_token']:
if word == punct:
tmp_source += 1
punct_source.append(tmp_source)
punct_target.append(tmp_target)
target_len = len(context_obj['token'])
punct_diff = [(src - tg) for (src, tg) in zip(punct_source, punct_target)]
punct_diff_norm = [(src - tg)/target_len for (src, tg) in zip(punct_source, punct_target)]
other_features = []
if len(context_obj['source_token']) > 0:
other_features.append(sum(punct_source)/len(context_obj['source_token']))
else:
other_features.append(0)
other_features.append(sum(punct_target)/len(context_obj['token']))
other_features.append((sum(punct_source) - sum(punct_target))/target_len)
#sys.stderr.write("Finish PunctuationFeatureExtractor\n")
return [str(p) for p in punct_diff] + [str(p) for p in punct_diff_norm] + [str(p) for p in other_features]
def get_feature_names(self):
return ['diff_periods',
'diff_commas',
'diff_colons',
'diff_semicolons',
'diff_questions',
'diff_exclamations',
'diff_periods_weighted',
'diff_commas_weighted',
'diff_colons_weighted',
'diff_semicolons_weighted',
'diff_questions_weighted',
'diff_exclamations_weighted',
'percentage_punct_source',
'percentage_punct_target',
'diff_punct']
| 2,189 | 39.555556 | 114 | py |
marmot | marmot-master/marmot/features/phrase/source_lm_feature_extractor.py | from __future__ import division
import sys
import kenlm
from marmot.features.feature_extractor import FeatureExtractor
class SourceLMFeatureExtractor(FeatureExtractor):
def __init__(self, lm_file):
self.model = kenlm.LanguageModel(lm_file)
def get_features(self, context_obj):
#sys.stderr.write("Start SourceLMFeatureExtractor\n")
if 'source_token' in context_obj and len(context_obj['source_token']) > 0:
log_prob = self.model.score(' '.join(context_obj['source_token']), bos=False, eos=False)
src_len = len(context_obj['source_token'])
            perplexity = 2**((-1/src_len)*log_prob)  # kenlm returns log10 probabilities; cf. lm_feature_extractor.py
#sys.stderr.write("Finish SourceLMFeatureExtractor\n")
return [str(log_prob), str(perplexity)]
else:
#sys.stderr.write("Finish SourceLMFeatureExtractor\n")
return ['0.0', '0.0']
def get_feature_names(self):
return ['source_log_prob', 'source_perplexity']
| 942 | 36.72 | 100 | py |
marmot | marmot-master/marmot/features/phrase/next_word_feature_extractor.py | from __future__ import print_function
import sys
from marmot.features.feature_extractor import FeatureExtractor
class NextWordFeatureExtractor(FeatureExtractor):
'''
Extract next word
'''
def get_feature(self, context_obj):
        if type(context_obj['index']) is int:
            last_word_idx = context_obj['index'] + 1  # word-level object: the next word follows the token itself
elif type(context_obj['index']) is tuple:
last_word_idx = context_obj['index'][1]
else:
print("Unknown type of context object's 'index' field: {}".format(type(context_obj['index'])))
sys.exit()
next_word = context_obj['target'][last_word_idx] if last_word_idx < len(context_obj['target']) else "</s>"
return next_word
def get_feature_name(self):
return "next_word"
def get_features(self, context_obj):
return [self.get_feature(context_obj)]
def get_feature_names(self):
return [self.get_feature_name()]
| 949 | 29.645161 | 114 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_num_translations_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.phrase.num_translations_feature_extractor import NumTranslationsFeatureExtractor
# test the extractor that counts possible translations of the source word above several lexical-probability thresholds
class NumTranslationsFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = NumTranslationsFeatureExtractor('/export/data/varvara/europarl-sys/english_spanish/tiny/model/lex.1.f2e', '/export/data/varvara/europarl-sys/english_spanish/tiny/corpus/nc.clean.1.en')
def test_get_features(self):
obj = {'source':['a', 'boy', 'hits', 'the', 'dog'],
'target':['uno', 'nino', 'abati', 'el', 'perro'],
'token':['el'],
'index': (3, 4),
'source_token': ['the'],
'source_index':(3, 4)}
(f_001, f_005, f_01, f_02, f_05, f_001_w, f_005_w, f_01_w, f_02_w, f_05_w) = self.extractor.get_features(obj)
self.assertEqual(f_001, 7)
self.assertEqual(f_005, 6)
self.assertEqual(f_01, 3)
self.assertEqual(f_02, 2)
self.assertEqual(f_05, 0)
self.assertAlmostEqual(f_001_w, 0.52421775)
self.assertAlmostEqual(f_005_w, 0.4493295)
self.assertAlmostEqual(f_01_w, 0.22466475)
self.assertAlmostEqual(f_02_w, 0.1497765)
self.assertAlmostEqual(f_05_w, 0.0)
if __name__ == '__main__':
unittest.main()
| 1,474 | 37.815789 | 209 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_punctuation_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.punctuation_feature_extractor import PunctuationFeatureExtractor
# test the punctuation feature extractor: per-mark source/target count differences, their weighted versions, and overall punctuation percentages
class PunctuationFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = PunctuationFeatureExtractor()
def test_get_features(self):
obj = {'target': ['a', ',', 'boy', ',', 'hits', '!', '3', 'dogs', ':', 'www1', '.', 'some', 'words', 'not', 'in', 'phrase'],
'source': ['un', ',', 'garcon', ';', 'bate', '!', '!', '3', 'ou', '4', '?', 'chiens', '.', 'quelques', 'mots', 'inutils', 'lalala'],
'token': [',', 'boy', ',', 'hits', '!', '3', 'dogs', ':', ':', '.'],
'index': (1, 11),
'source_token': [',', 'garcon', ';', 'bate', '!', '!', '3', 'ou', '4', '?', 'chiens', '.'],
'source_index': (1, 13)}
all_features = self.extractor.get_features(obj)
self.assertEqual(all_features[0], 0)
self.assertEqual(all_features[1], -1)
self.assertEqual(all_features[2], -2)
self.assertEqual(all_features[3], 1)
self.assertEqual(all_features[4], 1)
self.assertEqual(all_features[5], 1)
self.assertAlmostEqual(all_features[6], 0.0)
self.assertAlmostEqual(all_features[7], -0.1)
self.assertAlmostEqual(all_features[8], -0.2)
self.assertAlmostEqual(all_features[9], 0.1)
self.assertAlmostEqual(all_features[10], 0.1)
self.assertAlmostEqual(all_features[11], 0.1)
self.assertAlmostEqual(all_features[12], 0.5)
self.assertAlmostEqual(all_features[13], 0.6)
self.assertAlmostEqual(all_features[14], 0.0)
'''
0 - 'diff_periods',
1 - 'diff_commas',
        2 - 'diff_colons',
3 - 'diff_semicolons',
4 - 'diff_questions',
5 - 'diff_exclamations',
6 - 'diff_periods_weighted',
7 - 'diff_commas_weighted',
        8 - 'diff_colons_weighted',
9 - 'diff_semicolons_weighted',
10 - 'diff_questions_weighted',
11 - 'diff_exclamations_weighted',
12 - 'percentage_punct_source',
13 - 'percentage_punct_target',
14 - 'diff_punct'
'''
obj_no_src = {'target': ['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source': ['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token': ['3', 'dogs', 'a11i', 'o8', 'www1'],
'index': (3, 8),
'source_token': [],
'source_index': []}
all_features = self.extractor.get_features(obj_no_src)
self.assertEqual(all_features[0], 0)
self.assertEqual(all_features[1], 0)
self.assertEqual(all_features[2], 0)
self.assertEqual(all_features[3], 0)
self.assertEqual(all_features[4], 0)
self.assertEqual(all_features[5], 0)
self.assertAlmostEqual(all_features[6], 0.0)
self.assertAlmostEqual(all_features[7], 0.0)
self.assertAlmostEqual(all_features[8], 0.0)
self.assertAlmostEqual(all_features[9], 0.0)
self.assertAlmostEqual(all_features[10], 0.0)
self.assertAlmostEqual(all_features[11], 0.0)
self.assertAlmostEqual(all_features[12], 0.0)
self.assertAlmostEqual(all_features[13], 0.0)
self.assertAlmostEqual(all_features[14], 0.0)
if __name__ == '__main__':
unittest.main()
| 3,568 | 42.52439 | 147 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_oov_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.oov_feature_extractor import OOVFeatureExtractor
# test the OOV feature extractor, which checks tokens against a reference corpus vocabulary
class OOVFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = OOVFeatureExtractor('test_data/corpus.en')
def test_oov(self):
obj = {'source': ['Edward', 'is', 'a', 'friend', 'of', 'mine'], 'target': ['Edward', 'est', 'mon', 'ami'], 'index': (0, 4), 'source_index': (0, 2), 'token': ['Edward', 'est'], 'source_token': ['Edward', 'is', 'a', 'friend']}
self.assertEqual(self.extractor.get_features(obj)[0], 1)
obj2 = {'source': ['he', 'is', 'a', 'friend', 'of', 'mine'], 'target': ['Il', 'est', 'mon', 'ami'], 'index': (0, 4), 'source_index': (0, 2), 'token': ['Il', 'est'], 'source_token': ['he', 'is', 'a', 'friend']}
self.assertEqual(self.extractor.get_features(obj2)[0], 0)
if __name__ == '__main__':
unittest.main()
| 1,044 | 46.5 | 232 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_phrase_alignment_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.phrase_alignment_feature_extractor import PhraseAlignmentFeatureExtractor
# test the phrase-level alignment feature extractor: percentages of unaligned and multiply-aligned words, and the average number of alignments per word
class PhraseAlignmentFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = PhraseAlignmentFeatureExtractor('/export/data/varvara/marmot/marmot/experiment/test_data/europarl_align_model')
def test_get_features(self):
obj = {'source': ['a', 'boy', 'hits', 'the', 'dog'],
'target': ['uno', 'nino', 'abati', 'el', 'perro'],
'alignments': [[], [0, 1], [2, 3], [3], [4]],
'token': ['uno', 'nino', 'abati'],
'index': (0, 3),
'source_token': ['a', 'boy', 'hits'],
'source_index': (0, 3)}
(n_unaligned, n_multi, align_num) = self.extractor.get_features(obj)
self.assertAlmostEqual(n_unaligned, 0.333333333)
self.assertAlmostEqual(n_multi, 0.666666666)
self.assertAlmostEqual(align_num, 1.33333333)
if __name__ == '__main__':
unittest.main()
| 1,182 | 37.16129 | 136 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_context_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.context_feature_extractor import ContextFeatureExtractor
# test the context feature extractor: left/right neighbouring words of the source and target phrases
class ContextFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = ContextFeatureExtractor()
def test_get_features(self):
obj = {'source': ['Edward', 'is', 'a', 'friend', 'of', 'mine'], 'target': ['Edward', 'est', 'mon', 'ami'], 'index': (0, 2), 'source_index': (0, 4), 'token': ['Edward', 'est'], 'source_token': ['Edward', 'is', 'a', 'friend']}
contexts = self.extractor.get_features(obj)
self.assertEqual(contexts[0], "<s>")
self.assertEqual(contexts[1], "of")
self.assertEqual(contexts[2], "<s>")
self.assertEqual(contexts[3], "mon")
if __name__ == '__main__':
unittest.main()
| 931 | 36.28 | 232 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_ne_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.ne_feature_extractor import NEFeatureExtractor
# test the named-entity feature extractor: presence of NE-like tokens in the source and target phrases
class NEFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = NEFeatureExtractor()
def test_get_features(self):
obj = {'target': ['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source': ['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token': ['3', 'dogs', 'Tinky', 'and', 'Winky'],
'index': (3, 8),
'source_token': ['3', 'ou', 'Tinky', 'et', 'Winky'],
'source_index': (3, 8)}
(src_ne, tg_ne) = self.extractor.get_features(obj)
self.assertEqual(src_ne, 1)
self.assertEqual(tg_ne, 1)
def test_get_features_no_src(self):
obj_no_src = {'target': ['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source': ['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token': ['3', 'dogs', 'Tinky', 'and', 'Winky'],
'index': (3, 8),
'source_token': [],
'source_index': []}
(src_ne, tg_ne) = self.extractor.get_features(obj_no_src)
self.assertEqual(src_ne, 0)
self.assertEqual(tg_ne, 1)
def test_get_features_no_NE(self):
obj_no_src = {'target': ['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source': ['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token': ['3', 'dogs', 'aDa', 'and', 'ooO'],
'index': (3, 8),
'source_token': [],
'source_index': []}
(src_ne, tg_ne) = self.extractor.get_features(obj_no_src)
self.assertEqual(src_ne, 0)
self.assertEqual(tg_ne, 0)
if __name__ == '__main__':
unittest.main()
| 1,987 | 37.230769 | 109 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_token_count_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.phrase.token_count_feature_extractor import TokenCountFeatureExtractor
# test a class which extracts source and target token count features, and the source/target token count ratio
class TokenCountFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = TokenCountFeatureExtractor()
def test_get_features(self):
obj = {'target':['a', 'boy', 'hits', 'a', 'dog'],
'source':['un', 'garcon', 'bate', 'un', 'chien'],
'token':['a', 'boy'],
'index': (0, 2),
'source_token': ['un', 'garcon'],
'source_index':(0, 2)}
(tg_ph_len, src_ph_len, src_tg_ratio, tg_src_ratio, tg_token_len, src_token_len, token_occ) = self.extractor.get_features(obj)
self.assertEqual(tg_ph_len, 2)
self.assertEqual(src_ph_len, 2)
self.assertEqual(src_tg_ratio, 1)
self.assertEqual(tg_src_ratio, 1)
self.assertEqual(tg_token_len, 2)
self.assertEqual(src_token_len, 4)
self.assertAlmostEqual(token_occ, 1.5)
def test_get_features_no_src(self):
obj = {'target':['a', 'boy', 'hits', 'a', 'dog'],
'source':['un', 'garcon', 'bate', 'un', 'chien'],
'token':['a', 'boy'],
'index': (0, 2),
'source_token': [],
'source_index':()}
(tg_ph_len, src_ph_len, src_tg_ratio, tg_src_ratio, tg_token_len, src_token_len, token_occ) = self.extractor.get_features(obj)
self.assertEqual(tg_ph_len, 2)
self.assertEqual(src_ph_len, 0)
self.assertEqual(src_tg_ratio, 0)
self.assertEqual(tg_src_ratio, 0)
self.assertEqual(tg_token_len, 2)
self.assertEqual(src_token_len, 0)
self.assertAlmostEqual(token_occ, 1.5)
if __name__ == '__main__':
unittest.main()
| 1,945 | 37.92 | 134 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_ngram_frequencies_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.phrase.ngram_frequencies_feature_extractor import NgramFrequenciesFeatureExtractor
# test the ngram frequencies feature extractor
class NgramFrequenciesFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = NgramFrequenciesFeatureExtractor(os.getcwd(), ngram_count_file='test_data/ngram_counts')
def test_extractor_creation(self):
extractor = NgramFrequenciesFeatureExtractor(os.getcwd(), corpus='test_data/corpus.en')
self.assertEqual(extractor.ngrams[1]['must'], 783)
self.assertEqual(extractor.ngrams[2]['drop of'], 2)
self.assertEqual(extractor.ngrams[3]['is something that'], 11)
def test_get_features(self):
context = {'token': ['eso', 'es', 'naturally', 'unacceptable', 'ggg'], 'source_token': ['naturally', 'unacceptable', 'thing', 'is']}
features = self.extractor.get_features(context)
self.assertAlmostEqual(features[-3], 1.0)
self.assertAlmostEqual(features[-2], 0.5)
self.assertAlmostEqual(features[-1], 0.0)
if __name__ == '__main__':
unittest.main()
| 1,245 | 39.193548 | 140 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_alphanumeric_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.phrase.alphanumeric_feature_extractor import AlphaNumericFeatureExtractor
# test the alphanumeric feature extractor: percentages of numbers and of alphanumeric tokens in source and target
class AlphaNumericFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = AlphaNumericFeatureExtractor()
def test_get_features(self):
obj = {'target':['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source':['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token':['3', 'dogs', 'a11i', 'o8', 'www1'],
'index': (3, 8),
'source_token': ['3', 'ou', '4', 'chiens', 'b2b'],
'source_index':(3, 8)}
(src_num, tg_num, num_ratio, src_alnum, tg_alnum, alnum_ratio) = self.extractor.get_features(obj)
self.assertAlmostEqual(src_num, 0.4)
self.assertAlmostEqual(tg_num, 0.2)
self.assertAlmostEqual(num_ratio, 0.2)
self.assertAlmostEqual(src_alnum, 0.2)
self.assertAlmostEqual(tg_alnum, 0.6)
self.assertAlmostEqual(alnum_ratio, 0.4)
def test_get_features_no_src(self):
obj_no_src = {'target':['a', 'boy', 'hits', '3', 'dogs', 'a11i', 'o8', 'www1'],
'source':['un', 'garcon', 'bate', '3', 'ou', '4', 'chiens', 'b2b'],
'token':['3', 'dogs', 'a11i', 'o8', 'www1'],
'index': (3, 8),
'source_token': [],
'source_index':[]}
(src_num, tg_num, num_ratio, src_alnum, tg_alnum, alnum_ratio) = self.extractor.get_features(obj_no_src)
self.assertAlmostEqual(src_num, 0)
self.assertAlmostEqual(tg_num, 0.2)
self.assertAlmostEqual(num_ratio, 0.2)
self.assertAlmostEqual(src_alnum, 0)
self.assertAlmostEqual(tg_alnum, 0.6)
self.assertAlmostEqual(alnum_ratio, 0.6)
if __name__ == '__main__':
unittest.main()
| 2,018 | 38.588235 | 112 | py |
marmot | marmot-master/marmot/features/phrase/tests/test_pos_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.phrase.pos_feature_extractor import POSFeatureExtractor
# test the POS feature extractor: content-word / verb / noun / pronoun percentages and source-target ratios
class POSFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.extractor = POSFeatureExtractor('english', 'spanish')
def test_get_features(self):
obj = {'source': ['a', 'boy', 'hits', 'the', 'small', 'dog', 'severely'],
'target': ['uno', 'nino', 'abati', 'el', 'perro'],
'alignments': [[], [0, 1], [2, 3], [3], [4]],
'target_pos': ['ART', 'NC', 'VLfin', 'ART', 'NC'],
'source_pos': ['DT', 'NN', 'VBZ', 'DT', 'JJ', 'NN', 'RB'],
'token': ['uno', 'perro'],
'index': (3, 5),
'source_token': ['the', 'small', 'dog', 'severely'],
'source_index': (3, 7)}
'''
0 - 'percentage_content_words_src',
1 - 'percentage_content_words_tg',
2 - 'percentage_verbs_src',
3 - 'percentage_verbs_tg',
4 - 'percentage_nouns_src',
5 - 'percentage_nouns_tg',
6 - 'percentage_pronouns_src',
7 - 'percentage_pronouns_tg',
8 - 'ratio_content_words_src_tg',
9 - 'ratio_verbs_src_tg',
10 - 'ratio_nouns_src_tg',
11 - 'ratio_pronouns_src_tg'
'''
all_pos = self.extractor.get_features(obj)
self.assertAlmostEqual(all_pos[0], 0.75)
self.assertAlmostEqual(all_pos[1], 0.5)
self.assertAlmostEqual(all_pos[2], 0.0)
self.assertAlmostEqual(all_pos[3], 0.0)
self.assertAlmostEqual(all_pos[4], 0.25)
self.assertAlmostEqual(all_pos[5], 0.5)
self.assertAlmostEqual(all_pos[6], 0.0)
self.assertAlmostEqual(all_pos[7], 0.0)
self.assertAlmostEqual(all_pos[8], 1.5)
self.assertAlmostEqual(all_pos[9], 1.0)
self.assertAlmostEqual(all_pos[10], 0.5)
self.assertAlmostEqual(all_pos[11], 1.0)
if __name__ == '__main__':
unittest.main()
| 2,111 | 36.714286 | 109 | py |
marmot | marmot-master/marmot/features/tests/test_lm_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import shutil
from marmot.features.lm_feature_extractor import LMFeatureExtractor
# test the LM feature extractor: longest left/right n-gram matches and backoff behaviour
class LMFeatureExtractorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(os.path.realpath(__file__))
self.module_path = module_path
self.tmp_dir = os.path.join(module_path, 'tmp_dir')
self.lm3Extractor = LMFeatureExtractor(corpus_file=os.path.join(module_path, 'test_data/training.txt'), srilm=os.environ['SRILM'], tmp_dir=self.tmp_dir)
# self.lm5Extractor = LMFeatureExtractor(corpus_file=os.path.join(module_path, 'test_data/training.txt'), srilm=os.environ['SRILM'], tmp_dir=self.tmp_dir, order=5)
self.lm5Extractor = LMFeatureExtractor(ngram_file=os.path.join(module_path, 'test_data/training.ngram'), srilm=os.environ['SRILM'], tmp_dir=self.tmp_dir, order=5)
def test_get_features(self):
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
(left3, right3, back_l, back_m, back_r) = self.lm3Extractor.get_features({'token': 'for', 'index': 6, 'source': [u'c',u'\'',u'est',u'un',u'garçon'], 'target': [u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G'})
(left5, right5, back_l, back_m, back_r) = self.lm5Extractor.get_features({'token':'for', 'index':6, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G'})
self.assertEqual(left3, 3)
self.assertEqual(right3, 2)
self.assertEqual(left5, 5)
self.assertEqual(right5, 2)
def test_backoff(self):
context_obj = {'token':'more', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'It', u'becomes', u'more', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G'}
(left3, right3, back_l, back_m, back_r) = self.lm3Extractor.get_features(context_obj)
self.assertAlmostEqual(back_l, 1.0)
self.assertAlmostEqual(back_m, 0.4)
self.assertAlmostEqual(back_r, 0.6)
context_obj = {'token':'telescope', 'index':6, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'One', u'of', u'the', u'tasks', u'to', u'the', u'telescope', u'China', u'GAGARIN'], 'tag':'G'}
(left3, right3, back_l, back_m, back_r) = self.lm3Extractor.get_features(context_obj)
self.assertAlmostEqual(back_l, 0.8)
self.assertAlmostEqual(back_m, 0.4)
self.assertAlmostEqual(back_r, 0.1)
context_obj = {'token':'UUUU', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'OOOOO', u'AAAAA', u'UUUU', u'China', u'telescope'], 'tag':'G'}
(left3, right3, back_l, back_m, back_r) = self.lm3Extractor.get_features(context_obj)
self.assertAlmostEqual(back_l, 0.1)
self.assertAlmostEqual(back_m, 0.2)
self.assertAlmostEqual(back_r, 0.3)
def test_start_end(self):
(left3, right3, back_l, back_m, back_r) = self.lm3Extractor.get_features({'token':'short', 'index':0, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'short', u'sentence'], 'tag':'G'})
self.assertAlmostEqual(back_l, 0.3)
self.assertAlmostEqual(back_m, 0.3)
self.assertAlmostEqual(back_r, 0.3)
# def tearDown(self):
# shutil.rmtree(self.tmp_dir, ignore_errors=True)
if __name__ == '__main__':
unittest.main()
| 3,757 | 60.606557 | 314 | py |
marmot | marmot-master/marmot/features/tests/test_google_translate_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.google_translate_feature_extractor import GoogleTranslateFeatureExtractor
class GoogleTranslateFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.gs_extractor_en = GoogleTranslateFeatureExtractor(lang='en')
self.gs_extractor_fr = GoogleTranslateFeatureExtractor(lang='fr')
def test_get_features(self):
gt1 = self.gs_extractor_en.get_features({'token':'short', 'index':3, 'source':[u'c',u'\'',u'est',u'une',u'courte', u'phrase'], 'target':[u'this', u'is', u'a', u'short', u'sentence', u'.'], 'tag':'G'})
self.assertEqual([1], gt1)
gt2 = self.gs_extractor_en.get_features({'token':'little', 'index':3, 'source':[u'c',u'\'',u'est',u'une',u'courte', u'phrase'], 'target':[u'this', u'is', u'a', u'little', u'sentence', u'.'], 'tag':'G'})
self.assertEqual([0], gt2)
def test_no_source(self):
gt = self.gs_extractor_en.get_features({'token':'short', 'index':3, 'target':[u'this', u'is', u'a', u'short', u'sentence', u'.'], 'tag':'G'})
self.assertEqual([], gt)
if __name__ == '__main__':
unittest.main()
| 1,186 | 41.392857 | 210 | py |
marmot | marmot-master/marmot/features/tests/test_source_lm_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.source_lm_feature_extractor import SourceLMFeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
class LMFeatureExtractorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(os.path.realpath(__file__))
self.module_path = module_path
self.lm3Extractor = SourceLMFeatureExtractor(corpus_file=os.path.join(module_path, 'test_data/training.txt'))
self.lm5Extractor = SourceLMFeatureExtractor(corpus_file=os.path.join(module_path, 'test_data/training.txt'), order=5)
def test_get_features(self):
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
(left3, right3) = self.lm3Extractor.get_features({'token': 'est', 'index': 2, 'target': [u'c',u'\'',u'est',u'un',u'garçon'], 'source': [u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G', 'alignments': [None, None, 6, None, None]})
(left5, right5) = self.lm5Extractor.get_features({'token':'est', 'index':2, 'target':[u'c',u'\'',u'est',u'un',u'garçon'], 'source':[u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G', 'alignments': [None, None, 6, None, None]})
self.assertEqual(left3, 3)
self.assertEqual(right3, 2)
self.assertEqual(left5, 5)
self.assertEqual(right5, 2)
# TODO: if source or alignment don't exist, an error should be thrown
def test_no_source(self):
with self.assertRaises(NoDataError):
features = self.lm3Extractor.get_features({'token': 'est', 'index': 2, 'target': [u'c',u'\'',u'est',u'un',u'garçon'], 'tag':'G'})
def test_no_alignments(self):
with self.assertRaises(NoDataError):
features = self.lm3Extractor.get_features({'token': 'est', 'index': 2, 'target': [u'c',u'\'',u'est',u'un',u'garçon'], 'source': [u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G'})
def test_unaligned(self):
left_ngram, right_ngram = self.lm3Extractor.get_features({'token': 'est', 'index': 2, 'target': [u'c',u'\'',u'est',u'un',u'garçon'], 'source': [u'this', u'is', u'a', u'boy'], 'alignments':[0, 1, None, 3, 4], 'tag':'G'})
self.assertEqual(left_ngram, 0)
self.assertEqual(right_ngram, 0)
def test_multi_alignment(self):
(left3, right3) = self.lm3Extractor.get_features({'token': 'est', 'index': 2, 'target': [u'c',u'\'',u'est',u'un',u'garçon'], 'source': [u'It', u'becomes', u'more', u'and', u'more', u'difficult', u'for', u'us', u'to', u'protect', u'her', u'brands', u'in', u'China', '.'], 'tag':'G', 'alignments': [None, None, 6, None, None]})
self.assertEqual(left3, 3)
self.assertEqual(right3, 2)
if __name__ == '__main__':
unittest.main()
| 3,075 | 61.77551 | 333 | py |
marmot | marmot-master/marmot/features/tests/test_target_token_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from marmot.features.target_token_feature_extractor import TargetTokenFeatureExtractor
class TargetTokenFeatureExtractorTests(unittest.TestCase):
def test_get_features(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[3],[2],[4]]}
extractor = TargetTokenFeatureExtractor()
[token, left, right] = extractor.get_features(obj)
self.assertEqual(token, u'hits')
self.assertEqual(left, u'boy')
self.assertEqual(right, u'a')
def test_get_features_two_words(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[3],[2],[4]]}
extractor = TargetTokenFeatureExtractor(context_size=2)
[token, left, right] = extractor.get_features(obj)
self.assertEqual(token, u'hits')
self.assertEqual(left, u'a boy')
self.assertEqual(right, u'a dog')
def test_first_el(self):
obj = {'token': u'a', 'index': 0, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[3],[2],[4]]}
extractor = TargetTokenFeatureExtractor(context_size=2)
[token, left, right] = extractor.get_features(obj)
self.assertEqual(token, u'a')
self.assertEqual(left, u'_START_ _START_')
self.assertEqual(right, u'boy hits')
if __name__ == '__main__':
unittest.main()
| 1,912 | 50.702703 | 273 | py |
marmot | marmot-master/marmot/features/tests/test_dictionary_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.dictionary_feature_extractor import DictionaryFeatureExtractor
# test the dictionary feature extractor: stopword, punctuation, proper-name and digit indicators
class DictionaryFeatureExtractorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(os.path.realpath(__file__))
self.module_path = module_path
self.spec_list = DictionaryFeatureExtractor(language='english')
self.custom_list = DictionaryFeatureExtractor(punctuation=',.:;()', stopwords=['Sam'])
def test_get_features(self):
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
#( is stopword, is punctuation, is proper name, is digit )
(s, p, pr, d) = self.spec_list.get_features( {'token':'a', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'this',u'is',u'a',u'boy',u'.'], 'tag':'G'} )
self.assertEqual(s, 1)
self.assertEqual(p, 0)
self.assertEqual(pr, 0)
self.assertEqual(d, 0)
(s, p, pr, d) = self.custom_list.get_features( {'token':'Sam', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'this',u'is',u'Sam',u'!'], 'tag':'G'} )
self.assertEqual(pr, 1)
self.assertEqual(s, 1)
(s, p, pr, d) = self.custom_list.get_features( {'token':'!', 'index':3, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'this',u'is',u'Sam',u'!'], 'tag':'G'} )
self.assertEqual(p, 0)
(s, p, pr, d) = self.spec_list.get_features( {'token':'33', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'this',u'is',u'33',u'!'], 'tag':'G'} )
self.assertEqual(d, 1)
if __name__ == '__main__':
unittest.main()
| 1,844 | 46.307692 | 179 | py |
marmot | marmot-master/marmot/features/tests/test_ngram_feature_extractor.py | #!/usr/bin/env python
#encoding: utf-8
'''
@author: Chris Hokamp
@contact: [email protected]
'''
from nltk.tokenize import word_tokenize
import unittest
from marmot.util import ngram_window_extractor
class TestNgramFeatureExtractor(unittest.TestCase):
def test_extract_window(self):
sen_str = 'this is a test sentence.'
sen = word_tokenize(sen_str.lower())
test_token = 'this'
window = ngram_window_extractor.extract_window(sen, test_token)
self.assertListEqual(window, ['_START_', 'this', 'is'], 'A window starting with the first token should be correct')
sen2_str = 'this is a test sentence.'
sen2 = word_tokenize(sen2_str.lower())
test_token2 = 'is'
window2 = ngram_window_extractor.extract_window(sen2, test_token2)
self.assertListEqual(window2, ['this', 'is', 'a'], 'A window starting with the second token should be correct')
def test_left_context(self):
sen_str = 'this is a test sentence.'
sen = word_tokenize(sen_str.lower())
test_token = 'is'
left_context = ngram_window_extractor.left_context(sen, test_token, context_size=3)
self.assertListEqual(left_context, ['_START_', '_START_', 'this'], 'left_context should prepend _START_ tokens')
def test_right_context(self):
sen_str = 'this is a test sentence.'
sen = word_tokenize(sen_str.lower())
test_token = 'sentence'
right_context = ngram_window_extractor.right_context(sen, test_token, context_size=3)
self.assertListEqual(right_context, ['.', '_END_', '_END_'], 'right_context should append _END_ tokens')
if __name__ == '__main__':
unittest.main()
| 1,703 | 36.043478 | 123 | py |
marmot | marmot-master/marmot/features/tests/test_wordnet_feature_extractor.py | import unittest
from marmot.features.wordnet_feature_extractor import WordnetFeatureExtractor
class WordnetFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.wordnet_extractor = WordnetFeatureExtractor(src_lang='fre', tg_lang='en')
# self.wordnet_extractor_fr = WordnetFeatureExtractor()
def test_get_features(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN'], 'alignments':[[0],[1],[3],[2],[4]]}
wn_src, wn_tg = self.wordnet_extractor.get_features(obj)
self.assertEqual(wn_src, 9)
self.assertEqual(wn_tg, 24)
def test_no_source(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN']}
wn_src, wn_tg = self.wordnet_extractor.get_features(obj)
self.assertEqual(wn_src, 0)
self.assertEqual(wn_tg, 24)
def test_no_alignment(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN']}
wn_src, wn_tg = self.wordnet_extractor.get_features(obj)
self.assertEqual(wn_src, 0)
self.assertEqual(wn_tg, 24)
def test_multi_alignment(self):
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN'], 'alignments':[[0],[1],[3, 4],[2],[4]]}
wn_src, wn_tg = self.wordnet_extractor.get_features(obj)
self.assertEqual(wn_src, 9)
self.assertEqual(wn_tg, 24)
# def test_no_pos_fr(self):
if __name__ == '__main__':
unittest.main()
| 1,987 | 48.7 | 269 | py |
marmot | marmot-master/marmot/features/tests/test_token_count_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
from marmot.features.token_count_feature_extractor import TokenCountFeatureExtractor
# test a class which extracts source and target token count features, and the source/target token count ratio
class TokenCountFeatureExtractorTests(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
self.tokenCounter = TokenCountFeatureExtractor()
def test_get_features(self):
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
vector = self.tokenCounter.get_features( {'token':'a', 'index':2, 'source':[u'c',u'\'',u'est',u'un',u'garçon'], 'target':[u'this',u'is',u'a',u'boy',u'.'], 'tag':'G'})
# the tokenCounter outputs three features
self.assertEqual(len(vector), 3)
self.assertEqual(vector[0], 5.0)
self.assertEqual(vector[1], 5.0)
self.assertEqual(vector[2], 1.0)
if __name__ == '__main__':
unittest.main() | 1,074 | 37.392857 | 174 | py |
marmot | marmot-master/marmot/features/tests/__init__.py | __author__ = 'chris'
| 21 | 10 | 20 | py |
marmot | marmot-master/marmot/features/tests/test_alignment_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import glob
import unittest
from marmot.features.alignment_feature_extractor import AlignmentFeatureExtractor
class AlignmentFeatureExtractorTests(unittest.TestCase):
def setUp(self):
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.src_name = os.path.join(self.module_path, '../../preprocessing/tests/test_data/corpus.de.1000')
self.tg_name = os.path.join(self.module_path, '../../preprocessing/tests/test_data/corpus.en.1000')
self.aligner_no_model = AlignmentFeatureExtractor()
self.aligner_no_model_2 = AlignmentFeatureExtractor(context_size=2)
def test_alignment_in_obj(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[3],[2],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'un')
self.assertEqual(left, u'frappe')
self.assertEqual(right, u'chien')
(cont_word, left, right) = self.aligner_no_model_2.get_features(obj)
self.assertEqual(left, u'garcon|frappe')
self.assertEqual(right, u'chien|_END_')
def test_alignment_on_the_fly(self):
obj = {'token': u'boy', 'index': 1, 'source': [u'ein', u'junge', u'schlägt', u'einen', u'Hund'], 'target': [u'a', u'boy', u'hits', u'a', u'dog']}
aligner_corpus = AlignmentFeatureExtractor(src_file=self.src_name, tg_file=self.tg_name)
(cont_word, left, right) = aligner_corpus.get_features(obj)
self.assertTrue('alignments' in obj)
self.assertEqual(cont_word, u'junge')
for a_file in glob.glob('align_model.*'):
os.remove(a_file)
for a_file in glob.glob(os.path.basename(self.src_name)+'_'+os.path.basename(self.tg_name)+'*'):
os.remove(a_file)
def test_align_model_in_extractor(self):
obj = {'token': u'boy', 'index': 1, 'source': [u'ein', u'junge', u'schlägt', u'einen', u'Hund'], 'target': [u'a', u'boy', u'hits', u'a', u'dog']}
aligner_model = AlignmentFeatureExtractor(align_model=os.path.join(self.module_path, 'test_data/alignments/align_model'))
(cont_word, left, right) = aligner_model.get_features(obj)
self.assertTrue('alignments' in obj)
self.assertEqual(cont_word, u'junge')
def test_unaligned(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[],[2],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'__unaligned__')
self.assertEqual(left, u'__unaligned__')
self.assertEqual(right, u'__unaligned__')
def test_align_two_adjacent(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[1, 2],[3],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'garcon|frappe')
self.assertEqual(left, u'un')
self.assertEqual(right, 'un')
def test_align_two_gap(self):
obj = {'token': u'hits', 'index': 2, 'target': [u'a',u'boy',u'hits',u'a',u'dog'], 'source': [u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'source_pos': ['DT','NN','VBZ', 'DT', 'NN'], 'alignments': [[0],[1],[2, 4],[3],[4]]}
(cont_word, left, right) = self.aligner_no_model.get_features(obj)
self.assertEqual(cont_word, u'frappe|chien')
self.assertEqual(left, u'garcon')
self.assertEqual(right, u'_END_')
if __name__ == '__main__':
unittest.main()
| 4,139 | 56.5 | 276 | py |
marmot | marmot-master/marmot/features/tests/test_pos_feature_extractor.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import unittest
import StringIO
from marmot.features.pos_feature_extractor import POSFeatureExtractor
class POSFeatureExtractorTests(unittest.TestCase):
# check: POS rerpresentation in context_obj
# no POS representation
def setUp(self):
tagger_root = os.environ['TREE_TAGGER'] if os.environ.has_key('TREE_TAGGER') else ''
if tagger_root == '':
            sys.stderr.write('TREE_TAGGER environment variable should be defined so that $TREE_TAGGER/bin/tree-tagger exists\n')
sys.exit(2)
self.tagger = tagger_root+'/bin/tree-tagger'
self.par_src = tagger_root+'/lib/english-utf8.par'
module_path = os.path.dirname(os.path.realpath(__file__))
self.par_tg = os.path.join(module_path, 'test_data/spanish-par-linux-3.2-utf8.bin')
self.extractor_pos = POSFeatureExtractor( tagger=self.tagger, par_file_src=self.par_src, par_file_tg=self.par_tg )
self.extractor_no_pos = POSFeatureExtractor()
def test_pos_in_obj(self):
obj = {'token':u'a', 'index':0, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN'], 'alignments':[[0],[1],[2],[3],[4]]}
obj_no_align = {'token':u'a', 'index':0, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target_pos':['DT','NN','VBZ', 'DT', 'NN'], 'source_pos':['DT','NN','VBZ', 'DT', 'NN']}
(t1, s1) = self.extractor_no_pos.get_features(obj)
(t2, s2) = self.extractor_no_pos.get_features(obj_no_align)
self.assertEqual(t1, u'DT')
self.assertEqual(s1, ['DT'])
self.assertEqual(t2, 'DT')
self.assertEqual(s2, [])
def test_tag_on_the_fly(self):
# tagging on the fly, adding tagging to the object
obj = {'token':u'niño', 'index':1, 'source':[u'a',u'boy',u'hits',u'a',u'dog'], 'target':[ u'un', u'niño', u'vapulea', u'un', u'perro'], 'alignments':[[0],[1],[2],[3],[4]]}
(t1, s1) = self.extractor_pos.get_features(obj)
self.assertEqual(t1, 'NC')
self.assertEqual(s1, [u'NN'])
self.assertTrue(obj.has_key('target_pos'))
self.assertTrue(obj.has_key('source_pos'))
def test_no_tagger(self):
# no information for tagging
err = StringIO.StringIO()
sys.stderr = err
obj2 = {'token':u'niño', 'index':1, 'source':[u'a',u'boy',u'hits',u'a',u'dog'], 'target':[ u'un', u'niño', u'vapulea', u'un', u'perro'], 'alignments':[[0],[1],[2],[3],[4]]}
(t2, s2) = self.extractor_no_pos.get_features(obj2)
self.assertEqual( err.getvalue(), 'Tagging script and parameter file should be provided\nTagging script and parameter file should be provided\n' )
err.close()
self.assertEqual(t2, u'')
self.assertEqual(s2, [])
def test_only_target_tagging(self):
# no alignments
obj = {'token':u'niño', 'index':1, 'source':[u'a',u'boy',u'hits',u'a',u'dog'], 'target':[ u'un', u'niño', u'vapulea', u'un', u'perro']}
(t1, s1) = self.extractor_pos.get_features(obj)
self.assertEqual(t1, 'NC')
self.assertEqual(s1, [])
if __name__ == '__main__':
unittest.main()
| 3,326 | 47.217391 | 263 | py |
marmot | marmot-master/marmot/experiment/preprocessing_utils.py | from __future__ import print_function
import os
import copy
import multiprocessing as multi
import logging
import numpy as np
from collections import defaultdict
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from marmot.util.simple_corpus import SimpleCorpus
from marmot.experiment.import_utils import list_of_lists
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
# convert alignments from list of strings 'i-j'
# to list of lists such that new_align[j] = i
def convert_alignments(align_list, n_words):
new_align = [[] for i in range(n_words)]
for pair in align_list:
two_digits = pair.split('-')
new_align[int(two_digits[1])].append(int(two_digits[0]))
return new_align
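# A quick illustration (hypothetical data): 'i-j' means source word i aligns to target word j, so
#   convert_alignments(['0-0', '1-2', '3-2'], 4) == [[0], [], [1, 3], []]
# i.e. target word 2 is aligned to source words 1 and 3; target words 1 and 3 are unaligned.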
# TODO: this function adds keys to the context object, but maybe the user wants different keys
# TODO: the function should be agnostic about which keys it adds -- why does it care?
# there is a difference between 'sequence fields' and 'token fields'
# this method creates a context for each token
def create_context(repr_dict, sentence_id=None):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
# if 'alignments' in repr_dict:
# repr_dict['alignments'] = convert_alignments(repr_dict['alignments'], len(repr_dict['target']))
active_keys = repr_dict.keys()
active_keys.remove('tags')
#if 'alignments_file':
# print("OGOGO!!! ALIGNMENTS REMOVED")
# active_keys.remove('alignments_file')
for idx, word in enumerate(repr_dict['target']):
c = {}
c['token'] = word
c['index'] = idx
if sentence_id is not None:
c['sentence_id'] = sentence_id
if type(repr_dict['tags']) == list or type(repr_dict['tags']) == np.ndarray:
c['tag'] = repr_dict['tags'][idx]
c['sequence_tags'] = repr_dict['tags']
elif type(repr_dict['tags']) == int:
            c['tag'] = repr_dict['tags']
else:
print("Unknown type of tags representation:", type(repr_dict['tags']))
return []
for k in active_keys:
#print("Current key: ", k)
c[k] = repr_dict[k]
context_list.append(c)
return context_list
# create context objects from a data_obj -
# - a dictionary with representation labels as keys ('target', 'source', etc.) and
# representations (lists of lists) as values
# output: if data_type = 'plain', one list of context objects is returned
# if data_type = 'sequential', a list of lists of context objects is returned (list of sequences)
# if data_type = 'token', a dict {token: <list_of_contexts>} is returned
# TODO: this function requires the 'target' and 'tag' keys, but the user may wish to specify other keys
# TODO: 'target' and 'tag' don't make sense for every task
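# e.g. for data_obj = {'target': [['a', 'boy']], 'tags': [['OK', 'BAD']]} and
# data_type='plain' this produces one context dict per token, roughly
# {'token': 'a', 'index': 0, 'tag': 'OK', 'sentence_id': 0, ...} and
# {'token': 'boy', 'index': 1, 'tag': 'BAD', 'sentence_id': 0, ...}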
def create_contexts(data_obj, data_type='plain'):
'''
:param data_obj: an object representing a dataset consisting of files
:param data_type:
:return:
'''
contexts = []
if 'target' not in data_obj:
print("No 'target' label in data representations")
return []
if 'tags' not in data_obj:
print("No 'tag' label in data representations or wrong format of tag")
return []
for s_idx, sents in enumerate(zip(*data_obj.values())):
if data_type == 'sequential':
contexts.append(create_context({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, sentence_id=s_idx))
else:
contexts.extend(create_context({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, sentence_id=s_idx))
# TODO: there is an error here
if data_type == 'token':
new_contexts = defaultdict(list)
for cont in contexts:
new_contexts[cont['token']].append(cont)
contexts = copy.deepcopy(new_contexts)
return contexts
# convert list of lists into a flat list
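# e.g. flatten([[1, 2], [3]]) -> [1, 2, 3]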
# TODO: silently returns None when the input is neither a list of lists nor a dict
def flatten(lofl):
if list_of_lists(lofl):
return [item for sublist in lofl for item in sublist]
elif type(lofl) == dict:
return lofl.values()
def map_feature_extractor((context, extractor)):
return extractor.get_features(context)
# feature extraction for categorical features with conversion to one-hot representation
# this implementation is for a list representation
# this returns a list of lists, where each list contains the feature extractor results for a context
# the point of returning a list of lists is to allow binarization of the feature values
# TODO: we can binarize over the columns of the matrix instead of binarizing the results of each feature extractor
# TODO: is the output of the single worker and the multithreaded different? if so, change
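# e.g. two extractors returning [pos_tag] and [token_length] for a context
# yield one concatenated row like ['NN', 3] per context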
def contexts_to_features(contexts, feature_extractors, workers=1):
# single thread
if workers == 1:
return [[x for a_list in [map_feature_extractor((context, extractor)) for extractor in feature_extractors] for x in a_list] for context in contexts]
# multiple threads
else:
# resulting object
res_list = []
pool = multi.Pool(workers)
logger.info('Multithreaded - Extracting the features for: ' + str(len(contexts)) + ' contexts...')
# each context is paired with all feature extractors
for extractor in feature_extractors:
context_list = [(cont, extractor) for cont in contexts]
features = pool.map(map_feature_extractor, context_list)
res_list.append(features)
# np.hstack and np.vstack can't be used because lists have objects of different types
intermediate = [[x[i] for x in res_list] for i in range(len(res_list[0]))]
res_list = [flatten(x) for x in intermediate]
pool.close()
pool.join()
return res_list
# extract tags from a list of contexts
def tags_from_contexts(contexts):
return [context['tag'] for context in contexts]
# train converters(binarizers) from categorical values to one-hot representation
# for all features
# all_values is a list of lists, because we need to look at the feature values for every instance to binarize properly
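# e.g. fit_binarizers([['NN', 0.5], ['VB', 0.1]]) fits a LabelBinarizer on the
# string column 0 and leaves the numeric column 1 without a binarizer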
def fit_binarizers(all_values):
binarizers = {}
for f in range(len(all_values[0])):
cur_features = [context[f] for context in all_values]
# only categorical values need to be binarized, ints/floats are left as they are
if type(cur_features[0]) == str or type(cur_features[0]) == unicode:
lb = LabelBinarizer()
lb.fit(cur_features)
binarizers[f] = lb
elif type(cur_features[0]) == list:
mlb = MultiLabelBinarizer()
# default feature for unknown values
cur_features.append(tuple(("__unk__",)))
mlb.fit([tuple(x) for x in cur_features])
binarizers[f] = mlb
return binarizers
# convert categorical features to one-hot representations with pre-fitted binarizers
# TODO: this function implicitly converts the data into a numpy array
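# e.g. with a LabelBinarizer fitted on ['DT', 'NN', 'VB'] stored under key 0,
# binarize([['NN', 0.5]], binarizers) yields the single row [0, 1, 0, 0.5]:
# the one-hot encoding of 'NN' followed by the numeric feature unchanged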
def binarize(features, binarizers):
assert(list_of_lists(features))
num_features = len(features[0])
if binarizers != {} and max(binarizers.keys()) >= num_features:
print("Binarizers keys max: ", max(binarizers.keys()))
print("Total feature number: ", num_features)
print("Features:", features[0])
assert(binarizers == {} or max(binarizers.keys()) < num_features)
binarized_cols = []
for i in range(num_features):
# get this column
cur_values = [f[i] for f in features]
# if there's a binarizer for this column
if i in binarizers:
binarizer = binarizers[i]
if type(binarizer) == LabelBinarizer:
binarized_cols.append(binarizer.transform(cur_values))
elif type(binarizer) == MultiLabelBinarizer:
assert(list_of_lists(cur_values))
# MultiLabelBinarizer doesn't support unknown values -- they need to be replaced with a default value
# we're going to use the empty list as the default value
cur_values_default = []
default_value = binarizer.classes_[-1]
for a_list in cur_values:
new_list = list(a_list)
for j, val in enumerate(new_list):
if val not in binarizer.classes_:
new_list[j] = default_value
cur_values_default.append(tuple(new_list))
transformed = binarizer.transform(cur_values_default)
binarized_cols.append(transformed)
else:
raise NotImplementedError('this function is not implemented for type: {}'.format(type(binarizer)))
else:
binarized_cols.append(np.array(cur_values).reshape(len(cur_values), 1))
assert (len(binarized_cols) == num_features), 'the number of columns after binarization must match the number of features'
new_features = np.hstack(binarized_cols)
return new_features
| 9,703 | 41.191304 | 156 | py |
marmot | marmot-master/marmot/experiment/extract_features_phrase.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import os
from marmot.experiment.import_utils import build_objects, build_object, mk_tmp_dir, call_for_each_element
from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
'''
Only feature extraction
Extract features and save in CRF++ format
'''
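# in the CRF++ data format every token is one line of tab-separated feature
# values with the tag in the last column; sentences are separated by blank lines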
def main(config):
workers = config['workers']
tmp_dir = config['tmp_dir']
tmp_dir = mk_tmp_dir(tmp_dir)
# REPRESENTATION GENERATION
# main representations (source, target, tags)
dev, test = False, False
# training
if 'training' in config['datasets']:
train_data_generator = build_object(config['datasets']['training'][0])
train_data = train_data_generator.generate()
# test
if 'test' in config['datasets']:
test = True
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
# dev
if 'dev' in config['datasets']:
dev = True
dev_data_generator = build_object(config['datasets']['dev'][0])
dev_data = dev_data_generator.generate()
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
if test:
test_data = r.generate(test_data)
if dev:
dev_data = r.generate(dev_data)
logger.info("Simple representations: {}".format(len(train_data['target'])))
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = 'sequential'
bad_tagging = config['bad_tagging']
tags_format = config['tags_format'] if 'tags_format' in config else 'word'
train_contexts = create_contexts_ngram(train_data, data_type=data_type, test=False, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=tags_format)
if test:
test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=tags_format)
if dev:
dev_contexts = create_contexts_ngram(dev_data, data_type=data_type, test=True, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=tags_format)
    if test:
        logger.info('Vocabulary comparison -- coverage for each dataset: ')
        logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
if test:
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
if dev:
dev_tags = call_for_each_element(dev_contexts, tags_from_contexts, data_type=data_type)
# word-level tags and phrase lengths
if test:
test_phrase_lengths = [get_contexts_words_number(cont) for cont in test_contexts]
if dev:
dev_phrase_lengths = [get_contexts_words_number(cont) for cont in dev_contexts]
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
if test:
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
if dev:
logger.info('mapping the feature extractors over the contexts for dev...')
dev_features = call_for_each_element(dev_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('number of training instances: {}'.format(len(train_features)))
    if test:
        logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
# persisting features
logger.info('training and test sets successfully generated')
experiment_datasets = [{'name': 'train', 'features': train_features, 'tags': train_tags, 'phrase_lengths': None}]
if test:
experiment_datasets.append({'name': 'test', 'features': test_features, 'tags': test_tags, 'phrase_lengths': test_phrase_lengths})
if dev:
experiment_datasets.append({'name': 'dev', 'features': dev_features, 'tags': dev_tags, 'phrase_lengths': dev_phrase_lengths})
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
persist_dir = config['persist_dir'] if 'persist_dir' in config else tmp_dir
persist_dir = mk_tmp_dir(persist_dir)
persist_format = config['persist_format']
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name'],
dataset_obj['features'],
persist_dir,
feature_names=feature_names,
phrase_lengths=dataset_obj['phrase_lengths'],
tags=dataset_obj['tags'],
file_format=persist_format)
# generate a template for CRF++ feature extractor
feature_num = len(feature_names)
if persist_format == 'crf++':
generate_crf_template(feature_num, 'template', persist_dir)
logger.info('Features persisted to: {}'.format(', '.join([os.path.join(persist_dir, nn) for nn in [obj['name'] for obj in experiment_datasets]])))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("--data_type", help="data type - sequential or plain")
parser.add_argument("--bad_tagging", help="tagging -- optimistic, pessimistic or super-pessimistic")
parser.add_argument("--unambiguous", default=0, help="make the tagging unambiguous -- no segmentation for spans of BAD tag (values - 0 or 1, default 0)")
parser.add_argument("--output_name", default="output", help="file to store the test set tagging")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
if args.data_type is not None:
experiment_config['data_type'] = args.data_type
if args.bad_tagging is not None:
experiment_config['bad_tagging'] = args.bad_tagging
experiment_config['unambiguous'] = True if int(args.unambiguous) == 1 else False
experiment_config['output_name'] = args.output_name
main(experiment_config)
| 7,750 | 46.552147 | 173 | py |
marmot | marmot-master/marmot/experiment/run_experiment_ngram_new.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import os
import sys
import time
from subprocess import call
from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class
from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features, flatten, fit_binarizers, binarize
from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
def label_test(flat_labels, new_test_name, text_file, method_name):
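    # writes the predicted tags both as one tag sequence per line (.plain) and
    # as one '<method> <sent_idx> <tok_idx> <word> <tag>' row per token (.ext)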
tag_map = {0: 'BAD', 1: 'OK'}
new_test_plain = open(new_test_name+'.'+method_name+'.plain', 'w')
new_test_ext = open(new_test_name+'.'+method_name+'.ext', 'w')
start_idx = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        start_idx += len(words)
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))
# write both hypothesis and reference
def label_test_hyp_ref(flat_labels, flat_true_labels, new_test_name, text_file):
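    # each line of the extended output: <sent_idx> <tok_idx> <word> <true_tag> <predicted_tag>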
tag_map = {0: 'BAD', 1: 'OK'}
new_test = open(new_test_name, 'w')
new_test_plain = open(new_test_name+'.plain', 'w')
start = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
tag_seq = [tag_map[flat_labels[i]] for i in range(start, start+len(words))]
true_tag_seq = [tag_map[flat_true_labels[i]] for i in range(start, start+len(words))]
new_test_plain.write('%s\n' % ' '.join(tag_seq))
start += len(words)
for t_idx, (tag, true_tag, word) in enumerate(zip(tag_seq, true_tag_seq, words)):
new_test.write('%d\t%d\t%s\t%s\t%s\n' % (s_idx, t_idx, word.encode('utf-8'), true_tag, tag))
# check that everything in a data_obj matches:
# - all source and target sentences exist
# - alignments don't hit out of bounds
# - target tokens really exist and are in their places
def main(config, stamp):
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['data_type'] if 'data_type' in config else (config['contexts'] if 'contexts' in config else 'plain')
bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic'
logger.info("data_type -- {}, bad_tagging -- {}".format(data_type, bad_tagging))
# time_stamp = str(time.time())
time_stamp = stamp
workers = config['workers']
tmp_dir = config['tmp_dir']
# one generator
train_data_generator = build_object(config['datasets']['training'][0])
train_data = train_data_generator.generate()
# test
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
logger.info("Train data keys: {}".format(train_data.keys()))
logger.info("Train data sequences: {}".format(len(train_data['target'])))
logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
test_data = r.generate(test_data)
borders = config['borders'] if 'borders' in config else False
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging)
test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging, tags_format=config['tags_format'])
print("Objects in the train data: {}".format(len(train_data['target'])))
print("UNAMBIGUOUS: ", config['unambiguous'])
# train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'])
train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=config['tags_format'])
#print("Train contexts: {}".format(len(train_contexts)))
#print("1st context:", train_contexts[0])
# the list of context objects' 'target' field lengths
# to restore the word-level tags from the phrase-level
#test_context_correspondence = get_contexts_words_number(test_contexts)
if data_type == 'sequential':
test_context_correspondence = flatten([get_contexts_words_number(cont) for cont in test_contexts])
#print(test_context_correspondence)
for idx, cont in enumerate(test_contexts):
get_cont = get_contexts_words_number(cont)
count_cont = [len(c['token']) for c in cont]
assert(all([get_cont[i] == count_cont[i] for i in range(len(cont))])), "Sum doesn't match at line {}:\n{}\n{}".format(idx, ' '.join([str(c) for c in get_cont]), ' '.join([str(c) for c in count_cont]))
assert(sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont])), "Sums don't match: {} and {}".format(sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont]))
else:
test_context_correspondence = get_contexts_words_number(test_contexts)
assert(sum(test_context_correspondence) == sum([len(c['token']) for c in test_contexts])), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for c in test_contexts]))
# print("Token lengths:", sum([len(c['token']) for c in test_contexts]))
# assert(sum(test_context_correspondence) == 9613), "GOLAKTEKO OPASNOSTE!!!, {}".format(sum(test_context_correspondence))
# sys.exit()
# if data_type == 'sequential':
# test_context_correspondence = flatten(test_context_correspondence)
logger.info('Vocabulary comparison -- coverage for each dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
test_tags_true = test_data['tags']
tag_idx = 0
seg_idx = 0
# test_context_correspondence_seq = [get_contexts_words_number(cont) for cont in test_contexts]
# for idx, (tag_seq, phr_seq) in enumerate(zip(test_data['tags'], test_context_correspondence_seq)):
# assert(len(tag_seq) == sum(phr_seq)),"Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
# tag_idx = 0
# for d in phr_seq:
# first_tag = tag_seq[tag_idx]
# assert(all([t == first_tag for t in tag_seq[tag_idx:tag_idx+d]])), "Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
# try:
# indicator = [t == first_tag for t in test_data['tags'][seg_idx][tag_idx:tag_idx+d]]
# assert(all(indicator))
# tags_cnt += d
# if tags_cnt == len(test_data['tags'][seg_idx]):
# tags_cnt = 0
# seg_idx += 1
# elif tags_cnt > len(test_data['tags'][seg_idx]):
# raise
# except:
# print("No correspondence in line {}, tag {}: \n{}\n{}".format(seg_idx, tag_idx, ' '.join(test_data['tags'][seg_idx]), d))
# sys.exit()
assert(sum(test_context_correspondence) == len(flatten(test_data['tags']))), "Sums don't match for phrase contexts and test data object: {} and {}".format(sum(test_context_correspondence), len(flatten(test_data['tags'])))
# flat_cont = flatten(test_contexts)
# flat_tags = flatten(test_data['tags'])
# for ii in range(len(flat_cont)):
if data_type == 'plain':
assert(len(test_context_correspondence) == len(test_tags)), "Lengths don't match for phrase contexts and test tags: {} and {}".format(len(test_context_correspondence), len(test_tags))
# test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.cross_validation import permutation_test_score
import numpy as np
tag_map = {u'OK': 1, u'BAD': 0}
if data_type == 'sequential':
# TODO: save features for CRFSuite, call it
logger.info('training sequential model...')
experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
print("FEATURE NAMES: ", feature_names)
persist_dir = tmp_dir
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
if 'persist_format' not in config:
config['persist_format'] = 'crf_suite'
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name']+time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format'])
feature_num = len(train_features[0][0])
train_file = os.path.join(tmp_dir, 'train'+time_stamp+'.crf')
test_file = os.path.join(tmp_dir, 'test'+time_stamp+'.crf')
if config['persist_format'] == 'crf++':
# generate a template for CRF++ feature extractor
generate_crf_template(feature_num, 'template', tmp_dir)
# train a CRF++ model
call(['crf_learn', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp)])
# tag a test set
call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp), '-o', test_file+'.tagged', test_file])
elif config['persist_format'] == 'crf_suite':
crfsuite_algorithm = config['crfsuite_algorithm'] if 'crfsuite_algorithm' in config else 'arow'
call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), train_file])
test_out = open(test_file+'.tagged', 'w')
call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), test_file], stdout=test_out)
test_out.close()
else:
print("Unknown persist format: {}".format(config['persist_format']))
sequential_true = [[]]
sequential_predictions = [[]]
flat_true = []
flat_predictions = []
for line in open(test_file+'.tagged'):
# end of tagging, statistics reported
if line.startswith('Performance'):
break
            if line == '\n':
                sequential_true.append([])
                sequential_predictions.append([])
                continue
chunks = line[:-1].decode('utf-8').split()
flat_true.append(chunks[-2])
sequential_true[-1].append(chunks[-2])
flat_predictions.append(chunks[-1])
sequential_predictions[-1].append(chunks[-1])
# restoring the word-level tags
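        # e.g. phrase-level predictions ['OK', 'BAD'] with phrase word counts
        # [2, 1] expand to the word-level sequence ['OK', 'OK', 'BAD']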
test_predictions_word, test_tags_word = [], []
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
test_predictions_word.append(flat_predictions[idx])
test_tags_word.append(flat_true[idx])
        print(f1_score(test_tags_word, test_predictions_word, average=None))
        print(f1_score(test_tags_word, test_predictions_word, average='weighted', pos_label=None))
        print("Precision: {}, recall: {}".format(precision_score(test_tags_word, test_predictions_word, average=None), recall_score(test_tags_word, test_predictions_word, average=None)))
else:
train_tags = [tag_map[tag] for tag in train_tags]
#print(test_tags)
test_tags = [tag_map[tag] for tag in test_tags]
#print(test_tags)
#sys.exit()
# data_type is 'token' or 'plain'
logger.info('start training...')
classifier_type = import_class(config['learning']['classifier']['module'])
# train the classifier(s)
classifier_map = map_classifiers(train_features, train_tags, classifier_type, data_type=data_type)
logger.info('classifying the test instances')
test_predictions = predict_all(test_features, classifier_map, data_type=data_type)
# assert(len(test_predictions) == len(flatten(test_tags_seq))), "long predictions: {}, sequential: {}".format(len(test_predictions), len(flatten(test_tags_seq)))
cnt = 0
test_predictions_seq = []
test_tags_seq_num = []
tag_map = {'OK': 1, 'BAD': 0, 1: 1, 0: 0}
long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False
# restoring the word-level tags
test_predictions_word, test_tags_word = [], []
logger.info("Test predictions lenght: {}".format(len(test_predictions)))
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
test_predictions_word.append(test_predictions[idx])
test_tags_word.append(test_tags[idx])
test_tags_true_flat = flatten(test_tags_true)
test_tags_true_flat = [tag_map[t] for t in test_tags_true_flat]
# print(f1_score(test_tags_word, test_predictions_word, average=None))
# print(f1_score(test_tags_word, test_predictions_word, average='weighted', pos_label=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_tags_true_flat, test_predictions_word, average=None), recall_score(test_tags_true_flat, test_predictions_word, average=None)))
# TODO: remove the hard coding of the tags here
bad_count = sum(1 for t in test_tags if t == u'BAD' or t == 0)
good_count = sum(1 for t in test_tags if t == u'OK' or t == 1)
total = len(test_tags)
assert (total == bad_count+good_count), 'tag counts should be correct'
percent_good = good_count / total
logger.info('percent good in test set: {}'.format(percent_good))
logger.info('percent bad in test set: {}'.format(1 - percent_good))
random_class_results = []
random_weighted_results = []
for i in range(20):
        random_tags_phrase = list(np.random.choice([1, 0], total, p=[percent_good, 1-percent_good]))
        random_tags = []
        for idx, n in enumerate(test_context_correspondence):
            for _ in range(n):
                random_tags.append(random_tags_phrase[idx])
# random_tags = [u'GOOD' for i in range(total)]
random_class_f1 = f1_score(test_tags_true_flat, random_tags, average=None)
random_class_results.append(random_class_f1)
logger.info('two class f1 random score ({}): {}'.format(i, random_class_f1))
# random_average_f1 = f1_score(random_tags, test_tags, average='weighted')
random_average_f1 = f1_score(test_tags_true_flat, random_tags, average='weighted', pos_label=None)
random_weighted_results.append(random_average_f1)
# logger.info('average f1 random score ({}): {}'.format(i, random_average_f1))
avg_random_class = np.average(random_class_results, axis=0)
avg_weighted = np.average(random_weighted_results)
logger.info('two class f1 random average score: {}'.format(avg_random_class))
logger.info('weighted f1 random average score: {}'.format(avg_weighted))
# print("Cross-validation:")
# print(permutation_test_score())
# logger.info("Sequence correlation: ")
# print(sequence_correlation_weighted(test_tags_seq_num, test_predictions_seq, verbose=True)[1])
label_test_hyp_ref(test_predictions_word, test_tags_true_flat, os.path.join(tmp_dir, config['output_name']), config["output_test"])
# label_test(test_predictions, '/export/data/varvara/marmot/marmot/experiment/final_submissions/baseline', '/export/data/varvara/corpora/wmt15_corrected/test.target', 'BASELINE')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("--data_type", help="data type - sequential or plain")
parser.add_argument("--bad_tagging", help="tagging -- optimistic, pessimistic or super-pessimistic")
parser.add_argument("--unambiguous", default=0, help="make the tagging unambiguous -- no segmentation for spans of BAD tag (values - 0 or 1, default 0)")
parser.add_argument("--output_name", default="output", help="file to store the test set tagging")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
if args.data_type is not None:
experiment_config['data_type'] = args.data_type
if args.bad_tagging is not None:
experiment_config['bad_tagging'] = args.bad_tagging
experiment_config['unambiguous'] = True if int(args.unambiguous) == 1 else False
experiment_config['output_name'] = args.output_name
stamp = os.path.basename(cfg_path).replace('config', '').replace('.yaml', '') + '_' + experiment_config['bad_tagging'] + '_' + experiment_config['data_type']
if experiment_config['unambiguous']:
stamp += '_un'
main(experiment_config, stamp)
| 19,745 | 55.417143 | 252 | py |
marmot | marmot-master/marmot/experiment/crf_experiment.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import copy
import sys
import os
import time
from subprocess import call
from marmot.experiment.import_utils import call_for_each_element, build_object, build_objects, mk_tmp_dir
from marmot.experiment.preprocessing_utils import create_contexts, flatten, contexts_to_features, tags_from_contexts, fit_binarizers, binarize
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_metrics import weighted_fmeasure, sequence_correlation, sequence_correlation_weighted
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
from marmot.evaluation.evaluation_utils import write_res_to_file
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
'''
Learn a model with an external CRF tool: CRF++ or CRFSuite
'''
def label_test(flat_labels, new_test_name, text_file, method_name):
tag_map = {0: 'BAD', 1: 'OK'}
new_test_plain = open(new_test_name+'.'+method_name+'.plain', 'w')
new_test_ext = open(new_test_name+'.'+method_name+'.ext', 'w')
start_idx = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        start_idx += len(words)
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))
def get_crfpp_output(out_file):
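    # crf_test appends the predicted tag as the last column, so e.g. the line
    # 'word<TAB>feat1<TAB>OK<TAB>BAD' yields the prediction 'BAD'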
predicted = []
for line in open(out_file):
line = line.strip('\n').replace('\t', ' ')
predicted.append(line.split(' ')[-1])
return predicted
def main(config):
workers = config['workers']
tmp_dir = config['tmp_dir'] if 'tmp_dir' in config else None
tmp_dir = mk_tmp_dir(tmp_dir)
time_stamp = str(time.time())
# REPRESENTATION GENERATION
# main representations (source, target, tags)
# training
train_data_generators = build_objects(config['datasets']['training'])
train_data = {}
for gen in train_data_generators:
data = gen.generate()
for key in data:
if key not in train_data:
train_data[key] = []
train_data[key].extend(data[key])
# test
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
logger.info("Train data keys: {}".format(train_data.keys()))
logger.info("Train data sequences: {}".format(len(train_data['target'])))
logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
# logger.info("Sample sequence: {}".format(train_data['similarity'][0]))
# sys.exit()
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
test_data = r.generate(test_data)
# borders = config['borders'] if 'borders' in config else False
# if 'multiply_data_train' not in config:
# pass
# elif config['multiply_data_train'] == 'ngrams':
# train_data = multiply_data_ngrams(train_data, borders=borders)
# elif config['multiply_data_train'] == '1ton':
# train_data = multiply_data(train_data, borders=borders)
# elif config['multiply_data_train'] == 'duplicate':
# train_data = multiply_data_base(train_data)
# elif config['multiply_data_train'] == 'all':
# train_data = multiply_data_all(train_data, borders=borders)
# else:
# print("Unknown 'multiply data train' value: {}".format(config['multiply_data_train']))
# logger.info("Extended train representations: {}".format(len(train_data['target'])))
# logger.info("Simple test representations: {}".format(len(test_data['target'])))
# if 'multiply_data_test' not in config:
# pass
# elif config['multiply_data_test'] == 'ngrams':
# test_data = multiply_data_ngrams(test_data, borders=borders)
# elif config['multiply_data_test'] == '1ton':
# test_data = multiply_data(test_data, borders=borders)
# else:
# print("Unknown 'multiply data test' value: {}".format(config['multiply_data_test']))
# logger.info("Extended test representations: {}".format(len(test_data['target'])))
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['contexts'] if 'contexts' in config else 'plain'
test_contexts = create_contexts(test_data, data_type=data_type)
test_contexts_seq = create_contexts(test_data, data_type='sequential')
train_contexts = create_contexts(train_data, data_type=data_type)
logger.info('Vocabulary comparison -- coverage for each dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
# BEGIN CONVERTING FEATURES TO NUMBERS
logger.info('binarization flag: {}'.format(config['features']['binarize']))
# flatten so that we can properly binarize the features
if config['features']['binarize'] is True:
logger.info('Binarizing your features...')
all_values = []
if data_type == 'sequential':
all_values = flatten(train_features)
elif data_type == 'plain':
all_values = train_features
elif data_type == 'token':
all_values = flatten(train_features.values())
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
features_num = len(feature_names)
true_features_num = len(all_values[0])
logger.info('fitting binarizers...')
binarizers = fit_binarizers(all_values)
logger.info('binarizing test data...')
test_features = call_for_each_element(test_features, binarize, [binarizers], data_type=data_type)
logger.info('binarizing training data...')
# TODO: this line hangs with alignment+w2v
train_features = call_for_each_element(train_features, binarize, [binarizers], data_type=data_type)
logger.info('All of your features are now scalars in numpy arrays')
logger.info('training and test sets successfully generated')
# the way that we persist depends upon the structure of the data (plain/sequence/token_dict)
# TODO: remove this once we have a list containing all datasets
if config['features']['persist']:
if 'persist_format' in config['features']:
persist_format = config['features']['persist_format']
else:
persist_format = 'crf++'
experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
if config['features']['persist_dir']:
persist_dir = config['features']['persist_dir']
else:
            persist_dir = os.getcwd()
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name'], dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=persist_format)
# BEGIN LEARNING
# TODO: different sequence learning modules need different representation, we should wrap them in a class
# TODO: create a consistent interface to sequence learners, will need to use *args and **kwargs because APIs are very different
from sklearn.metrics import f1_score, precision_score, recall_score
import numpy as np
experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
print("FEATURE NAMES: ", feature_names)
persist_dir = tmp_dir
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
if 'persist_format' not in config:
config['persist_format'] = 'crf_suite'
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name']+time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format'])
feature_num = len(train_features[0][0])
train_file = os.path.join(tmp_dir, 'train'+time_stamp+'.crf')
test_file = os.path.join(tmp_dir, 'test'+time_stamp+'.crf')
tag_map = {u'OK': 1, u'BAD': 0, 0: 0, 1: 1}
if config['persist_format'] == 'crf++':
# generate a template for CRF++ feature extractor
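        # a CRF++ template line such as 'U01:%x[0,1]' declares column 1 of the
        # current token as a unigram feature; presumably generate_crf_template
        # emits one such rule per feature column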
generate_crf_template(feature_num, 'template', tmp_dir)
# train a CRF++ model
call(['crf_learn', '-a', 'MIRA', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp)])
# tag a test set
call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp), '-o', test_file+'.tagged', test_file])
elif config['persist_format'] == 'crf_suite':
crfsuite_algorithm = config['crfsuite_algorithm']
call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), train_file])
test_out = open(test_file+'.tagged', 'w')
call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), test_file], stdout=test_out)
test_out.close()
else:
print("Unknown persist format: {}".format(config['persist_format']))
# parse CRFSuite output
flattened_ref, flattened_hyp = [], []
tag_map = {'OK': 1, 'BAD': 0}
for line in open(test_file+'.tagged'):
if line == "\n":
continue
chunks = line.strip('\n').split('\t')
if len(chunks) != 2:
continue
try:
flattened_ref.append(tag_map[chunks[-2]])
flattened_hyp.append(tag_map[chunks[-1]])
except KeyError:
continue
print("Ref, hyp: ", len(flattened_ref), len(flattened_hyp))
logger.info('Structured prediction f1: ')
print(f1_score(flattened_ref, flattened_hyp, average=None))
print(f1_score(flattened_ref, flattened_hyp, average='weighted', pos_label=None))
logger.info("Sequence correlation: ")
# print(sequence_correlation_weighted(y_test, structured_hyp, verbose=True)[1])
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("-a", help="crfsuite algorithm")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
experiment_config['crfsuite_algorithm'] = args.a
main(experiment_config)
| 12,883 | 46.895911 | 187 | py |
marmot | marmot-master/marmot/experiment/extract_features.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import sys
import os
from marmot.experiment.import_utils import call_for_each_element, build_object, build_objects, mk_tmp_dir
from marmot.experiment.preprocessing_utils import create_contexts, tags_from_contexts, contexts_to_features, fit_binarizers, binarize, flatten
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
'''
Only feature extraction
Extract features and save in CRF++, CRFSuite or SVMLight format
'''
def main(config):
workers = config['workers']
tmp_dir = config['tmp_dir']
tmp_dir = mk_tmp_dir(tmp_dir)
# REPRESENTATION GENERATION
# main representations (source, target, tags)
# training
# train_data_generators = build_objects(config['datasets']['training'])
# train_data = {}
# for gen in train_data_generators:
# data = gen.generate()
# for key in data:
# if key not in train_data:
# train_data[key] = []
# train_data[key].extend(data[key])
train_data_generator = build_object(config['datasets']['training'][0])
train_data = train_data_generator.generate()
dev, test = False, False
# test
if 'test' in config['datasets']:
test = True
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
# dev
if 'dev' in config['datasets']:
dev = True
dev_data_generator = build_object(config['datasets']['dev'][0])
dev_data = dev_data_generator.generate()
# additional representations
# print("IN MAIN")
# print(train_data['alignments_file'])
# print(dev_data['alignments_file'])
# print(test_data['alignments_file'])
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
if test:
test_data = r.generate(test_data)
if dev:
dev_data = r.generate(dev_data)
print("TEST DATA", test_data['alignments'][0])
logger.info("Simple representations: {}".format(len(train_data['target'])))
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['data_type']
print("DATA TYPE:", data_type)
# sys.exit()
print("Train data: ", len(train_data['target']))
if dev:
print("Dev data: ", len(dev_data['target']))
if test:
print("Test data: ", len(test_data['target']))
print("In different representations: ")
for rep in train_data:
print(rep, len(train_data[rep]))
# print('Source dependencies: {}'.format(train_data['source_dependencies'][0]))
# print('Target dependencies: {}'.format(train_data['target_dependencies'][0]))
# print('Source root: {}'.format(train_data['source_root'][0]))
# print('Target root: {}'.format(train_data['target_root'][0]))
train_contexts = create_contexts(train_data, data_type=data_type)
if test:
test_contexts = create_contexts(test_data, data_type=data_type)
logger.info('Vocabulary comparison -- coverage for test dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
if dev:
dev_contexts = create_contexts(dev_data, data_type=data_type)
# print("TEST CONTEXT", test_contexts[0])
print("Train contexts: ", len(train_contexts))
if dev:
print("Dev contexts: ", len(dev_contexts))
if test:
print("Test contexts: ", len(test_contexts))
print('Train context example: {}'.format(train_contexts[0]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
if test:
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
if dev:
dev_tags = call_for_each_element(dev_contexts, tags_from_contexts, data_type=data_type)
print("Train tags: ", len(train_tags))
if dev:
print("Dev tags: ", len(dev_tags))
if test:
print("Test tags: ", len(test_tags))
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
if test:
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
print("Test features sample: ", test_features[0])
if dev:
logger.info('mapping the feature extractors over the contexts for dev...')
dev_features = call_for_each_element(dev_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, 1], data_type=data_type)
print("Train features sample: ", train_features[0])
logger.info('number of training instances: {}'.format(len(train_features)))
if dev:
logger.info('number of development instances: {}'.format(len(dev_features)))
if test:
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
# binarizing features
logger.info('binarization flag: {}'.format(config['features']['binarize']))
# flatten so that we can properly binarize the features
if config['features']['binarize'] is True:
logger.info('Binarizing your features...')
all_values = []
if data_type == 'sequential':
all_values = flatten(train_features)
elif data_type == 'plain':
all_values = train_features
elif data_type == 'token':
all_values = flatten(train_features.values())
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
features_num = len(feature_names)
true_features_num = len(all_values[0])
logger.info('fitting binarizers...')
binarizers = fit_binarizers(all_values)
        if test:
            logger.info('binarizing test data...')
            test_features = call_for_each_element(test_features, binarize, [binarizers], data_type=data_type)
        if dev:
            logger.info('binarizing dev data...')
            dev_features = call_for_each_element(dev_features, binarize, [binarizers], data_type=data_type)
logger.info('binarizing training data...')
# TODO: this line hangs with alignment+w2v
train_features = call_for_each_element(train_features, binarize, [binarizers], data_type=data_type)
logger.info('All of your features are now scalars in numpy arrays')
logger.info('training and test sets successfully generated')
# persisting features
logger.info('training and test sets successfully generated')
experiment_datasets = [{'name': 'train', 'features': train_features, 'tags': train_tags}]
if test:
experiment_datasets.append({'name': 'test', 'features': test_features, 'tags': test_tags})
if dev:
experiment_datasets.append({'name': 'dev', 'features': dev_features, 'tags': dev_tags})
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
persist_dir = config['persist_dir'] if 'persist_dir' in config else config['features']['persist_dir']
persist_dir = mk_tmp_dir(persist_dir)
persist_format = config['persist_format'] if 'persist_format' in config else config['features']['persist_format']
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name'], dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=persist_format)
# generate a template for CRF++ feature extractor
feature_num = len(feature_names)
if persist_format == 'crf++':
generate_crf_template(feature_num, 'template', persist_dir)
logger.info('Features persisted to: {}'.format(', '.join([os.path.join(persist_dir, nn) for nn in [obj['name'] for obj in experiment_datasets]])))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
main(experiment_config)
| 9,400 | 43.980861 | 167 | py |
marmot | marmot-master/marmot/experiment/run_experiment_pre_extracted.py | from __future__ import print_function, division
from argparse import ArgumentParser
import os
import sys
import yaml
import time
import logging
from subprocess import call
from sklearn.metrics import f1_score
from marmot.experiment.import_utils import call_for_each_element, build_object, build_objects, mk_tmp_dir
from marmot.experiment.preprocessing_utils import create_contexts, tags_from_contexts, contexts_to_features
from marmot.util.persist_features import persist_features
from marmot.util.add_bigram_features import add_bigram_features
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
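# read pre-extracted features: all tab-separated values up to a blank line are
# collected into one instance's feature vector; values that parse as numbers
# are cast to float, everything else stays a string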
def load_features(features_file, feature_names_file=None):
feature_names = []
if feature_names_file is not None:
for n_line in open(feature_names_file):
feature_names.append(n_line.decode('utf-8').strip('\n'))
features = []
cur_features = []
for f_line in open(features_file):
if f_line == '\n':
features.append(cur_features)
cur_features = []
continue
f_line = f_line.decode('utf-8').strip('\n')
f_chunks = f_line.split("\t")
for f in f_chunks:
try:
cur_features.append(float(f))
except ValueError:
cur_features.append(f)
if len(cur_features) != 0:
features.append(cur_features)
    return features, feature_names
# load labels (one line per sentence, OK/BAD)
def load_tags(tags_file, data_type):
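    # e.g. the line 'OK BAD OK' is appended flat as ['OK', 'BAD', 'OK'] for
    # 'plain' data, or as one nested list per sentence for 'sequential' data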
tags = []
for line in open(tags_file):
if data_type == "plain":
tags.extend(line.strip("\n").split())
elif data_type == "sequential":
tags.append(line.strip("\n").split())
else:
print("Unknown data type: {}".format(data_type))
sys.exit()
return tags
# parse SVMLight output,
# return the predicted tags (0 - BAD, 1 - OK)
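# each output line is expected to look like '<id>:<label> <rest>', e.g.
# '1:+1 0.73' -> class 1 (OK); the label is the substring between the first
# ':' and the first space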
def get_test_score_blind(test_file):
predicted = []
tag_map = {'+1': 1, '-1': 0}
for line in open(test_file):
label = line[line.find(':')+1:line.find(' ')]
predicted.append(tag_map[label])
return predicted
def main(config):
workers = config['workers']
tmp_dir = config['tmp_dir'] if 'tmp_dir' in config else None
tmp_dir = mk_tmp_dir(tmp_dir)
time_stamp = str(time.time())
#----------------------Feature extraction from file------------------
if 'pre-extracted' in config:
train_features, feature_names = load_features(config['pre-extracted']['train-features'], config['pre-extracted']['feature-names'])
test_features, _ = load_features(config['pre-extracted']['test-features'])
        data_type = config['data_type'] if 'data_type' in config else 'plain'
        train_tags = load_tags(config['pre-extracted']['train-tags'], data_type)
        test_tags = load_tags(config['pre-extracted']['test-tags'], data_type)
#--------------REPRESENTATION GENERATION---------------------
else:
# main representations (source, target, tags)
# training
train_data_generator = build_object(config['datasets']['train'][0])
train_data = train_data_generator.generate()
# test
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
test_data = r.generate(test_data)
logger.info("Train data keys: {}".format(train_data.keys()))
logger.info("Train data sequences: {}".format(len(train_data['target'])))
logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['data_type'] if 'data_type' in config else 'plain'
test_contexts = create_contexts(test_data, data_type=data_type)
train_contexts = create_contexts(train_data, data_type=data_type)
#------------------------FEATURE EXTRACTION--------------------------
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
# create features
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
if 'bigram_features' in config and config['bigram_features']:
train_features = call_for_each_element(train_features, add_bigram_features, [train_tags], data_type=data_type)
        test_features = call_for_each_element(test_features, add_bigram_features, [test_tags], data_type=data_type)
# create binary features for training
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
#-------------------PERSIST FEATURES--------------------------------
    if 'features' in config and 'persist_dir' in config['features']:
        persist_dir = config['features']['persist_dir']
    else:
        persist_dir = os.getcwd()
logger.info('persisting your features to: {}'.format(persist_dir))
if data_type == 'plain':
persist_format = 'svm_light'
elif data_type == 'sequential':
persist_format = 'crf_suite'
# for each dataset, write a file and persist the features
train_file_name = persist_features("train", train_features, persist_dir, feature_names=feature_names, tags=train_tags, file_format=persist_format)
test_file_name = persist_features("test", test_features, persist_dir, feature_names=feature_names, tags=test_tags, file_format=persist_format)
test_output = os.path.join(persist_dir, 'out')
#---------------------------TRAINING---------------------------------
#----------------------------SVM LIGHT-------------------------------
if data_type == 'plain':
kernel = 0
if 'svm_params' in config:
try:
kernel = int(config['svm_params']['kernel'])
except ValueError:
kernel = 0
kernel = kernel if kernel <= 4 else 0
model_name = os.path.join(tmp_dir, 'svmlight_model_file'+time_stamp)
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_learn', '-t', str(kernel), train_file_name, model_name])
logger.info("Training completed, start testing")
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_classify', '-f', '0', test_file_name, model_name, test_output])
logger.info("Testing completed")
predicted = get_test_score_blind(test_output)
tag_map = {'OK': 1, 'BAD': 0}
test_tags_num = [tag_map[t] for t in test_tags]
        logger.info(f1_score(test_tags_num, predicted, average=None))
        logger.info(f1_score(test_tags_num, predicted, average='weighted', pos_label=None))
#-------------------------CRFSUITE------------------------------------
elif data_type == "sequential":
model_name = os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp)
crfsuite_algorithm = config['crfsuite_algorithm'] if 'crfsuite_algorithm' in config else 'arow'
call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', model_name, train_file_name])
test_out_stream = open(test_output, 'w')
call(['crfsuite', 'tag', '-tr', '-m', model_name, test_file_name], stdout=test_out_stream)
test_out_stream.close()
# parse CRFSuite output
flattened_ref, flattened_hyp = [], []
tag_map = {'OK': 1, 'BAD': 0}
for line in open(test_output):
if line == "\n":
continue
chunks = line.strip('\n').split('\t')
if len(chunks) != 2:
continue
try:
flattened_ref.append(tag_map[chunks[-2]])
flattened_hyp.append(tag_map[chunks[-1]])
except KeyError:
continue
print("Ref, hyp: ", len(flattened_ref), len(flattened_hyp))
logger.info('Structured prediction f1: ')
f1_both = f1_score(flattened_ref, flattened_hyp, average=None)
print("F1-BAD, F1-OK: ", f1_both)
print("F1-mult: ", f1_both[0] * f1_both[1])
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
main(experiment_config)
| 9,518 | 44.328571 | 150 | py |
marmot | marmot-master/marmot/experiment/import_utils.py | from __future__ import print_function
# we need numpy to check the type of objects in list_of_lists
import numpy
import os
import sys
import errno
def import_class(module_name):
#sys.stderr.write("Importing class %s\n" % module_name)
mod_name, class_name = module_name.rsplit('.', 1)
#sys.stderr.write("Got module name\n")
mod = __import__(mod_name, fromlist=[class_name])
#sys.stderr.write("Imported module\n")
klass = getattr(mod, class_name)
#sys.stderr.write("Imported class\n")
return klass
def import_function(func_name):
mod_name, func_name = func_name.rsplit('.', 1)
mod = __import__(mod_name, fromlist=[func_name])
func = getattr(mod, func_name)
return func
def call_function(function, args):
return function(*args)
def import_and_call_function(function_obj):
func = import_function(function_obj['func'])
args = function_obj['args']
return call_function(func, args)
# check that <a_list> is an iterable of iterables
def list_of_lists(a_list):
if isinstance(a_list, (list, tuple, numpy.ndarray)) and len(a_list) > 0 and all([isinstance(l, (list, tuple, numpy.ndarray)) for l in a_list]):
return True
return False
# call the same function for the data organised in different structures
def call_for_each_element(data, function, args=[], data_type='sequential'):
if data_type == 'plain':
return function(data, *args)
elif data_type == 'sequential':
assert(list_of_lists(data))
return [function(d, *args) for d in data]
elif data_type == 'token':
assert(type(data) == dict)
return {token: function(contexts, *args) for token, contexts in data.items()}
# call through the function tree, at each node look for: (func:<>, args:<>)
# args[] may have a property: type: function_output:, if so, call recursively with (func:<>, args:<>)
# finally, call the original func with its args
def function_tree(func, args):
# map args to function outputs where requested
for idx, arg in enumerate(args):
if type(arg) is dict and 'type' in arg and arg['type'] == 'function_output':
inner_func = import_function(arg['func'])
args[idx] = function_tree(inner_func, arg['args'])
# the function is ready to be called
return call_function(func, args)
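# A minimal sketch of a nested call: the inner 'function_output' node is
# resolved first and its result is passed on to the outer function.
# 'os.path.join' is an arbitrary importable stand-in, not part of the pipeline.
def _function_tree_example():
    inner = {'type': 'function_output', 'func': 'os.path.join', 'args': ['a', 'b']}
    outer = import_function('os.path.join')
    assert function_tree(outer, [inner, 'c']) == os.path.join('a', 'b', 'c')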
# load and build object - universal
def build_object(obj_info, root_element='module'):
#sys.stderr.write("Building extractor: %s\n" % obj_info[root_element])
klass = import_class(obj_info[root_element])
#sys.stderr.write("Class imported\n")
input_args = obj_info['args'] if 'args' in obj_info else []
#sys.stderr.write("Arguments extracted\n")
# map args to function outputs where requested
for idx, arg in enumerate(input_args):
if type(arg) is dict and 'type' in arg and arg['type'] == 'function_output':
func = import_function(arg['func'])
input_args[idx] = function_tree(func, arg['args'])
# init the object
obj = klass(*input_args)
#sys.stderr.write('Object instance created\n')
return obj
# load and build object - universal
def build_object_light(class_name, input_args):
#sys.stderr.write("Building object: %s\n" % class_name)
klass = import_class(class_name)
#sys.stderr.write("Class imported\n")
# init the object
obj = klass(*input_args)
#sys.stderr.write('Object instance created\n')
return obj
def build_objects(object_list, root_element='module'):
objects = []
for obj_info in object_list:
obj = build_object(obj_info)
objects.append(obj)
return objects
# create a directory
def mk_tmp_dir(tmp_dir):
if tmp_dir is None:
tmp_dir = os.getcwd() + '/tmp_dir'
try:
os.makedirs(tmp_dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(tmp_dir):
pass
else:
raise
return tmp_dir
| 3,963 | 31.227642 | 147 | py |
marmot | marmot-master/marmot/experiment/learning_utils.py | # utils for interfacing with Scikit-Learn
import logging
import numpy as np
import copy
from multiprocessing import Pool
from sklearn.metrics import f1_score
from marmot.learning.pystruct_sequence_learner import PystructSequenceLearner
from marmot.experiment.import_utils import call_for_each_element
from marmot.experiment.preprocessing_utils import flatten, fit_binarizers, binarize
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
# TODO: allow specification of cross-validation params at init time
def init_classifier(classifier_type, args=None):
if args is not None:
return classifier_type(*args)
return classifier_type()
def train_classifier(X, y, classifier):
classifier.fit(X, y)
def map_classifiers(all_contexts, tags, classifier_type, data_type='plain', classifier_args=None):
if data_type == 'plain':
assert(type(all_contexts) == np.ndarray or type(all_contexts) == list)
logger.info('training classifier')
classifier = init_classifier(classifier_type, classifier_args)
classifier.fit(all_contexts, tags)
return classifier
elif data_type == 'token':
assert(type(all_contexts) == dict)
classifier_map = {}
for token, contexts in all_contexts.items():
logger.info('training classifier for token: {}'.format(token.encode('utf-8')))
token_classifier = init_classifier(classifier_type, classifier_args)
token_classifier.fit(contexts, tags[token])
classifier_map[token] = token_classifier
return classifier_map
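# A minimal usage sketch with toy data; LogisticRegression is an arbitrary
# scikit-learn classifier chosen for illustration, not the project default.
def _map_classifiers_example():
    from sklearn.linear_model import LogisticRegression
    features = [[0.0, 1.0], [1.0, 0.0], [0.9, 0.1], [0.1, 0.9]]
    tags = [1, 0, 0, 1]
    classifier = map_classifiers(features, tags, LogisticRegression, data_type='plain')
    assert len(predict_all(features, classifier, data_type='plain')) == 4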
def predict_all(test_features, classifier_map, data_type='plain'):
if data_type == 'plain':
predictions = classifier_map.predict(test_features)
return predictions
elif data_type == 'token':
test_predictions = {}
for key, features in test_features.iteritems():
try:
classifier = classifier_map[key]
predictions = classifier.predict(features)
test_predictions[key] = predictions
except KeyError as e:
print(key + " - is NOT in the classifier map")
raise
return test_predictions
def run_prediction((train_data, train_tags, test_data, test_tags, idx)):
logger.info('training sequential model...')
all_values = flatten(train_data)
# binarize
binarizers = fit_binarizers(all_values)
test_data = call_for_each_element(test_data, binarize, [binarizers], data_type='sequential')
train_data = call_for_each_element(train_data, binarize, [binarizers], data_type='sequential')
x_train = np.array([np.array(xi) for xi in train_data])
y_train = np.array([np.array(xi) for xi in train_tags])
x_test = np.array([np.array(xi) for xi in test_data])
y_test = np.array([np.array(xi) for xi in test_tags])
sequence_learner = PystructSequenceLearner()
sequence_learner.fit(x_train, y_train)
structured_hyp = sequence_learner.predict(x_test)
logger.info('scoring sequential model...')
flattened_hyp = flatten(structured_hyp)
flattened_ref = flatten(y_test)
test_tags = flattened_ref
logger.info('Structured prediction f1: ')
cur_res = f1_score(flattened_ref, flattened_hyp, average=None)
logger.info('[ {}, {} ], {}'.format(cur_res[0], cur_res[1], f1_score(flattened_ref, flattened_hyp, pos_label=None)))
return (cur_res, idx)
# remove the feature number <idx>
def get_reduced_set(features_list, idx):
new_features_list = [obj[:idx] + obj[idx+1:] for obj in features_list]
return new_features_list
# train the model on all combinations of the feature set without one element
# TODO: the target metric should be tunable (now the f1 score of BAD class)
def selection_epoch(old_result, train_data, train_tags, test_data, test_tags, feature_names, data_type='sequential'):
reduced_res = np.zeros((len(feature_names),))
    max_res = old_result
    best_idx = -1
reduced_train = train_data
reduced_test = test_data
reduced_features = feature_names
for idx, name in enumerate(feature_names):
logger.info("Excluding feature {}".format(name))
# new feature sets without the feature <idx>
cur_reduced_train = call_for_each_element(train_data, get_reduced_set, args=[idx], data_type=data_type)
cur_reduced_test = call_for_each_element(test_data, get_reduced_set, args=[idx], data_type=data_type)
# train a sequence labeller
if data_type == 'sequential':
cur_res = run_prediction((cur_reduced_train, train_tags, cur_reduced_test, test_tags, idx))
            reduced_res[idx] = cur_res[0][0]
            # if the result (F1 of the BAD class) is better than the previous best -- save as maximum
            if cur_res[0][0] > max_res:
                max_res = cur_res[0][0]
                best_idx = idx
reduced_train = cur_reduced_train
reduced_test = cur_reduced_test
reduced_features = feature_names[:idx] + feature_names[idx+1:]
# if better result is found -- return it
if max_res > old_result:
        return (best_idx, max_res, reduced_train, reduced_test, reduced_features)
# none of the reduced sets worked better
else:
return (-1, old_result, [], [], [])
def selection_epoch_multi(old_result, train_data, train_tags, test_data, test_tags, feature_names, workers, data_type='sequential'):
# reduced_res = np.zeros((len(feature_names),))
max_res = old_result
reduced_train = train_data
reduced_test = test_data
reduced_features = feature_names
parallel_data = []
for idx, name in enumerate(feature_names):
# new feature sets without the feature <idx>
cur_reduced_train = call_for_each_element(train_data, get_reduced_set, args=[idx], data_type=data_type)
cur_reduced_test = call_for_each_element(test_data, get_reduced_set, args=[idx], data_type=data_type)
parallel_data.append((cur_reduced_train, train_tags, cur_reduced_test, test_tags, idx))
# train a sequence labeller
if data_type == 'sequential':
pool = Pool(workers)
reduced_res = pool.map(run_prediction, parallel_data)
print "Multiprocessing output: ", reduced_res
all_res = [res[0][0] for res in reduced_res]
# some feature set produced better result
if max(all_res) > old_result:
odd_feature_num = reduced_res[np.argmax(all_res)][1]
reduced_train = call_for_each_element(train_data, get_reduced_set, args=[odd_feature_num], data_type=data_type)
reduced_test = call_for_each_element(test_data, get_reduced_set, args=[odd_feature_num], data_type=data_type)
reduced_features = feature_names[:odd_feature_num] + feature_names[odd_feature_num+1:]
logger.info("Old result: {}, new result: {}, removed feature is {}".format(old_result, max(all_res), feature_names[odd_feature_num]))
return (feature_names[odd_feature_num], max(all_res), reduced_train, reduced_test, reduced_features)
# none of the reduced sets worked better
else:
logger.info("No improvement on this round")
return ("", old_result, [], [], [])
def feature_selection(train_data, train_tags, test_data, test_tags, feature_names, data_type='sequential'):
tag_map = {u'OK': 1, u'BAD': 0}
train_tags = [[tag_map[tag] for tag in seq] for seq in train_tags]
test_tags = [[tag_map[tag] for tag in seq] for seq in test_tags]
full_set_result = run_prediction((train_data, train_tags, test_data, test_tags, 0))
logger.info("Feature selection")
odd_feature = None
baseline_res = full_set_result[0][0]
logger.info("Baseline result: {}".format(baseline_res))
reduced_train = copy.deepcopy(train_data)
reduced_test = copy.deepcopy(test_data)
reduced_features = copy.deepcopy(feature_names)
odd_feature_list = []
# reduce the feature set while there are any combinations that give better result
cnt = 1
old_max = baseline_res
while odd_feature != "" and len(reduced_features) > 1:
logger.info("Feature selection: round {}".format(cnt))
odd_feature, max_res, reduced_train, reduced_test, reduced_features = selection_epoch_multi(old_max, reduced_train, train_tags, reduced_test, test_tags, reduced_features, 10, data_type=data_type)
# odd_feature, reduced_train, reduced_test, reduced_features = selection_epoch(old_max, reduced_train, train_tags, reduced_test, test_tags, reduced_features, data_type=data_type)
odd_feature_list.append(odd_feature)
old_max = max_res
cnt += 1
# form a set of reduced feature names and feature numbers
new_feature_list = []
for feature in feature_names:
if feature not in odd_feature_list:
new_feature_list.append(feature)
logger.info("Feature selection is terminating, good features are: {}".format(' '.join(new_feature_list)))
return (new_feature_list, baseline_res, old_max)
| 9,015 | 44.08 | 203 | py |
marmot | marmot-master/marmot/experiment/preprocessing_utils_old.py | from __future__ import print_function
import os
import sys
import copy
import multiprocessing as multi
import logging
import numpy as np
from collections import defaultdict
from sklearn.preprocessing.label import LabelBinarizer, MultiLabelBinarizer
import ipdb
from marmot.util.simple_corpus import SimpleCorpus
from marmot.experiment.import_utils import list_of_lists
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
# convert alignments from list of strings 'i-j'
# to list of lists such that new_align[j] = i
def convert_alignments(align_list, n_words):
new_align = [[] for i in range(n_words)]
for pair in align_list:
two_digits = pair.split('-')
new_align[int(two_digits[1])].append(int(two_digits[0]))
return new_align
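# A minimal sketch (hypothetical alignment strings): '0-0' aligns source
# word 0 to target word 0, so the result is indexed by target position.
def _convert_alignments_example():
    assert convert_alignments(['0-0', '1-2', '2-1'], 3) == [[0], [2], [1]]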
# baseline strategy: duplicate each sentence N times (N - number of words in the sentence)
def multiply_data_base(representations):
partial = {a_key: [] for a_key in representations}
# loop through sentences
for idx in range(len(representations['target'])):
if 'source' in representations:
partial['source'].append(representations['source'][idx])
if 'alignments' in representations:
partial['alignments'].append(representations['alignments'][idx])
if 'target_pos' in representations:
partial['target_pos'].append(representations['target_pos'][idx])
if 'source_pos' in representations:
partial['source_pos'].append(representations['source_pos'][idx])
if 'pseudo_ref' in representations:
partial['pseudo_ref'].append(representations['pseudo_ref'][idx])
if 'tags' in representations:
partial['tags'].append(representations['tags'][idx])
partial['target'].append(representations['target'][idx])
return partial
# create sequences consisting of the first 1, 2, 3, ..., N words for every original sequence
# borders -- create a representation that contains markers of borders:
# 'SE' - Start-End (full sentence)
# 'SN' - Start-Not-end (partial sentence)
def multiply_data(representations, borders=False):
partial = {a_key: [] for a_key in representations}
if borders:
partial['borders'] = []
# loop through sentences
for idx in range(len(representations['target'])):
# loop through words in the sentence
for w_idx in range(len(representations['target'][idx])):
if 'source' in representations:
partial['source'].append(representations['source'][idx])
if 'alignments' in representations:
partial['alignments'].append(representations['alignments'][idx][:w_idx+1])
if 'alignments_all' in representations:
partial['alignments_all'].append(representations['alignments_all'][idx][:w_idx+1])
if 'target_pos' in representations:
partial['target_pos'].append(representations['target_pos'][idx][:w_idx+1])
if 'source_pos' in representations:
partial['source_pos'].append(representations['source_pos'][idx])
if 'pseudo_ref' in representations:
partial['pseudo_ref'].append(representations['pseudo_ref'][idx])
if 'tags' in representations:
partial['tags'].append(representations['tags'][idx][:w_idx+1])
partial['target'].append(representations['target'][idx][:w_idx+1])
if borders:
                if w_idx + 1 == len(representations['target'][idx]):
partial['borders'].append('SE')
else:
partial['borders'].append('SN')
return partial
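# A minimal sketch (toy sentence): every sentence is expanded into all of
# its prefixes, so a 3-word sentence yields 3 training instances.
def _multiply_data_example():
    reprs = {'target': [['a', 'b', 'c']], 'tags': [['OK', 'BAD', 'OK']]}
    partial = multiply_data(reprs)
    assert partial['target'] == [['a'], ['a', 'b'], ['a', 'b', 'c']]
    assert partial['tags'] == [['OK'], ['OK', 'BAD'], ['OK', 'BAD', 'OK']]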
# create sequences consisting of all ngrams of every original sequence
# :param order: -- order of ngram
# borders -- create a representation that contains markers of borders:
# 'SE' - Start-End (full sentence)
# 'SN' - Start-Not-end (partial sentence)
# 'NE' - Not-start-End
# 'NN' - Not-start-Not-end
def multiply_data_ngrams(representations, order=3, borders=False):
partial = {a_key: [] for a_key in representations}
if borders:
partial['borders'] = []
# loop through sentences
for idx in range(len(representations['target'])):
# loop through words in the sentence
for w_idx in range(len(representations['target'][idx])):
start = max(0, w_idx - order + 1)
end = w_idx + 1
if 'source' in representations:
partial['source'].append(representations['source'][idx])
if 'alignments' in representations:
partial['alignments'].append(representations['alignments'][idx][start:end])
if 'target_pos' in representations:
partial['target_pos'].append(representations['target_pos'][idx][start:end])
if 'source_pos' in representations:
partial['source_pos'].append(representations['source_pos'][idx])
if 'pseudo_ref' in representations:
partial['pseudo_ref'].append(representations['pseudo_ref'][idx])
if 'tags' in representations:
partial['tags'].append(representations['tags'][idx][start:end])
partial['target'].append(representations['target'][idx][start:end])
if borders:
border_start, border_end = '', ''
if start == 0:
border_start = 'S'
else:
border_start = 'N'
                if end == len(representations['target'][idx]):
border_end = 'E'
else:
border_end = 'N'
partial['borders'].append(border_start + border_end)
return partial
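# A minimal sketch (toy sentence, order=2): every word ends exactly one
# ngram, and ngrams are clipped at the sentence start.
def _multiply_data_ngrams_example():
    reprs = {'target': [['a', 'b', 'c']], 'tags': [['OK', 'OK', 'BAD']]}
    partial = multiply_data_ngrams(reprs, order=2)
    assert partial['target'] == [['a'], ['a', 'b'], ['b', 'c']]
    assert partial['tags'] == [['OK'], ['OK', 'OK'], ['OK', 'BAD']]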
# take all substrings of a string
def multiply_data_all(representations, borders=False):
partial = {a_key: [] for a_key in representations}
if borders:
partial['borders'] = []
# loop through sentences
for idx in range(len(representations['target'])):
# loop through words in the sentence
for w_idx in range(len(representations['target'][idx])):
if 'source' in representations:
partial['source'].append(representations['source'][idx])
if 'alignments' in representations:
partial['alignments'].append(representations['alignments'][idx][:w_idx+1])
if 'target_pos' in representations:
partial['target_pos'].append(representations['target_pos'][idx][:w_idx+1])
if 'source_pos' in representations:
partial['source_pos'].append(representations['source_pos'][idx])
if 'pseudo_ref' in representations:
partial['pseudo_ref'].append(representations['pseudo_ref'][idx])
if 'tags' in representations:
partial['tags'].append(representations['tags'][idx][:w_idx+1])
partial['target'].append(representations['target'][idx][:w_idx+1])
if borders:
                if w_idx + 1 == len(representations['target'][idx]):
partial['borders'].append('SE')
else:
partial['borders'].append('SN')
# if borders:
# border_start, border_end = '', ''
# if start == 0:
# border_start = 'S'
# else:
# border_start = 'N'
# if end == len(representations['target']):
# border_end = 'E'
# else:
# border_end = 'N'
# partial['borders'].append(border_start + border_end)
# substrings that don't start from the first word
if w_idx > 0:
for w2_idx in range(1, w_idx):
if 'source' in representations:
partial['source'].append(representations['source'][idx])
if 'alignments' in representations:
partial['alignments'].append(representations['alignments'][idx][w2_idx:w_idx+1])
if 'target_pos' in representations:
partial['target_pos'].append(representations['target_pos'][idx][w2_idx:w_idx+1])
if 'source_pos' in representations:
partial['source_pos'].append(representations['source_pos'][idx])
if 'pseudo_ref' in representations:
partial['pseudo_ref'].append(representations['pseudo_ref'][idx])
if 'tags' in representations:
partial['tags'].append(representations['tags'][idx][w2_idx:w_idx+1])
partial['target'].append(representations['target'][idx][w2_idx:w_idx+1])
if borders:
border_start, border_end = '', ''
if w2_idx == 0:
border_start = 'S'
else:
border_start = 'N'
                        if w_idx + 1 == len(representations['target'][idx]):
border_end = 'E'
else:
border_end = 'N'
partial['borders'].append(border_start + border_end)
return partial
# TODO: this function adds keys to the context object, but maybe the user wants different keys
# TODO: the function should be agnostic about which keys it adds -- why does it care?
# there is a difference between 'sequence fields' and 'token fields'
# this method creates a context for each token
def create_context(repr_dict, sentence_id=None):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
# if 'alignments' in repr_dict:
# repr_dict['alignments'] = convert_alignments(repr_dict['alignments'], len(repr_dict['target']))
active_keys = repr_dict.keys()
active_keys.remove('tags')
for idx, word in enumerate(repr_dict['target']):
c = {}
c['token'] = word
c['index'] = idx
if sentence_id is not None:
c['sentence_id'] = sentence_id
if type(repr_dict['tags']) == list or type(repr_dict['tags']) == np.ndarray:
c['tag'] = repr_dict['tags'][idx]
c['sequence_tags'] = repr_dict['tags']
elif type(repr_dict['tags']) == int:
            c['tag'] = repr_dict['tags']
else:
print("Unknown type of tags representation:", type(repr_dict['tags']))
return []
for k in active_keys:
c[k] = repr_dict[k]
context_list.append(c)
return context_list
# create context objects from a data_obj -
# - a dictionary with representation labels as keys ('target', 'source', etc.) and
# representations (lists of lists) as values
# output: if data_type = 'plain', one list of context objects is returned
# if data_type = 'sequential', a list of lists of context objects is returned (list of sequences)
# if data_type = 'token', a dict {token: <list_of_contexts>} is returned
# TODO: this function requires the 'target' and 'tag' keys, but the user may wish to specify other keys
# TODO: 'target' and 'tag' don't make sense for every task
def create_contexts(data_obj, data_type='plain'):
'''
:param data_obj: an object representing a dataset consisting of files
:param data_type:
:return:
'''
contexts = []
if 'target' not in data_obj:
print("No 'target' label in data representations")
return []
if 'tags' not in data_obj:
print("No 'tag' label in data representations or wrong format of tag")
return []
for s_idx, sents in enumerate(zip(*data_obj.values())):
if data_type == 'sequential':
# print(contexts)
contexts.append(create_context({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, sentence_id=s_idx))
else:
contexts.extend(create_context({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, sentence_id=s_idx))
# TODO: there is an error here
if data_type == 'token':
new_contexts = defaultdict(list)
for cont in contexts:
new_contexts[cont['token']].append(cont)
contexts = copy.deepcopy(new_contexts)
return contexts
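# A minimal sketch (toy data): 'plain' mode returns a flat list of
# per-token contexts, each keeping its token, index, tag and sentence id.
def _create_contexts_example():
    data = {'target': [['a', 'b']], 'tags': [['OK', 'BAD']]}
    contexts = create_contexts(data, data_type='plain')
    assert len(contexts) == 2
    assert contexts[1]['token'] == 'b' and contexts[1]['tag'] == 'BAD'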
# convert list of lists into a flat list
# (flat input that is neither a list of lists nor a dict is returned unchanged)
def flatten(lofl):
    if list_of_lists(lofl):
        return [item for sublist in lofl for item in sublist]
    elif type(lofl) == dict:
        return lofl.values()
    return lofl
def map_feature_extractor((context, extractor)):
return extractor.get_features(context)
def map_feature_extractors((sequence, extractors)):
features = []
for obj in sequence:
obj_features = []
for extractor in extractors:
obj_features.extend(extractor.get_features(obj))
features.append(obj_features)
return features
# feature extraction for categorical features with conversion to one-hot representation
# this implementation is for a list representation
# this returns a list of lists, where each list contains the feature extractor results for a context
# the point of returning a list of lists is to allow binarization of the feature values
# TODO: we can binarize over the columns of the matrix instead of binarizing the results of each feature extractor
# TODO: is the output of the single worker and the multithreaded different? if so, change
def contexts_to_features(contexts, feature_extractors, workers=1):
return [[x for a_list in [map_feature_extractor((context, extractor)) for extractor in feature_extractors] for x in a_list] for context in contexts]
def contexts_to_features_multi(contexts, feature_extractors, workers=1):
# print("One context:", contexts[0])
# single thread
if workers == 1:
# print("Extractors:", type(feature_extractors))
# print("Contexts:", type(contexts))
#context = contexts[0]
# all_return = []
# for extractor in feature_extractors:
# print("Extracting features with {}".format(extractor))
# aaa = [map_feature_extractor((context, extractor)) for context in contexts]
# for context in contexts:
# aaa = [map_feature_extractor((context, extractor)) for extractor in feature_extractors]
# all_return.append(x for a_list in aaa for x in a_list)
# print("Context:", type(aaa[0][0]))
# print("One context:", aaa[0])
# return all_return
# return [[x for a_list in [map_feature_extractor((context, extractor)) for extractor in feature_extractors] for x in a_list] for context in contexts]
return [[x for a_list in [map_feature_extractor((context, extractor)) for extractor in feature_extractors] for x in a_list] for context in contexts]
# multiple threads
else:
# resulting object
res_list = []
#ipdb.set_trace()
pool = multi.Pool(workers)
logger.info('Multithreaded - Extracting the features for: ' + str(len(contexts)) + ' contexts...')
# each context is paired with all feature extractors
for extractor in feature_extractors:
context_list = [(cont, extractor) for cont in contexts]
# print("Contexts:", contexts)
# sys.exit()
# print("Types: ", [(type(x), type(context_list[0][0][x])) for x in context_list[0][0]])
# print("Values: ", [(x, context_list[0][0][x]) for x in context_list[0][0]])
#ipdb.set_trace()
features = pool.map(map_feature_extractor, context_list)
res_list.append(features)
# np.hstack and np.vstack can't be used because lists have objects of different types
intermediate = [[x[i] for x in res_list] for i in range(len(res_list[0]))]
res_list = [flatten(x) for x in intermediate]
pool.close()
pool.join()
return res_list
def contexts_to_features_seq(contexts, feature_extractors, workers=1):
res_list = []
#ipdb.set_trace()
pool = multi.Pool(workers)
logger.info('Multithreaded - Extracting the features for: ' + str(len(contexts)) + ' contexts...')
# each context is paired with all feature extractors
flat_contexts = flatten(contexts)
seq_lengths = [len(seq) for seq in contexts]
for extractor in feature_extractors:
context_list = [(cont, extractor) for cont in flat_contexts]
#ipdb.set_trace()
features = pool.map(map_feature_extractor, context_list)
res_list.append(features)
# np.hstack and np.vstack can't be used because lists have objects of different types
intermediate = [[x[i] for x in res_list] for i in range(len(res_list[0]))]
res_list = [flatten(x) for x in intermediate]
cnt = 0
seq_res = []
for seq in seq_lengths:
seq_res.append(res_list[cnt:cnt+seq])
cnt += seq
pool.close()
pool.join()
return seq_res
# pool = multi.Pool(workers)
# context_list = [(sequence, feature_extractors) for sequence in contexts]
# features = pool.map(map_feature_extractors, context_list)
# pool.close()
# pool.join()
# return features
# extract tags from a list of contexts
def tags_from_contexts(contexts):
return [context['tag'] for context in contexts]
# train converters(binarizers) from categorical values to one-hot representation
# for all features
# all_values is a list of lists, because we need to look at the feature values for every instance to binarize properly
def fit_binarizers(all_values):
binarizers = {}
for f in range(len(all_values[0])):
cur_features = [context[f] for context in all_values]
# only categorical values need to be binarized, ints/floats are left as they are
if type(cur_features[0]) == str or type(cur_features[0]) == unicode:
lb = LabelBinarizer()
lb.fit(cur_features)
binarizers[f] = lb
elif type(cur_features[0]) == list:
mlb = MultiLabelBinarizer()
# default feature for unknown values
cur_features.append(tuple(("__unk__",)))
mlb.fit([tuple(x) for x in cur_features])
binarizers[f] = mlb
return binarizers
# convert categorical features to one-hot representations with pre-fitted binarizers
# TODO: this function implicitly converts the data into a numpy array
def binarize(features, binarizers):
assert(list_of_lists(features))
num_features = len(features[0])
# if binarizers != {} and max(binarizers.keys()) >= num_features:
# print("Binarizers keys max: ", max(binarizers.keys()))
# print("Total feature number: ", num_features)
# print("Features:", features[0])
assert(binarizers == {} or max(binarizers.keys()) < num_features)
binarized_cols = []
for i in range(num_features):
# get this column
cur_values = [f[i] for f in features]
# if there's a binarizer for this column
if i in binarizers:
binarizer = binarizers[i]
if type(binarizer) == LabelBinarizer:
try:
binarized_cols.append(binarizer.transform(cur_values))
except:
pass
# print(cur_values)
elif type(binarizer) == MultiLabelBinarizer:
assert(list_of_lists(cur_values))
# MultiLabelBinarizer doesn't support unknown values -- they need to be replaced with a default value
# we're going to use the empty list as the default value
cur_values_default = []
default_value = binarizer.classes_[-1]
for a_list in cur_values:
new_list = list(a_list)
for j, val in enumerate(new_list):
if val not in binarizer.classes_:
new_list[j] = default_value
cur_values_default.append(tuple(new_list))
transformed = binarizer.transform(cur_values_default)
binarized_cols.append(transformed)
else:
raise NotImplementedError('this function is not implemented for type: {}'.format(type(binarizer)))
else:
# arr = np.array(cur_values)
# print(arr.shape)
# print(len(cur_values))
# print(cur_values)
try:
# new_vals = np.array(cur_values).reshape(len(cur_values), 1)
binarized_cols.append(np.array(cur_values).reshape(len(cur_values), 1))
except:
print(cur_values)
sys.exit()
assert (len(binarized_cols) == num_features), 'the number of columns after binarization must match the number of features'
new_features = np.hstack(binarized_cols)
return new_features
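# A minimal sketch (toy features): the categorical column 0 is one-hot
# encoded by its fitted LabelBinarizer, the numeric column 1 passes through.
def _binarize_example():
    feats = [['NN', 0.5], ['VB', 1.0], ['NN', 0.0]]
    matrix = binarize(feats, fit_binarizers(feats))
    assert matrix.shape[0] == 3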
| 21,460 | 43.617464 | 157 | py |
marmot | marmot-master/marmot/experiment/converter.py | from __future__ import print_function
#############################################################
#
# Convert features from CRFSuite format to something else
#
############################################################
import os
import sys
import time
from argparse import ArgumentParser
from sklearn.metrics import f1_score
from subprocess import call
from marmot.util.generate_crf_template import generate_crf_template
from marmot.experiment.import_utils import mk_tmp_dir
# <in_file> -- input file
# <tmp_dir> -- directory to store the output
# <sequence> -- True - sentences as sequences, False - each word is a separate sequence
# full name of the output: "crfpp.<dataset_name>.<stamp>"
def crfsuite_to_crfpp(in_file, tmp_dir, dataset_name, sequence=True, stamp=None):
#TODO: template
if stamp is None:
stamp = str(time.time())
feature_num = 0
out_file_name = os.path.join(tmp_dir, "crfpp." + dataset_name + '.' + stamp)
out_file = open(out_file_name, 'w')
tag_set = []
for line in open(in_file):
line = line.strip('\n').decode('utf-8')
if line == '':
if sequence:
out_file.write('\n')
continue
elements = line.split('\t')
cur_tag = elements[0]
tag_set.append(cur_tag)
cur_features = []
for el in elements[1:]:
stop = el.find(':')
if stop == -1:
cur_features.append(el)
else:
                cur_features.append(el[:stop] + el[stop + 1:])
feature_num = len(cur_features)
to_pr = u'\t'.join(cur_features)
out_file.write('%s\t%s\n' % (to_pr.encode('utf-8'), cur_tag.encode('utf-8')))
if not sequence:
out_file.write('\n')
generate_crf_template(feature_num, template_name='template', tmp_dir=tmp_dir)
out_file.close()
return out_file_name, tag_set
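# Sketch of the per-line rewrite on a hypothetical CRFSuite line
# (tag in the first column, features tab-separated, ':' marking values):
#   in:  'OK\tw[0]=the\tlen:3'
#   out: 'w[0]=the\tlen3\tOK'
# i.e. ':' separators are dropped and the tag is moved to the last
# column, which is where CRF++ expects the answer tag.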
# <sequence> -- True - sequential representation for HMM, False - plain for classification
def crfsuite_to_svmlight(in_file, tmp_dir, dataset_name, binarized_features=None, sequence=False, stamp=None):
if stamp is None:
stamp = str(time.time())
no_bin = False
if binarized_features is None:
print("No binary features list provided, it will be generated from the data")
no_bin = True
binarized_features = []
out_file_name = os.path.join(tmp_dir, "svmlight." + dataset_name + '.' + stamp)
out_file = open(out_file_name, 'w')
seg_idx = 1
tag_set = []
tag_map = {'OK': '+1', 'BAD': '-1', u'OK': '+1', u'BAD': '-1'}
for idx, line in enumerate(open(in_file)):
if idx % 1000 == 0:
sys.stderr.write('.')
if line.strip('\n') == '':
seg_idx += 1
continue
elements = line.strip('\n').decode('utf-8').split('\t')
cur_tag = elements[0]
tag_set.append(cur_tag)
cur_tag_svm = tag_map[cur_tag]
cur_features = []
for el in elements[1:]:
stop = el.find(':')
cur_el = ''
if stop == -1:
cur_el = el
else:
cur_el = el[:stop] + el[stop + 1:]
try:
cur_features.append(binarized_features.index(cur_el) + 1)
except:
if no_bin:
binarized_features.append(cur_el)
cur_features.append(len(binarized_features))
cur_features.sort()
if sequence:
out_file.write('%s qid:%d %s\n' % (cur_tag_svm, seg_idx, ' '.join([str(f) + ':1.0' for f in cur_features])))
else:
out_file.write('%s\t%s\n' % (cur_tag_svm, ' '.join([str(f) + ':1.0' for f in cur_features])))
out_file.close()
sys.stderr.write('\n')
return out_file_name, tag_set, binarized_features
# <data_type> -- 'svm_light' | 'crf_suite' | 'crfpp'
def compute_ref(true_tags, out_file, data_type='svm_light'):
tag_map = {'OK': 1, 'BAD': 0, u'OK': 1, u'BAD': 0}
predicted = []
if data_type == 'svm_light':
tag_map_pred = {'+1': 1, '-1': 0}
for line in open(out_file):
label = line[line.find(':')+1:line.find(' ')]
predicted.append(tag_map_pred[label])
elif data_type == 'crfpp' or data_type == 'crf_suite':
for line in open(out_file):
line = line.strip('\n')
if line == '':
continue
tag = line.split('\t')[-1]
if tag == 'OK' or tag == 'BAD':
predicted.append(tag)
predicted = [tag_map[t] for t in predicted]
# if (type(true_tags[0]) is str or type(true_tags[0]) is unicode) and not true_tags[0].isdigit():
true_tags = [tag_map[t] for t in true_tags]
# if type(predicted[0]) is str and not predicted[0].isdigit():
print(true_tags[:10])
print(predicted[:10])
    print(f1_score(true_tags, predicted, average=None))
    print(f1_score(true_tags, predicted, average='weighted', pos_label=None))
# extract only tags from the CRFSuite file
def get_test_tags(in_file):
tag_set = []
for line in open(in_file):
line = line.strip('\n')
if line == '':
continue
tag_set.append(line.split('\t')[0])
return tag_set
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("train_file", action="store", help="path to the training features in CRFSuite format")
parser.add_argument("test_file", action="store", help="path to the test features in CRFSuite format")
parser.add_argument("method", help="crf_suite | crfpp | svm_light")
parser.add_argument("representation", help="sequence | plain")
parser.add_argument("--params", default='', help="training params, string")
parser.add_argument("--test_params", default='', help="test params, string")
parser.add_argument("--tmp", default=None, action="store", help="temporary directory")
args = parser.parse_args()
tmp_dir = args.tmp if args.tmp is not None else os.path.join(os.path.dirname(os.path.realpath(__file__)), 'tmp_dir')
tmp_dir = os.path.abspath(tmp_dir)
tmp_dir = mk_tmp_dir(tmp_dir)
stamp = args.method
if args.params != '':
stamp += ('.' + args.params.replace(' ', '_'))
print("Stamp: ", stamp)
if args.representation == 'sequence':
sequence = True
elif args.representation == 'plain':
sequence = False
else:
print("Unknown representation: {}".format(args.representation))
if args.method == 'crf_suite':
model = os.path.join(tmp_dir, 'crfsuite_model_file' + stamp)
test_tags = get_test_tags(args.test_file)
call(['crfsuite', 'learn'] + args.params.split() + ['-m', model, args.train_file])
test_out = open(args.test_file+'.tagged', 'w')
call(['crfsuite', 'tag', '-tr', '-m', model, args.test_file], stdout=test_out)
test_out.close()
compute_ref(test_tags, args.test_file+'.tagged', data_type=args.method)
elif args.method == 'crfpp':
my_train_file, train_tags = crfsuite_to_crfpp(args.train_file, tmp_dir, 'train', sequence=sequence, stamp=stamp)
my_test_file, test_tags = crfsuite_to_crfpp(args.test_file, tmp_dir, 'test', sequence=sequence, stamp=stamp)
model = os.path.join(tmp_dir, 'crfpp_model_file' + stamp)
print("Running training: {}".format(' '.join(['crf_learn'] + args.params.split() + [os.path.join(tmp_dir, 'template'), my_train_file, model])))
call(['crf_learn'] + args.params.split() + [os.path.join(tmp_dir, 'template'), my_train_file, model])
print("Running test: {}".format(' '.join(['crf_test'] + args.test_params.split() + ['-m', model, '-o', my_test_file+'.tagged', my_test_file])))
call(['crf_test'] + args.test_params.split() + ['-m', model, '-o', my_test_file+'.tagged', my_test_file])
compute_ref(test_tags, my_test_file+'.tagged', data_type=args.method)
elif args.method == 'svm_light':
my_train_file, train_tags, binarized_features = crfsuite_to_svmlight(args.train_file, tmp_dir, 'train', binarized_features=None, sequence=sequence, stamp=stamp)
my_test_file, test_tags, binarized_features = crfsuite_to_svmlight(args.test_file, tmp_dir, 'test', binarized_features=binarized_features, sequence=sequence, stamp=stamp)
model = os.path.join(tmp_dir, 'svm_model_file.' + stamp)
print("Running training: {}".format(' '.join(['/export/tools/varvara/svm_multiclass/svm_light/svm_learn'] + args.params.split() + [my_train_file, model])))
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_learn'] + args.params.split() + [my_train_file, model])
test_out = my_test_file + '.tagged'
print("Running test: {}".format(' '.join(['/export/tools/varvara/svm_multiclass/svm_light/svm_classify', '-f', '0', my_test_file, model, test_out])))
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_classify', '-f', '0', my_test_file, model, test_out])
compute_ref(test_tags, my_test_file+'.tagged', data_type=args.method)
else:
print("Unknown method: {}".format(args.method))
| 9,127 | 45.10101 | 178 | py |
marmot | marmot-master/marmot/experiment/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/experiment/context_utils.py | from __future__ import print_function, division
import sys
import numpy as np
from collections import Counter
###########################################################################
#
# This file contains different functions for generation of non-standard
# contexts (contexts where each 'token' is a list of words)
#
###########################################################################
# return the window of a list
# add symbols '_START_' and '_END_' if the range exceeds the length of the list
def negative_window(my_list, start, end):
res = []
while start < 0:
res.append('_START_')
start += 1
while start < min(end, len(my_list)):
res.append(my_list[start])
start += 1
while end > len(my_list):
res.append('_END_')
end -= 1
return res
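# A minimal sketch: a window starting before the sentence is padded with
# '_START_', one running past the end is padded with '_END_'.
def _negative_window_example():
    assert negative_window(['a', 'b', 'c'], -2, 2) == ['_START_', '_START_', 'a', 'b']
    assert negative_window(['a', 'b', 'c'], 2, 4) == ['c', '_END_']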
def create_context_ngram(repr_dict, order, test=False, unambiguous=False, bad_tagging="pessimistic"):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
active_keys = repr_dict.keys()
active_keys.remove('tags')
tag_map = {'OK': 1, 'BAD': 0}
# if the order is greater than 1, we need to have the equal number of ngrams for each word
# so we need to go beyond the borders of a sentence:
# "this is my younger brother" has 3 3-grams: "this is my", "is my younger" and "my younger brother"
# "my" participates in 3 trigrams, other words in 2 or less.
# but we need all words to participate in 3 3-grams, so we create the following trigrams:
# "_START_ _START_ this", "_START_ this is", "this is my", "is my younger" and "my younger brother", "younger brother _END_", "brother _END_ _END_"
#logger.info("Order: {}".format(order))
for i in range(len(repr_dict['target']) + order - 1):
#logger.info("Word {}".format(i))
c = {}
#logger.info("Negative window from {} to {}, length {}".format(i - order + 1, i + 1, len(repr_dict['target'])))
c['token'] = negative_window(repr_dict['target'], i - order + 1, i + 1)
c['index'] = (i - order + 1, i + 1)
# we take only tags for the existing tags
# i.e. for the sequence "_START_ _START_ it" the tag will be the tag for "it" only
tags = [tag_map[t] for t in repr_dict['tags'][max(0, i-order+1):min(len(repr_dict['tags']), i+1)]]
c['tag'] = np.average(tags)
for k in active_keys:
c[k] = repr_dict[k]
context_list.append(c)
return context_list
# create a new segmentation that divides only "GOOD" segments
# and keeps "BAD" segments unchanged
# TODO: add new ways of segmenting? (e.g. keep BAD segments untouched)
def error_based_segmentation(repr_dict):
# borders between segments with different labels
score_borders = [(i, i+1) for i in range(len(repr_dict['tags'])-1) if repr_dict['tags'][i] != repr_dict['tags'][i+1]]
# borders of phrases provided by Moses
segmentation_borders = [(j-1, j) for (i, j) in repr_dict['segmentation']][:-1]
# join both border types so that all phrases have unambiguous scores
# and there are no too long segments
new_borders = sorted(set(score_borders + segmentation_borders))
new_segments = []
prev = 0
# convert new borders to segments
for border in new_borders:
new_segments.append((prev, border[1]))
prev = border[1]
new_segments.append((new_borders[-1][1], len(repr_dict['target'])))
return new_segments
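# A minimal sketch (toy sentence): one Moses phrase spanning an OK/BAD
# boundary is split there, so each resulting segment has a uniform tag.
def _error_based_segmentation_example():
    repr_dict = {'target': ['a', 'b', 'c'],
                 'tags': ['OK', 'OK', 'BAD'],
                 'segmentation': [(0, 3)]}
    assert error_based_segmentation(repr_dict) == [(0, 2), (2, 3)]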
# we don't really need the order here, it should always be None
# or anything else
# :test: -- True if data is test data, False if training -- test sentences can have empty source-segmentation field (if Moses failed to produce constrained reference for them)
# :only_target: -- True if only target sentence is segmented, needs to be processed without source segmentation
# :bad_tagging: -- tag all phrases with at least one bad word as "BAD"
# if set to False - only phrases with 50% or more bad words are tagged as "BAD"
# :tags_format: -- 'word': one tag per word (conversion to phrase-level tags needed),
# 'phrase': one tag per phrase (no conversion needed)
def create_context_phrase(repr_dict, order=None, unambiguous=False, test=False, bad_tagging="pessimistic", tags_format='word'):
'''
:param repr_dict: a dict representing a 'line' or 'sentence' or a 'segment'
:return: a list of context objects representing the data for each token in the sequence
'''
#print("CONTEXT CREATOR for a sentence")
#print(repr_dict)
# how many words had to change their tag from good to bad and vise versa
good2bad, bad2good = 0, 0
context_list = []
# is checked before in create_contexts, but who knows
if 'target' not in repr_dict:
print("No 'target' label in data representations")
return []
if 'tags' not in repr_dict:
print("No 'tag' label in data representations or wrong format of tag")
print(repr_dict)
return []
if 'segmentation' not in repr_dict or len(repr_dict['segmentation']) == 0:
# for the test data assuming that sentences without segmentation consist of one-word segments
if test:
repr_dict['segmentation'] = [(i, i+1) for i in range(len(repr_dict['target']))]
# for the training data omitting sentences without segmentation
else:
# print("No 'segmentation' label in data representations")
return []
if unambiguous:
assert('source_segmentation' not in repr_dict or len(repr_dict['source_segmentation']) == 0), "Error-based segmentation of target can't be performed if source segmentation exists -- after re-segmentation source and target segments won't match"
assert(not test), "Error-based segmentation can't be applied to the test set"
#print("Unambiguous")
print("Old segmentation: ", repr_dict['segmentation'])
repr_dict['segmentation'] = error_based_segmentation(repr_dict)
print("New segmentation: ", repr_dict['segmentation'])
# no source segmentation means that no Moses segmentation was produced
# in the training data we leave these sentences out
# in the test data they are processed as normal
# assuming that every target word is a separate segment
active_keys = repr_dict.keys()
active_keys.remove('tags')
if 'source_segmentation' in repr_dict:
active_keys.remove('source_segmentation')
if len(repr_dict['source_segmentation']) != 0 and len(repr_dict['source_segmentation']) != len(repr_dict['segmentation']):
print("Wrong segmentation lengths: ", repr_dict)
sys.exit()
for idx, (i, j) in enumerate(repr_dict['segmentation']):
c = {}
c['token'] = repr_dict['target'][i:j]
c['index'] = (i, j)
# source phrase from the phrase segmentation
if 'source_segmentation' in repr_dict and len(repr_dict['source_segmentation']) != 0:
src_seg = repr_dict['source_segmentation'][idx]
c['source_token'] = repr_dict['source'][src_seg[0]:src_seg[1]]
c['source_index'] = (src_seg[0], src_seg[1])
# source phrase from the alignments
elif 'alignments' in repr_dict:
alignments = []
for ii in range(c['index'][0], c['index'][1]):
try:
cur_align = repr_dict['alignments'][ii]
if cur_align is not None:
alignments.append(repr_dict['alignments'][ii])
except IndexError:
print("Indices: {} to {}, current: {}".format(c['index'][0], c['index'][1], ii))
print("Alignments: ", repr_dict['alignments'])
print("Representation: ", repr_dict)
sys.exit()
# converted to set to remove duplicates
# converted back to list because set doesn't support indexing
alignments = list(set(alignments))
if len(alignments) == 0:
c['source_token'] = []
c['source_index'] = ()
# source phrase -- substring between the 1st and the last word aligned to the target phrase
# (unaligned words in between are included)
else:
c['source_token'] = [repr_dict['source'][ii] for ii in alignments]
c['source_index'] = (alignments[0], alignments[-1] + 1)
else:
c['source_token'] = []
c['source_index'] = ()
if len(c['token']) == 0:
print("No token: from {} to {} in target: ".format(i, j), repr_dict['target'], repr_dict['source'], repr_dict['segmentation'])
if j == 0:
print("j==0!")
print("Target: '{}', segmentation: {}, {}".format(' '.join(repr_dict['target']), i, j))
if i == j or (len(repr_dict['tags'][i:j]) == 0 and tags_format == 'word') or len(repr_dict['target'][i:j]) == 0:
print("i==j!")
print("Target: '{}', tags: '{}' segmentation: {}, {}".format(' '.join([w.encode('utf-8') for w in repr_dict['target']]), ' '.join(repr_dict['tags']), i, j))
tags_cnt = Counter(repr_dict['tags'][i:j])
# super-pessimistic tagging -- if BAD occurs any number of times - the final tag is BAD
bad_all = tags_cnt['BAD']
good_all = tags_cnt['OK']
if tags_format == 'word':
if bad_tagging == "super_pessimistic":
if tags_cnt['BAD'] > 0:
c['tag'] = 'BAD'
good2bad += good_all
else:
c['tag'] = 'OK'
# pessimistic tagging -- if BAD occurs in 1 of 3 words or more often -- the final tag is BAD
elif bad_tagging == "pessimistic":
if tags_cnt['BAD']/len(repr_dict['tags'][i:j]) < 0.3:
c['tag'] = 'OK'
bad2good += bad_all
else:
c['tag'] = 'BAD'
good2bad += good_all
            # optimistic -- if OK occurs as many times as BAD or more -- the final tag is OK
elif bad_tagging == "optimistic":
if tags_cnt['OK'] >= tags_cnt['BAD']:
bad2good += bad_all
c['tag'] = 'OK'
else:
c['tag'] = 'BAD'
good2bad += good_all
else:
print("Unknown tag assignment scheme: {}".format(bad_tagging))
sys.exit()
elif tags_format == 'phrase':
c['tag'] = repr_dict['tags'][idx]
else:
print("Unknown tags format: {}".format(tags_format))
sys.exit()
for k in active_keys:
c[k] = repr_dict[k]
context_list.append(c)
return context_list, good2bad, bad2good
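# A minimal sketch (toy segmented sentence, word-level tags, default
# "pessimistic" tagging): a phrase with >= 30% BAD words is tagged BAD.
def _create_context_phrase_example():
    repr_dict = {'target': ['a', 'b', 'c'],
                 'source': ['x', 'y'],
                 'tags': ['OK', 'BAD', 'BAD'],
                 'segmentation': [(0, 1), (1, 3)]}
    contexts, good2bad, bad2good = create_context_phrase(repr_dict)
    assert [c['tag'] for c in contexts] == ['OK', 'BAD']
    assert contexts[1]['token'] == ['b', 'c']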
# create contexts where 'token' is an ngram of arbitrary length
# data_type is always 'plain' (no 'sequential' or 'token' for now)
# :order: -- order of ngram
# :data_type: -- 'plain' - data is a flat list
# 'sequential' - data is a list of sequences (used for dev and test)
def create_contexts_ngram(data_obj, order=None, data_type='plain', test=False, unambiguous=False, bad_tagging="pessimistic", tags_format='word'):
'''
:param data_obj: an object representing a dataset consisting of files
:param data_type:
:return:
'''
print("ENTER CONTEXTS CREATOR")
contexts = []
if 'target' not in data_obj:
print("No 'target' label in data representations")
return []
if 'tags' not in data_obj:
print("No 'tag' label in data representations or wrong format of tag")
return []
if 'segmentation' in data_obj:
context_generator = create_context_phrase
else:
if order is None:
print("The order of ngrams has to be defined to create the ngram contexts")
return []
context_generator = create_context_ngram
print("Sentences in the data: {}".format(len(data_obj['target'])))
if 'target_file' in data_obj:
data_obj.pop('target_file')
if 'source_file' in data_obj:
data_obj.pop('source_file')
overall = 0
good2bad, bad2good = 0, 0
if data_type == 'plain':
print("DATATYPE: PLAIN")
print(len(data_obj.values()))
for r_key in data_obj:
print("{} -- {} values".format(r_key, len(data_obj[r_key])))
# print(zip(*data_obj.values())[0])
for s_idx, sents in enumerate(zip(*data_obj.values())):
#print("SENTENCE {}".format(s_idx))
# all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging)
# print("ALL: ", all_out)
#(cont, good, bad) = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging)
all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging, tags_format=tags_format)
if len(all_out) < 3:
continue
good2bad += all_out[1]
bad2good += all_out[2]
overall += len(all_out[0])
# print("Type of the generated context: ", type(cont))
contexts.extend(all_out[0])
# print("Contexts: {}".format(overall))
elif data_type == 'sequential':
print("SEQUENTIAL")
for s_idx, sents in enumerate(zip(*data_obj.values())):
all_out = create_context_phrase({data_obj.keys()[i]: sents[i] for i in range(len(sents))}, order, test=test, unambiguous=unambiguous, bad_tagging=bad_tagging, tags_format=tags_format)
if len(all_out) < 3:
continue
#print("SEQ CONTEXTS: {}".format(len(cont)))
good2bad += all_out[1]
bad2good += all_out[2]
overall += len(all_out[0])
contexts.append(all_out[0])
else:
print("UNKNOWN DATATYPE: {}".format(data_type))
print("Good to bad: {}\nBad to good: {}, \nTotal: {}".format(good2bad, bad2good, overall))
return contexts
# output a flat list of numbers
# a number for each context -- means the number of words this context represents
def get_contexts_words_number(contexts):
numbers_list = []
for c in contexts:
try:
numbers_list.append(len(c['token']))
except TypeError:
print("Erroneous context: ", c)
print("List: ", contexts)
print("The 'token' field has to be of type 'list', is actually {}".format(type(c['token'])))
sys.exit()
return numbers_list
| 15,242 | 46.19195 | 251 | py |
marmot | marmot-master/marmot/experiment/svm_light_experiment.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import sys
import os
from subprocess import call
from sklearn.metrics import f1_score
from marmot.experiment.import_utils import call_for_each_element, build_object, build_objects, mk_tmp_dir
from marmot.experiment.preprocessing_utils import create_contexts, tags_from_contexts, contexts_to_features, fit_binarizers, binarize, flatten
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
'''
Only feature extraction
Extract features and save in CRF++, CRFSuite or SVMLight format
'''
def feat_to_string(a_feat):
try:
return a_feat.encode('utf-8')
except:
return str(a_feat)
# data type - plain
def binarize_features(train_features, feature_names, train_tags):
binary_features = set()
for features, a_tag in zip(train_features, train_tags):
for a_feat, a_name in zip(features, feature_names):
new_feature = "{}_{}_{}".format(a_name, feat_to_string(a_feat), a_tag)
binary_features.add(new_feature)
return list(binary_features)
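# A minimal sketch of the naming scheme (hypothetical feature values):
# each (name, value, label) triple becomes one indicator feature.
def _binarize_features_example():
    feats = binarize_features([['the', 3]], ['left_word', 'length'], ['OK'])
    assert sorted(feats) == ['left_word_the_OK', 'length_3_OK']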
# data type - plain
# no tag in the feature
def binarize_features_blind(train_features, feature_names):
binary_features = set()
for features in train_features:
for a_feat, a_name in zip(features, feature_names):
new_feature = "{}_{}".format(a_name, feat_to_string(a_feat))
binary_features.add(new_feature)
return list(binary_features)
# features, tags -- dataset to binarize
# feature_names -- feature names for this dataset
# binary_features -- list of binary feature names
# output -- list of binary feature names which light for this object
def get_binary_features(test_features, feature_names, test_tags, binary_features):
new_test_features = []
for features, a_tag in zip(test_features, test_tags):
cur_features = []
for a_feat, a_name in zip(features, feature_names):
try:
cur_features.append(binary_features.index('{}_{}_{}'.format(a_name, feat_to_string(a_feat), a_tag)) + 1)
except ValueError: # no such feature, skipping
pass
cur_features.sort()
# print("Features: ", cur_features)
new_test_features.append(cur_features)
return new_test_features
# the same, but features without tag
def get_binary_features_blind(test_features, feature_names, binary_features):
new_test_features = []
for features in test_features:
cur_features = []
for a_feat, a_name in zip(features, feature_names):
try:
cur_features.append(binary_features.index('{}_{}'.format(a_name, feat_to_string(a_feat))) + 1)
except ValueError: # no such feature, skipping
pass
cur_features.sort()
# print("Features: ", cur_features)
new_test_features.append(cur_features)
return new_test_features
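# A small sketch of the index mapping, with invented values: assuming
# binary_features == ['pos_NN', 'len_5'], an object with features ['NN', 5]
# and names ['pos', 'len'] maps to the 1-based index list [1, 2].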
# binary features for test
# two variants for every object: one with the object's own tag and one with the opposite tag
def get_binary_features_test(test_features, feature_names, test_tags, binary_features):
new_test_features = []
new_test_features_inverse = []
opposite = {'OK': 'BAD', 'BAD': 'OK'}
for features, a_tag in zip(test_features, test_tags):
cur_features_dir = []
cur_features_inv = []
for a_feat, a_name in zip(features, feature_names):
try:
cur_features_dir.append(binary_features.index('{}_{}_{}'.format(a_name, feat_to_string(a_feat), a_tag)) + 1)
cur_features_inv.append(binary_features.index('{}_{}_{}'.format(a_name, feat_to_string(a_feat), opposite[a_tag])) + 1)
except ValueError: # no such feature, skipping
pass
cur_features_dir.sort()
cur_features_inv.sort()
# print("Features: ", cur_features)
new_test_features.append(cur_features_dir)
new_test_features_inverse.append(cur_features_inv)
return new_test_features, new_test_features_inverse
def get_test_score(test_file, inverse_test_file):
predicted = []
tag_map = {'+1': 1, '-1': 0}
    for line_dir, line_inv in zip(open(test_file), open(inverse_test_file)):
        dir_label = line_dir[line_dir.find(':')+1:line_dir.find(' ')]
        inv_label = line_inv[line_inv.find(':')+1:line_inv.find(' ')]
        # the scores have to be compared numerically, not as strings
        dir_score = float(line_dir[:line_dir.find(':')])
        inv_score = float(line_inv[:line_inv.find(':')])
if dir_score > inv_score:
predicted.append(tag_map[dir_label])
else:
predicted.append(tag_map[inv_label])
return predicted
# parse SVMLight output,
# return the predicted tags (0 - BAD, 1 - GOOD)
def get_test_score_blind(test_file):
predicted = []
tag_map = {'+1': 1, '-1': 0}
for line in open(test_file):
label = line[line.find(':')+1:line.find(' ')]
predicted.append(tag_map[label])
return predicted
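# A sketch of the expected line format, inferred from the slicing above rather
# than from SVMLight documentation: lines are assumed to look like
# '<score>:<label> ...', so e.g. the line '0.73:+1 ...' yields the prediction 1 (OK).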
# persist features to svm_light format
# all features - binary
# feature = <feature_name>_<feature_value>_<label>
def persist_to_svm(train_features, test_features, feature_names, train_tags, test_tags, persist_dir):
# binarize
logger.info("Binarize features")
    binary_features = binarize_features(train_features, feature_names, train_tags)
logger.info("Get binary representation for test")
new_test_features = get_binary_features(test_features, feature_names, test_tags, binary_features)
test_file_name = os.path.join(persist_dir, 'test_binary.svm')
test_file = open(test_file_name, 'w')
tags_map = {'OK': '+1', 'BAD': '-1'}
for feat, a_tag in zip(new_test_features, test_tags):
#print("Features: ", feat)
#print("Tag: ", a_tag)
test_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
logger.info("Get binary representation for training")
new_train_features = get_binary_features(train_features, feature_names, train_tags, binary_features)
# persist
logger.info("Export training and test")
train_file_name = os.path.join(persist_dir, 'train_binary.svm')
#test_file_name = os.path.join(persist_dir, 'test_binary.svm')
train_file = open(train_file_name, 'w')
#test_file = open(test_file_name, 'w')
#tags_map = {'OK': '+1', 'BAD': '-1'}
for feat, a_tag in zip(new_train_features, train_tags):
# print("Features: ", feat)
# print("Tag: ", a_tag)
train_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
#for feat, a_tag in zip(new_test_features, test_tags):
# test_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
train_file.close()
test_file.close()
# persist unbinarized
# logger.info("Export non-binary versions for control")
# train_control = open(os.path.join(persist_dir, 'train_control.svm'), 'w')
# test_control = open(os.path.join(persist_dir, 'test_control.svm'), 'w')
# for feat, a_tag in zip(train_features, train_tags):
# train_control.write("%s %s\n" % (a_tag, ' '.join([str(f_name) + ':' + feat_to_string(f) for f_name, f in zip(feature_names, feat)])))
# for feat, a_tag in zip(test_features, test_tags):
# test_control.write("%s %s\n" % (a_tag, ' '.join([str(f_name) + ':' + feat_to_string(f) for f_name, f in zip(feature_names, feat)])))
# train_control.close()
# test_control.close()
return train_file_name, test_file_name
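# A sketch of the resulting SVMLight line format, with invented indices:
# an 'OK' object whose binary features have 1-based indices 3 and 17 is written as
#     +1 3:1.0 17:1.0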
# persist to svm with double test file
def persist_to_svm_dbl(train_features, test_features, feature_names, train_tags, test_tags, persist_dir):
# binarize
logger.info("Binarize features")
binary_features = binarize_features(train_features, feature_names, train_tags)
logger.info("Get binary representation for test")
new_test_features_dir, new_test_features_inv = get_binary_features_test(test_features, feature_names, test_tags, binary_features)
test_file_name = os.path.join(persist_dir, 'test_binary_dir.svm')
test_file = open(test_file_name, 'w')
tags_map = {'OK': '+1', 'BAD': '-1'}
for feat, a_tag in zip(new_test_features_dir, test_tags):
test_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
inv_test_file_name = os.path.join(persist_dir, 'test_binary_inv.svm')
inv_test_file = open(inv_test_file_name, 'w')
tags_map_inv = {'OK': '-1', 'BAD': '+1'}
for feat, a_tag in zip(new_test_features_inv, test_tags):
inv_test_file.write('%s %s\n' % (tags_map_inv[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
logger.info("Get binary representation for training")
new_train_features = get_binary_features(train_features, feature_names, train_tags, binary_features)
# persist
logger.info("Export training and test")
train_file_name = os.path.join(persist_dir, 'train_binary.svm')
#test_file_name = os.path.join(persist_dir, 'test_binary.svm')
train_file = open(train_file_name, 'w')
for feat, a_tag in zip(new_train_features, train_tags):
train_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
train_file.close()
test_file.close()
inv_test_file.close()
return train_file_name, test_file_name, inv_test_file_name
# persist to svm without tag encoded in features
def persist_to_svm_blind(train_features, test_features, train_tags, test_tags, feature_names, persist_dir):
# binarize
logger.info("Binarize features")
binary_features = binarize_features_blind(train_features, feature_names)
logger.info("Get binary representation for test")
new_test_features = get_binary_features_blind(test_features, feature_names, binary_features)
test_file_name = os.path.join(persist_dir, 'test_binary.svm')
test_file = open(test_file_name, 'w')
tags_map = {'OK': '+1', 'BAD': '-1'}
for feat, a_tag in zip(new_test_features, test_tags):
test_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
logger.info("Get binary representation for training")
new_train_features = get_binary_features_blind(train_features, feature_names, binary_features)
# persist
logger.info("Export training and test")
train_file_name = os.path.join(persist_dir, 'train_binary.svm')
#test_file_name = os.path.join(persist_dir, 'test_binary.svm')
train_file = open(train_file_name, 'w')
for feat, a_tag in zip(new_train_features, train_tags):
train_file.write('%s %s\n' % (tags_map[a_tag], ' '.join([str(f) + ':1.0' for f in feat])))
train_file.close()
test_file.close()
return train_file_name, test_file_name
def main(config):
workers = config['workers']
tmp_dir = config['tmp_dir']
tmp_dir = mk_tmp_dir(tmp_dir)
# REPRESENTATION GENERATION
# main representations (source, target, tags)
# training
train_data_generators = build_objects(config['datasets']['training'])
train_data = {}
for gen in train_data_generators:
data = gen.generate()
for key in data:
if key not in train_data:
train_data[key] = []
train_data[key].extend(data[key])
dev, test = False, False
# test
if 'test' in config['datasets']:
test = True
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
# dev
if 'dev' in config['datasets']:
dev = True
dev_data_generator = build_object(config['datasets']['dev'][0])
dev_data = dev_data_generator.generate()
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
if test:
test_data = r.generate(test_data)
if dev:
dev_data = r.generate(dev_data)
logger.info("Simple representations: {}".format(len(train_data['target'])))
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['contexts']
print("DATA TYPE:", data_type)
# sys.exit()
train_contexts = create_contexts(train_data, data_type=data_type)
if test:
test_contexts = create_contexts(test_data, data_type=data_type)
if dev:
dev_contexts = create_contexts(dev_data, data_type=data_type)
logger.info('Vocabulary comparison -- coverage for each dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
if test:
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
if dev:
dev_tags = call_for_each_element(dev_contexts, tags_from_contexts, data_type=data_type)
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
if test:
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
print("Test features sample: ", test_features[0])
if dev:
logger.info('mapping the feature extractors over the contexts for dev...')
dev_features = call_for_each_element(dev_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, 1], data_type=data_type)
print("Train features sample: ", train_features[0])
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
# binarizing features
logger.info('binarization flag: {}'.format(config['features']['binarize']))
# flatten so that we can properly binarize the features
if config['features']['binarize'] is True:
logger.info('Binarizing your features...')
all_values = []
if data_type == 'sequential':
all_values = flatten(train_features)
elif data_type == 'plain':
all_values = train_features
elif data_type == 'token':
all_values = flatten(train_features.values())
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
features_num = len(feature_names)
true_features_num = len(all_values[0])
logger.info('fitting binarizers...')
binarizers = fit_binarizers(all_values)
logger.info('binarizing test data...')
test_features = call_for_each_element(test_features, binarize, [binarizers], data_type=data_type)
logger.info('binarizing training data...')
# TODO: this line hangs with alignment+w2v
train_features = call_for_each_element(train_features, binarize, [binarizers], data_type=data_type)
logger.info('All of your features are now scalars in numpy arrays')
logger.info('training and test sets successfully generated')
# persisting features
# experiment_datasets = [{'name': 'train', 'features': train_features, 'tags': train_tags}]
# if test:
# experiment_datasets.append({'name': 'test', 'features': test_features, 'tags': test_tags})
# if dev:
# experiment_datasets.append({'name': 'dev', 'features': dev_features, 'tags': dev_tags})
# feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
persist_dir = config['persist_dir'] if 'persist_dir' in config else config['features']['persist_dir']
persist_dir = mk_tmp_dir(persist_dir)
# train_file_name, test_file_name, inv_test_file_name = persist_to_svm_dbl(train_features, test_features, feature_names, train_tags, test_tags, persist_dir)
train_file_name, test_file_name = persist_to_svm_blind(train_features, test_features, train_tags, test_tags, feature_names, persist_dir)
model_name = os.path.join(persist_dir, 'model')
logger.info("Start training")
kernel = 0 # linear kernel (default)
    if 'svm_params' in config:
        kernel = int(config['svm_params']['kernel'])
        # fall back to the linear kernel for invalid kernel ids
        if kernel < 0 or kernel > 4:
            kernel = 0
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_learn', '-t', str(kernel), train_file_name, model_name])
logger.info("Training completed, start testing")
test_file = os.path.join(persist_dir, 'out')
# inverse_test_file = os.path.join(persist_dir, 'out_inv')
call(['/export/tools/varvara/svm_multiclass/svm_light/svm_classify', '-f', '0', test_file_name, model_name, test_file])
# call(['/export/tools/varvara/svm_multiclass/svm_light/svm_classify', '-f', '0', inv_test_file_name, model_name, inverse_test_file])
logger.info("Testing completed")
# predicted = get_test_score(test_file, inverse_test_file)
predicted = get_test_score_blind(test_file)
tag_map = {'OK': 1, 'BAD': 0}
test_tags_num = [tag_map[t] for t in test_tags]
logger.info(f1_score(predicted, test_tags_num, average=None))
logger.info(f1_score(predicted, test_tags_num, average='weighted', pos_label=None))
# persist_format = config['persist_format'] if 'persist_format' in config else config['features']['persist_format']
# logger.info('persisting your features to: {}'.format(persist_dir))
# # for each dataset, write a file and persist the features
# for dataset_obj in experiment_datasets:
# persist_features(dataset_obj['name'], dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=persist_format)
# # generate a template for CRF++ feature extractor
# feature_num = len(feature_names)
# if persist_format == 'crf++':
# generate_crf_template(feature_num, 'template', persist_dir)
# logger.info('Features persisted to: {}'.format(', '.join([os.path.join(persist_dir, nn) for nn in [obj['name'] for obj in experiment_datasets]])))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("--tmp", action="store", default=None, help="temporary directory")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
if args.tmp is not None:
experiment_config['tmp_dir'] = args.tmp
main(experiment_config)
| 19,675 | 45.079625 | 167 | py |
marmot | marmot-master/marmot/experiment/run_experiment_ngram.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import os
import sys
import time
from subprocess import call
from marmot.experiment.import_utils import build_objects, build_object, call_for_each_element, import_class
from marmot.experiment.preprocessing_utils import tags_from_contexts, contexts_to_features, flatten, fit_binarizers, binarize
from marmot.experiment.context_utils import create_contexts_ngram, get_contexts_words_number
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
def label_test(flat_labels, new_test_name, text_file, method_name):
tag_map = {0: 'BAD', 1: 'OK'}
new_test_plain = open(new_test_name+'.'+method_name+'.plain', 'w')
new_test_ext = open(new_test_name+'.'+method_name+'.ext', 'w')
start_idx = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
        start_idx += len(words)
for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))
# write both hypothesis and reference
def label_test_hyp_ref(flat_labels, flat_true_labels, new_test_name, text_file):
tag_map = {0: 'BAD', 1: 'OK'}
new_test = open(new_test_name, 'w')
new_test_plain = open(new_test_name+'.plain', 'w')
start = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
tag_seq = [tag_map[flat_labels[i]] for i in range(start, start+len(words))]
true_tag_seq = [tag_map[flat_true_labels[i]] for i in range(start, start+len(words))]
new_test_plain.write('%s\n' % ' '.join(tag_seq))
start += len(words)
for t_idx, (tag, true_tag, word) in enumerate(zip(tag_seq, true_tag_seq, words)):
new_test.write('%d\t%d\t%s\t%s\t%s\n' % (s_idx, t_idx, word.encode('utf-8'), true_tag, tag))
# check that everything in a data_obj matches:
# - all source and target sentences exist
# - alignments don't hit out of bounds
# - target tokens really exist and are in their places
def main(config, stamp):
# the data_type is the format corresponding to the model of the data that the user wishes to learn
data_type = config['data_type'] if 'data_type' in config else (config['contexts'] if 'contexts' in config else 'plain')
bad_tagging = config['bad_tagging'] if 'bad_tagging' in config else 'pessimistic'
logger.info("data_type -- {}, bad_tagging -- {}".format(data_type, bad_tagging))
# time_stamp = str(time.time())
time_stamp = stamp
workers = config['workers']
tmp_dir = config['tmp_dir']
# one generator
train_data_generator = build_object(config['datasets']['training'][0])
train_data = train_data_generator.generate()
# test
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
logger.info("Train data keys: {}".format(train_data.keys()))
logger.info("Train data sequences: {}".format(len(train_data['target'])))
logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
test_data = r.generate(test_data)
borders = config['borders'] if 'borders' in config else False
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging)
test_contexts = create_contexts_ngram(test_data, data_type=data_type, test=True, bad_tagging=bad_tagging, tags_format=config['tags_format'])
print("Objects in the train data: {}".format(len(train_data['target'])))
print("UNAMBIGUOUS: ", config['unambiguous'])
# train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'])
train_contexts = create_contexts_ngram(train_data, data_type=data_type, bad_tagging=bad_tagging, unambiguous=config['unambiguous'], tags_format=config['tags_format'])
#print("Train contexts: {}".format(len(train_contexts)))
#print("1st context:", train_contexts[0])
# the list of context objects' 'target' field lengths
# to restore the word-level tags from the phrase-level
#test_context_correspondence = get_contexts_words_number(test_contexts)
if data_type == 'sequential':
test_context_correspondence = flatten([get_contexts_words_number(cont) for cont in test_contexts])
#print(test_context_correspondence)
for idx, cont in enumerate(test_contexts):
get_cont = get_contexts_words_number(cont)
count_cont = [len(c['token']) for c in cont]
assert(all([get_cont[i] == count_cont[i] for i in range(len(cont))])), "Sum doesn't match at line {}:\n{}\n{}".format(idx, ' '.join([str(c) for c in get_cont]), ' '.join([str(c) for c in count_cont]))
        assert(sum(test_context_correspondence) == sum([len(c['token']) for cont in test_contexts for c in cont])), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for cont in test_contexts for c in cont]))
else:
test_context_correspondence = get_contexts_words_number(test_contexts)
assert(sum(test_context_correspondence) == sum([len(c['token']) for c in test_contexts])), "Sums don't match: {} and {}".format(sum(test_context_correspondence), sum([len(c['token']) for c in test_contexts]))
# print("Token lengths:", sum([len(c['token']) for c in test_contexts]))
# assert(sum(test_context_correspondence) == 9613), "GOLAKTEKO OPASNOSTE!!!, {}".format(sum(test_context_correspondence))
# sys.exit()
# if data_type == 'sequential':
# test_context_correspondence = flatten(test_context_correspondence)
logger.info('Vocabulary comparison -- coverage for each dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
test_tags_true = test_data['tags']
tag_idx = 0
seg_idx = 0
# test_context_correspondence_seq = [get_contexts_words_number(cont) for cont in test_contexts]
# for idx, (tag_seq, phr_seq) in enumerate(zip(test_data['tags'], test_context_correspondence_seq)):
# assert(len(tag_seq) == sum(phr_seq)),"Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
# tag_idx = 0
# for d in phr_seq:
# first_tag = tag_seq[tag_idx]
# assert(all([t == first_tag for t in tag_seq[tag_idx:tag_idx+d]])), "Something wrong in line {}:\n{}\n{}".format(idx, ' '.join(tag_seq), ' '.join([str(p) for p in phr_seq]))
# try:
# indicator = [t == first_tag for t in test_data['tags'][seg_idx][tag_idx:tag_idx+d]]
# assert(all(indicator))
# tags_cnt += d
# if tags_cnt == len(test_data['tags'][seg_idx]):
# tags_cnt = 0
# seg_idx += 1
# elif tags_cnt > len(test_data['tags'][seg_idx]):
# raise
# except:
# print("No correspondence in line {}, tag {}: \n{}\n{}".format(seg_idx, tag_idx, ' '.join(test_data['tags'][seg_idx]), d))
# sys.exit()
#assert(sum(test_context_correspondence) == len(flatten(test_data['tags']))), "Sums don't match for phrase contexts and test data object: {} and {}".format(sum(test_context_correspondence), len(flatten(test_data['tags'])))
# flat_cont = flatten(test_contexts)
# flat_tags = flatten(test_data['tags'])
# for ii in range(len(flat_cont)):
if data_type == 'plain':
assert(len(test_context_correspondence) == len(test_tags)), "Lengths don't match for phrase contexts and test tags: {} and {}".format(len(test_context_correspondence), len(test_tags))
# test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.cross_validation import permutation_test_score
import numpy as np
tag_map = {u'OK': 1, u'BAD': 0}
if data_type == 'sequential':
# TODO: save features for CRFSuite, call it
logger.info('training sequential model...')
experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
print("FEATURE NAMES: ", feature_names)
persist_dir = tmp_dir
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
if 'persist_format' not in config:
config['persist_format'] = 'crf_suite'
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name']+time_stamp, dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=config['persist_format'])
feature_num = len(train_features[0][0])
train_file = os.path.join(tmp_dir, 'train'+time_stamp+'.crf')
test_file = os.path.join(tmp_dir, 'test'+time_stamp+'.crf')
if config['persist_format'] == 'crf++':
# generate a template for CRF++ feature extractor
generate_crf_template(feature_num, 'template', tmp_dir)
# train a CRF++ model
call(['crf_learn', os.path.join(tmp_dir, 'template'), train_file, os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp)])
# tag a test set
call(['crf_test', '-m', os.path.join(tmp_dir, 'crfpp_model_file'+time_stamp), '-o', test_file+'.tagged', test_file])
elif config['persist_format'] == 'crf_suite':
crfsuite_algorithm = config['crfsuite_algorithm'] if 'crfsuite_algorithm' in config else 'arow'
call(['crfsuite', 'learn', '-a', crfsuite_algorithm, '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), train_file])
test_out = open(test_file+'.tagged', 'w')
call(['crfsuite', 'tag', '-tr', '-m', os.path.join(tmp_dir, 'crfsuite_model_file'+time_stamp), test_file], stdout=test_out)
test_out.close()
else:
print("Unknown persist format: {}".format(config['persist_format']))
sys.exit()
sequential_true = [[]]
sequential_predictions = [[]]
flat_true = []
flat_predictions = []
for line in open(test_file+'.tagged'):
# end of tagging, statistics reported
if line.startswith('Performance'):
break
if line == '\n':
sequential_predictions.append([])
continue
chunks = line[:-1].decode('utf-8').split()
flat_true.append(chunks[-2])
sequential_true[-1].append(chunks[-2])
flat_predictions.append(chunks[-1])
sequential_predictions[-1].append(chunks[-1])
# restoring the word-level tags
test_predictions_word, test_tags_word = [], []
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
test_predictions_word.append(flat_predictions[idx])
test_tags_word.append(flat_true[idx])
print(f1_score(test_predictions_word, test_tags_word, average=None))
print(f1_score(test_predictions_word, test_tags_word, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_predictions_word, test_tags_word, average=None), recall_score(test_predictions_word, test_tags_word, average=None)))
else:
train_tags = [tag_map[tag] for tag in train_tags]
#print(test_tags)
test_tags = [tag_map[tag] for tag in test_tags]
#print(test_tags)
#sys.exit()
# data_type is 'token' or 'plain'
logger.info('start training...')
classifier_type = import_class(config['learning']['classifier']['module'])
# train the classifier(s)
classifier_map = map_classifiers(train_features, train_tags, classifier_type, data_type=data_type)
logger.info('classifying the test instances')
test_predictions = predict_all(test_features, classifier_map, data_type=data_type)
# assert(len(test_predictions) == len(flatten(test_tags_seq))), "long predictions: {}, sequential: {}".format(len(test_predictions), len(flatten(test_tags_seq)))
cnt = 0
test_predictions_seq = []
test_tags_seq_num = []
tag_map = {'OK': 1, 'BAD': 0, 1: 1, 0: 0}
long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False
# restoring the word-level tags
test_predictions_word, test_tags_word = [], []
logger.info("Test predictions lenght: {}".format(len(test_predictions)))
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
test_predictions_word.append(test_predictions[idx])
test_tags_word.append(test_tags[idx])
test_tags_true_flat = flatten(test_tags_true)
test_tags_true_flat = [tag_map[t] for t in test_tags_true_flat]
# print(f1_score(test_tags_word, test_predictions_word, average=None))
# print(f1_score(test_tags_word, test_predictions_word, average='weighted', pos_label=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average=None))
print(f1_score(test_tags_true_flat, test_predictions_word, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_tags_true_flat, test_predictions_word, average=None), recall_score(test_tags_true_flat, test_predictions_word, average=None)))
# TODO: remove the hard coding of the tags here
bad_count = sum(1 for t in test_tags if t == u'BAD' or t == 0)
good_count = sum(1 for t in test_tags if t == u'OK' or t == 1)
total = len(test_tags)
assert (total == bad_count+good_count), 'tag counts should be correct'
percent_good = good_count / total
logger.info('percent good in test set: {}'.format(percent_good))
logger.info('percent bad in test set: {}'.format(1 - percent_good))
random_class_results = []
random_weighted_results = []
for i in range(20):
random_tags_phrase = list(np.random.choice([1, 0], total, [percent_good, 1-percent_good]))
random_tags = []
for idx, n in enumerate(test_context_correspondence):
for i in range(n):
random_tags.append(random_tags_phrase[idx])
# random_tags = [u'GOOD' for i in range(total)]
random_class_f1 = f1_score(test_tags_true_flat, random_tags, average=None)
random_class_results.append(random_class_f1)
logger.info('two class f1 random score ({}): {}'.format(i, random_class_f1))
# random_average_f1 = f1_score(random_tags, test_tags, average='weighted')
random_average_f1 = f1_score(test_tags_true_flat, random_tags, average='weighted', pos_label=None)
random_weighted_results.append(random_average_f1)
# logger.info('average f1 random score ({}): {}'.format(i, random_average_f1))
avg_random_class = np.average(random_class_results, axis=0)
avg_weighted = np.average(random_weighted_results)
logger.info('two class f1 random average score: {}'.format(avg_random_class))
logger.info('weighted f1 random average score: {}'.format(avg_weighted))
# print("Cross-validation:")
# print(permutation_test_score())
# logger.info("Sequence correlation: ")
# print(sequence_correlation_weighted(test_tags_seq_num, test_predictions_seq, verbose=True)[1])
label_test_hyp_ref(test_predictions_word, test_tags_true_flat, os.path.join(tmp_dir, config['output_name']), config["output_test"])
# label_test(test_predictions, '/export/data/varvara/marmot/marmot/experiment/final_submissions/baseline', '/export/data/varvara/corpora/wmt15_corrected/test.target', 'BASELINE')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
parser.add_argument("--data_type", help="data type - sequential or plain")
parser.add_argument("--bad_tagging", help="tagging -- optimistic, pessimistic or super-pessimistic")
parser.add_argument("--unambiguous", default=0, help="make the tagging unambiguous -- no segmentation for spans of BAD tag (values - 0 or 1, default 0)")
parser.add_argument("--output_name", default="output", help="file to store the test set tagging")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
if args.data_type is not None:
experiment_config['data_type'] = args.data_type
if args.bad_tagging is not None:
experiment_config['bad_tagging'] = args.bad_tagging
experiment_config['unambiguous'] = True if int(args.unambiguous) == 1 else False
experiment_config['output_name'] = args.output_name
stamp = os.path.basename(cfg_path).replace('config', '').replace('.yaml', '') + '_' + experiment_config['bad_tagging'] + '_' + experiment_config['data_type']
if experiment_config['unambiguous']:
stamp += '_un'
main(experiment_config, stamp)
| 19,765 | 55.31339 | 252 | py |
marmot | marmot-master/marmot/experiment/run_experiment_word.py | from __future__ import print_function, division
from argparse import ArgumentParser
import yaml
import logging
import copy
import sys
from marmot.experiment.import_utils import *
from marmot.experiment.preprocessing_utils import *
from marmot.experiment.learning_utils import map_classifiers, predict_all
from marmot.evaluation.evaluation_metrics import weighted_fmeasure, sequence_correlation, sequence_correlation_weighted
from marmot.evaluation.evaluation_utils import compare_vocabulary
from marmot.util.persist_features import persist_features
from marmot.evaluation.evaluation_utils import write_res_to_file
from marmot.experiment.preprocessing_utils_old import multiply_data, multiply_data_ngrams, multiply_data_all, multiply_data_base
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
def label_test(flat_labels, new_test_name, text_file, method_name):
tag_map = {0: 'BAD', 1: 'OK'}
new_test_plain = open(new_test_name+'.'+method_name+'.plain', 'w')
new_test_ext = open(new_test_name+'.'+method_name+'.ext', 'w')
start_idx = 0
for s_idx, txt in enumerate(open(text_file)):
words = txt[:-1].decode('utf-8').strip().split()
        tag_seq = [tag_map[flat_labels[i]] for i in range(start_idx, start_idx + len(words))]
        new_test_plain.write('%s\n' % ' '.join(tag_seq))
        start_idx += len(words)
for t_idx, (tag, word) in enumerate(zip(tag_seq, words)):
new_test_ext.write('%s\t%d\t%d\t%s\t%s\n' % (method_name, s_idx, t_idx, word.encode('utf-8'), tag))
def main(config):
workers = config['workers']
# REPRESENTATION GENERATION
# main representations (source, target, tags)
# training
train_data_generator = build_object(config['datasets']['training'][0])
train_data = train_data_generator.generate()
# train_data = {}
# for gen in train_data_generators:
# data = gen.generate()
# for key in data:
# if key not in train_data:
# train_data[key] = []
# train_data[key].extend(data[key])
# test
test_data_generator = build_object(config['datasets']['test'][0])
test_data = test_data_generator.generate()
logger.info("Train data keys: {}".format(train_data.keys()))
logger.info("Train data sequences: {}".format(len(train_data['target'])))
logger.info("Sample sequence: {}".format([w.encode('utf-8') for w in train_data['target'][0]]))
# logger.info("Sample sequence: {}".format(train_data['similarity'][0]))
# sys.exit()
#logger.info("Alignment file: {}".format(train_data['alignments_file']))
#logger.info("Alignment file: {}".format(test_data['alignments_file']))
# additional representations
if 'representations' in config:
representation_generators = build_objects(config['representations'])
else:
representation_generators = []
for r in representation_generators:
train_data = r.generate(train_data)
test_data = r.generate(test_data)
borders = config['borders'] if 'borders' in config else False
if 'multiply_data_train' not in config:
pass
elif config['multiply_data_train'] == 'ngrams':
logger.info("Multiply data: {} and {}".format(config['multiply_data_train'], config['multiply_data_test']))
train_data = multiply_data_ngrams(train_data, borders=borders)
logger.info("Sequences: {}, tag sequences: {}".format(len(train_data['target']), len(train_data['tags'])))
elif config['multiply_data_train'] == '1ton':
logger.info("Multiply data: {} and {}".format(config['multiply_data_train'], config['multiply_data_test']))
train_data = multiply_data(train_data, borders=borders)
elif config['multiply_data_train'] == 'duplicate':
train_data = multiply_data_base(train_data)
elif config['multiply_data_train'] == 'all':
train_data = multiply_data_all(train_data, borders=borders)
else:
print("Unknown 'multiply data train' value: {}".format(config['multiply_data_train']))
logger.info("Train data example: {}".format(train_data['target'][:10]))
logger.info("Train tags example: {}".format(train_data['tags'][:10]))
logger.info("Extended train representations: {}".format(len(train_data['target'])))
# print(train_data[:2])
logger.info("Simple test representations: {}".format(len(test_data['target'])))
if 'multiply_data_test' not in config:
pass
elif config['multiply_data_test'] == 'ngrams':
test_data = multiply_data_ngrams(test_data, borders=borders)
elif config['multiply_data_test'] == '1ton':
test_data = multiply_data(test_data, borders=borders)
else:
print("Unknown 'multiply data test' value: {}".format(config['multiply_data_test']))
logger.info("Extended test representations: {}".format(len(test_data['target'])))
logger.info('here are the keys in your representations: {}'.format(train_data.keys()))
# the data_type is the format corresponding to the model of the data that the user wishes to learn
# data_type = config['contexts'] if 'contexts' in config else 'plain'
data_type = config['data_type'] if 'data_type' in config else 'sequential'
test_contexts = create_contexts(test_data, data_type=data_type)
test_contexts_seq = create_contexts(test_data, data_type='sequential')
train_contexts = create_contexts(train_data, data_type=data_type)
logger.info('Vocabulary comparison -- coverage for each dataset: ')
logger.info(compare_vocabulary([train_data['target'], test_data['target']]))
# END REPRESENTATION GENERATION
# FEATURE EXTRACTION
train_tags = call_for_each_element(train_contexts, tags_from_contexts, data_type=data_type)
test_tags = call_for_each_element(test_contexts, tags_from_contexts, data_type=data_type)
test_tags_seq = call_for_each_element(test_contexts_seq, tags_from_contexts, data_type='sequential')
logger.info('creating feature extractors...')
feature_extractors = build_objects(config['feature_extractors'])
logger.info('mapping the feature extractors over the contexts for test...')
test_features = call_for_each_element(test_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('mapping the feature extractors over the contexts for train...')
train_features = call_for_each_element(train_contexts, contexts_to_features, [feature_extractors, workers], data_type=data_type)
logger.info('number of training instances: {}'.format(len(train_features)))
logger.info('number of testing instances: {}'.format(len(test_features)))
logger.info('train features sample: {}'.format(train_features[:5]))
logger.info('train tags sample: {}'.format(train_tags[:5]))
logger.info('All of your features now exist in their raw representation, but they may not be numbers yet')
# END FEATURE EXTRACTION
# BEGIN CONVERTING FEATURES TO NUMBERS
logger.info('binarization flag: {}'.format(config['features']['binarize']))
# flatten so that we can properly binarize the features
if config['features']['binarize'] is True:
logger.info('Binarizing your features...')
all_values = []
if data_type == 'sequential':
all_values = flatten(train_features)
elif data_type == 'plain':
all_values = train_features
elif data_type == 'token':
all_values = flatten(train_features.values())
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
features_num = len(feature_names)
true_features_num = len(all_values[0])
logger.info('fitting binarizers...')
binarizers = fit_binarizers(all_values)
logger.info('binarizing test data...')
test_features = call_for_each_element(test_features, binarize, [binarizers], data_type=data_type)
logger.info('binarizing training data...')
# TODO: this line hangs with alignment+w2v
train_features = call_for_each_element(train_features, binarize, [binarizers], data_type=data_type)
logger.info('All of your features are now scalars in numpy arrays')
logger.info('training and test sets successfully generated')
# the way that we persist depends upon the structure of the data (plain/sequence/token_dict)
# TODO: remove this once we have a list containing all datasets
if config['features']['persist']:
if 'persist_format' in config['features']:
persist_format = config['features']['persist_format']
else:
persist_format = 'crf++'
experiment_datasets = [{'name': 'test', 'features': test_features, 'tags': test_tags}, {'name': 'train', 'features': train_features, 'tags': train_tags}]
feature_names = [f for extractor in feature_extractors for f in extractor.get_feature_names()]
        if 'persist_dir' in config:
            persist_dir = config['persist_dir']
        else:
            persist_dir = os.getcwd()
logger.info('persisting your features to: {}'.format(persist_dir))
# for each dataset, write a file and persist the features
for dataset_obj in experiment_datasets:
persist_features(dataset_obj['name'], dataset_obj['features'], persist_dir, feature_names=feature_names, tags=dataset_obj['tags'], file_format=persist_format)
sys.exit()
# BEGIN LEARNING
# TODO: different sequence learning modules need different representation, we should wrap them in a class
# TODO: create a consistent interface to sequence learners, will need to use *args and **kwargs because APIs are very different
from sklearn.metrics import f1_score, precision_score, recall_score
import numpy as np
    tag_map = {u'OK': 1, u'BAD': 0, 0: 0, 1: 1}
    # 'long_test' is needed in both branches below
    long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False
    if data_type == 'sequential':
logger.info('training sequential model...')
train_tags = [[tag_map[tag] for tag in seq] for seq in train_tags]
test_tags = [[tag_map[tag] for tag in seq] for seq in test_tags]
x_train = np.array([np.array(xi) for xi in train_features])
y_train = np.array([np.array(xi) for xi in train_tags])
        x_test = np.array([np.array(xi) for xi in test_features])
        y_test = np.array([np.array(xi) for xi in test_tags])
# pystruct
from marmot.learning.pystruct_sequence_learner import PystructSequenceLearner
sequence_learner = PystructSequenceLearner()
sequence_learner.fit(x_train, y_train)
structured_hyp = sequence_learner.predict(x_test)
# only the last word in every sequence should be counted
flattened_hyp = []
flattened_ref = []
if long_test:
for idx, seq in enumerate(structured_hyp):
flattened_hyp.append(seq[-1])
flattened_ref.append(y_test[idx][-1])
else:
flattened_hyp = flatten(structured_hyp)
flattened_ref = flatten(y_test)
logger.info('scoring sequential model...')
# TODO: the flattening is currently a hack to let us use the same evaluation code for structured and plain tasks
# flattened_hyp = flatten(structured_hyp)
# end pystruct
# for idx, seq in enumerate(test_tags_seq):
# cnt += len(seq)
# if cnt >= len(test_predictions):
# print("long predictions: {}, sequential: {}, sequence #{}".format(len(test_predictions), len(flatten(test_tags_seq)), idx))
# print("Sequence: ", test_contexts_seq[idx])
# if long_test:
# cnt = -1
# new_predictions = []
# new_true = []
# for seq in test_tags_seq:
# cnt += len(seq)
# new_predictions.append(tag_map[test_predictions[cnt]])
# new_true.append(tag_map[seq[-1]])
# test_predictions = new_predictions
# test_tags = new_true
#
# print(f1_score(test_predictions, test_tags, average=None))
print("Ref, hyp: ", len(flattened_ref), len(flattened_hyp))
logger.info('Structured prediction f1: ')
print(f1_score(flattened_ref, flattened_hyp, average=None))
print(f1_score(flattened_ref, flattened_hyp, average='weighted', pos_label=None))
logger.info("Sequence correlation: ")
print(sequence_correlation_weighted(y_test, structured_hyp, verbose=True)[1])
else:
train_tags = [tag_map[tag] for tag in train_tags]
test_tags = [tag_map[tag] for tag in test_tags]
# data_type is 'token' or 'plain'
logger.info('start training...')
classifier_type = import_class(config['learning']['classifier']['module'])
# train the classifier(s)
classifier_map = map_classifiers(train_features, train_tags, classifier_type, data_type=data_type)
logger.info('classifying the test instances')
test_predictions = predict_all(test_features, classifier_map, data_type=data_type)
# assert(len(test_predictions) == len(flatten(test_tags_seq))), "long predictions: {}, sequential: {}".format(len(test_predictions), len(flatten(test_tags_seq)))
cnt = 0
test_predictions_seq = []
test_tags_seq_num = []
tag_map = {'OK': 1, 'BAD': 0, 1: 1, 0: 0}
long_test = True if 'multiply_data_test' in config and (config['multiply_data_test'] == 'ngrams' or config['multiply_data_test'] == '1ton') else False
for idx, seq in enumerate(test_tags_seq):
test_predictions_seq.append([])
test_tags_seq_num.append([])
for w in seq:
test_predictions_seq[-1].append(tag_map[test_predictions[cnt]])
test_tags_seq_num[-1].append(tag_map[w])
cnt += 1
# cnt += len(seq)
# if cnt >= len(test_predictions):
# print("long predictions: {}, sequential: {}, sequence #{}".format(len(test_predictions), len(flatten(test_tags_seq)), idx))
# print("Sequence: ", test_contexts_seq[idx])
if long_test:
cnt = -1
new_predictions = []
new_true = []
for seq in test_tags_seq:
cnt += len(seq)
new_predictions.append(tag_map[test_predictions[cnt]])
new_true.append(tag_map[seq[-1]])
test_predictions = new_predictions
test_tags = new_true
print(f1_score(test_predictions, test_tags, average=None))
print(f1_score(test_predictions, test_tags, average='weighted', pos_label=None))
print("Precision: {}, recall: {}".format(precision_score(test_predictions, test_tags, average=None), recall_score(test_predictions, test_tags, average=None)))
logger.info("Sequence correlation: ")
print(sequence_correlation_weighted(test_tags_seq_num, test_predictions_seq, verbose=True)[1])
# label_test(test_predictions, '/export/data/varvara/marmot/marmot/experiment/final_submissions/baseline', '/export/data/varvara/corpora/wmt15_corrected/test.target', 'BASELINE')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("configuration_file", action="store", help="path to the config file (in YAML format).")
args = parser.parse_args()
experiment_config = {}
# Experiment hyperparams
cfg_path = args.configuration_file
# read configuration file
with open(cfg_path, "r") as cfg_file:
experiment_config = yaml.load(cfg_file.read())
main(experiment_config)
| 15,599 | 48.52381 | 185 | py |
marmot | marmot-master/marmot/experiment/experiment_utils.py | from __future__ import division, print_function
import numpy as np
import multiprocessing as multi
import logging
import types
import sklearn
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from import_utils import import_class
from preprocessing_utils import map_feature_extractor
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
def init_class(klass, args):
return klass(*args)
# def build_context_creator(creator_obj):
# creator_klass = import_class(creator_obj['module'])
# input_args = creator_obj['args']
#
# # map args to function outputs where requested
# for idx, arg in enumerate(input_args):
# if type(arg) is dict and 'type' in arg and arg['type'] == 'function_output':
# func = import_function(arg['func'])
# input_args[idx] = function_tree(func, arg['args'])
#
# # init the object
# creator = creator_klass(*input_args)
# return creator
#
#
# def build_context_creators(context_creator_list):
# context_creators = []
# for creator_obj in context_creator_list:
# creator = build_context_creator(creator_obj)
# context_creators.append(creator)
# return context_creators
def filter_contexts(token_contexts, min_total=1):
return {token: contexts for token, contexts in token_contexts.items() if len(contexts) >= min_total}
# filter contexts to satisfy the whole size constraint and the class size constraint
def filter_contexts_class(token_contexts, min_total=1, min_class_count=1, proportion=2):
new_token_contexts = {}
classes = set([cc['tag'] for context in token_contexts.values() for cc in context])
for token, contexts in token_contexts.items():
# no need to check other conditions if there are too few contexts
if len(contexts) < min_total:
continue
class_counts = {cl: 0 for cl in classes}
for cc in contexts:
class_counts[cc['tag']] += 1
min_class = min(class_counts.values())
cur_proportion = max(class_counts.values())/max(min_class,1)
if min_class >= min_class_count and cur_proportion <= proportion:
new_token_contexts[token] = contexts
return new_token_contexts
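# An invented illustration: a token whose contexts carry tags
# ['OK', 'OK', 'BAD'] has class_counts {'OK': 2, 'BAD': 1}, so it is kept
# when min_total <= 3, min_class_count <= 1 and proportion >= 2.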
# convert the tag representation of a list of contexts into another format (remap the tag strings)
import copy
def convert_tagset(tagmap, tok_contexts):
tok_contexts_copy = copy.deepcopy(tok_contexts)
for tok, contexts in tok_contexts_copy.iteritems():
for context in contexts:
context['tag'] = tagmap[context['tag']]
return tok_contexts_copy
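# A minimal sketch with an invented tag map:
#     convert_tagset({'OK': 1, 'BAD': 0}, {u'dog': [{'tag': 'OK'}]})
#     -> {u'dog': [{'tag': 1}]}  (the input dict is left untouched, thanks to deepcopy)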
def flatten(lofl):
return [item for sublist in lofl for item in sublist]
# returns a dict of token --> contexts
# remember that contexts store their own tags
# NOTE: this definition is shadowed by the multithreaded map_contexts below,
# whose workers == 1 branch is identical to this body
def map_contexts(tokens, context_creators):
    return {token: flatten([creator.get_contexts(token) for creator in context_creators]) for token in tokens}
def map_context_creators((token, context_creators)):
logger.info('mapping context creators for token: ' + token)
contexts = flatten([creator.get_contexts(token) for creator in context_creators])
return token, contexts
#multithreaded context mapping
def map_contexts(tokens, context_creators, workers=1):
#single thread
if workers == 1:
return {token: flatten([creator.get_contexts(token) for creator in context_creators]) for token in tokens}
#multiple threads
else:
# res_dict = {}
pool = multi.Pool(workers)
tokens_with_extractors = [(token, context_creators) for token in tokens]
res = pool.map(map_context_creators, tokens_with_extractors)
res_dict = {k:v for k,v in res}
return res_dict
# multithreaded feature extraction
# this is for a dict representation {<tpk>: [<context>,...]}
def token_contexts_to_features(token_contexts, feature_extractors, workers=1):
#single thread
if workers == 1:
return {token: np.vstack( [np.hstack([map_feature_extractor((context, extractor)) for extractor in feature_extractors] ) for context in contexts]) for token, contexts in token_contexts.items()}
#multiple threads
else:
#resulting object
res_dict = {}
pool = multi.Pool(workers)
print("Feature extractors: ", feature_extractors)
for token, contexts in token_contexts.items():
logger.info('Multithreaded - Extracting contexts for token: ' + token + ' -- with ' + str(len(contexts)) + ' contexts...')
#each context is paired with all feature extractors
# context_list = [ (cont, feature_extractors) for cont in contexts ]
extractors_output = []
for extractor in feature_extractors:
context_list = [(cont, extractor) for cont in contexts]
extractors_output.append(np.vstack(pool.map(map_feature_extractor, context_list)))
res_dict[token] = np.hstack(extractors_output)
return res_dict
# feature extraction for categorical features with convertation to one-hot representation
# TODO: this is unused code right now -- remove or update in favor of the new 'data_type' param
def token_contexts_to_features_categorical(token_contexts, feature_extractors, workers=1):
#single thread
if workers == 1:
return {token: [[x for a_list in [map_feature_extractor((context, extractor)) for extractor in feature_extractors] for x in a_list ] for context in contexts] for token, contexts in token_contexts.items()}
#multiple threads
else:
#resulting object
res_dict = {}
pool = multi.Pool(workers)
print("Feature extractors: ", feature_extractors)
for token, contexts in token_contexts.items():
logger.info('Multithreaded - Extracting categorical contexts for token: ' + token + ' -- with ' + str(len(contexts)) + ' contexts...')
#each context is paired with all feature extractors
extractors_output = []
for extractor in feature_extractors:
context_list = [(cont, extractor) for cont in contexts]
extractors_output.append( pool.map(map_feature_extractor, context_list) )
# np.hstack and np.vstack can't be used because lists have objects of different types
intermediate = [ [x[i] for x in extractors_output] for i in range(len(extractors_output[0])) ]
res_dict[token] = [ flatten(sl) for sl in intermediate ]
return res_dict
def feature_names_from_extractor_list(feature_extractors):
"""
get a list of feature names from a list of feature extractors
:param feature_extractors: a list of feature extractors
:return: a list of the feature names for each extractor (think first row of .csv file)
"""
feature_names = [feature_name for feature_extractor in feature_extractors for feature_name in feature_extractor.get_feature_names()]
return feature_names
def tags_from_contexts(token_contexts):
"""
create a dict mapping tokens to their tags
:param token_contexts:
:return: a dict of {<token>: [tag_i, tag_i+1, ...]}
"""
return {token: np.array([context['tag'] for context in contexts]) for token, contexts in token_contexts.items()}
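# A minimal sketch with invented contexts:
#     tags_from_contexts({u'dog': [{'tag': u'OK'}, {'tag': u'BAD'}]})
#     -> {u'dog': np.array([u'OK', u'BAD'])}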
def sync_keys(dict_a, dict_b):
'''
make sure two dicts have the same keys, delete the ones that are different
:param dict_a:
:param dict_b:
:return:
'''
dict_a_keys = set(dict_a.keys())
dict_b_keys = set(dict_b.keys())
for k in dict_a_keys.symmetric_difference(dict_b_keys):
if k in dict_a_keys:
del dict_a[k]
else:
del dict_b[k]
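# A minimal sketch with invented dicts: after
#     a = {'x': 1, 'y': 2}; b = {'y': 3, 'z': 4}; sync_keys(a, b)
# both dicts keep only the shared key: a == {'y': 2}, b == {'y': 3}.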
| 7,723 | 39.020725 | 212 | py |
marmot | marmot-master/marmot/learning/sequence_learner.py | # this is an abstract class representing a sequence learner, or 'structured' learner
# implementations wrap various sequence learning tools, in order to provide a consistent interface within Marmot
from abc import ABCMeta, abstractmethod
class SequenceLearner(object):
__metaclass__ = ABCMeta
# subclasses must provide the implementation
@abstractmethod
def fit(self, X, y):
'''
fit a sequence model to data in the format [[seq1_w1, seq1_w2, ...]],
:param X: a list of np.arrays, where each row in each array contains the features for an item in the sequence - X can be viewed as a 3d tensor
:param y: the true labels for each sequence
:return:
'''
pass
@abstractmethod
def predict(self, X):
'''
predict the tag for each item in each sequence
:param X: list of sequences list of np.array
:return: list of lists, where each list contains the predictions for the test sequence
'''
pass | 1,016 | 35.321429 | 150 | py |
marmot | marmot-master/marmot/learning/pystruct_sequence_learner.py | import numpy as np
from marmot.learning.sequence_learner import SequenceLearner
from pystruct.models import ChainCRF
from pystruct.learners import OneSlackSSVM
from pystruct.learners import StructuredPerceptron
# a learner which uses the pystruct library
class PystructSequenceLearner(SequenceLearner):
def __init__(self):
# the model
self.model = ChainCRF(directed=True)
# the learner
self.learner = OneSlackSSVM(model=self.model, C=.1, inference_cache=50, tol=0.1, n_jobs=1)
# self.learner = StructuredPerceptron(model=self.model, average=True)
def fit(self, X, y):
# Train linear chain CRF
self.learner.fit(X, y)
def predict(self, X):
return self.learner.predict(X)
| 752 | 27.961538 | 98 | py |
marmot | marmot-master/marmot/learning/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/util/add_bigram_features.py | def add_bigram_features(features, labels):
'''
Enhance feature set with features that consist
of a feature + label of previous word
E.g. from a set of features ['NN', 'Noun', 3]
create a set ['NN_OK', 'Noun_OK', '3_OK']
'''
assert(len(features) == len(labels))
new_features = []
for feat_element, a_label in zip(features, labels):
new_feat_element = []
for a_feat in feat_element:
new_feat_element.append(str(a_feat) + '_' + a_label)
new_features.append(feat_element + new_feat_element)
return new_features
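# A minimal sketch with invented features and labels:
#     add_bigram_features([['NN', 'Noun', 3]], ['OK'])
#     -> [['NN', 'Noun', 3, 'NN_OK', 'Noun_OK', '3_OK']]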
def add_bigram_features_test(features, a_label):
'''
Add previous label for features of one word
This is used as a replacement of add_bigram_features procedure
for test, where only one previous label at a time
is available.
'''
new_features = []
for a_feat in features:
new_features.append(str(a_feat) + '_' + a_label)
return features + new_features
| 982 | 30.709677 | 66 | py |
marmot | marmot-master/marmot/util/alignments.py | import os
import sys
import shutil
from subprocess import Popen
from marmot.util.force_align import Aligner
from marmot.experiment.import_utils import mk_tmp_dir
def train_alignments(src_train, tg_train, tmp_dir, align_model='align_model'):
cdec = os.environ['CDEC_HOME']
if cdec == '':
sys.stderr.write('No CDEC_HOME variable found. Please install cdec and/or set the variable\n')
return ''
if src_train == '' or tg_train == '':
sys.stderr.write('No parallel corpus for training\n')
return ''
# join source and target files
tmp_dir = mk_tmp_dir(tmp_dir)
shutil.copy(src_train, tmp_dir)
shutil.copy(tg_train, tmp_dir)
joint_name = os.path.join(tmp_dir, os.path.basename(src_train) + '_' + os.path.basename(tg_train))
src_tg_file = open(joint_name, 'w')
get_corp = Popen([cdec+'/corpus/paste-files.pl', src_train, tg_train], stdout=src_tg_file)
get_corp.wait()
src_tg_file.close()
src_tg_clean = open(joint_name+'.clean', 'w')
clean_corp = Popen([cdec+'/corpus/filter-length.pl', joint_name], stdout=src_tg_clean)
clean_corp.wait()
src_tg_clean.close()
align_model_full = os.path.join(tmp_dir, align_model)
# train the alignment model
fwd_align = open(align_model_full+'.fwd_align', 'w')
rev_align = open(align_model_full+'.rev_align', 'w')
fwd_err = open(align_model_full+'.fwd_err', 'w')
rev_err = open(align_model_full+'.rev_err', 'w')
fwd = Popen([cdec+'/word-aligner/fast_align', '-i'+joint_name+'.clean', '-d', '-v', '-o', '-p'+align_model_full+'.fwd_params'], stdout=fwd_align, stderr=fwd_err)
rev = Popen([cdec+'/word-aligner/fast_align', '-i'+joint_name+'.clean', '-r', '-d', '-v', '-o', '-p'+align_model_full+'.rev_params'], stdout=rev_align, stderr=rev_err)
fwd.wait()
rev.wait()
fwd_align.close()
rev_align.close()
fwd_err.close()
rev_err.close()
return align_model_full
def align_sentence(src_line, tg_line, align_model):
# TODO: there is an error here if one or both fields are missing -- we cannot align a sentence without both src_line and tg_line
# throw an error prompting the user to specify another dataset or context creator
# if not src_line or not tg_line:
cur_alignments = [[] for i in range(len(tg_line))]
aligner = Aligner(align_model+'.fwd_params', align_model+'.fwd_err', align_model+'.rev_params', align_model+'.rev_err')
align_str = aligner.align(' '.join(src_line)+u' ||| '+' '.join(tg_line))
# parse the return value from the aligner
for pair in align_str.split():
pair = pair.split('-')
cur_alignments[int(pair[1])].append(int(pair[0]))
aligner.close()
return cur_alignments
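# Example (hypothetical model prefix; requires trained fast_align parameter files):
#     align_sentence([u'un', u'garcon'], [u'a', u'boy'], 'tmp_dir/align_model')
#     # -> [[0], [1]]: target word 0 aligned to source word 0, word 1 to word 1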
def align_files(src_file, tg_file, align_model, align_file):
'''
align 2 files and put the alignments in a new file
    :param align_model: alignment model prefix
    :param align_file: new file to store the alignments
'''
aligner = Aligner(align_model+'.fwd_params', align_model+'.fwd_err', align_model+'.rev_params', align_model+'.rev_err')
align_out = open(align_file, 'w')
for src_line, tg_line in zip(open(src_file), open(tg_file)):
align_out.write('%s\n' % aligner.align(src_line[:-1].decode('utf-8') + u' ||| ' + tg_line[:-1].decode('utf-8')))
aligner.close()
align_out.close()
| 3,351 | 39.385542 | 171 | py |
marmot | marmot-master/marmot/util/extract_syntactic_features.py | # Extract syntactic sentence-level features from the output of Stanford parser
# Parser should be run using the following command:
#
# for English:
# java -mx3g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLP -file <INPUT> -outputFormat xml -annotators tokenize,ssplit,pos,depparse
# for German:
# java -mx3g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLP -props StanfordCoreNLP-german.properties -file <INPUT> -outputFormat xml -annotators tokenize,ssplit,pos,depparse
#
# in directory /export/tools/varvara/stanford_compiled/stanford-corenlp-full-2015-01-30
from __future__ import print_function, division
import xml.etree.ElementTree as ET
from subprocess import call
import numpy as np
import time
import sys
import os
def call_stanford(data_src, tmp_dir):
cur_dir = os.getcwd()
# call Stanford
os.chdir('/export/tools/varvara/stanford_compiled/stanford-corenlp-full-2015-01-30')
sys.stderr.write('Changed to Stanford dir\n')
sys.stderr.write('Cur dir: %s\n' % os.getcwd())
parsed_src_name = os.path.join(tmp_dir, os.path.basename(data_src) + '.xml')
sys.stderr.write('Output file will be: %s\n' % parsed_src_name)
syntactic_command_src = "java -Xmx10g -mx3g -cp '*' edu.stanford.nlp.pipeline.StanfordCoreNLP -file %s -outputFormat xml -annotators tokenize,ssplit,pos,depparse -ssplit.eolonly true -outputDirectory %s" % (data_src, tmp_dir)
# write syntactic command to file
command_file_name = 'tagger_run.' + str(time.time())
sys.stderr.write('Parsing command:\n%s\n' % syntactic_command_src)
command_file = open(command_file_name, 'w')
command_file.write('%s\n' % syntactic_command_src)
command_file.close()
call(['bash', command_file_name])
os.remove(command_file_name)
os.chdir(cur_dir)
return parsed_src_name
def call_parzu(data_tg, tmp_dir):
cur_dir = os.getcwd()
# call ParZu
os.chdir('/export/tools/varvara/ParZu')
syntactic_command_tg = "./parzu -i tokenized_lines"
parsed_tg_name = os.path.join(tmp_dir, os.path.basename(data_tg) + '.parzu')
parsed_tg = open(parsed_tg_name, 'w')
call(syntactic_command_tg.split(), stdin=open(data_tg), stdout=parsed_tg)
parsed_tg.close()
os.chdir(cur_dir)
return parsed_tg_name
def go_down(idx, cur_dep, dependencies):
if idx not in dependencies:
return cur_dep
return get_depth(dependencies, idx, cur_dep)
def get_depth(dependencies, root, cur_dep):
max_dep = 0
for arrow in dependencies[root]:
new_dep = go_down(arrow['id'], cur_dep + 1, dependencies)
max_dep = new_dep if new_dep > max_dep else max_dep
return max_dep
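# Dependencies are stored as {head: [{'id': child, 'type': relation}, ...]}.
# A hypothetical tree root(0) -> {1, 2}, 2 -> {3} looks like:
#     deps = {0: [{'id': 1, 'type': 'nsubj'}, {'id': 2, 'type': 'dobj'}],
#             2: [{'id': 3, 'type': 'amod'}]}
#     get_depth(deps, 0, 0)  # -> 2 (longest head-to-leaf path)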
# return: maximum depth of the tree,
# average depth
# proportion of internal nodes
def get_paths(dependencies, sentence_id):
all_paths = []
# find list of all tokens and root
tokens = set() # list of all tokens
dep_tokens = [] # tokens that have dependencies
internal_tokens = [] # internal nodes of the tree
for t in dependencies:
tokens.add(t)
internal_tokens.append(t)
for dep in dependencies[t]:
tokens.add(dep['id'])
dep_tokens.append(dep['id'])
tokens = list(tokens)
root_list = [t for t in tokens if t not in dep_tokens]
assert(len(root_list) == 1), "Wrong number of roots: {}, sentence_id: {}, dependencies: {}".format(len(root_list), sentence_id, dependencies)
root = root_list[0]
internal_tokens.remove(root)
# all paths
for t in tokens:
# use only leaves - tokens with no outcoming dependencies
if t in dependencies:
continue
cur_dep = 0
cur_head = t
while cur_head != root:
for head in dependencies:
for dep in dependencies[head]:
if dep['id'] == cur_head:
cur_dep += 1
cur_head = head
all_paths.append(cur_dep)
try:
features = [max(all_paths), np.average(all_paths), len(internal_tokens)/len(tokens)]
except ValueError as ex:
sys.stderr.write('%s\n' % ' '.join([str(e) for e in ex]))
sys.stderr.write("In sentence %s\n" % (sentence_id))
sys.exit()
return features
def get_width(dependencies, root, sentence_id):
assert(root in dependencies), "Wrong root: {}, dependencies: {}, sentence: {}".format(root, dependencies, sentence_id)
return len(dependencies[root])
def get_connection_features(dependencies, token_pos, language=None):
# clauses inventory
clauses_en = ['advcl', 'ccomp', 'pcomp', 'rcmod']
clauses_de = ['neb', 'objc', 'par', 'rel']
if language == 'en':
clauses = clauses_en
elif language == 'de':
clauses = clauses_de
else:
clauses = clauses_en + clauses_de
# subjects inventory
subject = ['nsubj', 'nsubjpass', 'subj']
verbs = 'V'
# number of subjects, number of verbs with dependent subject, number of dependent clauses
n_subj, n_verb_subj, n_clauses = 0, 0, 0
for head in dependencies:
for dep in dependencies[head]:
if dep['type'] in clauses:
n_clauses += 1
if dep['type'] in subject:
n_subj += 1
if token_pos[head].startswith(verbs):
n_verb_subj += 1
return [n_subj, n_verb_subj, n_clauses]
def get_pos(token_pos):
# POS tags for verbs, nouns, conjunctions
verbs = 'V'
nouns = 'N'
conjunctions = 'CC'
conj_de = 'konj'
# number of verbs, nouns, conjunctions
n_verbs, n_nouns, n_conj = 0, 0, 0
# sentence starts with a verb
start_verb = 0
for t in token_pos:
token_str = token_pos[t]
if token_str.startswith(verbs):
n_verbs += 1
if token_str.startswith(nouns):
n_nouns += 1
if token_str.startswith(conjunctions):
n_conj += 1
if token_str.startswith(conj_de):
n_conj += 1
if token_pos[1].startswith(verbs):
start_verb = 1
return [n_verbs, n_nouns, n_conj, start_verb]
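# Example (hypothetical 1-based POS map, as produced by the parsers below):
#     get_pos({1: 'VB', 2: 'NN', 3: 'CC'})  # -> [1, 1, 1, 1]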
# this procedure parses the Stanford CoreNLP XML output
# ROOT is a nonterminal
# which has one dependent -- the actual root (the predicate);
# all other words are connected to that root through the dependency arcs
def parse_xml(xml_output):
tree = ET.parse(xml_output)
root_el = tree.getroot()
sentences = []
for idx, sent in enumerate(root_el.getchildren()[0].getchildren()[0].getchildren()):
if idx % 1000 == 0:
sys.stderr.write('.')
sent_tokens = {}
sent_token_pos = {}
sent_dependencies = {}
sent_root = None
sent_id = sent.attrib['id']
for field in sent.getchildren():
# get tokens and their POS
if field.tag == 'tokens':
for token in field.getchildren():
word_id = int(token.attrib['id'])
for tok_field in token.getchildren():
if tok_field.tag == 'word':
sent_tokens[word_id] = tok_field.text
elif tok_field.tag == 'POS':
sent_token_pos[word_id] = tok_field.text
# parse dependencies
elif field.tag == 'dependencies' and field.attrib['type'] == 'basic-dependencies':
for dep in field.getchildren():
d_type = dep.attrib['type']
d_head, d_child = None, None
for d_field in dep.getchildren():
if d_field.tag == 'governor':
d_head = int(d_field.attrib['idx'])
elif d_field.tag == 'dependent':
d_child = int(d_field.attrib['idx'])
if d_head is None or d_child is None:
sys.stderr.write("Wrong dependency format\n")
sys.exit()
if d_head == 0:
sent_root = d_child
sent_dependencies[d_child] = []
else:
if d_head-1 not in sent_dependencies:
sent_dependencies[d_head-1] = []
sent_dependencies[d_head-1].append({'id': d_child-1, 'type': d_type})
sentences.append({'tokens': sent_tokens, 'pos': sent_token_pos, 'dependencies': sent_dependencies, 'root': sent_root, 'id': sent_id})
return sentences
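# Each parsed sentence is a dict of the form (values are illustrative):
#     {'tokens': {1: 'The', 2: 'cat'}, 'pos': {1: 'DT', 2: 'NN'},
#      'dependencies': {1: [{'id': 0, 'type': 'det'}]}, 'root': 2, 'id': '1'}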
# this is valid only for parsing the output of ParZu
# it generates a tree where ROOT is a nonterminal
# all predicates and punctuation marks are connected to it
def parse_conll(conll_file):
sentences = []
sent_tokens = {}
sent_token_pos = {}
sent_dependencies = {}
sent_root = 0
sent_id = 0
for line in open(conll_file):
if line == '\n' and len(sent_tokens) > 0:
sentences.append({'tokens': sent_tokens, 'pos': sent_token_pos, 'dependencies': sent_dependencies, 'root': sent_root, 'id': sent_id})
sent_tokens = {}
sent_token_pos = {}
sent_dependencies = {}
sent_id += 1
continue
chunks = line.decode('utf-8').strip('\n').split('\t')
word_id = int(chunks[0])
sent_tokens[word_id] = chunks[1]
sent_token_pos[word_id] = chunks[4]
sent_root = 0
d_head = int(chunks[6])
if d_head-1 not in sent_dependencies:
sent_dependencies[d_head-1] = []
sent_dependencies[d_head-1].append({'id': word_id-1, 'type': chunks[7]})
if len(sent_tokens) > 0:
sentences.append({'tokens': sent_tokens, 'pos': sent_token_pos, 'dependencies': sent_dependencies, 'root': sent_root, 'id': sent_id})
return sentences
def features_one_lang(sentences, language=None):
all_features = []
for idx, sent in enumerate(sentences):
if idx % 100 == 0:
sys.stderr.write('.')
sent_features = []
sent_features.extend(get_paths(sent['dependencies'], sent['id']))
sent_features.append(get_width(sent['dependencies'], sent['root'], sent['id']))
sent_features.extend(get_connection_features(sent['dependencies'], sent['pos'], language=language))
sent_features.extend(get_pos(sent['pos']))
all_features.append(sent_features)
return all_features
def extract_syntactic_features(file_src, file_tg, output_file, ext_src='xml', ext_tg='conll'):
sys.stderr.write('Parse source file %s\n' % file_src)
if ext_src == 'xml':
sentences_src = parse_xml(file_src)
elif ext_src == 'conll':
sentences_src = parse_conll(file_src)
sys.stderr.write('Parse target file %s\n' % file_tg)
if ext_tg == 'xml':
sentences_tg = parse_xml(file_tg)
elif ext_tg == 'conll':
sentences_tg = parse_conll(file_tg)
sys.stderr.write("Extract source features\n")
all_features_src = features_one_lang(sentences_src, language='en')
sys.stderr.write("Extract target features\n")
all_features_tg = features_one_lang(sentences_tg, language='de')
sys.stderr.write("Write syntactic features\n")
output = open(output_file, 'w')
for feat_src, feat_tg in zip(all_features_src, all_features_tg):
output.write('%s\t%s\n' % ('\t'.join([str(f) for f in feat_src]), '\t'.join([str(f) for f in feat_tg])))
if __name__ == "__main__":
language = None
if len(sys.argv) == 3:
language = sys.argv[2]
sys.stderr.write("Language -- %s\n" % str(language))
if sys.argv[1].endswith('xml'):
sys.stderr.write("Parsing xml file %s" % sys.argv[1])
sentences = parse_xml(sys.argv[1])
else:
sys.stderr.write("Parsing conll file %s" % sys.argv[1])
sentences = parse_conll(sys.argv[1])
sys.stderr.write("Parsing finished")
all_features = []
for idx, sent in enumerate(sentences):
if idx % 100 == 0:
sys.stderr.write('.')
sent_features = []
sent_features.extend(get_paths(sent['dependencies'], sent['id']))
sent_features.append(get_width(sent['dependencies'], sent['root'], sent['id']))
sent_features.extend(get_connection_features(sent['dependencies'], sent['pos'], language=language))
sent_features.extend(get_pos(sent['pos']))
all_features.append(sent_features)
for feat_list in all_features:
sys.stdout.write('%s\n' % '\t'.join([str(f) for f in feat_list]))
| 12,459 | 38.935897 | 229 | py |
marmot | marmot-master/marmot/util/persist_features.py | # persist features to file
# feature output formats are different depending upon the datatype
# if it's an ndarray, write to .csv
# if it's a list of lists, write to crf++ format, with a separate file containing the feature names
# if it's a dict, write to .json or pickle the object(?), write the feature names to a separate file
import os
import sys
import errno
import pandas as pd
import numpy as np
import logging
from marmot.experiment.import_utils import list_of_lists
from marmot.util.generate_crf_template import generate_crf_template
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('experiment_logger')
# write list of lists to file in CRF++ format (one item per line, empty line between sequences)
def write_lofl(lofl, filename):
a_file = open(filename, 'w')
for seq in lofl:
for it in seq:
a_file.write('%s\n' % str(it))
a_file.write('\n')
a_file.close()
# convert an arbitrary feature value to string
def val_to_str(f_val):
if type(f_val) is str:
return f_val
elif type(f_val) is unicode:
return f_val.encode('utf-8')
else:
return str(f_val)
# <word_tags> -- list of sequences of word-level tags
# if specified - should be saved to a separate file in CRF++ format
# <phrase_lengths> -- list of phrase lengths
# needed to be able to restore word-level tags from phrase-level
# if specified - should be saved to a separate file in CRF++ format
# TODO: check if matches the number of phrases
def persist_features(dataset_name, features, persist_dir, tags=None, feature_names=None, phrase_lengths=None, file_format='crf_suite'):
'''
persist the features to persist_dir -- use dataset_name as the prefix for the persisted files
:param dataset_name: prefix of the output file
:param features: dataset
:param persist_dir: directory of output file(s)
:param tags: tags for the dataset
:param feature_names: names of features in the dataset
:param file_format: format of the output file for sequences. Values -- 'crf++', 'crf_suite', 'svm_light'
    :return: path of the persisted feature file
'''
try:
os.makedirs(persist_dir)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(persist_dir):
pass
else:
raise
if file_format == 'crf_suite' and feature_names is None:
print("Feature names are required to save features in CRFSuite and SVMLight formats")
return
# for the 'plain' datatype
if type(features) == np.ndarray and features.shape[1] == len(feature_names):
output_df = pd.DataFrame(data=features, columns=feature_names)
output_path = os.path.join(persist_dir, dataset_name + '.csv')
output_df.to_csv(output_path, index=False)
logger.info('saved features in: {} to file: {}'.format(dataset_name, output_path))
# for the 'sequential' datatype
elif list_of_lists(features):
if file_format == 'svm_light':
feature_names = range(1, len(features[0]) + 1)
output_path = os.path.join(persist_dir, dataset_name + '.svm')
output = open(output_path, 'w')
tags_map = {'OK': '+1', 'BAD': '-1'}
for a_tag, feat_seq in zip(tags, features):
feat_list = []
for f_name, f_val in zip(feature_names, feat_seq):
try:
if float(f_val) != 0.0:
feat_list.append(str(f_name) + ':' + val_to_str(f_val))
except ValueError:
feat_list.append(str(f_name) + ':' + val_to_str(f_val))
output.write("%s %s\n" % (tags_map[a_tag], ' '.join(feat_list)))
return
output_path = os.path.join(persist_dir, dataset_name + '.crf')
output = open(output_path, 'w')
if tags is not None:
assert(len(features) == len(tags)), "Different numbers of tag and feature sequences"
for s_idx, (seq, tag_seq) in enumerate(zip(features, tags)):
assert(len(seq) == len(tag_seq)), "Lengths of tag and feature sequences don't match in sequence {}: {} and {} ({} and {})".format(s_idx, len(seq), len(tag_seq), seq, tag_seq)
for w_idx, (feature_list, tag) in enumerate(zip(seq, tag_seq)):
if len(feature_list) != len(feature_names):
print(feature_list)
print(feature_names)
sys.exit()
tag = str(tag)
feature_str = []
for f in feature_list:
if type(f) == unicode:
feature_str.append(f.encode('utf-8'))
# else:
# feature_str.append(str(f))
else:
feature_str.append(f)
if file_format == 'crf++':
feature_str = '\t'.join([str(f) for f in feature_str])
output.write('%s\t%s\n' % (feature_str, tag))
elif file_format == 'crf_suite':
feature_str_all = []
for i in range(len(feature_str)):
# if isinstance(feature_str[i], (int, float, np.float32, np.float64, np.int32, np.int64)):
# feature_str_all.append(feature_names[i] + '=1:' + str(feature_str[i]))
# else:
feature_str_all.append(feature_names[i] + '=' + str(feature_str[i]))
# feature_str = [feature_names[i] + '=' + feature_str[i] for i in range(len(feature_str))]
feature_str = '\t'.join(feature_str_all)
output.write("%s\t%s\n" % (tag, feature_str))
else:
print("Unknown data format:", file_format)
return False
output.write("\n")
else:
for s_idx, seq in enumerate(features):
for w_idx, feature_list in enumerate(seq):
#assert(len(seq) == len(feature_names)), "Wrong number of features in sequence %d, word %d" % (s_idx, w_idx)
feature_str = []
for f in feature_list:
if type(f) == unicode:
feature_str.append(f.encode('utf-8'))
# else:
# feature_str.append(str(f))
else:
feature_str.append(f)
if file_format == 'crf++':
feature_str = '\t'.join([str(f) for f in feature_str])
elif file_format == 'crf_suite':
# feature_str = [feature_names[i] + '=' + feature_str[i] for i in range(len(feature_str))]
feature_str_all = []
for i in range(len(feature_str)):
# if isinstance(feature_str[i], (int, float, np.float32, np.float64, np.int32, np.int64)):
# feature_str_all.append(feature_names[i] + '=1:' + str(feature_str[i]))
# else:
feature_str_all.append(feature_names[i] + '=' + str(feature_str[i]))
feature_str = '\t'.join(feature_str_all)
else:
print("Unknown data format:", file_format)
return False
output.write("%s\n" % feature_str)
output.write("\n")
if feature_names is not None:
output_features = open(os.path.join(persist_dir, dataset_name + '.features'), 'w')
for f_name in feature_names:
output_features.write("%s\n" % f_name.encode('utf-8'))
output_features.close()
output.close()
# write phrase lengths
if phrase_lengths is not None:
write_lofl(phrase_lengths, os.path.join(persist_dir, dataset_name + '.phrase-lengths'))
# generate CRF++ template
if file_format == 'crf++':
feature_num = len(features[0][0])
generate_crf_template(feature_num, tmp_dir=persist_dir)
return output_path
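# Example usage (a minimal sketch -- paths, features and tags are hypothetical):
#     persist_features('train', [[['NN', 1.0], ['VB', 0.5]]], 'tmp_dir',
#                      tags=[['OK', 'BAD']], feature_names=['pos', 'score'],
#                      file_format='crf_suite')
#     # -> writes tmp_dir/train.crf with lines like "OK<TAB>pos=NN<TAB>score=1.0"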
| 8,421 | 47.682081 | 190 | py |
marmot | marmot-master/marmot/util/random_context_creator.py | import random
from context_creator import ContextCreator
# returns a random TARGET context for the wordset and parameters supplied to the constructor
class RandomContextCreator(ContextCreator):
def __init__(self, word_list, num_contexts=5000, length_bounds=[6,12], tagset=set([0])):
self.word_list = set(word_list)
self.num_contexts = num_contexts
self.length_bounds = length_bounds
self.tagset = set(tagset)
def get_contexts(self, token):
return [self.build_context_obj(token) for i in range(self.num_contexts)]
def random_context(self):
rand_length = random.randint(self.length_bounds[0],self.length_bounds[1])
# casting the set to a tuple makes this faster apparently
rand_words = [random.choice(tuple(self.word_list)) for i in range(rand_length)]
return rand_words
def build_context_obj(self, token):
rand_context = self.random_context()
# get the index of the token after we know the length of the random context
rand_idx = random.randint(0, len(rand_context)-1)
# substitute that index for our token
rand_context[rand_idx] = token
# casting the set to a tuple makes this faster apparently
random_tag = random.choice(tuple(self.tagset))
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
new_obj = { 'token': token, 'index': rand_idx, 'target': rand_context, 'tag': random_tag }
return new_obj
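# Example usage (hypothetical vocabulary):
#     rcc = RandomContextCreator(['a', 'b', 'c'], num_contexts=2, length_bounds=[3, 5])
#     rcc.get_contexts('b')  # -> two context dicts, each with 'b' at a random index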
| 1,529 | 41.5 | 112 | py |
marmot | marmot-master/marmot/util/pos_tagging.py | import os
import time
from subprocess import Popen
def get_random_name(suffix=''):
return 'tmp_'+suffix+str(time.time())
def get_pos_tagging(src, tagger, par_file, tmp_dir):
print tmp_dir
    # tokenize and add the sentence end marker
# tokenization is done with nltk
tmp_tokenized_name = os.path.join(tmp_dir, get_random_name('tok'))
    tmp_tok = open(tmp_tokenized_name, 'w+')
for words in src:
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = os.path.join(tmp_dir, get_random_name('tag'))
    tmp_tagged = open(tmp_tagged_name, 'w+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
# TODO: if it's not of this format, it could be the end of sequence (empty string) or an error
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
# delete all temporary files
os.remove(tmp_tokenized_name)
os.remove(tmp_tagged_name)
return output
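# Example (hypothetical TreeTagger paths; requires a local TreeTagger install):
#     get_pos_tagging([[u'a', u'boy']], '/path/to/tree-tagger',
#                     '/path/to/english.par', 'tmp_dir')
#     # -> [[u'DT', u'NN']] (one POS tag per input token)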
# same as get_pos_tagging() but reads the corpus from file
def get_pos_tagging_file(src_file, tagger, par_file, tmp_dir):
print tmp_dir
    # tokenize and add the sentence end marker
# tokenization is done with nltk
tmp_tokenized_name = os.path.join(tmp_dir, get_random_name('tok'))
    tmp_tok = open(tmp_tokenized_name, 'w+')
for line in open(src_file):
words = line.strip('\n').decode('utf-8').split()
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = os.path.join(tmp_dir, get_random_name('tag'))
    tmp_tagged = open(tmp_tagged_name, 'w+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
# TODO: if it's not of this format, it could be the end of sequence (empty string) or an error
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
# delete all temporary files
os.remove(tmp_tokenized_name)
os.remove(tmp_tagged_name)
return output
| 3,105 | 32.042553 | 102 | py |
marmot | marmot-master/marmot/util/context_creator.py | from abc import ABCMeta, abstractmethod
# this is an abstract class which extracts contexts according to a user-provided implementation
# a negative context is a context that is representative of a WRONG usage of a word
# a negative context for a word may have nothing to do with a positive context (i.e. it may just be random)
class ContextCreator(object):
__metaclass__ = ABCMeta
# subclasses must provide the implementation
@abstractmethod
def get_contexts(self, token, max_size=None):
pass
| 523 | 31.75 | 107 | py |
marmot | marmot-master/marmot/util/generate_crf_template.py | from __future__ import print_function
import os
# generates a template for crf++ feature extractor: all columns will be used as features,
# no combinations of columns, no contexts (it should already be in original feature set)
def generate_crf_template(feature_num, template_name='template', tmp_dir='tmp_dir'):
if not os.path.isdir(tmp_dir):
print("Wrong temporary directory: ", tmp_dir)
return
template = open(os.path.join(tmp_dir, template_name), 'w')
print("Saving template to ", os.path.join(tmp_dir, template_name))
template.write('# Unigram\n')
for i in range(feature_num):
a_str = 'U{}:%x[0,{}]'.format(i, i)
template.write('%s\n' % a_str)
template.write('\n# Bigram\nB')
template.close()
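# For feature_num=2 the generated template file looks like:
#     # Unigram
#     U0:%x[0,0]
#     U1:%x[0,1]
#
#     # Bigram
#     B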
| 760 | 39.052632 | 90 | py |
marmot | marmot-master/marmot/util/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/util/simple_corpus.py | #!/usr/bin/env python
#encoding: utf-8
from __future__ import division, print_function
from gensim import utils, corpora
import numpy as np
import codecs
from nltk.tokenize import word_tokenize, WhitespaceTokenizer
from scipy import sparse
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('testlogger')
class SimpleCorpus(corpora.TextCorpus):
# make sure that the token2id dict gets created
def __init__(self, corpus_file):
# logger.info('corpus file:'
# print(type(corpus_file))
corpus_file = codecs.open(corpus_file, encoding='utf8')
super(SimpleCorpus, self).__init__(corpus_file)
self.dictionary.id2token = {v: k for k,v in self.dictionary.token2id.items()}
def get_texts(self):
"""
Parse documents from the file provided in the constructor.
format: one document per line.
nltk is used for tokenization
"""
with self.getstream() as stream:
for doc in stream:
# yield [word for word in word_tokenize(utils.to_unicode(doc).lower())]
yield [word for word in word_tokenize(utils.to_unicode(doc))]
def get_texts_raw(self):
"""
Parse documents analogously to SimpleCorpus.get_texts(),
but tokenized by whitespace only
"""
wst = WhitespaceTokenizer()
with self.getstream() as stream:
for doc in stream:
yield [word for word in wst.tokenize(utils.to_unicode(doc))]
def __len__(self):
"""Define this so we can use `len(corpus)`"""
if 'length' not in self.__dict__:
self.length = sum(1 for doc in self.get_texts())
return self.length
# get a one-hot representation as a numpy array using the corpus indices
def binary_vec_from_list(self, token_list):
vec = np.zeros(len(self.dictionary.keys()), dtype=np.int)
if token_list is not None:
for tok in token_list:
try:
col_idx = self.dictionary.token2id[tok]
vec[col_idx] = 1
except KeyError:
pass
vvec = sparse.csr_matrix( vec )
# print( "Binary vector: ", vvec.shape )
return vvec
# build a corpus from a file (one document per line)
def build_corpus(filename):
return SimpleCorpus(filename)
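# Example usage (hypothetical corpus file, one document per line):
#     corpus = build_corpus('corpus.en')
#     vec = corpus.binary_vec_from_list(['the'])  # 1 x |vocab| sparse one-hot row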
| 2,452 | 31.706667 | 91 | py |
marmot | marmot-master/marmot/util/ngram_window_extractor.py | #!/usr/bin/env python
#encoding: utf-8
'''
@author: Chris Hokamp
@contact: [email protected]
'''
# this only finds the first instance of the token in the sentence (see the idx= keyword arg of the extract_window function)
def locate_token(token, sentence):
try:
i = sentence.index(token)
return i
except ValueError:
return -1
# window_size is the window on both left and right of the token of interest
def extract_window(token_list, token, window_size=1, with_token=True, idx=None):
    index = idx if idx is not None else locate_token(token, token_list)
if index != -1:
if with_token:
context_args = [token_list, token, window_size, index]
window = left_context(*context_args) + [token] + right_context(*context_args)
return window
else:
context_args = [token_list, token, window_size, index]
window = left_context(*context_args) + right_context(*context_args)
return window
return None
def left_context(token_list, token, context_size=1, idx=None):
    index = idx if idx is not None else locate_token(token, token_list)
left_window = []
if index != -1:
for i in range(index-context_size, index):
if i < 0:
# left_window.append('_START_')
left_window.append('<s>')
else:
left_window.append(token_list[i])
return left_window
def right_context(token_list, token, context_size=1, idx=None):
    index = idx if idx is not None else locate_token(token, token_list)
right_window = []
if index != -1:
for i in range(index+1, index+context_size+1):
if i > len(token_list)-1:
# right_window.append('_END_')
right_window.append('</s>')
else:
right_window.append(token_list[i])
return right_window
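# Examples (hypothetical sentence):
#     extract_window(['a', 'boy', 'hits'], 'boy', window_size=1)
#     # -> ['a', 'boy', 'hits']
#     left_context(['a', 'boy', 'hits'], 'a', context_size=2)
#     # -> ['<s>', '<s>'] (positions before the sentence start are padded)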
| 1,850 | 30.372881 | 123 | py |
marmot | marmot-master/marmot/util/corpus_context_creator.py | # a corpus_context_creator gets its contexts from a corpus of instances
# the list of contexts presumably comes from the parser for this particular corpus
from collections import defaultdict
from context_creator import ContextCreator
class CorpusContextCreator(ContextCreator):
"""
build a corpus from a list of context_obj
- a corpus is indexed by the 'token' field of the context objects
"""
# { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
def __init__(self, all_contexts, max_instances=10000):
self.context_map = defaultdict(list)
for context in all_contexts:
if max_instances is not None:
                if len(self.context_map[context['token']]) < max_instances:
self.context_map[context['token']].append(context)
else:
self.context_map[context['token']].append(context)
def get_contexts(self, token, max_size=None):
if max_size is not None:
return self.context_map[token][:max_size]
else:
return self.context_map[token]
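# Example (context objects follow the format sketched above):
#     ccc = CorpusContextCreator([{'token': u'and', 'index': 0,
#                                  'target': [u'and'], 'source': None, 'tag': 1}])
#     ccc.get_contexts(u'and', max_size=5)  # -> the single stored context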
# TODO: get a list of contexts from a list of tokens
| 1,195 | 33.171429 | 108 | py |
marmot | marmot-master/marmot/util/force_align.py | #!/usr/bin/env python
#
# This code is partially taken from the force_align.py script of cdec project
#
import os
import subprocess
import sys
import threading
# Simplified, non-threadsafe version for force_align.py
# Use the version in realtime for development
class Aligner:
def __init__(self, fwd_params, fwd_err, rev_params, rev_err, heuristic='grow-diag-final-and'):
if not os.environ.has_key('CDEC_HOME'):
sys.stderr.write('CDEC_HOME not specified\n')
sys.exit(2)
cdec_root = os.environ['CDEC_HOME']
fast_align = os.path.join(cdec_root, 'word-aligner', 'fast_align')
atools = os.path.join(cdec_root, 'utils', 'atools')
(fwd_T, fwd_m) = self.read_err(fwd_err)
(rev_T, rev_m) = self.read_err(rev_err)
fwd_cmd = [fast_align, '-i', '-', '-d', '-T', fwd_T, '-m', fwd_m, '-f', fwd_params]
rev_cmd = [fast_align, '-i', '-', '-d', '-T', rev_T, '-m', rev_m, '-f', rev_params, '-r']
tools_cmd = [atools, '-i', '-', '-j', '-', '-c', heuristic]
self.fwd_align = popen_io(fwd_cmd)
self.rev_align = popen_io(rev_cmd)
self.tools = popen_io(tools_cmd)
def align(self, line):
self.fwd_align.stdin.write('{}\n'.format(line.encode('utf-8')))
self.rev_align.stdin.write('{}\n'.format(line.encode('utf-8')))
# f words ||| e words ||| links ||| score
fwd_line = self.fwd_align.stdout.readline().split('|||')[2].strip()
rev_line = self.rev_align.stdout.readline().split('|||')[2].strip()
self.tools.stdin.write('{}\n'.format(fwd_line))
self.tools.stdin.write('{}\n'.format(rev_line))
al_line = self.tools.stdout.readline().strip()
return al_line
def close(self):
self.fwd_align.stdin.close()
self.fwd_align.wait()
self.rev_align.stdin.close()
self.rev_align.wait()
self.tools.stdin.close()
self.tools.wait()
def read_err(self, err):
(T, m) = ('', '')
for line in open(err):
# expected target length = source length * N
if 'expected target length' in line:
m = line.split()[-1]
# final tension: N
elif 'final tension' in line:
T = line.split()[-1]
return (T, m)
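# The *.err logs written by fast_align contain lines like (illustrative):
#     expected target length = source length * 0.85
#     final tension: 4.25
# read_err() recovers m=0.85 and T=4.25 from them.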
def popen_io(cmd):
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def consume(s):
for _ in s:
pass
threading.Thread(target=consume, args=(p.stderr,)).start()
return p
def main():
if len(sys.argv[1:]) < 4:
sys.stderr.write('run:\n')
sys.stderr.write(' fast_align -i corpus.f-e -d -v -o -p fwd_params >fwd_align 2>fwd_err\n')
sys.stderr.write(' fast_align -i corpus.f-e -r -d -v -o -p rev_params >rev_align 2>rev_err\n')
sys.stderr.write('\n')
sys.stderr.write('then run:\n')
sys.stderr.write(' {} fwd_params fwd_err rev_params rev_err [heuristic] <in.f-e >out.f-e.gdfa\n'.format(sys.argv[0]))
sys.stderr.write('\n')
sys.stderr.write('where heuristic is one of: (intersect union grow-diag grow-diag-final grow-diag-final-and) default=grow-diag-final-and\n')
sys.exit(2)
aligner = Aligner(*sys.argv[1:])
while True:
line = sys.stdin.readline()
if not line:
break
line = line[:-1].decode('utf-8')
sys.stdout.write('{}\n'.format(aligner.align(line.strip())))
sys.stdout.flush()
aligner.close()
if __name__ == '__main__':
main()
| 3,582 | 33.786408 | 148 | py |
marmot | marmot-master/marmot/util/call_alignment.py | from __future__ import print_function
import sys
from marmot.util.alignments import align_files
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: python call_alignment.py src_file tg_file model")
sys.exit()
align_files(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[1]+'.align')
| 320 | 28.181818 | 76 | py |
marmot | marmot-master/marmot/util/tests/test_random_context_creator.py | import unittest, os
from marmot.util.random_context_creator import RandomContextCreator
class TestRunExperiment(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
self.target_vocabulary = set(['one', 'two', 'three', 'four', 'five'])
        # def __init__(self, word_list, num_contexts=5000, length_bounds=[6,12], tagset=set([0])):
self.random_cc = RandomContextCreator(self.target_vocabulary, num_contexts=10)
def test_get_contexts(self):
a_context = self.random_cc.get_contexts('apple')
print(a_context)
# we initialized with num_contexts=10
self.assertTrue(len(a_context) == 10)
if __name__ == '__main__':
unittest.main()
| 743 | 34.428571 | 86 | py |
marmot | marmot-master/marmot/util/tests/__init__.py | __author__ = 'chris'
| 21 | 10 | 20 | py |
marmot | marmot-master/marmot/util/tests/test_context_creator.py | # TODO: stub - implement
import unittest, os
from marmot.util.corpus_context_creator import CorpusContextCreator
class TestRunExperiment(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
# create the set of tokens we're interested in
self.important_tokens = set(['and', 'the'])
# create a testing dataset
test_contexts = [ \
{'index': 17, 'token': u'and', 'tag': 1, 'target': [u'so', u',', u'ladies', u'and', u'gentlemen', u',', u'i', u'should', u'like', u'in', u'a', u'moment', u'to', u'return', u'to', u'the', u'role', u'and', u'structure', u'of', u'the', u'guidelines', u'before', u'mentioning', u'the', u'principal', u'comments', u'and', u'criticisms', u'that', u'you', u',', u'mrs', u'schroedter', u',', u'and', u'the', u'various', u'members', u'of', u'this', u'house', u',', u'have', u'made', u'.'], 'source': None}, \
{'index': 3, 'token': u'and', 'tag': 1, 'target': [u'genuine', u'structural', u'reforms', u'and', u'a', u'competition', u'-', u'friendly', u'taxation', u'policy', u'are', u'the', u'cornerstones', u'of', u'a', u'successful', u'economic', u'base', u'.'], 'source': None}, \
{'index': 23, 'token': u'and', 'tag': 1, 'target': [u'even', u'the', u'accumulation', u'of', u'money', u'from', u'the', u'cohesion', u'funds', u'and', u'the', u'structural', u'funds', u'has', u'failed', u'to', u'have', u'the', u'desired', u'effect', u'in', u'all', u'regions', u'and', u'countries', u'.'], 'source': None}, \
{'index': 34, 'token': u'and', 'tag': 1, 'target': [u'the', u'commission', u'report', u'is', u'essentially', u'a', u'descriptive', u'report', u'detailing', u'the', u'development', u'of', u'state', u'aid', u'in', u'the', u'manufacturing', u'sector', u'and', u'certain', u'other', u'sectors', u',', u'according', u'to', u'various', u'typologies', u',', u'such', u'as', u'the', u'method', u'of', u'financing', u'and', u'the', u'objectives', u'pursued', u'.'], 'source': None}, \
{'index': 5, 'token': u'the', 'tag': 1, 'target': [u'finally', u',', u'we', u'ask', u'that', u'the', u'commission', u'ensures', u'that', u'structural', u'fund', u'monies', u'are', u'spent', u'in', u'a', u'way', u'which', u'is', u'transparent', u'.'], 'source': None}, \
{'index': 10, 'token': u'the', 'tag': 1, 'target': [u'that', u'way', u'the', u'much', u'-', u'trumpeted', u'need', u'for', u'transparency', u'in', u'the', u'use', u'of', u'these', u'funds', u'and', u'the', u'temptation', u'to', u'draw', u'unnecessarily', u'in', u'the', u'longer', u'term', u'on', u'the', u'local', u'tax', u'base', u'in', u'areas', u'where', u'such', u'projects', u'are', u'located', u'will', u'be', u'diminished', u'and', u'the', u'european', u'parliament', u'will', u'show', u'how', u'seriously', u'it', u'takes', u'the', u'need', u'for', u'such', u'reform', u'.'], 'source': None}, \
]
# build a corpus context creator from our contexts
self.corpus_cc = CorpusContextCreator(test_contexts)
def test_get_contexts(self):
and_contexts = self.corpus_cc.get_contexts('and')
        # four of the six test contexts contain the token 'and'
self.assertTrue(len(and_contexts) == 4)
if __name__ == '__main__':
unittest.main()
| 3,323 | 96.764706 | 615 | py |
marmot | marmot-master/marmot/exceptions/no_data_error.py | class NoDataError(Exception):
def __init__(self, field, obj, module):
message = "Missing field '" + field + "' in the object " + str(obj) + " needed in " + module
super(NoDataError, self).__init__(message)
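# Example (context_obj here is whatever object was missing the field):
#     raise NoDataError('alignments', context_obj, 'AlignmentFeatureExtractor')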
| 227 | 37 | 100 | py |
marmot | marmot-master/marmot/exceptions/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/exceptions/no_resource_error.py | class NoResourceError(Exception):
def __init__(self, resource, module):
message = "No " + resource + " provided in " + str(module)
super(NoResourceError, self).__init__(message)
| 198 | 38.8 | 66 | py |
marmot | marmot-master/marmot/exceptions/tests/test_features.py | import unittest
import yaml
import os
from marmot.features.alignment_feature_extractor import AlignmentFeatureExtractor
from marmot.features.pos_feature_extractor import POSFeatureExtractor
from marmot.features.google_translate_feature_extractor import GoogleTranslateFeatureExtractor
from marmot.features.source_lm_feature_extractor import SourceLMFeatureExtractor
from marmot.exceptions.no_data_error import NoDataError
from marmot.exceptions.no_resource_error import NoResourceError
class FeatureExtractorErrorTest(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
# test_config = os.path.join(module_path, 'test_data/test_config.yaml')
#
# with open(test_config, "r") as cfg_file:
# self.config = yaml.load(cfg_file.read())
def test_alignment_no_source(self):
alignmentFE = AlignmentFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoDataError):
alignmentFE.get_features(obj)
def test_alignment_no_target(self):
alignmentFE = AlignmentFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'source':[u'un', u'garcon',u'frappe', u'un', u'chien']}
with self.assertRaises(NoDataError):
alignmentFE.get_features(obj)
def test_alignment_no_alignments(self):
alignmentFE = AlignmentFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien']}
with self.assertRaises(NoDataError):
alignmentFE.get_features(obj)
def test_pos_no_source(self):
posFE = POSFeatureExtractor(tagger=os.path.join(self.module_path, '../../experiment/tiny_test/tree-tagger'), par_file_src=os.path.join(self.module_path, '../../experiment/tiny_test/spanish-par-linux-3.2-utf8.bin'), par_file_tg=os.path.join(self.module_path, '../../experiment/tiny_test/english-utf8.par'))
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoDataError):
posFE.get_features(obj)
def test_pos_no_target(self):
posFE = POSFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'source':[u'un', u'garcon',u'frappe', u'un', u'chien']}
with self.assertRaises(NoDataError):
posFE.get_features(obj)
def test_pos_no_tagger(self):
posFE = POSFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoResourceError):
posFE.get_features(obj)
def test_pos_no_tagger_params(self):
posFE = POSFeatureExtractor(tagger='../../experiment/tiny_test/tree-tagger')
obj = {'token':u'hits', 'index':2, 'source':[u'un', u'garcon',u'frappe', u'un', u'chien'], 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoResourceError):
posFE.get_features(obj)
def test_google_no_source(self):
gtFE = GoogleTranslateFeatureExtractor()
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoDataError):
gtFE.get_features(obj)
def test_source_lm_no_source(self):
slmFE = SourceLMFeatureExtractor(os.path.join(self.module_path, '../../experiment/tiny_test/europarl.1000.en'))
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog']}
with self.assertRaises(NoDataError):
slmFE.get_features(obj)
def test_source_lm_no_alignments(self):
slmFE = SourceLMFeatureExtractor(os.path.join(self.module_path, '../../experiment/tiny_test/europarl.1000.en'))
obj = {'token':u'hits', 'index':2, 'target':[u'a',u'boy',u'hits',u'a',u'dog'], 'source':[u'un', u'garcon',u'frappe', u'un', u'chien']}
with self.assertRaises(NoDataError):
slmFE.get_features(obj)
if __name__ == '__main__':
unittest.main()
| 4,172 | 45.88764 | 313 | py |
marmot | marmot-master/marmot/parsers/parser.py | from abc import ABCMeta, abstractmethod
# A parser takes one or more filenames and (optionally) keys
# returns an object containing keys which each point to a list of lists
class Parser(object):
__metaclass__ = ABCMeta
# subclasses must provide the implementation
# the flexible args and kwargs are not ideal here, but we need to keep parsers very flexible
# another problem with this approach is that we don't need to initialize most parsers
# it might be better to use a @classmethod, or @staticmethod
@abstractmethod
def parse(self, *args, **kwargs):
pass
| 598 | 36.4375 | 96 | py |
marmot | marmot-master/marmot/parsers/whitespace_tokenized_parser.py | # parse a whitespace tokenized file, return an object with the user specified key identifying the parsed data
from parser import Parser
import codecs
from nltk.tokenize import WhitespaceTokenizer
class WhitespaceTokenizedParser(Parser):
def parse(self, corpus_filename, key):
assert type(corpus_filename) == str, 'the filename must be a string'
assert type(key) == str, 'the key must be a string'
wst = WhitespaceTokenizer()
        with codecs.open(corpus_filename, encoding='utf8') as input_file:
            corpus = [wst.tokenize(l) for l in input_file]
return {key: corpus}
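# Example usage (hypothetical corpus file):
#     data = WhitespaceTokenizedParser().parse('corpus.en', 'target')
#     # -> {'target': [['first', 'sentence'], ['second', 'sentence'], ...]}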
| 610 | 32.944444 | 109 | py |
marmot | marmot-master/marmot/parsers/parsers.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# A parser takes some input, and returns a list of contexts in the format: { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
# return a context object from an iterable of contexts, and a set of interesting tokens
from marmot.util.simple_corpus import SimpleCorpus
from collections import defaultdict, Counter
from nltk import word_tokenize
# TODO: begin word-specific classifiers
# TODO: this belongs in utils
def extract_important_tokens(corpus_file, min_count=1):
corpus = SimpleCorpus(corpus_file)
word_counts = defaultdict(int)
for context in corpus.get_texts():
for word in context:
word_counts[word] += 1
return set([k for k,v in word_counts.items() if v >= min_count])
# extract important tokens from WMT test format
def extract_important_tokens_wmt(corpus_file, min_count=1):
all_words = []
for line in open(corpus_file):
all_words.append(line.decode('utf-8').split('\t')[2])
word_counts = Counter(all_words)
return set([k for k,v in word_counts.items() if v > min_count])
def create_new_instance(token=None, idx=None, source=None, target=None, label=None):
return {'token': token, 'index': idx, 'source': source, 'target': target, 'tag': label}
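# Example (illustrative):
#     create_new_instance(u'boy', 1, target=[u'a', u'boy'], label=1)
#     # -> {'token': u'boy', 'index': 1, 'source': None,
#     #     'target': [u'a', u'boy'], 'tag': 1}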
# by default, this function returns positive contexts (tag=1), but you can specify other tags if you wish
def list_of_target_contexts(contexts, interesting_tokens, tag=1):
token_contexts = []
for doc in contexts:
for idx, tok in enumerate(doc):
if interesting_tokens is None or tok in interesting_tokens:
token_contexts.append(create_new_instance(tok, idx, target=doc, label=tag))
return token_contexts
# get all of the bad contexts in a list of contexts
def list_of_bad_contexts(contexts, labels, interesting_tokens=None):
token_contexts = []
for doc in contexts:
label_list = [unicode(l) for l in labels.next()]
for (idx, (tok,label)) in enumerate(zip(doc,label_list)):
if (interesting_tokens is None or tok in interesting_tokens) and label == 'B':
token_contexts.append(create_new_instance(tok, idx, target=doc, label=0))
return token_contexts
def parse_corpus_contexts(corpus_file, interesting_tokens=None, tag=1):
corpus = SimpleCorpus(corpus_file)
return list_of_target_contexts(corpus.get_texts(), interesting_tokens, tag=tag)
# TODO: end word-specific classifiers
def get_corpus_file(corpus_file, label):
corpus = SimpleCorpus(corpus_file)
return (label, corpus.get_texts())
from itertools import groupby
import codecs
# matching sentences may require us to include the sen id
# if source is not provided, pass an empty string ('') as <source_file>
# TODO: this doesn't conform to the new parser API -- returns a list of contexts
def parse_wmt14_data(corpus_file, source_file, interesting_tokens=None):
# recover sentences from a .tsv with senids and tokens (wmt14 format)
def group_by_senid(filename):
rows = []
for l in codecs.open(filename, encoding='utf8'):
rows.append(l.rstrip().split('\t'))
sens = []
# group by sentence id and order by word index so that we can extract the contexts
for key, group in groupby(rows, lambda x: x[0]):
sen = list(group)
sens.append(sen)
return sens
def extract_word_exchange_format(wmt_contexts, source=None, interesting_tokens=None):
word_exchange_format = []
for i,context in enumerate(wmt_contexts):
target_sen = [w[2] for w in context]
source_sen = source[i] if source else None
for row in context:
obj_items = []
word = row[2]
obj_items.append(('index', int(row[1])))
obj_items.append(('token', word))
obj_items.append(('tag', row[5]))
obj_items.append(('target', target_sen))
if source:
obj_items.append(('source', source_sen))
if not interesting_tokens or word in interesting_tokens:
word_exchange_format.append({ k:val for (k, val) in obj_items })
return word_exchange_format
sen_groups = group_by_senid(corpus_file)
source_sen_groups = None
if source_file != '':
source_sen_groups = [word_tokenize(line[:-1].split('\t')[1]) for line in codecs.open(source_file, encoding='utf-8') ]
wef = extract_word_exchange_format(sen_groups, source=source_sen_groups, interesting_tokens=interesting_tokens)
return wef
# semeval format
# A parser takes some input, and returns a list of contexts in the format: { 'token': <token>, index: <idx>, 'source': [<source toks>]', 'target': [<target toks>], 'tag': <tag>}
# semeval input looks like: <sen1>TAB<sen2>
# the scores are in a separate *.gs.* file
# TODO: this currently removes stopwords by default (despite the stops=False)
# TODO: this doesn't conform to the new parser API -- returns a list of contexts
import re
import nltk
from nltk.corpus import stopwords
english_stops = stopwords.words('english')
def parse_semeval(inputfile, scoresfile, stops=False):
# this code taken from the takelab 2012 STS framework
def fix_compounds(a, b):
sb = set(x.lower() for x in b)
a_fix = []
la = len(a)
i = 0
while i < la:
if i + 1 < la:
comb = a[i] + a[i + 1]
if comb.lower() in sb:
a_fix.append(a[i] + a[i + 1])
i += 2
continue
a_fix.append(a[i])
i += 1
return a_fix
def load_data(path, scores):
lines = list(open(path))
if scores is not None:
scores = [float(x) for x in open(scores)]
else:
scores = [0. for i in range(len(lines))]
training_data = []
assert len(scores) == len(lines), 'the scores file and the text file should have the same number of lines'
r1 = re.compile(r'\<([^ ]+)\>')
r2 = re.compile(r'\$US(\d)')
for (l, score) in zip(open(path), scores):
l = l.decode('utf-8')
l = l.replace(u'’', "'")
l = l.replace(u'``', '"')
l = l.replace(u"''", '"')
l = l.replace(u"—", '--')
l = l.replace(u"–", '--')
l = l.replace(u"´", "'")
l = l.replace(u"-", " ")
l = l.replace(u"/", " ")
l = r1.sub(r'\1', l)
l = r2.sub(r'$\1', l)
if stops:
sa, sb = tuple(nltk.word_tokenize(s) for s in l.strip().split('\t'))
sa = [w for w in sa if w not in english_stops]
sb = [w for w in sb if w not in english_stops]
else:
sa, sb = tuple(nltk.word_tokenize(s) for s in l.strip().split('\t'))
sa, sb = ([x.encode('utf-8') for x in sa],
[x.encode('utf-8') for x in sb])
for s in (sa, sb):
for i in xrange(len(s)):
if s[i] == "n't":
s[i] = "not"
elif s[i] == "'m":
s[i] = "am"
sa, sb = fix_compounds(sa, sb), fix_compounds(sb, sa)
training_data.append({'source': sa, 'target': sb, 'tag': score})
return training_data
return load_data(inputfile, scoresfile)
| 7,529 | 39.702703 | 178 | py |
marmot | marmot-master/marmot/parsers/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/parsers/generators_temp.py |
# WORKING - move representation generators out of parsers file
# TODO: these are for generating the representation
from marmot.util.force_align import Aligner
from marmot.util.alignments import train_alignments
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# copy of the function from parsers.py, but it writes the tagging to the file
# TODO: this is specifically for tree tagger, it's not a general pos tagging function
# TODO: this function should be a preprocessing option -- it's actually a representation generator
# and returns the filename
def get_pos_tagging(src_file, tagger, par_file, label):
print("Start tagging", src_file)
# tokenize and add the sentence end marker
# tokenization is done with nltk
tmp_tokenized_name = get_random_name(prefix='_tok')
tmp_tok = open(tmp_tokenized_name, 'wr+')
for line in open(src_file):
words = word_tokenize(line[:-1].decode('utf-8'))
tmp_tok.write('%s\nSentenceEndMarker\n' % '\n'.join([w.encode('utf-8') for w in words]))
tmp_tok.seek(0)
# pass to tree-tagger
tmp_tagged_name = get_random_name(prefix='_tag')
tmp_tagged = open(tmp_tagged_name, 'wr+')
tagger_call = Popen([tagger, '-token', par_file], stdin=tmp_tok, stdout=tmp_tagged)
tagger_call.wait()
tmp_tagged.seek(0)
# remove sentence markers, restore sentence structure
tmp_final_name = get_random_name(prefix='_final')
tmp_final = open(tmp_final_name, 'w')
output = []
cur_sentence = []
for line in tmp_tagged:
word_tag = line[:-1].decode('utf-8').strip().split('\t')
# each string has to be <word>\t<tag>
if len(word_tag) != 2:
continue
if word_tag[0] == 'SentenceEndMarker':
tmp_final.write('%s\n' % ' '.join([tag.encode('utf-8') for tag in cur_sentence]))
output.append(cur_sentence)
cur_sentence = []
else:
cur_sentence.append(word_tag[1])
tmp_tok.close()
tmp_tagged.close()
tmp_final.close()
# delete all temporary files
call(['rm', tmp_tokenized_name, tmp_tagged_name])
return (label, tmp_final_name)
def cur_dir():
import os
print(os.getcwd())
# generate a unique random string
# used for temporary file creation and deletion
def get_random_name(prefix=''):
return 'tmp'+prefix+str(time.time())
# TODO: this is an additional representation - requires the alignment model files to exist
# force alignment with fastalign
# if no alignment model provided, builds the alignment model first
# <align_model> - path to alignment model such that <align_model>.frd_params, .rev_params, .fwd_err, .rev_err exist
# <src_file>, <tg_file> - files to be aligned
# returns: list of lists of possible alignments for every target word:
# [ [ [0], [1,2], [], [3,4], [3,4], [7], [6], [] ... ]
# [ .... ]
# ....
# [ .... ] ]
def force_alignments(src_file, tg_file, trained_model):
alignments = []
aligner = Aligner(trained_model+'.fwd_params',trained_model+'.fwd_err',trained_model+'.rev_params',trained_model+'.rev_err')
src = open(src_file)
tg = open(tg_file)
for src_line, tg_line in zip(src, tg):
align_str = aligner.align( src_line[:-1].decode('utf-8')+u' ||| '+tg_line[:-1].decode('utf-8') )
cur_alignments = [ [] for i in range(len(tg_line.split())) ]
for pair in align_str.split():
pair = pair.split('-')
cur_alignments[int(pair[1])].append( pair[0] )
alignments.append(cur_alignments)
src.close()
tg.close()
aligner.close()
return alignments
# copy of the function from parsers.py, but it writes the tagging to the file
# and returns the filename
def get_alignments(src_file, tg_file, trained_model=None, src_train='', tg_train='', align_model='align_model', label='alignments'):
if trained_model is None:
trained_model = train_alignments(src_train, tg_train, align_model)
if trained_model == '':
sys.stderr.write('No alignment model trained\n')
return []
aligner = Aligner(trained_model+'.fwd_params', trained_model+'.fwd_err', trained_model+'.rev_params', trained_model+'.rev_err')
src = open(src_file)
tg = open(tg_file)
align_file = src_file+'_'+os.path.basename(tg_file)+'.aligned'
aligned = open(align_file, 'w')
for src_line, tg_line in zip(src, tg):
aligned.write(aligner.align(src_line[:-1].decode('utf-8')+u' ||| '+tg_line[:-1].decode('utf-8'))+u'\n')
aligned.close()
aligner.close()
return (label, align_file)
| 4,807 | 36.858268 | 132 | py |
marmot | marmot-master/marmot/parsers/tests/test_whitespace_tokenized_parser.py | import unittest
import os
import codecs
from marmot.parsers.whitespace_tokenized_parser import WhitespaceTokenizedParser
class TestWhitespaceTokenizedParser(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.module_path = module_path
self.test_data = os.path.join(module_path, 'test_data/corpus.en.1000')
def test_parse(self):
keyname = 'source'
data = WhitespaceTokenizedParser().parse(self.test_data, keyname)
with codecs.open(self.test_data) as lines:
line_count = sum(1 for l in lines)
self.assertTrue(keyname in data)
self.assertEqual(len(data[keyname]), line_count)
if __name__ == '__main__':
unittest.main()
| 742 | 26.518519 | 80 | py |
marmot | marmot-master/marmot/parsers/tests/test_parsers.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest, os, tempfile, sys
import glob
from marmot.parsers.parsers import *
from marmot.util.simple_corpus import SimpleCorpus
# TODO: none of these tests adhere to the new parser API, they need to be moved, updated, or deleted
class TestCorpusParser(unittest.TestCase):
def setUp(self):
self.interesting_tokens = set(['the','it'])
module_path = os.path.dirname(__file__)
self.corpus_path = os.path.join(module_path, 'test_data/corpus.en.1000')
self.corpus = SimpleCorpus(self.corpus_path)
def test_parse_corpus_contexts(self):
contexts = parse_corpus_contexts(self.corpus_path, self.interesting_tokens)
for context in contexts:
self.assertTrue(len(set(context['target']).intersection(self.interesting_tokens)) > 0)
all_contexts = parse_corpus_contexts(self.corpus_path)
num_toks = sum([len(sen) for sen in self.corpus.get_texts()])
self.assertTrue(num_toks == len(all_contexts))
class TestImportantTokens(unittest.TestCase):
def setUp(self):
self.interesting_tokens = set(['the','it'])
module_path = os.path.dirname(__file__)
self.corpus_path = os.path.join(module_path, 'test_data/wmt.en.1000')
def test_extract_important_tokens(self):
contexts = [['this', 'is', 'a', 'test'], ['This', 'is', 'another', 'test', '.']]
temp = tempfile.NamedTemporaryFile(delete=False)
for l in contexts:
temp.write((' ').join(l) + '\n')
temp.close()
important_tokens = extract_important_tokens(temp.name, min_count=2)
self.assertTrue('is' in important_tokens)
self.assertFalse('This' in important_tokens)
class TestParseWMT(unittest.TestCase):
def setUp(self):
self.interesting_tokens = set(['the','it'])
module_path = os.path.dirname(__file__)
self.corpus_path = os.path.join(module_path, 'test_data/DE_EN.tgt_ann.test')
self.source_path = os.path.join(module_path, 'test_data/DE_EN.source.test')
def test_parse_wmt14_data_no_source(self):
contexts = parse_wmt14_data(self.corpus_path, '')
for context in contexts:
self.assertTrue(context['token'] == context['target'][context['index']])
def test_parse_wmt14_data(self):
contexts = parse_wmt14_data(self.corpus_path, self.source_path)
for context in contexts:
self.assertTrue(context['token'] == context['target'][context['index']])
            self.assertTrue('source' in context)
class TestSemevalParser(unittest.TestCase):
def setUp(self):
module_path = os.path.dirname(__file__)
self.inputfile = os.path.join(module_path, 'test_data/semeval/STS.input.MSRvid.txt')
self.scoresfile = os.path.join(module_path, 'test_data/semeval/STS.gs.MSRvid.txt')
def test_parse_semeval(self):
contexts = parse_semeval(self.inputfile, self.scoresfile)[:10]
for context in contexts:
self.assertTrue('source' in context and 'target' in context)
self.assertTrue(type(context['source']) == list and type(context['target']) == list)
self.assertTrue(len(context['source']) > 0 and len(context['target']) > 0)
if __name__ == '__main__':
unittest.main()
| 3,323 | 39.048193 | 100 | py |
marmot | marmot-master/marmot/preprocessing/double_test_data.py | import sys, os
from subprocess import check_call
import argparse
from collections import defaultdict
from parse_xml import parse_line
from get_double_corpus import get_double_corpus
#naming
#input WMT - <wmt>
#source ||| target - <wmt>.src_trg
#alignments - <wmt>.gdfa
#token-aligned file - <wmt>.double
#one word per line (test format) - <wmt>.words
#return 3 labels for every word: fine-grained, coarse-grained (fluency/adequacy/good) and binary
#write to file in WMT gold standard format:
# sentence_num<TAB>word_num<TAB>word<TAB>fine_label<TAB>coarse_label<TAB>binary_label
def get_all_labels( sentence_num, trg, corrections ):
label_fine = [u'OK' for w in trg]
label_coarse = [u'OK' for w in trg]
label_bin = [u'OK' for w in trg]
#mapping between coarse and fine-grained labels
#unknown error is aliased as 'Fluency'
coarse_map = defaultdict(lambda: u'Fluency')
for w in ['Terminology','Mistranslation','Omission','Addition','Untranslated','Accuracy']:
coarse_map[w] = u'Accuracy'
for w in ['Style/register','Capitalization','Spelling','Punctuation','Typography','Morphology_(word_form)','Part_of_speech','Agreement','Word_order','Function_words','Tense/aspect/mood','Grammar','Unintelligible','Fluency']:
coarse_map[w] = u'Fluency'
for c in corrections:
for i in range(c.start,c.end):
label_fine[i] = c.type
label_coarse[i] = coarse_map[c.type]
label_bin[i] = u'BAD'
out = []
for i in range(len(trg)):
out.append(u'\t'.join([unicode(sentence_num)+u'.1', unicode(i), trg[i], label_fine[i], label_coarse[i], label_bin[i]]))
return out
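# Example output row (hypothetical words and labels), tab-separated:
#   u'3.1\t0\tthe\tOK\tOK\tOK'
# i.e. sentence 3, word 0 ('the') with fine, coarse and binary labels all OK.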
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('wmt', help="WMT data in XMl format")
parser.add_argument('align', help="alignment model location")
args = parser.parse_args()
file_xml_name = args.wmt
align_model = args.align
file_src_trg_name = file_xml_name+'.src_trg'
file_src_trg_lc_name = file_xml_name+'.src_trg.lc'
file_alignments_name = file_xml_name+'.gdfa'
file_double_name = file_xml_name+'.double'
file_words_name = file_xml_name+'.words'
    cdec_home = os.environ.get('CDEC_HOME', '')
if cdec_home == "":
sys.stderr.write("No CDEC_HOME variable found. Please install cdec and/or set the CDEC_HOME variable to cdec root directory\n")
sys.exit(2)
file_xml = open( file_xml_name )
file_src_trg = open( file_src_trg_name, 'w' )
file_words = open( file_words_name, 'w' )
cur_sentence = 0
sentence_map = {}
sys.stderr.write("Parsing xml\n")
for line in file_xml:
cur_sentence += 1
if cur_sentence%10 == 0:
sys.stderr.write('.')
( sentence_id, src, trg, corrections ) = parse_line( line )
# if xml is not parsed
if not sentence_id:
sys.stderr.write("Sentence %d not parsed\n" % cur_sentence)
sentence_map[cur_sentence] = u'NOT_PARSED'
continue
sentence_map[cur_sentence] = sentence_id
out = get_all_labels( cur_sentence, trg, corrections )
file_words.write("%s\n" % ('\n'.join([a.encode('utf-8') for a in out])))
file_src_trg.write( "%s ||| %s\n" % (src.encode('utf-8'), ' '.join( [ii.encode('utf-8') for ii in trg] )) )
file_xml.close()
file_src_trg.close()
file_words.close()
#lowercase
file_src_trg = open( file_src_trg_name )
file_src_trg_lc = open(file_src_trg_lc_name, 'w')
sys.stderr.write("\nForce aligning\n")
check_call([ cdec_home+'/corpus/lowercase.pl' ], stdin=file_src_trg, stdout=file_src_trg_lc)
file_src_trg_lc.close()
file_src_trg.close()
    #force align
    sys.stderr.write("Force aligning\n")
file_src_trg_lc = open( file_src_trg_lc_name )
file_alignments = open( file_alignments_name, 'w' )
check_call([ cdec_home+'/word-aligner/force_align.py', align_model+'.fwd_params', align_model+'.fwd_err', \
align_model+'.rev_params', align_model+'.rev_err' ], stdin=file_src_trg_lc, stdout=file_alignments)
file_src_trg_lc.close()
file_alignments.close()
get_double_corpus( file_alignments_name, one_file=file_src_trg_lc_name, aligned_file=file_double_name)
| 4,043 | 34.165217 | 226 | py |
marmot | marmot-master/marmot/preprocessing/preprocess_ter.py | from __future__ import print_function
import sys
import re
import numpy as np
def parse_hyp_loc_map(line):
numbers = [int(x) for x in line.split()]
orig2shifted = {i: j for (j, i) in list(enumerate(numbers))}
shifted2orig = dict(enumerate(numbers))
return (orig2shifted, shifted2orig)
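# Example (hypothetical HypLocMap line): for the input "2 0 1",
#   shifted2orig == {0: 2, 1: 0, 2: 1} and orig2shifted == {2: 0, 0: 1, 1: 2},
# i.e. the word originally at position 2 moved to shifted position 0.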
def parse_sentence(line_array):
hyp, ref = [], []
orig2shifted, shifted2orig = {}, {}
align, sentence_id = "", ""
shifts = []
for line in line_array:
line_separator = line.find(':')
line_id = line[:line_separator]
if line_id == "Hypothesis":
hyp = [w for w in line[line_separator+2:-1].split()]
elif line_id == "Reference":
ref = [w for w in line[line_separator+2:-1].split()]
elif line_id == "Sentence ID":
sentence_id = line[line_separator+2:-1]
elif line_id == "Alignment":
align = line[line_separator+3:-2]
elif line_id == "HypLocMap":
(orig2shifted, shifted2orig) = parse_hyp_loc_map(line[line_separator+2:-1])
# shift description
# shift syntax:
# [i, j, m/n] (word(s)) -> (word(s))
# i -- original position of the first word
# j -- original position of the last word
# m -- position of the last word in reference
# n -- position of the last word in shifted hypothesis
elif line.startswith(' ['):
shifts.append(line)
else:
continue
# mapping between original and shifted hypotheses
mapping_hyp_shift = {i: i for i in range(len(hyp))}
mapping_shift_hyp = {i: i for i in range(len(hyp))}
for shift in shifts:
numbers = [int(n) for n in re.compile('\d+').findall(shift)]
if len(numbers) < 4:
print("Bad shift description in the source file", shift)
continue
len_shift = numbers[1] + 1 - numbers[0]
for i in range(len_shift):
shifted_pos = numbers[3] - len_shift + 1 + i
mapping_hyp_shift[numbers[0]+i] = shifted_pos
mapping_shift_hyp[shifted_pos] = numbers[0] + i
# mapping between reference and hypothesis with shifts
mapping_ref_shift = {}
mapping_shift_ref = {}
ref_cnt, shift_cnt = 0, 0
for c in align:
if c == 'D':
mapping_ref_shift[ref_cnt] = None
ref_cnt += 1
elif c == 'I':
mapping_shift_ref[shift_cnt] = None
shift_cnt += 1
else:
mapping_ref_shift[ref_cnt] = shift_cnt
mapping_shift_ref[shift_cnt] = ref_cnt
ref_cnt += 1
shift_cnt += 1
# mappings between hypothesis and reference
mapping_hyp_ref, mapping_ref_hyp = {}, {}
for i, j in mapping_hyp_shift.items():
if j in mapping_shift_ref:
mapping_hyp_ref[i] = mapping_shift_ref[j]
else:
mapping_hyp_ref[i] = None
for i, j in mapping_ref_shift.items():
if j in mapping_shift_hyp:
mapping_ref_hyp[i] = mapping_shift_hyp[j]
else:
mapping_ref_hyp[i] = None
hyp = np.array(hyp, dtype=object)
ref = np.array(ref, dtype=object)
return (sentence_id, hyp, ref, mapping_hyp_ref, mapping_ref_hyp, align)
def get_features(sentence_id, sentence, labels, good_context):
good_label = u'GOOD'
bad_label = u'BAD'
# print "Sentence: ", sentence
# print "Labels: ", labels
assert(len(sentence) == len(labels))
instances = []
for i in range(len(labels)):
prev_word, next_word = "", ""
good_left, good_right = False, False
if i == 0:
prev_word = u"START"
good_left = True
else:
prev_word = sentence[i-1]
if i+1 == len(labels):
next_word = u"END"
good_right = True
else:
next_word = sentence[i+1]
if not good_left:
good_left = (not good_context or labels[i-1] == 'G')
if not good_right:
good_right = (not good_context or labels[i+1] == 'G')
if good_left and good_right:
cur_label = good_label if labels[i] == 'G' else bad_label
instances.append(np.array([sentence_id, i, sentence[i], prev_word, next_word, sentence, cur_label, cur_label]))
return np.array(instances)
# output format: array of training instances
# each instance is an array of:
# sentence id, word id, word_i, word_i-1, word_i+1, sentence, label, label
# label appears twice for compatibility with fine-grained error classification
def parse_ter_file(pra_file_name, good_context=True):
a_file = open(pra_file_name)
sys.stderr.write("Parse file \'%s\'\n" % pra_file_name)
features = []
cur_sentence = []
for line in a_file:
cur_sentence.append(line.decode("utf-8"))
        if line.startswith('Score: '):
            # parse once you hit 'Score: '
            (sent_id, hyp, ref, mapping_hyp_ref, mapping_ref_hyp, align) = parse_sentence(cur_sentence)
            align_no_deletions = align.translate({ord('D'): None})
            if len(hyp) != len(align_no_deletions):
                sys.stderr.write("Hypothesis and alignment map don't match, sentence number %s\n" % sent_id)
                cur_sentence = []
                continue
            align_no_insertions = align.translate({ord('I'): None})
            err_labels = ""
            for i in range(len(hyp)):
                # unaligned hypothesis words are insertions (bad); aligned words
                # are good only when the alignment character is a blank (exact match)
                if mapping_hyp_ref[i] is None:
                    err_labels += 'B'
                elif align_no_insertions[mapping_hyp_ref[i]] == ' ':
                    err_labels += 'G'
                else:
                    err_labels += 'B'
features.extend(get_features(sent_id, hyp, err_labels, good_context))
features.append([])
cur_sentence = []
return np.array(features, dtype=object)
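# Example usage (a minimal sketch; the .pra file is assumed to be TER output
# produced with the '-o pra' option):
#   features = parse_ter_file('system_output.pra', good_context=True)
#   # each non-empty row: [sent_id, word_id, w_i, w_i-1, w_i+1, sentence, label, label]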
def parse_ter_file_basic(pra_file_name):
sentences = []
cur_sentence = []
for line in open(pra_file_name):
cur_sentence.append(line.decode("utf-8"))
# parse once you hit 'Score: '
if line.startswith('Score: '):
(sent_id, hyp, ref, mapping_hyp_ref, mapping_ref_hyp, align) = parse_sentence(cur_sentence)
align_no_insertions = align.translate({ord('I'): None})
align_no_deletions = align.translate({ord('D'): None})
if len(hyp) != len(align_no_deletions) or len(ref) != len(align_no_insertions):
sys.stderr.write("Hypothesis and alignment map don't match, sentence number %s\n" % sent_id)
cur_sentence = []
continue
labels_ref = []
labels_hyp = []
labels_map = {i: i for i in ['I','D','S','H']}
labels_map[' '] = 'OK'
# labels_map = {i: u'BAD' for i in ['I', 'D', 'S', 'H']}
# labels_map[' '] = u'OK'
for i in range(len(ref)):
cur_char = align_no_insertions[i]
if cur_char != 'I':
labels_ref.append(labels_map[cur_char])
# elif cur_char == 'S' or cur_char == 'D':
# labels_ref.append(u'BAD')
for i in range(len(hyp)):
cur_char = ''
# Chris: changed the following line:
#if i not in mapping_hyp_ref:
if i not in mapping_hyp_ref or mapping_hyp_ref[i] is None:
cur_char = 'I'
else:
cur_char = align_no_insertions[mapping_hyp_ref[i]]
if cur_char != 'D':
labels_hyp.append(labels_map[cur_char])
# elif cur_char == 'S' or cur_char == 'I':
# labels_hyp.append(u'BAD')
cur_sentence = []
sentences.append({'hyp': hyp, 'ref': ref, 'labels_hyp': labels_hyp, 'labels_ref': labels_ref})
return sentences
| 7,709 | 36.609756 | 123 | py |
marmot | marmot-master/marmot/preprocessing/get_double_corpus.py | import sys
def get_double_string( words_src, words_trg, align_str, cnt=0 ):
'''
Generation of a line of double tokens.
    <words_src> -- list of source tokens
    <words_trg> -- list of target tokens
<align_str> -- string with alignments in format "i-j" (source-target)
Returns: list of double tokens (target_source in target word order)
'''
# align_pairs = align_str[:-1].decode('utf-8').split()
alignment = {int(i):int(j) for (j,i) in [tuple(pair.split('-')) for pair in align_str.split()]}
default = u'UNALIGNED'
new_string = []
try:
if max(alignment.keys()) >= len(words_trg):
raise IndexError('Too few words in target at line', cnt, max(alignment.keys()), len(words_trg))
if max(alignment.values()) >= len(words_src):
raise IndexError('Too few words in source at line', cnt, max(alignment.keys()), len(words_src))
for i in range(len(words_trg)):
new_token = u''
            if i in alignment:
new_token = words_trg[i]+u'_'+words_src[alignment[i]]
else:
new_token = words_trg[i]+u'_'+default
new_string.append(new_token)
#one of word indices is larger than sentence length
except IndexError as e:
sys.stderr.write("%s %d: %d words, %d required\n" % (e.args[0], e.args[1], e.args[3], e.args[2]+1 ))
finally:
return new_string
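# Example (hypothetical tokens):
#   get_double_string([u'hallo', u'welt'], [u'hello', u'world'], '0-0 1-1')
#   returns [u'hello_hallo', u'world_welt']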
def get_double_corpus( align, two_files=("",""), one_file="", aligned_file=""):
"""
Get corpus that consists of target_source tokens in target word order
<align> -- alignments in i-j format
<two_files>: pair of files (source, target)
<one_file>: single bilingual file where each string is source_sentence ||| target_sentence
the new corpus is saved to file <aligned_file>
"""
if two_files[0] and two_files[1]:
one = False
elif one_file:
one = True
else:
sys.stderr.write("No text file provided\n")
return
if not aligned_file:
aligned_file = align+'.double'
f_align = open(align)
if one:
f_double_src = open(one_file)
else:
f_src = open(two_files[0])
f_trg = open(two_files[1])
f_out = open( aligned_file, 'w' )
cnt = 0
for l_align in f_align:
if one:
line = f_double_src.readline()
if not line: break
if line.find('|||') == -1:
sys.stderr.write("Wrong text file format\n")
break
src = line[:line.find('|||')]
trg = line[line.find('|||')+4:]
else:
src = f_src.readline()
trg = f_trg.readline()
if not src or not trg:
sys.stderr.write("Lengths of text files don't match\n")
break
words_src = src[:-1].decode('utf-8').strip().split()
words_trg = trg[:-1].decode('utf-8').strip().split()
new_string = get_double_string( words_src, words_trg, l_align, cnt )
cnt += 1
f_out.write("%s\n" % (' '.join( [w.encode('utf-8') for w in new_string] )))
f_out.close()
f_align.close()
if one:
f_double_src.close()
else:
f_src.close()
f_trg.close()
| 2,963 | 28.346535 | 104 | py |
marmot | marmot-master/marmot/preprocessing/parse_xml.py | import sys
from xml.dom.minidom import parseString
import numpy as np
from subprocess import Popen, PIPE
import os
class Correction:
def __init__(self, _start, _end, _type, _id):
self.start = _start
self.end = _end
self.type = _type.replace(' ','_')
self.id = _id
def parse_line( line ):
'''parse a sentence with xml markup
line - string from the file, contains the tab-separated sentence id, source sentence and target with error markup
'''
global cdec_home
line = line[:-1].decode('utf-8')
chunks = line.split('\t')
    if len(chunks) != 3:
sys.stderr.write("Wrong format\n")
return("","",[],[])
sentence_id = chunks[0]#.decode("utf-8")
src = chunks[1]#.decode("utf-8")
trg = []
corrections = []
annotation = '<?xml version="1.0" encoding="utf-8"?><mqm:translation xmlns:mqm=\"MQM\">'+chunks[2].encode('utf-8')+'</mqm:translation>'
try:
sentence = parseString( annotation )
# TODO: what is the error here and why does it happen?
except UnicodeEncodeError as e:
sys.stderr.write("Sentence \'%s\' not parsed\n" % sentence_id)
print(e)
print(annotation)
return ("", "", [], [])
except:
print(sys.exc_info()[0])
print(annotation)
return("", "", [], [])
if not "CDEC_HOME" in os.environ:
cdec_home='/home/varvara/software/cdec'
sys.stderr.write("$CDEC_HOME variable not specified, using %s\n" % cdec_home)
else:
cdec_home = os.environ['CDEC_HOME']
#tokenize source sentence
FNULL = open(os.devnull, 'w')
p = Popen([cdec_home+"/corpus/tokenize-anything.sh"], stdout=PIPE, stdin=PIPE, stderr=FNULL)
tok = p.communicate(input=src.encode('utf-8'))[0].strip()
src = tok.decode('utf-8')
FNULL.close()
curr_word = 0
opened_issues = {}
#parse sentence xml
for elem in sentence.documentElement.childNodes:
#element
if elem.nodeType == 1:
try:
el_id = int(elem.attributes["id"].value)
if elem.nodeName == "mqm:startIssue":
opened_issues[el_id] = ( curr_word, elem.attributes["type"].value )
elif elem.nodeName == "mqm:endIssue":
                    if el_id not in opened_issues:
sys.stderr.write( "Inconsistent error %d\n" % el_id )
return ("", "", [], [])
a_corr = Correction( opened_issues[el_id][0], curr_word, opened_issues[el_id][1], el_id )
corrections.append( a_corr )
del opened_issues[el_id]
#some element attributes can be missing
except KeyError as e:
sys.stderr.write("Missing attribute in sentence %s: %s\n" % (sentence_id, e.args[0]))
return("", "", [], [])
except:
                sys.stderr.write(str(sys.exc_info()[0]) + '\n')
return("", "", [], [])
#text
elif elem.nodeType == 3:
FNULL = open(os.devnull, 'w')
p = Popen([cdec_home+"/corpus/tokenize-anything.sh"], stdout=PIPE, stdin=PIPE, stderr=FNULL)
tok = p.communicate(input=elem.nodeValue.encode("utf-8"))[0].strip()
FNULL.close()
words = [w.decode('utf-8') for w in tok.split()]
trg.extend( words )
curr_word += len( words )
if len( opened_issues ):
sys.stderr.write( "Inconsistent error(s): %s\n" % ( ', '.join( [str(x) for x in opened_issues.keys()] ) ) )
return ("", "", [], [])
return ( sentence_id, src, np.array(trg, dtype=object), np.array(corrections,dtype=object) )
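# Expected input format for parse_line (a hypothetical sketch, one line):
#   <sentence_id>\t<source sentence>\t<target sentence with MQM markup>
# where the markup uses paired <mqm:startIssue .../> / <mqm:endIssue .../> tags.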
| 3,404 | 31.740385 | 137 | py |
marmot | marmot-master/marmot/preprocessing/preprocess_wmt.py | # -*- coding: utf-8 -*-
import sys
from xml.dom.minidom import parseString
from string import punctuation
import numpy as np
from subprocess import Popen, PIPE, STDOUT
import os, codecs
from collections import defaultdict
cdec_home = ""
class Correction:
def __init__(self, _start, _end, _type, _id):
self.start = _start
self.end = _end
self.type = _type
self.id = _id
#parse sentence
#line - string from the file, contains sentence id, source sentence and target with error markup
def parse_line( line ):
global cdec_home
    if line[-1] == '\n':
line = line[:-1]
line = line.decode('utf-8')
chunks = line.split('\t')
    if len(chunks) != 3:
sys.stderr.write("Wrong format\n")
return("","",[],[])
sentence_id = chunks[0]#.decode("utf-8")
src = chunks[1]#.decode("utf-8")
trg = []
corrections = []
annotation = '<?xml version="1.0" encoding="utf-8"?><mqm:translation xmlns:mqm=\"MQM\">'+chunks[2].encode('utf-8')+'</mqm:translation>'
try:
sentence = parseString( annotation )
# TODO: what is the error here and why does it happen?
except UnicodeEncodeError as e:
sys.stderr.write("Sentence \'%s\' not parsed\n" % sentence_id)
print(e)
print(annotation)
return ("", "", [], [])
except:
print(sys.exc_info()[0])
print(annotation)
return("", "", [], [])
if not "CDEC_HOME" in os.environ:
cdec_home='/home/varvara/software/cdec'
sys.stderr.write("$CDEC_HOME variable not specified, using %s\n" % cdec_home)
else:
cdec_home = os.environ['CDEC_HOME']
#tokenize source sentence
FNULL = open(os.devnull, 'w')
p = Popen([cdec_home+"/corpus/tokenize-anything.sh"], stdout=PIPE, stdin=PIPE, stderr=FNULL)
tok = p.communicate(input=src.encode('utf-8'))[0].strip()
src = tok.decode('utf-8')
FNULL.close()
curr_word = 0
opened_issues = {}
#parse sentence xml
for elem in sentence.documentElement.childNodes:
#element
if elem.nodeType == 1:
try:
el_id = int(elem.attributes["id"].value)
if elem.nodeName == "mqm:startIssue":
opened_issues[el_id] = ( curr_word, elem.attributes["type"].value )
elif elem.nodeName == "mqm:endIssue":
                    if el_id not in opened_issues:
sys.stderr.write( "Inconsistent error %d\n" % el_id )
return ("", "", [], [])
a_corr = Correction( opened_issues[el_id][0], curr_word, opened_issues[el_id][1], el_id )
corrections.append( a_corr )
del opened_issues[el_id]
#some element attributes can be missing
except KeyError as e:
sys.stderr.write("Missing attribute in sentence %s: %s\n" % (sentence_id, e.args[0]))
return("", "", [], [])
except:
                sys.stderr.write(str(sys.exc_info()[0]) + '\n')
return("", "", [], [])
#text
elif elem.nodeType == 3:
FNULL = open(os.devnull, 'w')
p = Popen([cdec_home+"/corpus/tokenize-anything.sh"], stdout=PIPE, stdin=PIPE, stderr=FNULL)
tok = p.communicate(input=elem.nodeValue.encode("utf-8"))[0].strip()
FNULL.close()
words = [w.decode('utf-8') for w in tok.split()]
trg.extend( words )
curr_word += len( words )
if len( opened_issues ):
sys.stderr.write( "Inconsistent error(s): %s\n" % ( ', '.join( [str(x) for x in opened_issues.keys()] ) ) )
return ("", "", [], [])
return ( sentence_id, src, np.array(trg, dtype=object), np.array(corrections,dtype=object) )
#parse file 'file_name' and write result to 'out_file' (doesn't write if 'out_file'=='')
#if good_context==True extract only words whose contexts (w[i-1], w[i+1]) are labelled 'GOOD'
#
#return an array of errors
#every error is array = [sentence_id, word_index, w[i], w[i-1], w[i+1], sentence, label]
# word_index - integer
# sentence - array of unicode strings
# sentence_id, w[i], w[i-1], w[i+1], label - unicode strings
#
def parse_src( file_name, good_context=True, out_file="" ):
global cdec_home
    cdec_home = os.environ.get('CDEC_HOME', '')
if not cdec_home:
sys.stderr.write('Cdec decoder not installed or CDEC_HOME variable not set\n')
sys.stderr.write("Please set CDEC_HOME variable so that $CDEC_HOME/corpus directory contains \'tokenize-anything.sh\'\n")
return ("","",[],[])
f_src = open(file_name)
sys.stderr.write("Parsing file \'%s\n" % (file_name))
instances = []
for line in f_src:
( sentence_id, src, trg, corrections ) = parse_line( line )
if not sentence_id: continue
instances.extend( get_instances( sentence_id, src, trg, corrections, good_context ) )
f_src.close()
if out_file:
f_out = open( out_file, 'w' )
for ii in instances:
f_out.write( "%s\t%d\t%s\t%s\t%s\t%s\t%s\n" % ( ii[0].encode("utf-8"), ii[1], ii[2].encode("utf-8"), ii[3].encode("utf-8"), ii[4].encode("utf-8"), ' '.join( [s.encode("utf-8") for s in ii[5]] ), ii[6] ))
f_out.close()
return np.array( instances )
#output format: sentence_id, word_index, word_i, word_i-1, word_i+1, sentence, binary_label, error_type
def get_instances(sentence_id, src, trg, corrections, good_context):
good_label = u'GOOD'
bad_label = u'BAD'
instances = []
word_errors = [ [] for i in range(len(trg)) ]
for err in corrections:
for i in range( err.start, err.end ):
word_errors[i].append( ( err.id, err.type ) )
for i in range(len(trg)):
first, last = False, False
if i == 0: first = True
if i + 1 == len(trg): last = True
#check if contexts contain errors
#if check not needed, set good_context=False and good_left and good_right will always be True
good_left = ( first or not len( word_errors[i-1] ) or not good_context )
good_right = ( last or not good_context )
if not good_right: good_right = ( not len(word_errors[i+1]) )
if not good_left or not good_right:
continue
if last: next_word = u'END'
else: next_word = trg[i+1]
if first: prev_word = u'START'
else: prev_word = trg[i-1]
if not len( word_errors[i] ):
instances.append( [sentence_id, i, trg[i], prev_word, next_word, trg, good_label, u'OK'] )
elif len( word_errors[i] ) == 1:
instances.append( [sentence_id, i, trg[i], prev_word, next_word, trg, bad_label, word_errors[i][0][1]] )
return np.array( instances, dtype=object )
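# Example instance (hypothetical values) as produced by get_instances:
#   [u'doc1_de_MT2', 3, u'house', u'the', u'is', <target tokens>, u'GOOD', u'OK']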
# Convert WMT data in xml into 2 formats:
# -- plain text (only automatic translation, no error markup)
# -- word<TAB>label
# New files are saved to the directory of the source file ('file_name') with extensions 'txt' and 'words'
def convert( file_name ):
f_xml = open( file_name )
prefix = file_name[:file_name.rfind('.')]
f_plain = open( prefix+'.txt', 'w' )
f_words = open( prefix+'.words', 'w' )
for line in f_xml:
( sentence_id, src, trg, corrections ) = parse_line( line )
# if xml is not parsed
if not sentence_id:
continue
trg_label = [ u'GOOD' for w in trg ]
for c in corrections:
for i in range( c.start, c.end ):
trg_label[i] = u'BAD'
f_plain.write( "%s\n" % (' '.join( trg )) )
for i in range(len(trg)):
f_words.write( "%s\t%s\n" % ( trg[i].encode('utf-8'), trg_label[i].encode('utf-8') ) )
f_xml.close()
f_plain.close()
f_words.close()
#Convert WMT data to file accepted by fast_align: source ||| target
#only automatic translation, no error markup
def convert_to_double_file( file_name ):
f_xml = open( file_name )
    # guard against file names without an extension (rfind would return -1)
    dot = file_name.rfind('.')
    f_double_name = (file_name[:dot] if dot != -1 else file_name) + '.double'
f_double = open( f_double_name, 'w' )
for line in f_xml:
( sentence_id, src, trg, corrections ) = parse_line( line )
# if xml is not parsed
if not sentence_id:
f_double.write('\n')
continue
f_double.write( "%s ||| %s\n" % (src.encode('utf-8'), ' '.join( [ii.encode('utf-8') for ii in trg] )) )
f_xml.close()
f_double.close()
return f_double_name
#return 3 labels for every word: fine-grained, coarse-grained (fluency/adequacy/good) and binary
#write to file in WMT gold standard format:
# sentence_num<TAB>word_num<TAB>word<TAB>fine_label<TAB>coarse_label<TAB>binary_label
def get_all_labels( sentence_id, trg, corrections ):
label_fine = [u'OK' for w in trg]
label_coarse = [u'OK' for w in trg]
label_bin = [u'OK' for w in trg]
#mapping between coarse and fine-grained labels
#unknown error is aliased as 'Fluency'
coarse_map = defaultdict(lambda: u'Fluency')
for w in ['Terminology','Mistranslation','Omission','Addition','Untranslated','Accuracy']:
coarse_map[w] = u'Accuracy'
for w in ['Style/register','Capitalization','Spelling','Punctuation','Typography','Morphology_(word_form)','Part_of_speech','Agreement','Word_order','Function_words','Tense/aspect/mood','Grammar','Unintelligible','Fluency']:
coarse_map[w] = u'Fluency'
for c in corrections:
for i in range(c.start,c.end):
label_fine[i] = c.type
label_coarse[i] = coarse_map[c.type]
label_bin[i] = u'BAD'
out = []
for i in range(len(trg)):
try:
out.append(u'\t'.join([sentence_id, unicode(i), trg[i], label_fine[i], label_coarse[i], label_bin[i]]))
        except IndexError:
            sys.stderr.write("Label index %d out of range: %d words, %d/%d/%d labels\n" % (i, len(trg), len(label_fine), len(label_coarse), len(label_bin)))
return out
# convert in parallel
if __name__ == '__main__':
import multiprocessing
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', type=str, required=True, help='training data file -- .tsv with training data')
args = parser.parse_args()
file_name = args.input
prefix = file_name[:file_name.rfind('.')]
f_plain = open(prefix+'.txt', 'w')
f_words = open(prefix+'.words', 'w')
f_xml = open(file_name)
# ( sentence_id, src, trg, corrections ) = parse_line( line )
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
result = pool.map_async(parse_line, f_xml)
parsed_lines = result.get()
for l in parsed_lines:
(sentence_id, src, trg, corrections) = l
# if xml is not parsed
if not sentence_id:
continue
trg_label = [u'GOOD' for w in trg]
for c in corrections:
for i in range(c.start, c.end):
trg_label[i] = u'BAD'
f_plain.write("%s\n" % (' '.join(trg)))
for i in range(len(trg)):
f_words.write("%s\t%s\n" % (trg[i].encode('utf-8'), trg_label[i].encode('utf-8')))
f_xml.close()
f_plain.close()
f_words.close()
| 10,588 | 33.947195 | 226 | py |
marmot | marmot-master/marmot/preprocessing/__init__.py | 0 | 0 | 0 | py |
|
marmot | marmot-master/marmot/preprocessing/prepare_dataset.py | import argparse
import sys, codecs, pickle
import numpy as np
import pandas as pd
import preprocess_wmt
import preprocess_ter
import get_features
# prepare a dataset for the Machine Learning component
# sample call: python prepare_dataset.py -i test_data/training -v /home/chris/programs/word2vec/trunk/vectors.bin -o 'test-'
def array_to_df(array):
df = pd.DataFrame(array, index=range(array.shape[0]), columns=range(array.shape[1]))
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', type=str, required=True, help='input file -- sentences tagged with errors')
parser.add_argument('-v','--vector', type=str, required=True, help='vectors generated by word2vec in binary format')
parser.add_argument('-t', '--test', type=str, help='test data (file in the same format as input)')
parser.add_argument('-o', '--output', type=str, default='', help='output file prefix')
parser.add_argument('-p', '--preprocessor', type=str, default='xml', choices=['ter', 'xml'], help='output file: labels for test data')
args = parser.parse_args()
text_processor = None
if args.preprocessor == 'xml':
text_processor = preprocess_wmt.parse_src
elif args.preprocessor == 'ter':
text_processor = preprocess_ter.parse_ter_file
else:
text_processor = preprocess_wmt.parse_src
# TODO: all of the following code could be parallelized
train_features = text_processor(args.input, good_context=True)
train_tokens = [x[2] for x in train_features]
sentence_ids = [x[0] for x in train_features]
(train_vecs, train_labels) = get_features.get_features(args.vector, feature_array=train_features)
# Create dataframes
train_df = array_to_df(train_vecs)
train_df['sentence_id'] = pd.Series(sentence_ids, index=train_df.index)
train_df['token'] = pd.Series(train_tokens, index=train_df.index)
# add labels column to dataframe
train_df['label'] = pd.Series(train_labels, index=train_df.index)
# save dataframes as csv
train_df.to_csv(args.output + 'train.csv', encoding='utf-8')
# pickle train_features for later
    with open('train_features.pickle', 'wb') as out:
pickle.dump(train_features, out)
if args.test:
test_features = text_processor( args.test, good_context=True)
(test_vecs, test_labels) = get_features.get_features(args.vector, feature_array=test_features)
test_tokens = [x[2] for x in test_features]
test_df = array_to_df(test_vecs)
test_df['token'] = pd.Series(test_tokens, index=test_df.index)
test_df['label'] = pd.Series(test_labels, index=test_df.index)
test_df.to_csv(args.output + 'test.csv', encoding='utf-8')
sys.stderr.write("Finished preprocessing test/train data, and extracting vectors")
| 2,823 | 44.548387 | 138 | py |
marmot | marmot-master/marmot/preprocessing/words_from_file.py | # get the utf8 words from a text file
from nltk.tokenize import word_tokenize
import codecs
def get_tokens(filename):
    with codecs.open(filename, encoding='utf8') as input_file:
        all_lines = ' '.join(input_file.read().splitlines())
for word in word_tokenize(all_lines):
yield word
| 305 | 22.538462 | 57 | py |
marmot | marmot-master/marmot/preprocessing/get_suffixes.py | import sys
from collections import defaultdict
from gensim import corpora
# find the longest suffix the word contains
def find_suffix( word, suffix_list, prefix=u'__' ):
#start searching from the longest suffixes (length of word - 2)
for i in range( min(max(suffix_list.keys()),len(word)-2), min(suffix_list.keys())-1, -1 ):
for s in suffix_list[i]:
if word.endswith(s):
return prefix+s
#plural nouns
elif word.endswith(s+u's'):
return prefix+s+u's'
elif word.endswith(s+u'es'):
return prefix+s+u'es'
return word
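# Example (hypothetical suffix list): with suffix_list == {4: set([u'tion'])},
#   find_suffix(u'information', suffix_list) returns u'__tion'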
# suffix list - dictionary: {<suffix_length>:[list of suffixes of this length]}
def form_suffix_list( suffix_file ):
suffix_list = defaultdict(lambda: set())
for line in open( suffix_file ):
suffix = line[:-1].decode('utf-8')
suffix_list[len(suffix)].add( suffix )
return suffix_list
def get_suffixes( txt_file, suffix_file, stdout_file="", threshold=sys.maxint, prefix=u'__' ):
"""
Replace all words in <txt_file> with suffixes where possible.
Set of suffixes must be provided in <suffix_file>
The new corpus is written to <stdout_file> or to standard output if no file provided
<prefix> -- string to replace the non-suffix part of the word (default '__': information -> __tion)
Words are replaced with suffixes only if occurred in corpus less times than <threshold>
Default: no threshold (all words replaced)
"""
out = open( stdout_file, 'w' ) if stdout_file else sys.stdout
sys.stderr.write('Loading corpus\n')
my_corp = corpora.TextCorpus(txt_file)
sys.stderr.write('Building suffix list\n')
suffix_list = form_suffix_list(suffix_file)
sys.stderr.write('Suffix search\n')
#replace only words that occur in corpus less times than threshold
#default - no threshold (all words are replaced with suffix)
dict_copy = dict( [ (token,find_suffix(token, suffix_list, prefix=prefix)) if my_corp.dictionary.dfs[id] < threshold else (token,token) for (id, token) in my_corp.dictionary.items() ] )
    sys.stderr.write('Suffix dictionary built: %d entries\n' % len(dict_copy))
sys.stderr.write('Output\n')
cnt = 0
in_file = open(txt_file)
for line in in_file:
cnt += 1
if cnt%10000 == 0:
sys.stderr.write('.')
words = line[:-1].decode('utf-8').split()
for w in words:
try:
out.write("%s " % dict_copy[w].encode('utf-8'))
except KeyError:
dict_copy[w] = w
out.write("%s " % dict_copy[w].encode('utf-8'))
out.write("\n")
in_file.close()
if stdout_file: out.close()
| 2,509 | 32.918919 | 187 | py |
marmot | marmot-master/marmot/preprocessing/tests/test_preprocess_wmt.py | # -*- coding: utf-8 -*-
import unittest
import sys
import StringIO
import numpy as np
from marmot.preprocessing import preprocess_wmt
class TestPreprocessWMT(unittest.TestCase):
    def tearDown(self):
        # restore the real stderr after tests that redirect it
        sys.stderr = sys.__stderr__
def test_wrong_format(self):
a_stream = StringIO.StringIO()
sys.stderr = a_stream
self.assertTrue(preprocess_wmt.parse_line( "This is not a valid format" ) == ("","",[],[]))
self.assertTrue(a_stream.getvalue() == "Wrong format\n")
a_stream.close()
def test_inconsistent_issues(self):
a_stream = StringIO.StringIO()
sys.stderr = a_stream
self.assertTrue(preprocess_wmt.parse_line( """z/2012/12/01/198819-18_de_MT2\tDas Wort orientiert sich an "Bang Dakuan".\tThe word <mqm:startIssue type="Terminology" severity="critical" note="" agent="annot16" id="4279"/>orientates<mqm:endIssue id="4279"/> itself <mqm:startIssue type="Function words" severity="critical" note="" agent="annot16" id="4280"/>by "Bang Dakuan".""" ) == ("","",[],[]) )
self.assertTrue(preprocess_wmt.parse_line( """derstandart.at/2012/12/01/141907-37_de_MT2\tDas ist billig und zeiteffizient.\tThis is cheap and time-efficient<mqm:endIssue id="4281"/>.""") == ("","",[],[]) )
# self.assertTrue( a_stream.getvalue() == "Inconsistent error(s): 4280\nInconsistent error 4281\n" )
print 'inconsistent issues: ', a_stream.getvalue()
a_stream.close()
def test_invalid_xml(self):
a_stream = StringIO.StringIO()
sys.stderr = a_stream
self.assertTrue(preprocess_wmt.parse_line("""faz/2012/12/01/198819-55_de_MT2\tMan muss höllisch aufpassen\tOne must pay attention <mqm:startIssue type="Mistranslation" severity="critical" note="" agent="annot16" id="4290"/ >like hell<mqm:endIssue id="4290"/>""" ) == ("","",[],[]) )
print 'invalid xml: ', a_stream.getvalue()
a_stream.close()
def test_cyrillic_str(self):
# preprocess.parse_line( open('test_data.txt').readline()[:-1] )
preprocess_wmt.parse_line( """z/2012/12/01/198819-18_de_MT2\tDas Wort orientiert sich an "Bang Dakuan".\tФарш невозможно <mqm:startIssue type="Terminology" severity="critical" note="" agent="annot16" id="4279"/>провернуть<mqm:endIssue id="4279"/> назад <mqm:startIssue type="Function words" severity="critical" note="" agent="annot16" id="4280"/>и<mqm:endIssue id="4280"/> мясо из котлет не востановишь.""" )
def test_tokenizer(self):
(a, b, a_list, aa_list) = preprocess_wmt.parse_line( """z/2012/12/01/198819-18_de_MT2\tDas Wort orientiert sich an "Bang Dakuan".\tThis, sentence, very-very <mqm:startIssue type="Terminology" severity="critical" note="" agent="annot16" id="4279"/>http://website.com (complicated)<mqm:endIssue id="4279"/> 10,000 and 0.1: to tokenize; don't <mqm:startIssue type="Function words" severity="critical" note="" agent="annot16" id="4280"/>and wouldn't e.g.<mqm:endIssue id="4280"/> he'll "many" John's $200 other things.""")
self.assertTrue(np.array_equal( a_list, [u'This', u',', u'sentence', u',', u'very', u'-', u'very', u'http://website.com', u'(', u'complicated', u')', u'10,000', u'and', u'0.1', u':', u'to', u'tokenize', u';', u'do', u"n't", u'and', u'would', u"n't", u'e.g.', u'he', u"'ll", u'"', u'many', u'"', u'John', u"'s", u'$', u'200', u'other', u'things',u'.']))
self.assertTrue(aa_list[0].start == 7 and aa_list[0].end == 11)
self.assertTrue(aa_list[1].start == 20 and aa_list[1].end == 24)
# for err in aa_list:
# sys.stdout.write("Error %s: from %d to %d: \'%s\'\n" % (err.id, err.start, err.end, ' '.join(a_list[err.start:err.end]) ))
if __name__ == "__main__":
unittest.main()
| 3,657 | 72.16 | 526 | py |
marmot | marmot-master/marmot/preprocessing/tests/test_words_from_file.py | import unittest, os
from marmot.preprocessing.words_from_file import get_tokens
class WordsFromFileTests(unittest.TestCase):
def setUp(self):
self.interesting_tokens = set(['the','it'])
module_path = os.path.dirname(__file__)
self.corpus_file = os.path.join(module_path, 'test_data/corpus.en.1000')
def test_words_from_file(self):
token_generator = get_tokens(self.corpus_file)
token_set = set(token_generator)
self.assertTrue(len(token_set) > 0)
for word in token_set:
self.assertTrue(type(word) == unicode)
if __name__ == '__main__':
unittest.main()
| 636 | 30.85 | 80 | py |
marmot | marmot-master/marmot/preprocessing/tests/test_get_double_corpus.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os, re
from subprocess import call
from marmot.preprocessing.get_double_corpus import get_double_string, get_double_corpus
class GetDoubleCorpusTests(unittest.TestCase):
def setUp(self):
self.test_dir = os.path.dirname(os.path.realpath(__file__))
self.align = os.path.join(self.test_dir, 'test_data/alignments/test.de-en.gdfa')
self.one = os.path.join(self.test_dir, 'test_data/alignments/test.de-en')
self.two = (os.path.join(self.test_dir, 'test_data/alignments/test.de'), os.path.join(self.test_dir, 'test_data/alignments/test.en'))
self.test_str = u'three_drei in_zehn ten_zehn south_südafrikanern africans_jünger are_sind younger_15 than_als 15_das ,_, meaning_, that_dass they_sie did_UNALIGNED not_nicht live_tag a_der day_apartheid under_gelebt apartheid_haben ._.'
# remove the tmp_* files created by the tests
def tearDown(self):
for f in os.listdir(self.test_dir):
if re.search("^tmp_*", f):
os.remove(os.path.join(self.test_dir, f))
    def test_get_double_string_wrong(self):
        # a mismatched alignment (index out of range) should only print a
        # warning to stderr, not raise
        get_double_string( ['hallo','welt'], ['hello',',','world'],'0-0 0-1 1-2 2-2' )
def test_get_double_string_right(self):
all_str='Unsere Einblicke ins All : Die wichtigsten Teleskope ||| Our insights in all : The most important telescopes'
src = all_str[:all_str.find('|||')].strip().split()
trg = all_str[all_str.find('|||')+3:].strip().split()
align = '0-0 1-1 2-2 3-3 4-4 5-5 6-6 6-7 7-8'
new_line = get_double_string(src, trg, align)
self.assertEqual(new_line, ['Our_Unsere', 'insights_Einblicke', 'in_ins', 'all_All', ':_:', 'The_Die', 'most_wichtigsten', 'important_wichtigsten', 'telescopes_Teleskope'])
def test_get_double_corpus_one(self):
alignment_file = os.path.join(self.test_dir, 'test_data/alignments/test.de-en.gdfa.double')
if os.path.isfile(alignment_file):
call(['rm', alignment_file])
get_double_corpus(self.align, one_file=self.one)
a_str = open(alignment_file).readline()[:-1].decode('utf-8')
self.assertEqual(a_str, self.test_str)
def test_get_double_corpus_two(self):
alignment_file = os.path.join(self.test_dir, 'test_data/alignments/test.de-en.gdfa.double')
if os.path.isfile(alignment_file):
call(['rm', alignment_file])
get_double_corpus(self.align, two_files=self.two)
a_str = open(alignment_file).readline()[:-1].decode('utf-8')
self.assertEqual(a_str, self.test_str)
if __name__ == '__main__':
unittest.main()
| 2,669 | 46.678571 | 245 | py |
marmot | marmot-master/marmot/representations/word_qe_and_pseudo_ref_representation_generator.py | import codecs
from nltk import wordpunct_tokenize
from marmot.representations.representation_generator import RepresentationGenerator
class WordQEAndPseudoRefRepresentationGenerator(RepresentationGenerator):
'''
Generate the standard word-level format: 3 files, source, target, tags, one line per file, whitespace tokenized
Also add the un-tokenized pseudo-references to the dataset
'''
def __init__(self, source_file, target_file, tags_file, pseudo_ref_file):
self.data = self.parse_files(source_file, target_file, tags_file, pseudo_ref_file)
@staticmethod
def parse_files(source_file, target_file, tags_file, pseudo_ref_file):
with codecs.open(source_file, encoding='utf8') as source:
source_lines = [line.split() for line in source]
with codecs.open(target_file, encoding='utf8') as target:
target_lines = [line.split() for line in target]
with codecs.open(tags_file, encoding='utf8') as tags:
tags_lines = [line.split() for line in tags]
with codecs.open(pseudo_ref_file, encoding='utf8') as pseudo_ref:
pseudo_ref_lines = [wordpunct_tokenize(line.strip()) for line in pseudo_ref]
assert len(source_lines) == len(target_lines) == len(tags_lines) == len(pseudo_ref_lines)
return {'target': target_lines, 'source': source_lines, 'tags': tags_lines, 'pseudo_ref': pseudo_ref_lines}
def generate(self, data_obj=None):
return self.data
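# Example usage (a minimal sketch; the file names are hypothetical):
#   gen = WordQEAndPseudoRefRepresentationGenerator('dev.src', 'dev.tg', 'dev.tags', 'dev.pseudo_ref')
#   data = gen.generate()  # dict with 'source', 'target', 'tags', 'pseudo_ref' token lists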
| 1,491 | 39.324324 | 115 | py |
marmot | marmot-master/marmot/representations/alignment_representation_generator.py | from __future__ import print_function
import os
import time
import numpy as np
from collections import defaultdict
from marmot.util.alignments import train_alignments
from marmot.util.force_align import Aligner
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class AlignmentRepresentationGenerator(RepresentationGenerator):
def __init__(self, lex_file, align_model=None, src_file=None, tg_file=None, tmp_dir=None):
tmp_dir = mk_tmp_dir(tmp_dir)
self.tmp = tmp_dir
if align_model is None:
if src_file is not None and tg_file is not None:
align_model = 'align_model'
self.align_model = train_alignments(src_file, tg_file, tmp_dir, align_model=align_model)
else:
print("Alignment model not defined, no files for training")
return
else:
self.align_model = align_model
self.lex_prob = self.get_align_prob(lex_file)
# src, tg - lists of lists
# each inner list is a sentence
def get_alignments(self, src, tg, out, align_model):
out_file = open(out, 'w')
alignments = [[[] for j in range(len(tg[i]))] for i in range(len(tg))]
aligner = Aligner(align_model+'.fwd_params', align_model+'.fwd_err', align_model+'.rev_params', align_model+'.rev_err')
for idx, (src_list, tg_list) in enumerate(zip(src, tg)):
align_string = aligner.align(' '.join(src_list) + ' ||| ' + ' '.join(tg_list))
out_file.write('%s\n' % align_string)
pairs = align_string.split()
for p_str in pairs:
p = p_str.split('-')
alignments[idx][int(p[1])].append(int(p[0]))
aligner.close()
out_file.close()
return alignments
# parse lex.f2e file
# format of self.lex_prob: dictionary of target words
# every value of the target dictionary is a dictionary of source words
# every value of the source dictionary is a probability p(target|source):
# self.lex_prob['el']['he'] = 0.5
def get_align_prob(self, lex_file):
lex_dict = defaultdict(lambda: defaultdict(float))
for line in open(lex_file):
chunks = line[:-1].decode('utf-8').split()
assert(len(chunks) == 3), "Wrong format of the lex file: \n{}".format(line)
val = float(chunks[2])
lex_dict[chunks[0]][chunks[1]] = val
return lex_dict
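    # Example (hypothetical lex.f2e line): "el he 0.5" yields
    #   lex_dict[u'el'][u'he'] == 0.5, i.e. p(target='el' | source='he')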
def generate(self, data_obj):
if 'target' not in data_obj or 'source' not in data_obj:
print("No target or source")
assert(len(data_obj['target']) == len(data_obj['source']))
output = os.path.join(self.tmp, 'data' + str(time.time()) + '.align')
all_alignments = self.get_alignments(data_obj['source'], data_obj['target'], output, self.align_model)
# print("All alignments: ", all_alignments)
unique_alignments = []
for seq_idx, al_sequence in enumerate(all_alignments):
seq_alignments = []
for w_idx, al_list in enumerate(al_sequence):
if len(al_list) > 1:
# choose the alignment with the highest probability
# print("Multiple alignments: ", al_list)
target_word = data_obj['target'][seq_idx][w_idx]
source_words = [data_obj['source'][seq_idx][i] for i in al_list]
probs = [self.lex_prob[target_word][s] for s in source_words]
# print("Probabilities: ", probs)
seq_alignments.append(al_list[np.argmax(probs)])
elif len(al_list) == 0:
seq_alignments.append(None)
elif len(al_list) == 1:
seq_alignments.append(al_list[0])
else:
print("Golakteko opasnoste!")
unique_alignments.append(seq_alignments)
data_obj['alignments'] = unique_alignments
return data_obj
| 4,073 | 43.282609 | 127 | py |
marmot | marmot-master/marmot/representations/wmt_representation_generator.py | import os
from nltk import word_tokenize
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class WMTRepresentationGenerator(RepresentationGenerator):
def _write_to_file(self, filename, lofl):
a_file = open(filename, 'w')
for sentence in lofl:
a_file.write('%s\n' % (' '.join([w.encode('utf-8') for w in sentence])))
a_file.close()
def _parse_wmt_to_text(self, wmt_file, wmt_source_file, tmp_dir, persist=False):
# parse source files
source_sents = {}
for line in open(wmt_source_file):
str_num = line.decode('utf-8').strip().split('\t')
source_sents[str_num[0]] = word_tokenize(str_num[1])
# parse target file and write new source, target, and tag files
target, source, tags = [], [], []
cur_num = None
cur_sent, cur_tags = [], []
for line in open(wmt_file):
chunks = line[:-1].decode('utf-8').split('\t')
if chunks[0] != cur_num:
if len(cur_sent) > 0:
# check that the sentence is in source
if cur_num in source_sents:
source.append(source_sents[cur_num])
target.append(cur_sent)
tags.append(cur_tags)
cur_sent = []
cur_tags = []
cur_num = chunks[0]
cur_sent.append(chunks[2])
cur_tags.append(chunks[5])
# last sentence
if len(cur_sent) > 0 and cur_num in source_sents:
source.append(source_sents[cur_num])
target.append(cur_sent)
tags.append(cur_tags)
if persist:
tmp_dir = mk_tmp_dir(tmp_dir)
            target_file = os.path.join(tmp_dir, os.path.basename(wmt_file) + '.target')
            tags_file = os.path.join(tmp_dir, os.path.basename(wmt_file) + '.tags')
            source_file = os.path.join(tmp_dir, os.path.basename(wmt_source_file) + '.txt')
self._write_to_file(target_file, target)
self._write_to_file(source_file, source)
self._write_to_file(tags_file, tags)
return {'target': target, 'source': source, 'tags': tags}
def __init__(self, tg_file, src_file, tmp_dir=None, persist=False):
self.data = self._parse_wmt_to_text(tg_file, src_file, tmp_dir, persist=persist)
    def generate(self, data_obj=None):
return self.data
| 2,487 | 37.875 | 88 | py |
marmot | marmot-master/marmot/representations/alignment_double_representation_generator.py | from __future__ import print_function
import numpy as np
from collections import defaultdict
from marmot.util.alignments import train_alignments
from marmot.util.force_align import Aligner
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class AlignmentDoubleRepresentationGenerator(RepresentationGenerator):
'''
Extract two types of alignments:
- all alignments for every word (list of lists for a sentence)
- only alignments with the highest confidence are kept for a word (flat list for a sentence)
The first type is needed for the majority of the features,
but the PhraseAlignmentFeatureExtractor needs all the possible alignments
'''
def __init__(self, lex_file, align_model=None, src_file=None, tg_file=None, tmp_dir=None):
tmp_dir = mk_tmp_dir(tmp_dir)
if align_model is None:
if src_file is not None and tg_file is not None:
                self.align_model = train_alignments(src_file, tg_file, tmp_dir, align_model='align_model')
else:
print("Alignment model not defined, no files for training")
return
else:
self.align_model = align_model
self.lex_prob = self.get_align_prob(lex_file)
# src, tg - lists of lists
# each inner list is a sentence
def get_alignments(self, src, tg, align_model):
alignments = [[[] for j in range(len(tg[i]))] for i in range(len(tg))]
aligner = Aligner(align_model+'.fwd_params', align_model+'.fwd_err', align_model+'.rev_params', align_model+'.rev_err')
for idx, (src_list, tg_list) in enumerate(zip(src, tg)):
align_string = aligner.align(' '.join(src_list) + ' ||| ' + ' '.join(tg_list))
pairs = align_string.split()
for p_str in pairs:
p = p_str.split('-')
alignments[idx][int(p[1])].append(int(p[0]))
aligner.close()
return alignments
# parse lex.f2e file
# format of self.lex_prob: dictionary of target words
# every value of the target dictionary is a dictionary of source words
# every value of the source dictionary is a probability p(target|source):
# self.lex_prob['el']['he'] = 0.5
def get_align_prob(self, lex_file):
lex_dict = defaultdict(lambda: defaultdict(float))
for line in open(lex_file):
chunks = line[:-1].decode('utf-8').split()
assert(len(chunks) == 3), "Wrong format of the lex file: \n{}".format(line)
val = float(chunks[2])
lex_dict[chunks[0]][chunks[1]] = val
return lex_dict
def generate(self, data_obj):
if 'alignments' in data_obj:
print("ALIGNMENTS already exist!")
if 'target' not in data_obj or 'source' not in data_obj:
print("No target or source")
assert(len(data_obj['target']) == len(data_obj['source']))
all_alignments = self.get_alignments(data_obj['source'], data_obj['target'], self.align_model)
# print("All alignments: ", all_alignments)
unique_alignments = []
for seq_idx, al_sequence in enumerate(all_alignments):
seq_alignments = []
for w_idx, al_list in enumerate(al_sequence):
if len(al_list) > 1:
# choose the alignment with the highest probability
# print("Multiple alignments: ", al_list)
target_word = data_obj['target'][seq_idx][w_idx]
source_words = [data_obj['source'][seq_idx][i] for i in al_list]
probs = [self.lex_prob[target_word][s] for s in source_words]
# print("Probabilities: ", probs)
seq_alignments.append(al_list[np.argmax(probs)])
elif len(al_list) == 0:
seq_alignments.append(None)
elif len(al_list) == 1:
seq_alignments.append(al_list[0])
else:
print("Golakteko opasnoste!")
unique_alignments.append(seq_alignments)
if 'alignments' not in data_obj:
data_obj['alignments'] = unique_alignments
data_obj['alignments_all'] = all_alignments
return data_obj
| 4,330 | 44.114583 | 127 | py |
marmot | marmot-master/marmot/representations/word_qe_representation_generator.py | import codecs
from marmot.representations.representation_generator import RepresentationGenerator
class WordQERepresentationGenerator(RepresentationGenerator):
'''
The standard word-level format: 3 files, source, target, tags, one line per file, whitespace tokenized
'''
def __init__(self, source_file, target_file, tags_file, return_files=True):
self.data = self.parse_files(source_file, target_file, tags_file, return_files=return_files)
@staticmethod
def parse_files(source_file, target_file, tags_file, return_files=True):
with codecs.open(source_file, encoding='utf8') as source:
source_lines = [line.split() for line in source]
with codecs.open(target_file, encoding='utf8') as target:
target_lines = [line.split() for line in target]
with codecs.open(tags_file, encoding='utf8') as tags:
tags_lines = [line.split() for line in tags]
return {'target': target_lines, 'source': source_lines, 'tags': tags_lines}
def generate(self, data_obj=None):
return self.data
| 1,090 | 35.366667 | 106 | py |
marmot | marmot-master/marmot/representations/word_qe_additional_representation_generator.py | from __future__ import print_function
import codecs
import sys
from marmot.representations.representation_generator import RepresentationGenerator
class WordQEAdditionalRepresentationGenerator(RepresentationGenerator):
'''
The standard word-level format + additional file(s): filename saved
'''
def __init__(self, source_file, target_file, tags_file, additional_files=None, additional_names=None):
self.data = self.parse_files(source_file, target_file, tags_file, additional_files=additional_files, additional_names=additional_names)
@staticmethod
def parse_files(source_file, target_file, tags_file, additional_files=None, additional_names=None):
with codecs.open(source_file, encoding='utf8') as source:
source_lines = [line.split() for line in source]
with codecs.open(target_file, encoding='utf8') as target:
target_lines = [line.split() for line in target]
with codecs.open(tags_file, encoding='utf8') as tags:
tags_lines = [line.split() for line in tags]
data_obj = {'target': target_lines, 'source': source_lines, 'tags': tags_lines}
if additional_files is not None and additional_names is not None:
for add_file, add_name in zip(additional_files, additional_names):
data_obj[add_name] = add_file
print("Alignments file: ", data_obj['alignments_file'])
return data_obj
def generate(self, data_obj=None):
return self.data
| 1,503 | 38.578947 | 143 | py |
marmot | marmot-master/marmot/representations/segmentation_representation_generator.py | from __future__ import print_function
from subprocess import call
import time
import re
import os
import codecs
from marmot.util.alignments import train_alignments
from marmot.util.force_align import Aligner
from marmot.representations.representation_generator import RepresentationGenerator
from marmot.experiment.import_utils import mk_tmp_dir
class SegmentationRepresentationGenerator(RepresentationGenerator):
def __init__(self, align_model=None, src_file=None, tg_file=None, lex_prefix=None, tmp_dir=None, moses_dir=None, moses_config=None, workers=1):
self.tmp_dir = mk_tmp_dir(tmp_dir)
self.time_stamp = str(time.time())
self.moses_dir = moses_dir
self.moses_config = moses_config
self.workers = workers
self.lex_prob = lex_prefix
if align_model is None:
if src_file is not None and tg_file is not None:
self.align_model = train_alignments(src_file, tg_file, tmp_dir, align_model=align_model)
else:
print("Alignment model not defined, no files for training")
return
else:
self.align_model = align_model
# write a bash file for the phrase extraction
def write_command_file(self, data_obj, alignments_file):
command_name = os.path.join(self.tmp_dir, 'extract_phrases.'+self.time_stamp+'.sh')
command = open(command_name, 'w')
# cd to the dir of the script (it doesn't work from any other places because of gzip)
# TODO: what's the problem with gzip?
command.write("CUR_DIR=$PWD\ncd %s\n" % self.tmp_dir)
# extract phrases
command.write('%s/scripts/generic/extract-parallel.perl 1 split "sort " %s/bin/extract %s %s %s %s/extract.%s 5 orientation --model wbe-msd --GZOutput\n' % (self.moses_dir, self.moses_dir, data_obj['target_file'], data_obj['source_file'], alignments_file, self.tmp_dir, self.time_stamp))
# score phrase table halves
command.write('%s/bin/score extract.%s.sorted.gz %s.f2e %s/phrase-table.%s.half.f2e.gz --GoodTuring 2>> /dev/stderr\n' % (self.moses_dir, self.time_stamp, self.lex_prob, self.tmp_dir, self.time_stamp))
command.write('%s/bin/score extract.%s.inv.sorted.gz %s.e2f %s/phrase-table.%s.half.e2f.gz --Inverse 2>> /dev/stderr\n' % (self.moses_dir, self.time_stamp, self.lex_prob, self.tmp_dir, self.time_stamp))
# sort phrase table halves
command.write('gunzip -c %s/phrase-table.%s.half.f2e.gz | LC_ALL=C sort | gzip -c > %s/phrase-table.%s.half.f2e.sorted.gz\n' % (self.tmp_dir, self.time_stamp, self.tmp_dir, self.time_stamp))
command.write('gunzip -c %s/phrase-table.%s.half.e2f.gz | LC_ALL=C sort | gzip -c > %s/phrase-table.%s.half.e2f.sorted.gz\n' % (self.tmp_dir, self.time_stamp, self.tmp_dir, self.time_stamp))
# consolidate halves
command.write('%s/bin/consolidate %s/phrase-table.%s.half.f2e.sorted.gz %s/phrase-table.%s.half.e2f.sorted.gz /dev/stdout --GoodTuring %s/phrase-table.%s.half.f2e.gz.coc | gzip -c > %s/phrase-table.%s.gz\n' % (self.moses_dir, self.tmp_dir, self.time_stamp, self.tmp_dir, self.time_stamp, self.tmp_dir, self.time_stamp, self.tmp_dir, self.time_stamp))
command.write('mkdir -p %s/binarized\n' % self.tmp_dir)
# binarize the phrase table
command.write('gzip -cd %s/phrase-table.%s.gz | LC_ALL=C sort -T %s/binarized | %s/bin/processPhraseTable -ttable 0 0 - -nscores 4 -out %s/binarized/phrase-table.%s\n' % (self.tmp_dir, self.time_stamp, self.tmp_dir, self.moses_dir, self.tmp_dir, self.time_stamp))
command.write('rm %s/phrase-table.%s.half.*\n' % (self.tmp_dir, self.time_stamp))
# return back to where the script was run from
command.write('cd $CUR_DIR\n')
command.close()
phrase_table = os.path.join(self.tmp_dir, 'binarized/phrase-table.{}'.format(self.time_stamp))
return command_name, phrase_table
# write Moses config for the current run
def write_moses_config(self, phrase_table, target_file):
new_config_name = os.path.join(self.tmp_dir, 'moses.'+self.time_stamp+'.ini')
new_config = open(new_config_name, 'w')
constrained = False
for line in open(self.moses_config):
if line.startswith("PhraseDictionaryBinary"):
good_line = [s for s in line.strip().split() if not s.startswith('path')]
new_config.write("%s path=%s\n" % (' '.join(good_line), phrase_table))
elif line.startswith("ConstrainedDecoding"):
new_config.write("ConstrainedDecoding path=%s max-unknowns=-1\n" % target_file)
elif line.startswith("[weight]") and not constrained:
new_config.write("ConstrainedDecoding path=%s max-unknowns=-1\n\n" % target_file)
new_config.write("[weight]\n")
else:
new_config.write(line)
new_config.close()
return new_config_name
# src, tg - lists of lists
# align_file - new file to store the alignments
# each inner list is a sentence
def get_alignments(self, src, tg, align_model, align_file):
alignments = [[[] for j in range(len(tg[i]))] for i in range(len(tg))]
align_stream = open(align_file, 'w')
aligner = Aligner(align_model+'.fwd_params', align_model+'.fwd_err', align_model+'.rev_params', align_model+'.rev_err')
for idx, (src_list, tg_list) in enumerate(zip(src, tg)):
align_string = aligner.align(' '.join(src_list) + ' ||| ' + ' '.join(tg_list))
align_stream.write('%s\n' % align_string)
pairs = align_string.split()
for p_str in pairs:
p = p_str.split('-')
alignments[idx][int(p[1])].append(int(p[0]))
aligner.close()
align_stream.close()
return alignments
def get_segments(self, data_obj, segmentation_file):
seg_regexp = re.compile("\|\d+-\d+\|")
source_segments = []
target_segments = []
with codecs.open(segmentation_file, encoding='utf-8') as segmentation:
for idx, line in enumerate(segmentation):
# no Moses output for this line - every word is a separate segment
if line == "\n":
source_segments.append([])
target_segments.append([(i, i+1) for i in range(len(data_obj['target'][idx]))])
continue
# get source segments
source_seg_strings = seg_regexp.findall(line)
source_seg_list = []
for a_seg in source_seg_strings:
a_pair = a_seg.strip('|').split('-')
source_seg_list.append((int(a_pair[0]), int(a_pair[1])+1))
# get target segments
target_seg_strings = [ll for ll in [l.strip() for l in seg_regexp.split(line)] if ll != '']
target_seg_list = []
cur_pos = 0
for a_seg in target_seg_strings:
seg_words = a_seg.split()
                    for (s_w, t_w) in zip(seg_words, data_obj['target'][idx][cur_pos:cur_pos + len(seg_words)]):
                        assert(s_w.lower() == t_w.lower()), "Words don't match at line {}: {} and {}".format(idx, s_w.lower(), t_w.lower())
target_seg_list.append((cur_pos, cur_pos+len(seg_words)))
cur_pos += len(seg_words)
# compare source and target segments
# the number of segments for the source and the target should match
assert(len(source_seg_list) == len(target_seg_list)), "The numbers of source and target segments don't match: {} and {}".format(len(source_seg_list), len(target_seg_list))
source_segments.append(source_seg_list)
target_segments.append(target_seg_list)
return target_segments, source_segments
def generate(self, data_obj):
data_time_stamp = str(time.time())
if 'target' not in data_obj or 'source' not in data_obj:
print("No target or source")
assert(len(data_obj['target']) == len(data_obj['source']))
# alignments
alignments_file = os.path.join(self.tmp_dir, 'align.'+self.time_stamp)
all_alignments = self.get_alignments(data_obj['source'], data_obj['target'], self.align_model, alignments_file)
data_obj['alignments'] = all_alignments
# segmentation
# call Moses phrase extractor
command, phrase_table = self.write_command_file(data_obj, alignments_file)
call(['bash', command])
# call Moses MT
moses_config = self.write_moses_config(phrase_table, data_obj['target_file'])
moses_seg_file_name = os.path.join(self.tmp_dir, 'segmentation.'+self.time_stamp+'.'+data_time_stamp)
moses_seg_file = open(moses_seg_file_name, 'w')
src = open(data_obj['source_file'])
call([os.path.join(self.moses_dir, 'bin/moses'), '-f', moses_config, '-v', '0', '-t'], stdin=src, stdout=moses_seg_file)
moses_seg_file.close()
src.close()
data_obj['segmentation'], data_obj['source_segmentation'] = self.get_segments(data_obj, moses_seg_file_name)
return data_obj
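# Example usage (a minimal sketch; all paths and the Moses installation are hypothetical):
#   gen = SegmentationRepresentationGenerator(align_model='align_model', lex_prefix='lex',
#                                             tmp_dir='tmp', moses_dir='/opt/moses',
#                                             moses_config='moses.ini')
#   data_obj = gen.generate(data_obj)
#   # adds 'alignments', 'segmentation' and 'source_segmentation' to data_obj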
| 9,490 | 54.829412 | 358 | py |